blktap2: make blktap2 work in auto-translated mode with HVM domains.
This patch makes blktap2 work for HVM domains in auto-translated mode
(i.e. the IA64 HVM domain case, for which Kuwamura reported a bug).
blktap2 introduced a new feature that lets pages from the local (self)
domain be handled. However, it does not work in auto-translated mode,
because blktap2 relies on p2m table manipulation, and the p2m table
is not meaningful in auto-translated mode.
So self-grant mapping is used instead, as sketched below.
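In outline, the self-grant pattern is (a minimal sketch reusing the
grant-table calls from the patch below; error handling and the blktap2
bookkeeping are omitted, and domid 0 stands in for the local domain
because, as the patch notes, the grant-table hypercall does not accept
DOMID_SELF):

    /* 1. grant the local domain access to the page backing the request */
    domid_t domid = 0;
    int gref = gnttab_grant_foreign_access(domid, page_to_pfn(page), 0);

    /* 2. map it at the kernel address blktap2 already uses for the segment */
    struct gnttab_map_grant_ref map;
    gnttab_set_map_op(&map, kvaddr, GNTMAP_host_map, gref, domid);
    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &map, 1));

    /* ... hand the mapped page to the tapdisk daemon and do the I/O ... */

    /* 3. on I/O completion, tear the self-grant down again */
    gnttab_end_foreign_access_ref(gref);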
Simply passing the same page to the blktap2 daemon does not work:
while I/O is in flight the page is locked, so the page handed over by
the blktap2 block device is already locked. When the blktap2 daemon
then issues I/O on that page, it tries to take the lock again,
resulting in deadlock (outlined below). Hence the resort to self-grants.
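The deadlock, in outline (an illustrative sketch around lock_page(),
not actual blktap2 code):

    lock_page(page);    /* taken by the block layer when the original
                           bio is submitted to the blktap2 device */
    /* the request is forwarded to the tapdisk daemon, which then
       submits its own I/O against the very same page ... */
    lock_page(page);    /* ... and blocks forever: the lock is held */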
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
diff --git a/drivers/xen/blktap2/device.c b/drivers/xen/blktap2/device.c
--- a/drivers/xen/blktap2/device.c
+++ b/drivers/xen/blktap2/device.c
@@ -225,6 +225,8 @@ blktap_device_fast_flush(struct blktap *
struct grant_handle_pair *khandle;
unsigned long kvaddr, uvaddr, offset;
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST * 2];
+ grant_handle_t self_gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ int self_gref_nr = 0;
cnt = 0;
ring = &tap->ring;
@@ -283,6 +285,10 @@ blktap_device_fast_flush(struct blktap *
if (PageBlkback(page)) {
ClearPageBlkback(page);
set_page_private(page, 0);
+ } else if (
+ xen_feature(XENFEAT_auto_translated_physmap)) {
+ self_gref[self_gref_nr] = khandle->kernel;
+ self_gref_nr++;
}
}
map[offset] = NULL;
@@ -301,6 +307,11 @@ blktap_device_fast_flush(struct blktap *
zap_page_range(ring->vma,
MMAP_VADDR(ring->user_vstart, usr_idx, 0),
request->nr_pages << PAGE_SHIFT, NULL);
+ else {
+ for (i = 0; i < self_gref_nr; i++) {
+ gnttab_end_foreign_access_ref(self_gref[i]);
+ }
+ }
}
/*
@@ -323,7 +334,8 @@ blktap_unmap(struct blktap *tap, struct
MMAP_VADDR(tap->ring.user_vstart, usr_idx, i),
request->handles[i].user);
- if (request->handles[i].kernel == INVALID_GRANT_HANDLE) {
+ if (!xen_feature(XENFEAT_auto_translated_physmap) &&
+ request->handles[i].kernel == INVALID_GRANT_HANDLE) {
kvaddr = request_to_kaddr(request, i);
blktap_umap_uaddr(&init_mm, kvaddr);
flush_tlb_kernel_page(kvaddr);
@@ -540,7 +552,7 @@ blktap_map_foreign(struct blktap *tap,
return err;
}
-static void
+static int
blktap_map(struct blktap *tap,
struct blktap_request *request,
unsigned int seg, struct page *page)
@@ -549,25 +561,68 @@ blktap_map(struct blktap *tap,
int usr_idx;
struct blktap_ring *ring;
unsigned long uvaddr, kvaddr;
+ int err = 0;
ring = &tap->ring;
usr_idx = request->usr_idx;
uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, seg);
kvaddr = request_to_kaddr(request, seg);
- pte = mk_pte(page, ring->vma->vm_page_prot);
- blktap_map_uaddr(ring->vma->vm_mm, uvaddr, pte_mkwrite(pte));
- flush_tlb_page(ring->vma, uvaddr);
- blktap_map_uaddr(&init_mm, kvaddr, mk_pte(page, PAGE_KERNEL));
- flush_tlb_kernel_page(kvaddr);
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ pte = mk_pte(page, ring->vma->vm_page_prot);
+ blktap_map_uaddr(ring->vma->vm_mm, uvaddr, pte_mkwrite(pte));
+ flush_tlb_page(ring->vma, uvaddr);
+ blktap_map_uaddr(&init_mm, kvaddr, mk_pte(page, PAGE_KERNEL));
+ flush_tlb_kernel_page(kvaddr);
- set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, pte_mfn(pte));
- request->handles[seg].kernel = INVALID_GRANT_HANDLE;
- request->handles[seg].user = INVALID_GRANT_HANDLE;
+ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, pte_mfn(pte));
+ request->handles[seg].kernel = INVALID_GRANT_HANDLE;
+ } else {
+ /* grant this page access to self domain and map it. */
+ domid_t domid = 0; /* XXX my domain id: grant table hypercall
+ doesn't understand DOMID_SELF */
+ int gref;
+ uint32_t flags;
+ struct gnttab_map_grant_ref map;
+ struct page *tap_page;
+
+ gref = gnttab_grant_foreign_access(
+ domid, page_to_pfn(page),
+ (request->operation == BLKIF_OP_WRITE) ?
+ GTF_readonly : 0);
+
+ flags = GNTMAP_host_map |
+ (request->operation == BLKIF_OP_WRITE ?
+ GNTMAP_readonly : 0);
+
+ gnttab_set_map_op(&map, kvaddr, flags, gref, domid);
+
+ /* enable chained tap devices */
+ tap_page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ set_page_private(tap_page, page_private(page));
+ SetPageBlkback(tap_page);
+
+ err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
+ &map, 1);
+ BUG_ON(err);
+
+ err = vm_insert_page(ring->vma, uvaddr, tap_page);
+ if (err) {
+ struct gnttab_unmap_grant_ref unmap;
+ gnttab_set_unmap_op(&unmap, kvaddr,
+ GNTMAP_host_map, gref);
+ (void)HYPERVISOR_grant_table_op(
+ GNTTABOP_unmap_grant_ref, &unmap, 1);
+ } else
+ request->handles[seg].kernel = gref;
+ }
+ request->handles[seg].user = INVALID_GRANT_HANDLE;
BTDBG("pending_req: %p, seg: %d, page: %p, kvaddr: 0x%08lx, "
"uvaddr: 0x%08lx\n", request, seg, page, kvaddr,
uvaddr);
+
+ return err;
}
static int
@@ -630,10 +685,11 @@ blktap_device_process_request(struct blk
goto out;
} else {
/* do it the old fashioned way */
- blktap_map(tap,
- request,
- i,
- sg->page);
+ if (blktap_map(tap,
+ request,
+ i,
+ sg->page))
+ goto out;
}
uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
--
yamahata