# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1244107977 -3600
# Node ID 908662be11ba4e0b92f43097caf7625ef84bf6d9
# Parent e6bffb2fc52314d542721c60c65cbb2a7779b4c9
blktap2: reduce TLB flush scope
c/s 885 added very coarse TLB flushing. Since these flushes always
follow single-page updates, single-page flushes (when available) are
sufficient.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
To linux-2.6-pvops:
* Stripped arch/i386/mm/hypervisor.c exports
* Stripped xen_invlpg_all(kvaddr) use on CONFIG_X86
Signed-off-by: Daniel Stodden <daniel.stodden@xxxxxxxxxx>
diff -r e6bffb2fc523 -r 908662be11ba drivers/xen/blktap/device.c
--- a/drivers/xen/blktap/device.c Thu Jun 04 10:32:34 2009 +0100
+++ b/drivers/xen/blktap/device.c Thu Jun 04 10:32:57 2009 +0100
@@ -197,6 +197,12 @@
PAGE_SIZE, blktap_umap_uaddr_fn, mm);
}
+static inline void
+flush_tlb_kernel_page(unsigned long kvaddr)
+{
+ flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+}
+
static void
blktap_device_end_dequeued_request(struct blktap_device *dev,
struct request *req, int error)
@@ -326,7 +332,7 @@
if (request->handles[i].kernel == INVALID_GRANT_HANDLE) {
kvaddr = request_to_kaddr(request, i);
blktap_umap_uaddr(&init_mm, kvaddr);
- flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+ flush_tlb_kernel_page(kvaddr);
set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
INVALID_P2M_ENTRY);
}
@@ -561,9 +567,9 @@
pte = mk_pte(page, ring->vma->vm_page_prot);
blktap_map_uaddr(ring->vma->vm_mm, uvaddr, pte_mkwrite(pte));
- flush_tlb_mm(ring->vma->vm_mm);
+ flush_tlb_page(ring->vma, uvaddr);
blktap_map_uaddr(&init_mm, kvaddr, mk_pte(page, PAGE_KERNEL));
- flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+ flush_tlb_kernel_page(kvaddr);
set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, pte_mfn(pte));
request->handles[seg].kernel = INVALID_GRANT_HANDLE;
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|