To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86, hvm: Only invalidate qemu mapcache on XENMEM_decrease_reservation.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 26 Jan 2008 03:30:07 -0800
Delivery-date: Sat, 26 Jan 2008 03:30:04 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1201198166 0
# Node ID 7a2824f99a2878bb2e078e92d8a4347db7508c06
# Parent  31adb5c972d03e45cb746cd2305126ea2571282f
x86, hvm: Only invalidate qemu mapcache on XENMEM_decrease_reservation.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           |   41 +++++++++++++++++++++++----------------
 xen/include/asm-x86/hvm/domain.h |    2 +
 2 files changed, 27 insertions(+), 16 deletions(-)

diff -r 31adb5c972d0 -r 7a2824f99a28 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Jan 24 14:41:26 2008 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jan 24 18:09:26 2008 +0000
@@ -1470,6 +1470,14 @@ static long hvm_grant_table_op(
     return do_grant_table_op(cmd, uop, count);
 }
 
+static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+    long rc = do_memory_op(cmd, arg);
+    if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
+        current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1;
+    return rc;
+}
+
 typedef unsigned long hvm_hypercall_t(
     unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
@@ -1479,7 +1487,7 @@ typedef unsigned long hvm_hypercall_t(
 #if defined(__i386__)
 
 static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
-    HYPERCALL(memory_op),
+    [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
     HYPERCALL(xen_version),
     HYPERCALL(event_channel_op),
@@ -1489,7 +1497,7 @@ static hvm_hypercall_t *hvm_hypercall32_
 
 #else /* defined(__x86_64__) */
 
-static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
+static long hvm_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
 {
     extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
     long rc;
@@ -1515,7 +1523,7 @@ static long do_memory_op_compat32(int cm
         h.gpfn = u.gpfn;
 
         this_cpu(guest_handles_in_xen_space) = 1;
-        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
+        rc = hvm_memory_op(cmd, guest_handle_from_ptr(&h, void));
         this_cpu(guest_handles_in_xen_space) = 0;
 
         break;
@@ -1531,7 +1539,7 @@ static long do_memory_op_compat32(int cm
 }
 
 static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
-    HYPERCALL(memory_op),
+    [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
     HYPERCALL(xen_version),
     HYPERCALL(event_channel_op),
@@ -1540,7 +1548,7 @@ static hvm_hypercall_t *hvm_hypercall64_
 };
 
 static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
-    [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)do_memory_op_compat32,
+    [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op_compat32,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
     HYPERCALL(xen_version),
     HYPERCALL(event_channel_op),
@@ -1552,8 +1560,9 @@ static hvm_hypercall_t *hvm_hypercall32_
 
 int hvm_do_hypercall(struct cpu_user_regs *regs)
 {
+    struct vcpu *curr = current;
     struct segment_register sreg;
-    int flush, mode = hvm_guest_x86_mode(current);
+    int mode = hvm_guest_x86_mode(curr);
     uint32_t eax = regs->eax;
 
     switch ( mode )
@@ -1563,7 +1572,7 @@ int hvm_do_hypercall(struct cpu_user_reg
 #endif
     case 4:
     case 2:
-        hvm_get_segment_register(current, x86_seg_ss, &sreg);
+        hvm_get_segment_register(curr, x86_seg_ss, &sreg);
         if ( unlikely(sreg.attr.fields.dpl == 3) )
         {
     default:
@@ -1580,13 +1589,6 @@ int hvm_do_hypercall(struct cpu_user_reg
         return HVM_HCALL_completed;
     }
 
-    /*
-     * NB. In future flush only on decrease_reservation.
-     * For now we also need to flush when pages are added, as qemu-dm is not
-     * yet capable of faulting pages into an existing valid mapcache bucket.
-     */
-    flush = ((eax == __HYPERVISOR_memory_op) ||
-             (eax == __HYPERVISOR_grant_table_op)); /* needed ? */
     this_cpu(hc_preempted) = 0;
 
 #ifdef __x86_64__
@@ -1619,8 +1621,15 @@ int hvm_do_hypercall(struct cpu_user_reg
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u -> %lx",
                 eax, (unsigned long)regs->eax);
 
-    return (this_cpu(hc_preempted) ? HVM_HCALL_preempted :
-            flush ? HVM_HCALL_invalidate : HVM_HCALL_completed);
+    if ( this_cpu(hc_preempted) )
+        return HVM_HCALL_preempted;
+
+    if ( unlikely(curr->domain->arch.hvm_domain.qemu_mapcache_invalidate) &&
+         test_and_clear_bool(curr->domain->arch.hvm_domain.
+                             qemu_mapcache_invalidate) )
+        return HVM_HCALL_invalidate;
+
+    return HVM_HCALL_completed;
 }
 
 static void hvm_latch_shinfo_size(struct domain *d)
diff -r 31adb5c972d0 -r 7a2824f99a28 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Thu Jan 24 14:41:26 2008 +0000
+++ b/xen/include/asm-x86/hvm/domain.h  Thu Jan 24 18:09:26 2008 +0000
@@ -77,6 +77,8 @@ struct hvm_domain {
 #if CONFIG_PAGING_LEVELS == 3
     bool_t                 amd_npt_4gb_warning;
 #endif
+
+    bool_t                 qemu_mapcache_invalidate;
 };
 
 #endif /* __ASM_X86_HVM_DOMAIN_H__ */
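
The patch above replaces the old blanket heuristic, which flushed qemu-dm's
mapcache after every __HYPERVISOR_memory_op and __HYPERVISOR_grant_table_op,
with a per-domain qemu_mapcache_invalidate flag: hvm_memory_op() sets it only
when the subcommand is XENMEM_decrease_reservation (i.e. the guest actually
gave pages back), and hvm_do_hypercall() consumes it with a test-and-clear
before returning HVM_HCALL_invalidate. The standalone C sketch below mirrors
that pattern outside of Xen, for illustration only; every name in it
(toy_domain, toy_memory_op, TOY_MEMOP_CMD_MASK and friends) is a hypothetical
stand-in, not a Xen API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical hypercall numbers and results, stand-ins for Xen's. */
enum { TOY_HYPERVISOR_memory_op = 12, TOY_NR_hypercalls = 64 };
enum toy_result { TOY_HCALL_completed, TOY_HCALL_invalidate };

/* Stand-in memory-op subcommands. The low bits of cmd select the op,
 * mirroring the patch's (cmd & MEMOP_CMD_MASK) test; in Xen the upper
 * bits carry continuation state, which this sketch ignores. */
enum { TOY_XENMEM_increase_reservation = 0,
       TOY_XENMEM_decrease_reservation = 1 };
#define TOY_MEMOP_CMD_MASK 0x3f

static struct toy_domain {
    bool qemu_mapcache_invalidate;   /* mirrors the new hvm_domain field */
} dom;

/* Wrapper in the spirit of hvm_memory_op(): do the real work, then flag
 * the domain only when the guest handed memory back. */
static long toy_memory_op(unsigned long cmd, unsigned long arg)
{
    long rc = 0;                     /* pretend do_memory_op() succeeded */
    (void)arg;
    if ( (cmd & TOY_MEMOP_CMD_MASK) == TOY_XENMEM_decrease_reservation )
        dom.qemu_mapcache_invalidate = true;
    return rc;
}

/* Dispatch table built with designated initializers, as the patch does
 * for hvm_hypercall64_table: unlisted entries stay NULL. */
typedef long toy_hypercall_t(unsigned long, unsigned long);
static toy_hypercall_t *toy_table[TOY_NR_hypercalls] = {
    [TOY_HYPERVISOR_memory_op] = toy_memory_op,
};

/* Exit path in the spirit of hvm_do_hypercall(): consume the flag once. */
static enum toy_result toy_do_hypercall(unsigned long nr, unsigned long cmd)
{
    if ( (nr >= TOY_NR_hypercalls) || (toy_table[nr] == NULL) )
        return TOY_HCALL_completed;  /* real code would fail with -ENOSYS */

    toy_table[nr](cmd, 0);

    if ( dom.qemu_mapcache_invalidate )
    {
        dom.qemu_mapcache_invalidate = false;   /* test-and-clear */
        return TOY_HCALL_invalidate;            /* caller notifies qemu-dm */
    }
    return TOY_HCALL_completed;
}

int main(void)
{
    /* Growing the reservation no longer requests an invalidation... */
    printf("%d\n", toy_do_hypercall(TOY_HYPERVISOR_memory_op,
                                    TOY_XENMEM_increase_reservation)); /* 0 */
    /* ...shrinking it does, exactly once. */
    printf("%d\n", toy_do_hypercall(TOY_HYPERVISOR_memory_op,
                                    TOY_XENMEM_decrease_reservation)); /* 1 */
    printf("%d\n", toy_do_hypercall(TOY_HYPERVISOR_memory_op,
                                    TOY_XENMEM_increase_reservation)); /* 0 */
    return 0;
}

Two details the sketch glosses over: the real exit path wraps the flag test
in unlikely() so the common case costs only a plain read, and it uses
test_and_clear_bool() so that concurrent vcpus of the same domain cannot
both consume a single invalidation event.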

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
