[Xen-changelog] [xen-unstable] Merge

# HG changeset patch
# User tdeegan@xxxxxxxxxxxxxxxxxxxxx
# Node ID a926e72e04918baee78bdcff7b6fafaf94323672
# Parent  20bb80e54f21311ed0ef81be7e72cfbc0593a3ea
# Parent  9956c3a3bd8411eeb6c19bb7442d0b33db09d2c9
Merge
---
 xen/arch/x86/mm/shadow/common.c  |   14 +++-----------
 xen/arch/x86/mm/shadow/private.h |    2 +-
 xen/include/asm-x86/mm.h         |    2 +-
 3 files changed, 5 insertions(+), 13 deletions(-)

diff -r 20bb80e54f21 -r a926e72e0491 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Mon Aug 28 22:44:31 2006 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Tue Aug 29 09:42:35 2006 +0100
@@ -397,22 +397,14 @@ shadow_validate_guest_pt_write(struct vc
     ASSERT(shadow_lock_is_acquired(v->domain));
     rc = __shadow_validate_guest_entry(v, gmfn, entry, size);
     if ( rc & SHADOW_SET_FLUSH )
-    {
-        // Flush everyone except the local processor, which will flush when it
-        // re-enters the HVM guest.
-        //
-        cpumask_t mask = d->domain_dirty_cpumask;
-        cpu_clear(v->processor, mask);
-        flush_tlb_mask(mask);
-    }
+        /* Need to flush TLBs to pick up shadow PT changes */
+        flush_tlb_mask(d->domain_dirty_cpumask);
     if ( rc & SHADOW_SET_ERROR ) 
     {
         /* This page is probably not a pagetable any more: tear it out of the 
          * shadows, along with any tables that reference it */
         shadow_remove_all_shadows_and_parents(v, gmfn);
     }
-    /* We ignore the other bits: since we are about to change CR3 on
-     * VMENTER we don't need to do any extra TLB flushes. */ 
 }
 
 
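The hunk above resolves the merge in favour of the simpler flush: instead of copying d->domain_dirty_cpumask, clearing the local processor out of it, and relying on the TLB flush at the next VMENTER (the assumption recorded in the deleted comment), the shadow code now flushes every CPU the domain has dirtied. The sketch below is a toy illustration of the difference, not Xen code: cpumask_t is modelled as a plain bitmask and flush_tlb_mask() is a hypothetical stand-in that only prints the CPUs it would send a flush IPI to.

    #include <stdio.h>

    typedef unsigned long cpumask_t;       /* bit n set => CPU n in mask */

    /* Toy stand-in for the real flush_tlb_mask(): report IPI targets. */
    static void flush_tlb_mask(cpumask_t mask)
    {
        for (int cpu = 0; cpu < (int)(8 * sizeof(mask)); cpu++)
            if (mask & (1UL << cpu))
                printf("  flush TLB on CPU %d\n", cpu);
    }

    int main(void)
    {
        cpumask_t domain_dirty_cpumask = 0x0B; /* CPUs 0, 1 and 3 ran this domain */
        int local_cpu = 1;                     /* plays v->processor */

        /* Old behaviour: drop the local CPU from the mask and rely on
         * the flush that happens when it re-enters the HVM guest. */
        cpumask_t mask = domain_dirty_cpumask;
        mask &= ~(1UL << local_cpu);           /* cpu_clear(v->processor, mask) */
        printf("old path flushes:\n");
        flush_tlb_mask(mask);

        /* New behaviour: flush every dirty CPU, the local one included. */
        printf("new path flushes:\n");
        flush_tlb_mask(domain_dirty_cpumask);
        return 0;
    }

With CPUs 0, 1 and 3 dirty and CPU 1 local, the old path flushes only CPUs 0 and 3; the new path flushes all three.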
@@ -1129,7 +1121,7 @@ sh_gfn_to_mfn_foreign(struct domain *d, 
 
 
 #if CONFIG_PAGING_LEVELS > 2
-    if ( gpfn > (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) ) 
+    if ( gpfn >= (RO_MPT_VIRT_END-RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) ) 
         /* This pfn is higher than the p2m map can hold */
         return _mfn(INVALID_MFN);
 #endif
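This hunk, and the matching ones in private.h and asm-x86/mm.h below, fix the same off-by-one: the read-only MPT region holds (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) entries, so valid pfns run from 0 to one below that count, and a pfn equal to the count is already outside the map. The old '>' test let that boundary value through. A toy illustration with made-up sizes (plain C, not Xen code):

    #include <stdio.h>

    int main(void)
    {
        /* Made-up stand-ins: a 64-byte map of 8-byte entries. */
        unsigned long map_bytes  = 64;  /* plays RO_MPT_VIRT_END - RO_MPT_VIRT_START */
        unsigned long entry_size = 8;   /* plays sizeof(l1_pgentry_t) */
        unsigned long nr_entries = map_bytes / entry_size; /* 8 entries: pfns 0..7 */

        unsigned long pfn = nr_entries; /* 8: the first pfn *outside* the map */

        printf("old check (>)  rejects pfn %lu: %d\n", pfn, pfn > nr_entries);
        printf("new check (>=) rejects pfn %lu: %d\n", pfn, pfn >= nr_entries);
        return 0;
    }

For the 8-entry map, pfn 8 passes the old test (8 > 8 is false) but is rejected by the new one, so the lookup no longer indexes one entry past the end of the p2m table.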
diff -r 20bb80e54f21 -r a926e72e0491 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Mon Aug 28 22:44:31 2006 +0100
+++ b/xen/arch/x86/mm/shadow/private.h  Tue Aug 29 09:42:35 2006 +0100
@@ -555,7 +555,7 @@ vcpu_gfn_to_mfn_nofault(struct vcpu *v, 
         return _mfn(gfn);
 
 #if CONFIG_PAGING_LEVELS > 2
-    if ( gfn > (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) ) 
+    if ( gfn >= (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) ) 
         /* This pfn is higher than the p2m map can hold */
         return _mfn(INVALID_MFN);
 #endif
diff -r 20bb80e54f21 -r a926e72e0491 xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h  Mon Aug 28 22:44:31 2006 +0100
+++ b/xen/include/asm-x86/mm.h  Tue Aug 29 09:42:35 2006 +0100
@@ -368,7 +368,7 @@ static inline unsigned long get_mfn_from
     int ret;
 
 #if CONFIG_PAGING_LEVELS > 2
-    if ( pfn > (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof (l1_pgentry_t) ) 
+    if ( pfn >= (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t) ) 
         /* This pfn is higher than the p2m map can hold */
         return INVALID_MFN;
 #endif

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
