[Xen-changelog] [xen-unstable] Shadow: tidy the virtual-TLB translation cache.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Shadow: tidy the virtual-TLB translation cache.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 19 Dec 2007 12:40:09 -0800
Delivery-date: Wed, 19 Dec 2007 12:40:32 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1198059114 0
# Node ID 9d447ba0c99af9d6ad842b30079163cd05f1939a
# Parent  0335b9fe2f1003c0c10f081748cb567164ea9361
Shadow: tidy the virtual-TLB translation cache.
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/multi.c   |   19 +++++++++++--------
 xen/arch/x86/mm/shadow/private.h |   32 +++++++++++++++++---------------
 xen/arch/x86/mm/shadow/types.h   |   38 --------------------------------------
 3 files changed, 28 insertions(+), 61 deletions(-)
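
As a rough guide to what the tidy-up amounts to, here is a minimal, standalone C
sketch of the new vTLB interface.  It is illustrative only: VTLB_ENTRIES,
INVALID_GFN, PAGE_SHIFT, the PFEC_* values and the single global, lock-free
table are simplified stand-ins, not Xen's real definitions (in Xen the table is
per-vcpu, lives in v->arch.paging.vtlb and is protected by a spinlock).

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define VTLB_ENTRIES      13          /* stand-in size, not Xen's value */
    #define INVALID_GFN       (~0UL)      /* stand-in for Xen's INVALID_GFN */
    #define PFEC_page_present (1U << 0)
    #define PFEC_write_access (1U << 1)

    struct shadow_vtlb {
        unsigned long page_number;    /* guest virtual address >> PAGE_SHIFT  */
        unsigned long frame_number;   /* guest physical address >> PAGE_SHIFT */
        uint32_t pfec;                /* PF error code of the filling lookup;
                                       * zero means the slot is empty */
    };

    /* One global table here; Xen keeps one per vcpu under a spinlock. */
    static struct shadow_vtlb vtlb[VTLB_ENTRIES];

    static int vtlb_hash(unsigned long page_number)
    {
        return page_number % VTLB_ENTRIES;
    }

    /* Put a translation into the vTLB, potentially clobbering an old one. */
    static void vtlb_insert(unsigned long page, unsigned long frame,
                            uint32_t pfec)
    {
        struct shadow_vtlb entry =
            { .page_number = page, .frame_number = frame, .pfec = pfec };
        vtlb[vtlb_hash(page)] = entry;
    }

    /* Look a translation up in the vTLB.  Returns INVALID_GFN if not found. */
    static unsigned long vtlb_lookup(unsigned long va, uint32_t pfec)
    {
        unsigned long page_number = va >> PAGE_SHIFT;
        int i = vtlb_hash(page_number);

        if ( vtlb[i].pfec != 0
             && vtlb[i].page_number == page_number
             /* Any successful walk that had at least these pfec bits is OK */
             && (vtlb[i].pfec & pfec) == pfec )
            return vtlb[i].frame_number;

        return INVALID_GFN;
    }

    int main(void)
    {
        unsigned long va = 0xb7740123UL;

        /* Cache a translation the way a write fault would... */
        vtlb_insert(va >> PAGE_SHIFT, 0x1234UL,
                    PFEC_page_present | PFEC_write_access);

        /* ...then a read-only lookup of the same page still hits... */
        printf("gfn = %#lx\n", vtlb_lookup(va, PFEC_page_present));

        /* ...while a different page misses. */
        printf("gfn = %#lx\n", vtlb_lookup(va + (1UL << PAGE_SHIFT),
                                           PFEC_page_present));
        return 0;
    }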

diff -r 0335b9fe2f10 -r 9d447ba0c99a xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Dec 19 10:10:37 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Dec 19 10:11:54 2007 +0000
@@ -2829,6 +2829,12 @@ static int sh_page_fault(struct vcpu *v,
         goto not_a_shadow_fault;
     }
 
+#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
+    /* Remember this successful VA->GFN translation for later. */
+    vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), 
+                regs->error_code | PFEC_page_present);
+#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
+
     /* Make sure there is enough free shadow memory to build a chain of
      * shadow tables. (We never allocate a top-level shadow on this path,
      * only a 32b l1, pae l1, or 64b l3+2+1. Note that while
@@ -3113,10 +3119,10 @@ sh_gva_to_gfn(struct vcpu *v, unsigned l
     gfn_t gfn;
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
-    struct shadow_vtlb t = {0};
     /* Check the vTLB cache first */
-    if ( vtlb_lookup(v, va, pfec[0], &t) ) 
-        return t.frame_number;
+    unsigned long vtlb_gfn = vtlb_lookup(v, va, pfec[0]);
+    if ( VALID_GFN(vtlb_gfn) ) 
+        return vtlb_gfn;
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
     if ( guest_walk_tables(v, va, &gw, pfec[0], 0) != 0 )
@@ -3128,11 +3134,8 @@ sh_gva_to_gfn(struct vcpu *v, unsigned l
     gfn = guest_walk_to_gfn(&gw);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
-    t.page_number = va >> PAGE_SHIFT;
-    t.frame_number = gfn_x(gfn);
-    t.flags = accumulate_guest_flags(v, &gw); 
-    t.pfec = pfec[0];
-    vtlb_insert(v, t);
+    /* Remember this successful VA->GFN translation for later. */
+    vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), pfec[0]);
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
     return gfn_x(gfn);
diff -r 0335b9fe2f10 -r 9d447ba0c99a xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Wed Dec 19 10:10:37 2007 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Wed Dec 19 10:11:54 2007 +0000
@@ -688,8 +688,7 @@ void shadow_continue_emulation(
  *
  * We keep a cache of virtual-to-physical translations that we have seen 
  * since the last TLB flush.  This is safe to use for frame translations, 
- * but callers that use the rights need to re-check the actual guest tables
- * before triggering a fault.
+ * but callers need to re-check the actual guest tables if the lookup fails.
  * 
  * Lookups and updates are protected by a per-vTLB (and hence per-vcpu)
  * lock.  This lock is held *only* while reading or writing the table,
@@ -702,8 +701,9 @@ struct shadow_vtlb {
 struct shadow_vtlb {
     unsigned long page_number;      /* Guest virtual address >> PAGE_SHIFT  */
     unsigned long frame_number;     /* Guest physical address >> PAGE_SHIFT */
-    uint32_t pfec;  /* Pagefault code for the lookup that filled this entry */
-    uint32_t flags; /* Accumulated guest pte flags, or 0 for an empty slot. */
+    uint32_t pfec;     /* PF error code of the lookup that filled this
+                        * entry.  A pfec of zero means the slot is empty
+                        * (since that would require us to re-try anyway) */
 };
 
 /* Call whenever the guest flushes hit actual TLB */
@@ -720,32 +720,34 @@ static inline int vtlb_hash(unsigned lon
 }
 
 /* Put a translation into the vTLB, potentially clobbering an old one */
-static inline void vtlb_insert(struct vcpu *v, struct shadow_vtlb entry)
-{
+static inline void vtlb_insert(struct vcpu *v, unsigned long page,
+                               unsigned long frame, uint32_t pfec)
+{
+    struct shadow_vtlb entry = 
+        { .page_number = page, .frame_number = frame, .pfec = pfec };
     spin_lock(&v->arch.paging.vtlb_lock);
-    v->arch.paging.vtlb[vtlb_hash(entry.page_number)] = entry;
+    v->arch.paging.vtlb[vtlb_hash(page)] = entry;
     spin_unlock(&v->arch.paging.vtlb_lock);
 }
 
-/* Look a translation up in the vTLB.  Returns 0 if not found. */
-static inline int vtlb_lookup(struct vcpu *v, unsigned long va, uint32_t pfec,
-                              struct shadow_vtlb *result) 
+/* Look a translation up in the vTLB.  Returns INVALID_GFN if not found. */
+static inline unsigned long vtlb_lookup(struct vcpu *v,
+                                        unsigned long va, uint32_t pfec)
 {
     unsigned long page_number = va >> PAGE_SHIFT;
-    int rv = 0;
+    unsigned long frame_number = INVALID_GFN;
     int i = vtlb_hash(page_number);
 
     spin_lock(&v->arch.paging.vtlb_lock);
-    if ( v->arch.paging.vtlb[i].flags != 0 
+    if ( v->arch.paging.vtlb[i].pfec != 0
          && v->arch.paging.vtlb[i].page_number == page_number 
          /* Any successful walk that had at least these pfec bits is OK */
          && (v->arch.paging.vtlb[i].pfec & pfec) == pfec )
     {
-        rv = 1; 
-        result[0] = v->arch.paging.vtlb[i];
+        frame_number = v->arch.paging.vtlb[i].frame_number;
     }
     spin_unlock(&v->arch.paging.vtlb_lock);
-    return rv;
+    return frame_number;
 }
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
 
diff -r 0335b9fe2f10 -r 9d447ba0c99a xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Wed Dec 19 10:10:37 2007 +0000
+++ b/xen/arch/x86/mm/shadow/types.h    Wed Dec 19 10:11:54 2007 +0000
@@ -527,44 +527,6 @@ struct shadow_walk_t
 #endif
 #endif /* GUEST_PAGING_LEVELS >= 3 */
 
-static inline u32
-accumulate_guest_flags(struct vcpu *v, walk_t *gw)
-{
-    u32 accumulated_flags;
-
-    if ( unlikely(!(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT)) )
-        return 0;
-        
-    // We accumulate the permission flags with bitwise ANDing.
-    // This works for the PRESENT bit, RW bit, and USER bit.
-    // For the NX bit, however, the polarity is wrong, so we accumulate the
-    // inverse of the NX bit.
-    //
-    accumulated_flags =  guest_l1e_get_flags(gw->l1e) ^ _PAGE_NX_BIT;
-    accumulated_flags &= guest_l2e_get_flags(gw->l2e) ^ _PAGE_NX_BIT;
-
-    // Note that PAE guests do not have USER or RW or NX bits in their L3s.
-    //
-#if GUEST_PAGING_LEVELS == 3
-    accumulated_flags &=
-        ~_PAGE_PRESENT | (guest_l3e_get_flags(gw->l3e) & _PAGE_PRESENT);
-#elif GUEST_PAGING_LEVELS >= 4
-    accumulated_flags &= guest_l3e_get_flags(gw->l3e) ^ _PAGE_NX_BIT;
-    accumulated_flags &= guest_l4e_get_flags(gw->l4e) ^ _PAGE_NX_BIT;
-#endif
-
-    // Revert the NX bit back to its original polarity
-    accumulated_flags ^= _PAGE_NX_BIT;
-
-    // In 64-bit PV guests, the _PAGE_USER bit is implied in all guest
-    // entries (since even the guest kernel runs in ring 3).
-    //
-    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_vcpu(v) )
-        accumulated_flags |= _PAGE_USER;
-
-    return accumulated_flags;
-}
-
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
 /******************************************************************************
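
The caller side in sh_gva_to_gfn() follows the same shape as the multi.c hunk
above: probe the vTLB, fall back to a full guest walk on a miss, and cache the
successful result.  A hedged sketch of that flow, reusing the toy
vtlb_lookup()/vtlb_insert() from the sketch near the top of this message;
do_guest_walk() is purely illustrative (a stand-in for guest_walk_tables() plus
guest_walk_to_gfn()), and the error path for a failed walk is omitted.

    /* Not a Xen interface: pretend the guest uses an identity mapping. */
    static unsigned long do_guest_walk(unsigned long va, uint32_t pfec)
    {
        (void)pfec;
        return va >> PAGE_SHIFT;
    }

    static unsigned long gva_to_gfn(unsigned long va, uint32_t pfec)
    {
        /* Check the vTLB cache first. */
        unsigned long gfn = vtlb_lookup(va, pfec);
        if ( gfn != INVALID_GFN )
            return gfn;

        /* Slow path: walk the guest pagetables... */
        gfn = do_guest_walk(va, pfec);

        /* ...and remember this successful VA->GFN translation for later. */
        vtlb_insert(va >> PAGE_SHIFT, gfn, pfec);
        return gfn;
    }

Because the lookup only requires (cached_pfec & pfec) == pfec, an entry cached
by a write fault (PFEC_page_present | PFEC_write_access) can also satisfy a
later read-only lookup that asks for PFEC_page_present alone; the types.h hunk
can then drop accumulate_guest_flags(), whose result fed the flags field this
patch removes.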
