[Xen-changelog] Attached patch avoids "Bad L1 flags 80" for VMX domains.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Attached patch avoids "Bad L1 flags 80" for VMX domains. Thanks Ian for the suggestions.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 24 Oct 2005 08:06:10 +0000
Delivery-date: Mon, 24 Oct 2005 08:06:12 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 1c62a4149b11fd3af95eaf97bc35fc26292698ae
# Parent  a90d670c98b9df3bd32a107594882ed33c598917
Attached patch avoids "Bad L1 flags 80" for VMX domains. Thanks Ian for
the suggestions.

Signed-off-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
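
The "Bad L1 flags 80" message itself comes from the flag check in
get_page_from_l1e() (xen/arch/x86/mm.c), which rejects any L1 entry whose
flags intersect L1_DISALLOW_MASK and logs the offending bits; 0x80 is bit 7,
the PAT/PSE position, which a VMX guest can legitimately set in a PTE. The
patch screens such entries out in the shadow code instead, so the log line is
never reached and the offending L1 is simply unshadowed. The following is a
minimal, self-contained C sketch of that before/after control flow -- the
mask value and helper bodies are illustrative stand-ins, not the real Xen
definitions:

    #include <stdio.h>

    /* Illustrative page-flag values; the real definitions live in Xen's
     * asm-x86 headers. Bit 7 (0x80) is the PAT/PSE position -- the "80"
     * reported in "Bad L1 flags 80". */
    #define _PAGE_PRESENT    0x001UL
    #define _PAGE_BIT7       0x080UL
    #define L1_DISALLOW_MASK _PAGE_BIT7   /* assumed mask for this sketch */

    /* Models the check in get_page_from_l1e() that produced the log spam. */
    static int get_page_from_l1e(unsigned long l1e)
    {
        if (l1e & L1_DISALLOW_MASK) {
            printf("Bad L1 flags %lx\n", l1e & L1_DISALLOW_MASK);
            return 0;   /* no reference taken */
        }
        return 1;
    }

    /* With the patch, shadow_get_page_from_l1e() screens the entry first
     * and declines quietly, so the logging path is never reached. */
    static int shadow_get_page_from_l1e(unsigned long l1e)
    {
        if (l1e & L1_DISALLOW_MASK)
            return 0;
        return get_page_from_l1e(l1e);
    }

    int main(void)
    {
        unsigned long vmx_pte = _PAGE_PRESENT | _PAGE_BIT7; /* bit 7 set */
        printf("old path -> %d\n", get_page_from_l1e(vmx_pte));        /* logs */
        printf("new path -> %d\n", shadow_get_page_from_l1e(vmx_pte)); /* quiet */
        return 0;
    }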

diff -r a90d670c98b9 -r 1c62a4149b11 xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     Sun Oct 23 21:45:15 2005
+++ b/xen/arch/x86/shadow.c     Mon Oct 24 07:04:38 2005
@@ -871,6 +871,7 @@
 
     perfc_incrc(shadow_mark_mfn_out_of_sync_calls);
 
+    entry->v = v;
     entry->gpfn = gpfn;
     entry->gmfn = mfn;
     entry->snapshot_mfn = shadow_make_snapshot(d, gpfn, mfn);
@@ -937,6 +938,7 @@
     entry->writable_pl1e =
         l2e_get_paddr(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
     ASSERT( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) );
+    entry->va = va;
 
     // Increment shadow's page count to represent the reference
     // inherent in entry->writable_pl1e
@@ -1340,6 +1342,7 @@
             guest_l1_pgentry_t *guest1 = guest;
             l1_pgentry_t *shadow1 = shadow;
             guest_l1_pgentry_t *snapshot1 = snapshot;
+            int unshadow_l1 = 0;
 
             ASSERT(VM_ASSIST(d, VMASST_TYPE_writable_pagetables) ||
                    shadow_mode_write_all(d));
@@ -1358,8 +1361,15 @@
                 if ( (i < min_snapshot) || (i > max_snapshot) ||
                      guest_l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
                 {
-                    need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
-                    set_guest_back_ptr(d, shadow1[i], smfn, i);
+                    int error; 
+
+                    error = validate_pte_change(d, guest1[i], &shadow1[i]);
+                    if ( error ==  -1 ) 
+                        unshadow_l1 = 1;
+                    else {
+                        need_flush |= error;
+                        set_guest_back_ptr(d, shadow1[i], smfn, i);
+                    }
                     // can't update snapshots of linear page tables -- they
                     // are used multiple times...
                     //
@@ -1371,6 +1381,19 @@
             perfc_incrc(resync_l1);
             perfc_incr_histo(wpt_updates, changed, PT_UPDATES);
             perfc_incr_histo(l1_entries_checked, max_shadow - min_shadow + 1, PT_UPDATES);
+            if (unshadow_l1) {
+                pgentry_64_t l2e;
+
+                __shadow_get_l2e(entry->v, entry->va, &l2e);
+                if (entry_get_flags(l2e) & _PAGE_PRESENT) {
+                    entry_remove_flags(l2e, _PAGE_PRESENT);
+                    __shadow_set_l2e(entry->v, entry->va, &l2e);
+
+                    if (entry->v == current)
+                        need_flush = 1;
+                }
+            }
+
             break;
         }
 #if defined (__i386__)
diff -r a90d670c98b9 -r 1c62a4149b11 xen/arch/x86/shadow32.c
--- a/xen/arch/x86/shadow32.c   Sun Oct 23 21:45:15 2005
+++ b/xen/arch/x86/shadow32.c   Mon Oct 24 07:04:38 2005
@@ -1829,6 +1829,7 @@
 
     perfc_incrc(shadow_mark_mfn_out_of_sync_calls);
 
+    entry->v = v;
     entry->gpfn = gpfn;
     entry->gmfn = mfn;
     entry->snapshot_mfn = shadow_make_snapshot(d, gpfn, mfn);
@@ -1875,6 +1876,7 @@
     entry->writable_pl1e =
         l2e_get_paddr(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
     ASSERT( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) );
+    entry->va = va;
 
     // Increment shadow's page count to represent the reference
     // inherent in entry->writable_pl1e
@@ -2320,6 +2322,7 @@
             l1_pgentry_t *guest1 = guest;
             l1_pgentry_t *shadow1 = shadow;
             l1_pgentry_t *snapshot1 = snapshot;
+            int unshadow_l1 = 0;
 
             ASSERT(VM_ASSIST(d, VMASST_TYPE_writable_pagetables) ||
                    shadow_mode_write_all(d));
@@ -2346,8 +2349,15 @@
                 if ( (i < min_snapshot) || (i > max_snapshot) ||
                      l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
                 {
-                    need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
-                    set_guest_back_ptr(d, shadow1[i], smfn, i);
+                    int error;
+
+                    error = validate_pte_change(d, guest1[i], &shadow1[i]);
+                    if ( error ==  -1 ) 
+                        unshadow_l1 = 1;
+                    else {
+                        need_flush |= error;
+                        set_guest_back_ptr(d, shadow1[i], smfn, i);
+                    }
 
                     // can't update snapshots of linear page tables -- they
                     // are used multiple times...
@@ -2359,6 +2369,19 @@
             perfc_incrc(resync_l1);
             perfc_incr_histo(wpt_updates, changed, PT_UPDATES);
             perfc_incr_histo(l1_entries_checked, max_shadow - min_shadow + 1, PT_UPDATES);
+            if (unshadow_l1) {
+                l2_pgentry_t l2e;
+
+                __shadow_get_l2e(entry->v, entry->va, &l2e);
+                if (l2e_get_flags(l2e) & _PAGE_PRESENT) {
+                    l2e_remove_flags(l2e, _PAGE_PRESENT);
+                    __shadow_set_l2e(entry->v, entry->va, l2e);
+
+                    if (entry->v == current)
+                        need_flush = 1;
+                }
+            }
+
             break;
         }
         case PGT_l2_shadow:
diff -r a90d670c98b9 -r 1c62a4149b11 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Sun Oct 23 21:45:15 2005
+++ b/xen/include/asm-x86/shadow.h      Mon Oct 24 07:04:38 2005
@@ -302,10 +302,12 @@
 
 struct out_of_sync_entry {
     struct out_of_sync_entry *next;
+    struct vcpu   *v;
     unsigned long gpfn;    /* why is this here? */
     unsigned long gmfn;
     unsigned long snapshot_mfn;
     unsigned long writable_pl1e; /* NB: this is a machine address */
+    unsigned long va;
 };
 
 #define out_of_sync_extra_size 127
@@ -384,6 +386,10 @@
 
     nl1e = l1e;
     l1e_remove_flags(nl1e, _PAGE_GLOBAL);
+
+    if ( unlikely(l1e_get_flags(l1e) & L1_DISALLOW_MASK) )
+        return 0;
+
     res = get_page_from_l1e(nl1e, d);
 
     if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
@@ -959,13 +965,15 @@
             //
             perfc_incrc(validate_pte_changes3);
 
-            if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
-                 !shadow_get_page_from_l1e(new_spte, d) )
-                new_spte = l1e_empty();
             if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
             {
                 shadow_put_page_from_l1e(old_spte, d);
                 need_flush = 1;
+            }
+            if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
+                 !shadow_get_page_from_l1e(new_spte, d) ) {
+                new_spte = l1e_empty();
+                need_flush = -1; /* need to unshadow the page */
             }
         }
         else

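The shadow.h hunk above changes validate_pte_change()'s contract: alongside
the usual need_flush boolean it can now return -1, raised when the new PTE
carries disallowed flags and the shadow reference cannot be taken. The resync
loops in shadow.c and shadow32.c then drop the whole shadow L1 via the vcpu
and virtual address newly recorded in out_of_sync_entry, instead of hitting
the same error on every resync. Note also that the release of old_spte is
moved ahead of the attempt to take the new reference, so the -1 path returns
with the stale reference already dropped. Below is a compact, self-contained
C model of the caller-side contract; the flag values and sample PTEs are
hypothetical:

    #include <stdio.h>

    /* Hypothetical standalone model of the new validate_pte_change()
     * contract: -1 means "unshadow this L1", otherwise the return value
     * is the usual need_flush boolean. */
    static int validate_pte_change(unsigned long guest_pte)
    {
        if (guest_pte & 0x80UL)          /* disallowed flag, e.g. bit 7 */
            return -1;                   /* caller must unshadow */
        return (guest_pte & 0x1UL) ? 1 : 0;  /* need_flush */
    }

    int main(void)
    {
        unsigned long guest_l1[] = { 0x1UL, 0x81UL, 0x0UL };  /* samples */
        int need_flush = 0, unshadow_l1 = 0;
        unsigned int i;

        for (i = 0; i < sizeof(guest_l1) / sizeof(guest_l1[0]); i++) {
            int error = validate_pte_change(guest_l1[i]);
            if (error == -1)
                unshadow_l1 = 1;     /* note it; drop shadow after loop */
            else
                need_flush |= error; /* accumulate as before */
        }

        if (unshadow_l1) {
            /* The patch clears _PAGE_PRESENT in the shadow L2 entry found
             * via the newly recorded entry->v and entry->va; here we only
             * report the decision. */
            printf("unshadow L1, force flush for the current vcpu\n");
            need_flush = 1;
        }
        printf("need_flush = %d\n", need_flush);
        return 0;
    }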