WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

xen-changelog

[Xen-changelog] Add a defensive batched tlb flush to free_page_type(), to

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Add a defensive batched tlb flush to free_page_type(), to
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 03 Dec 2005 12:04:13 +0000
Delivery-date: Sat, 03 Dec 2005 12:05:17 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 42d4059108d4d9a4b0dfb33155cd5e85d08bc297
# Parent  b21be984945a8b11698dc66ef6dde8a5babdf18f
Add a defensive batched TLB flush to free_page_type(), to
ensure that the linear_pg_table remains in sync with the
modified page-table structure. Otherwise we could update stale
entries and corrupt reference counts (though probably only when
running a malicious domain).

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
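
The pattern at work here is Xen's per-CPU deferred-ops batching: rather than
flushing eagerly each time a page loses its page-table type, free_page_type()
records the request in a per-CPU bitmask and the flush is performed once when
the deferred ops are processed at the end of the current operation. A minimal
stand-alone sketch of that idea (using simplified, hypothetical stand-ins for
percpu_info and the flush primitives, not the actual Xen code) is:

/* Sketch of the deferred-ops batching pattern (hypothetical names). */
#include <stdio.h>

#define DOP_FLUSH_TLB      (1u << 0)   /* flush the local TLB only           */
#define DOP_FLUSH_ALL_TLBS (1u << 1)   /* flush TLBs of all VCPUs of the dom */
#define DOP_RELOAD_LDT     (1u << 2)   /* reload the LDT shadow mapping      */

static unsigned int deferred_ops;      /* per-CPU in the real code           */

/* Called whenever a page-table page is freed: only record the request. */
static void free_page_type_sketch(void)
{
    deferred_ops |= DOP_FLUSH_ALL_TLBS;
}

/* Called once at the end of the operation: act on everything recorded. */
static void process_deferred_ops_sketch(void)
{
    unsigned int ops = deferred_ops;
    deferred_ops = 0;

    if ( ops & (DOP_FLUSH_ALL_TLBS | DOP_FLUSH_TLB) )
        printf("one batched TLB flush covering all earlier frees\n");
    if ( ops & DOP_RELOAD_LDT )
        printf("reload the LDT shadow mapping\n");
}

int main(void)
{
    free_page_type_sketch();           /* several frees may accumulate...    */
    free_page_type_sketch();
    process_deferred_ops_sketch();     /* ...but only one flush is issued    */
    return 0;
}

One batched flush covers any number of freed page-table pages, while still
guaranteeing that it happens before the linear mapping is consulted again.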

diff -r b21be984945a -r 42d4059108d4 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Dec  2 17:00:49 2005
+++ b/xen/arch/x86/mm.c Fri Dec  2 17:56:24 2005
@@ -128,8 +128,9 @@
 
 /* Used to defer flushing of memory structures. */
 static struct {
-#define DOP_FLUSH_TLB   (1<<0) /* Flush the TLB.                 */
-#define DOP_RELOAD_LDT  (1<<1) /* Reload the LDT shadow mapping. */
+#define DOP_FLUSH_TLB      (1<<0) /* Flush the local TLB.                    */
+#define DOP_FLUSH_ALL_TLBS (1<<1) /* Flush TLBs of all VCPUs of current dom. */
+#define DOP_RELOAD_LDT     (1<<2) /* Reload the LDT shadow mapping.          */
     unsigned int   deferred_ops;
     /* If non-NULL, specifies a foreign subject domain for some operations. */
     struct domain *foreign;
@@ -1323,14 +1324,28 @@
     struct domain *owner = page_get_owner(page);
     unsigned long gpfn;
 
-    if ( unlikely((owner != NULL) && shadow_mode_enabled(owner)) )
-    {
-        mark_dirty(owner, page_to_pfn(page));
-        if ( unlikely(shadow_mode_refcounts(owner)) )
-            return;
-        gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
-        ASSERT(VALID_M2P(gpfn));
-        remove_shadow(owner, gpfn, type & PGT_type_mask);
+    if ( likely(owner != NULL) )
+    {
+        /*
+         * We have to flush before the next use of the linear mapping
+         * (e.g., update_va_mapping()) or we could end up modifying a page
+         * that is no longer a page table (and hence screw up ref counts).
+         */
+        percpu_info[smp_processor_id()].deferred_ops |= DOP_FLUSH_ALL_TLBS;
+
+        if ( unlikely(shadow_mode_enabled(owner)) )
+        {
+            /* Raw page tables are rewritten during save/restore. */
+            if ( !shadow_mode_translate(owner) )
+                mark_dirty(owner, page_to_pfn(page));
+
+            if ( shadow_mode_refcounts(owner) )
+                return;
+
+            gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
+            ASSERT(VALID_M2P(gpfn));
+            remove_shadow(owner, gpfn, type & PGT_type_mask);
+        }
     }
 
     switch ( type & PGT_type_mask )
@@ -1600,11 +1615,14 @@
     deferred_ops = percpu_info[cpu].deferred_ops;
     percpu_info[cpu].deferred_ops = 0;
 
-    if ( deferred_ops & DOP_FLUSH_TLB )
+    if ( deferred_ops & (DOP_FLUSH_ALL_TLBS|DOP_FLUSH_TLB) )
     {
         if ( shadow_mode_enabled(d) )
             shadow_sync_all(d);
-        local_flush_tlb();
+        if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
+            flush_tlb_mask(d->cpumask);
+        else
+            local_flush_tlb();
     }
         
     if ( deferred_ops & DOP_RELOAD_LDT )
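
The second hunk chooses between a domain-wide and a local flush: because any
VCPU of the (possibly malicious) domain may still hold stale linear-mapping
entries, DOP_FLUSH_ALL_TLBS is honoured with flush_tlb_mask(d->cpumask) rather
than local_flush_tlb(). A toy model of that decision (the cpumask handling
below is a simplified assumption, not Xen's cpumask API) is:

/* Toy model of the local vs. domain-wide flush decision (hypothetical). */
#include <stdio.h>

#define DOP_FLUSH_TLB      (1u << 0)
#define DOP_FLUSH_ALL_TLBS (1u << 1)
#define NR_CPUS            4

static void flush_local_tlb(int cpu) { printf("flush TLB on cpu %d\n", cpu); }

/* Stand-in for flush_tlb_mask(): flush every CPU set in the mask. */
static void flush_tlb_mask_sketch(unsigned long mask)
{
    for ( int cpu = 0; cpu < NR_CPUS; cpu++ )
        if ( mask & (1ul << cpu) )
            flush_local_tlb(cpu);
}

int main(void)
{
    unsigned int  deferred_ops = DOP_FLUSH_ALL_TLBS;
    unsigned long dom_cpumask  = (1ul << 0) | (1ul << 2); /* dom on cpus 0, 2 */
    int this_cpu = 0;

    if ( deferred_ops & (DOP_FLUSH_ALL_TLBS | DOP_FLUSH_TLB) )
    {
        if ( deferred_ops & DOP_FLUSH_ALL_TLBS )
            flush_tlb_mask_sketch(dom_cpumask);  /* every VCPU of the domain */
        else
            flush_local_tlb(this_cpu);           /* local flush is enough    */
    }
    return 0;
}

local_flush_tlb() remains the cheap default for operations that only touch the
current CPU's linear mapping; the new flag widens the flush only when needed.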

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
