WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] merge

# HG changeset patch
# User Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
# Node ID 6d83c86ebfe913b23bdd1e59278e0b4f605047dd
# Parent  70fe022d3589f9964f89b469d0922957e230fa2c
# Parent  c3b4b9dc23acabb7e9b5ee491a1f62d0415c4a9e
merge
---
 xen/arch/x86/mm.c            |   52 ++++++++++++++++++++++++-------------------
 xen/include/asm-x86/shadow.h |   13 +++++++---
 2 files changed, 39 insertions(+), 26 deletions(-)

diff -r 70fe022d3589 -r 6d83c86ebfe9 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Sep 20 18:32:54 2006 +0100
+++ b/xen/arch/x86/mm.c Wed Sep 20 18:33:26 2006 +0100
@@ -1490,24 +1490,26 @@ static int mod_l4_entry(l4_pgentry_t *pl
 
 int alloc_page_type(struct page_info *page, unsigned long type)
 {
-    struct domain *owner = page_get_owner(page);
-
-    if ( owner != NULL )
-        mark_dirty(owner, page_to_mfn(page));
+    int rc;
 
     switch ( type & PGT_type_mask )
     {
     case PGT_l1_page_table:
-        return alloc_l1_table(page);
+        rc = alloc_l1_table(page);
+        break;
     case PGT_l2_page_table:
-        return alloc_l2_table(page, type);
+        rc = alloc_l2_table(page, type);
+        break;
     case PGT_l3_page_table:
-        return alloc_l3_table(page);
+        rc = alloc_l3_table(page);
+        break;
     case PGT_l4_page_table:
-        return alloc_l4_table(page);
+        rc = alloc_l4_table(page);
+        break;
     case PGT_gdt_page:
     case PGT_ldt_page:
-        return alloc_segdesc_page(page);
+        rc = alloc_segdesc_page(page);
+        break;
     default:
         printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n", 
                type, page->u.inuse.type_info,
@@ -1515,7 +1517,15 @@ int alloc_page_type(struct page_info *pa
         BUG();
     }
 
-    return 0;
+    /*
+     * A page is dirtied when its type count becomes non-zero.
+     * It is safe to mark dirty here because any PTE modifications in
+     * alloc_l?_table have now happened. The caller has already set the type
+     * and incremented the reference count.
+     */
+    mark_dirty(page_get_owner(page), page_to_mfn(page));
+
+    return rc;
 }
 
 
@@ -1580,7 +1590,6 @@ void put_page_type(struct page_info *pag
 void put_page_type(struct page_info *page)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
-    struct domain *owner = page_get_owner(page);
 
  again:
     do {
@@ -1615,16 +1624,13 @@ void put_page_type(struct page_info *pag
     }
     while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
 
-    if( likely(owner != NULL) )
-    {
-        if (shadow_mode_enabled(owner))
-        {
-            if (shadow_lock_is_acquired(owner))  /* this is a shadow page */
-                return;
-
-            mark_dirty(owner, page_to_mfn(page));
-        }
-    }
+    /*
+     * A page is dirtied when its type count becomes zero.
+     * We cannot set the dirty flag earlier than this because we must wait
+     * until the type count has been zeroed by the CMPXCHG above.
+     */
+    if ( unlikely((nx & PGT_count_mask) == 0) )
+        mark_dirty(page_get_owner(page), page_to_mfn(page));
 }
 
 
@@ -1984,6 +1990,7 @@ int do_mmuext_op(
                 break;
             }
 
+            /* A page is dirtied when its pin status is set. */
             mark_dirty(d, mfn);
            
             break;
@@ -2006,8 +2013,9 @@ int do_mmuext_op(
                 {
                     shadow_lock(d);
                     shadow_remove_all_shadows(v, _mfn(mfn));
+                    /* A page is dirtied when its pin status is cleared. */
+                    sh_mark_dirty(d, _mfn(mfn));
                     shadow_unlock(d);
-                    mark_dirty(d, mfn);
                 }
             }
             else
diff -r 70fe022d3589 -r 6d83c86ebfe9 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Wed Sep 20 18:32:54 2006 +0100
+++ b/xen/include/asm-x86/shadow.h      Wed Sep 20 18:33:26 2006 +0100
@@ -325,12 +325,17 @@ void sh_do_mark_dirty(struct domain *d, 
 void sh_do_mark_dirty(struct domain *d, mfn_t gmfn);
 static inline void mark_dirty(struct domain *d, unsigned long gmfn)
 {
-    if ( shadow_mode_log_dirty(d) )
-    {
+    int caller_locked;
+
+    if ( unlikely(d == NULL) || likely(!shadow_mode_log_dirty(d)) )
+        return;
+
+    caller_locked = shadow_lock_is_acquired(d);
+    if ( !caller_locked )
         shadow_lock(d);
-        sh_do_mark_dirty(d, _mfn(gmfn));
+    sh_do_mark_dirty(d, _mfn(gmfn));
+    if ( !caller_locked )
         shadow_unlock(d);
-    }
 }
 
 /* Internal version, for when the shadow lock is already held */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]