To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [XEN] Clean up mark_dirty() usage in mm.c some more.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 20 Sep 2006 19:30:13 +0000
Delivery-date: Wed, 20 Sep 2006 12:30:51 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 3e39cc16dd4164c7b5debb6649e6531cf8d9fd22
# Parent  596b0e4fbef4069bfb3ee9807f2ccfdd65052c46
[XEN] Clean up mark_dirty() usage in mm.c some more.
Add clarifying comments. Move mark_dirty() in alloc_page_type()
to end of the function (more correct).
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/mm.c            |   51 +++++++++++++++++++++++++------------------
 xen/include/asm-x86/shadow.h |   13 +++++++---
 2 files changed, 39 insertions(+), 25 deletions(-)

diff -r 596b0e4fbef4 -r 3e39cc16dd41 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Sep 20 16:52:15 2006 +0100
+++ b/xen/arch/x86/mm.c Wed Sep 20 17:58:10 2006 +0100
@@ -1490,24 +1490,26 @@ static int mod_l4_entry(l4_pgentry_t *pl
 
 int alloc_page_type(struct page_info *page, unsigned long type)
 {
-    struct domain *owner = page_get_owner(page);
-
-    if ( owner != NULL )
-        mark_dirty(owner, page_to_mfn(page));
+    int rc;
 
     switch ( type & PGT_type_mask )
     {
     case PGT_l1_page_table:
-        return alloc_l1_table(page);
+        rc = alloc_l1_table(page);
+        break;
     case PGT_l2_page_table:
-        return alloc_l2_table(page, type);
+        rc = alloc_l2_table(page, type);
+        break;
     case PGT_l3_page_table:
-        return alloc_l3_table(page);
+        rc = alloc_l3_table(page);
+        break;
     case PGT_l4_page_table:
-        return alloc_l4_table(page);
+        rc = alloc_l4_table(page);
+        break;
     case PGT_gdt_page:
     case PGT_ldt_page:
-        return alloc_segdesc_page(page);
+        rc = alloc_segdesc_page(page);
+        break;
     default:
         printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n", 
                type, page->u.inuse.type_info,
@@ -1515,7 +1517,15 @@ int alloc_page_type(struct page_info *pa
         BUG();
     }
 
-    return 0;
+    /*
+     * A page is dirtied when its type count becomes non-zero.
+     * It is safe to mark dirty here because any PTE modifications in
+     * alloc_l?_table have now happened. The caller has already set the type
+     * and incremented the reference count.
+     */
+    mark_dirty(page_get_owner(page), page_to_mfn(page));
+
+    return rc;
 }
 
 
@@ -1615,16 +1625,13 @@ void put_page_type(struct page_info *pag
     }
     while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
 
-    if( likely(owner != NULL) )
-    {
-        if (shadow_mode_enabled(owner))
-        {
-            if (shadow_lock_is_acquired(owner))  /* this is a shadow page */
-                return;
-
-            mark_dirty(owner, page_to_mfn(page));
-        }
-    }
+    /*
+     * A page is dirtied when its type count becomes zero.
+     * We cannot set the dirty flag earlier than this because we must wait
+     * until the type count has been zeroed by the CMPXCHG above.
+     */
+    if ( unlikely((nx & PGT_count_mask) == 0) )
+        mark_dirty(owner, page_to_mfn(page));
 }
 
 
@@ -1984,6 +1991,7 @@ int do_mmuext_op(
                 break;
             }
 
+            /* A page is dirtied when its pin status is set. */
             mark_dirty(d, mfn);
            
             break;
@@ -2006,8 +2014,9 @@ int do_mmuext_op(
                 {
                     shadow_lock(d);
                     shadow_remove_all_shadows(v, _mfn(mfn));
+                    /* A page is dirtied when its pin status is cleared. */
+                    sh_mark_dirty(d, _mfn(mfn));
                     shadow_unlock(d);
-                    mark_dirty(d, mfn);
                 }
             }
             else
diff -r 596b0e4fbef4 -r 3e39cc16dd41 xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Wed Sep 20 16:52:15 2006 +0100
+++ b/xen/include/asm-x86/shadow.h      Wed Sep 20 17:58:10 2006 +0100
@@ -325,12 +325,17 @@ void sh_do_mark_dirty(struct domain *d, 
 void sh_do_mark_dirty(struct domain *d, mfn_t gmfn);
 static inline void mark_dirty(struct domain *d, unsigned long gmfn)
 {
-    if ( shadow_mode_log_dirty(d) )
-    {
+    int caller_locked;
+
+    if ( unlikely(d == NULL) || likely(!shadow_mode_log_dirty(d)) )
+        return;
+
+    caller_locked = shadow_lock_is_acquired(d);
+    if ( !caller_locked )
         shadow_lock(d);
-        sh_do_mark_dirty(d, _mfn(gmfn));
+    sh_do_mark_dirty(d, _mfn(gmfn));
+    if ( !caller_locked )
         shadow_unlock(d);
-    }
 }
 
 /* Internal version, for when the shadow lock is already held */
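
The reworked mark_dirty() in shadow.h above takes the shadow lock only when the
caller does not already hold it, and releases only what it took. As a minimal
sketch of that conditional-locking pattern in plain C (not Xen code; the names
log_dirty_ctx, ctx_lock_held_by_self and mark_dirty_sketch, and the pthread-based
locking, are illustrative assumptions):

/*
 * Minimal illustrative sketch of the "acquire only if the caller does not
 * already hold it" pattern: check ownership, lock if needed, do the work,
 * then release only what we acquired. All names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

struct log_dirty_ctx {
    pthread_mutex_t lock;
    pthread_t       holder;   /* meaningful only while 'locked' is true */
    bool            locked;   /* written only with 'lock' held          */
};

/* Does the calling thread already hold ctx->lock? (Mirrors the style of
 * an owner-field check; only the holding thread sees its own identity.) */
static bool ctx_lock_held_by_self(struct log_dirty_ctx *ctx)
{
    return ctx->locked && pthread_equal(ctx->holder, pthread_self());
}

static void mark_dirty_sketch(struct log_dirty_ctx *ctx, unsigned long gmfn)
{
    bool caller_locked = ctx_lock_held_by_self(ctx);

    if ( !caller_locked )
    {
        pthread_mutex_lock(&ctx->lock);
        ctx->holder = pthread_self();
        ctx->locked = true;
    }

    /* ... set gmfn's bit in the log-dirty bitmap here ... */
    (void)gmfn;

    if ( !caller_locked )
    {
        ctx->locked = false;
        pthread_mutex_unlock(&ctx->lock);
    }
}

This lets callers that already hold the lock (such as the pin-clearing path in
do_mmuext_op above, which calls sh_mark_dirty() directly) and lock-free callers
share one entry point without recursive locking.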

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
