[Xen-changelog] [xen-unstable] Log dirty radix tree code cleanup. Also do not dereference non-existent

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Log dirty radix tree code cleanup. Also do not dereference non-existent
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 22 Nov 2007 12:00:31 -0800
Delivery-date: Thu, 22 Nov 2007 12:01:58 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1195243575 0
# Node ID 8e98c3d6a55ff9388b9c76bff977d4ff0bd11e31
# Parent  86e4b37a06ccc6c7c0ea75b5af9e6116b5d6a382
Log dirty radix tree code cleanup. Also do not dereference non-existent
pointer in paging_new_log_dirty_*() functions if allocation fails.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/mm/paging.c         |  129 +++++++++++++++++++++++----------------
 xen/arch/x86/mm/shadow/private.h |    8 +-
 2 files changed, 82 insertions(+), 55 deletions(-)
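
Apart from the style cleanup, the substance of the change is that
paging_new_log_dirty_page() can fail: it then returns _mfn(INVALID_MFN)
without ever setting the mapping pointer, yet the old
paging_new_log_dirty_leaf()/_node() went on to initialise the page through
that pointer anyway. A minimal stand-alone C sketch of the bug and the
guard that fixes it (new_page() and new_leaf() are hypothetical stand-ins
for the real functions, and in this toy a failed allocation leaves the
mapping NULL, whereas in Xen it is simply never set):

/* Stand-alone sketch of the bug and its fix (not Xen code). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE   4096
#define INVALID_MFN (~0UL)

typedef unsigned long mfn_t;

static int mfn_valid(mfn_t mfn) { return mfn != INVALID_MFN; }

/* May fail: returns INVALID_MFN and leaves *mapping NULL. */
static mfn_t new_page(void **mapping)
{
    *mapping = malloc(PAGE_SIZE);
    return *mapping ? (mfn_t)((uintptr_t)*mapping >> 12) : INVALID_MFN;
}

static mfn_t new_leaf(void **leaf)
{
    mfn_t mfn = new_page(leaf);
    /* The old code cleared the page unconditionally, dereferencing *leaf
     * even after a failed allocation.  The fix guards on validity. */
    if ( mfn_valid(mfn) )
        memset(*leaf, 0, PAGE_SIZE);
    return mfn;
}

int main(void)
{
    void *leaf;
    mfn_t mfn = new_leaf(&leaf);
    printf("%s\n", mfn_valid(mfn) ? "leaf allocated" : "allocation failed");
    if ( mfn_valid(mfn) )
        free(leaf);
    return 0;
}

The mfn_valid() guard is exactly what the hunks below add to
paging_new_log_dirty_leaf() and paging_new_log_dirty_node().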

diff -r 86e4b37a06cc -r 8e98c3d6a55f xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Fri Nov 16 19:07:46 2007 +0000
+++ b/xen/arch/x86/mm/paging.c  Fri Nov 16 20:06:15 2007 +0000
@@ -101,36 +101,37 @@ static mfn_t paging_new_log_dirty_page(s
     mfn_t mfn;
     struct page_info *page = alloc_domheap_page(NULL);
 
-    if ( unlikely(page == NULL) ) {
+    if ( unlikely(page == NULL) )
+    {
         d->arch.paging.log_dirty.failed_allocs++;
         return _mfn(INVALID_MFN);
     }
+
     d->arch.paging.log_dirty.allocs++;
     mfn = page_to_mfn(page);
     *mapping_p = map_domain_page(mfn_x(mfn));
+
     return mfn;
 }
 
-
 static mfn_t paging_new_log_dirty_leaf(struct domain *d, uint8_t **leaf_p)
 {
     mfn_t mfn = paging_new_log_dirty_page(d, (void **)leaf_p);
-    clear_page(*leaf_p);
+    if ( mfn_valid(mfn) )
+        clear_page(*leaf_p);
     return mfn;
 }
-        
 
 static mfn_t paging_new_log_dirty_node(struct domain *d, mfn_t **node_p)
 {
     int i;
     mfn_t mfn = paging_new_log_dirty_page(d, (void **)node_p);
-    for (i = 0; i < LOGDIRTY_NODE_ENTRIES; i++)
-        (*node_p)[i] = _mfn(INVALID_MFN);
+    if ( mfn_valid(mfn) )
+        for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
+            (*node_p)[i] = _mfn(INVALID_MFN);
     return mfn;
 }
-        
-
-/* allocate bitmap resources for log dirty */
+
 int paging_alloc_log_dirty_bitmap(struct domain *d)
 {
     mfn_t *mapping;
@@ -139,7 +140,8 @@ int paging_alloc_log_dirty_bitmap(struct
         return 0;
 
     d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d, &mapping);
-    if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) ) {
+    if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) )
+    {
         /* Clear error indicator since we're reporting this one */
         d->arch.paging.log_dirty.failed_allocs = 0;
         return -ENOMEM;
@@ -149,45 +151,57 @@ int paging_alloc_log_dirty_bitmap(struct
     return 0;
 }
 
-
 static void paging_free_log_dirty_page(struct domain *d, mfn_t mfn)
 {
     d->arch.paging.log_dirty.allocs--;
     free_domheap_page(mfn_to_page(mfn));
 }    
 
-/* free bitmap resources */
 void paging_free_log_dirty_bitmap(struct domain *d)
 {
+    mfn_t *l4, *l3, *l2;
     int i4, i3, i2;
 
-    if (mfn_valid(d->arch.paging.log_dirty.top)) {
-        mfn_t *l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
-        printk("%s: used %d pages for domain %d dirty logging\n",
-               __FUNCTION__, d->arch.paging.log_dirty.allocs, d->domain_id);
-        for (i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++) {
-            if (mfn_valid(l4[i4])) {
-                mfn_t *l3 = map_domain_page(mfn_x(l4[i4]));
-                for (i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++) {
-                    if (mfn_valid(l3[i3])) {
-                        mfn_t *l2 = map_domain_page(mfn_x(l3[i3]));
-                        for (i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++)
-                            if (mfn_valid(l2[i2]))
-                                paging_free_log_dirty_page(d, l2[i2]);
-                        unmap_domain_page(l2);
-                        paging_free_log_dirty_page(d, l3[i3]);
-                    }
-                }
-                unmap_domain_page(l3);
-                paging_free_log_dirty_page(d, l4[i4]);
-            }
+    if ( !mfn_valid(d->arch.paging.log_dirty.top) )
+        return;
+
+    dprintk(XENLOG_DEBUG, "%s: used %d pages for domain %d dirty logging\n",
+            __FUNCTION__, d->arch.paging.log_dirty.allocs, d->domain_id);
+
+    l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+
+    for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
+    {
+        if ( !mfn_valid(l4[i4]) )
+            continue;
+
+        l3 = map_domain_page(mfn_x(l4[i4]));
+
+        for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
+        {
+            if ( !mfn_valid(l3[i3]) )
+                continue;
+
+            l2 = map_domain_page(mfn_x(l3[i3]));
+
+            for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
+                if ( mfn_valid(l2[i2]) )
+                    paging_free_log_dirty_page(d, l2[i2]);
+
+            unmap_domain_page(l2);
+            paging_free_log_dirty_page(d, l3[i3]);
         }
-        unmap_domain_page(l4);
-        paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
-        d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
-        ASSERT(d->arch.paging.log_dirty.allocs == 0);
-        d->arch.paging.log_dirty.failed_allocs = 0;
-    }
+
+        unmap_domain_page(l3);
+        paging_free_log_dirty_page(d, l4[i4]);
+    }
+
+    unmap_domain_page(l4);
+    paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
+
+    d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+    ASSERT(d->arch.paging.log_dirty.allocs == 0);
+    d->arch.paging.log_dirty.failed_allocs = 0;
 }
 
 int paging_log_dirty_enable(struct domain *d)
@@ -369,39 +383,52 @@ int paging_log_dirty_op(struct domain *d
 
     pages = 0;
     l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
-    for ( i4 = 0; pages < sc->pages && i4 < LOGDIRTY_NODE_ENTRIES; i4++ ) {
+
+    for ( i4 = 0;
+          (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
+          i4++ )
+    {
         l3 = mfn_valid(l4[i4]) ? map_domain_page(mfn_x(l4[i4])) : NULL;
-        for ( i3 = 0; pages < sc->pages && i3 < LOGDIRTY_NODE_ENTRIES; i3++ ) {
-            l2 = l3 && mfn_valid(l3[i3]) ? map_domain_page(mfn_x(l3[i3])) : NULL;
-            for ( i2 = 0; pages < sc->pages && i2 < LOGDIRTY_NODE_ENTRIES; i2++ ) {
+        for ( i3 = 0;
+              (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+              i3++ )
+        {
+            l2 = ((l3 && mfn_valid(l3[i3])) ?
+                  map_domain_page(mfn_x(l3[i3])) : NULL);
+            for ( i2 = 0;
+                  (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
+                  i2++ )
+            {
                 static uint8_t zeroes[PAGE_SIZE];
                 unsigned int bytes = PAGE_SIZE;
-                l1 = l2 && mfn_valid(l2[i2]) ? map_domain_page(mfn_x(l2[i2])) : zeroes;
+                l1 = ((l2 && mfn_valid(l2[i2])) ?
+                      map_domain_page(mfn_x(l2[i2])) : zeroes);
                 if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
-                if ( likely(peek) ) {
-                    if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3, l1, bytes) != 0) {
+                if ( likely(peek) )
+                {
+                    if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3,
+                                              l1, bytes) != 0 )
+                    {
                         rv = -EFAULT;
                         goto out;
                     }
                 }
-
                 if ( clean && l1 != zeroes )
                     clear_page(l1);
-
                 pages += bytes << 3;
-                if (l1 != zeroes)
+                if ( l1 != zeroes )
                     unmap_domain_page(l1);
             }
-            if (l2)
+            if ( l2 )
                 unmap_domain_page(l2);
         }
-        if (l3)
+        if ( l3 )
             unmap_domain_page(l3);
     }
     unmap_domain_page(l4);
 
-    if (pages < sc->pages)
+    if ( pages < sc->pages )
         sc->pages = pages;
 
     log_dirty_unlock(d);
diff -r 86e4b37a06cc -r 8e98c3d6a55f xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Fri Nov 16 19:07:46 2007 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Fri Nov 16 20:06:15 2007 +0000
@@ -503,7 +503,7 @@ sh_mfn_is_dirty(struct domain *d, mfn_t 
     if ( unlikely(!VALID_M2P(pfn)) )
         return 0;
     
-    if (d->arch.paging.log_dirty.failed_allocs > 0)
+    if ( d->arch.paging.log_dirty.failed_allocs > 0 )
         /* If we have any failed allocations our dirty log is bogus.
          * Since we can't signal an error here, be conservative and
          * report "dirty" in this case.  (The only current caller,
@@ -515,19 +515,19 @@ sh_mfn_is_dirty(struct domain *d, mfn_t 
     l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
     mfn = l4[L4_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l4);
-    if (!mfn_valid(mfn))
+    if ( !mfn_valid(mfn) )
         return 0;
 
     l3 = map_domain_page(mfn_x(mfn));
     mfn = l3[L3_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l3);
-    if (!mfn_valid(mfn))
+    if ( !mfn_valid(mfn) )
         return 0;
 
     l2 = map_domain_page(mfn_x(mfn));
     mfn = l2[L2_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l2);
-    if (!mfn_valid(mfn))
+    if ( !mfn_valid(mfn) )
         return 0;
 
     l1 = map_domain_page(mfn_x(mfn));

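For context on the private.h hunks: sh_mfn_is_dirty() walks the same
four-level radix tree that paging_free_log_dirty_bitmap() above tears
down, mapping one node per level and returning "not dirty" as soon as an
entry is invalid. A schematic, non-Xen sketch of such a walk, with
hypothetical index widths standing in for the L4/L3/L2_LOGDIRTY_IDX
macros (their real definitions are not part of this patch):

/* Toy four-level radix-tree lookup mirroring the shape of the walk in
 * sh_mfn_is_dirty().  Inner nodes hold 512 pointers; the leaf is a
 * 4 KiB bitmap, i.e. 2^15 dirty bits.  These widths are hypothetical. */
#include <stdint.h>

#define LEAF_BITS 15   /* bits of pfn resolved by the leaf bitmap */
#define NODE_BITS 9    /* bits of pfn resolved by each inner node */
#define IDX(pfn, lvl) \
    (((pfn) >> (LEAF_BITS + NODE_BITS * ((lvl) - 2))) & ((1 << NODE_BITS) - 1))

/* Return 1 if pfn's bit is set, 0 if it is clear or any node on the
 * path is absent (a page never marked dirty has no leaf at all). */
static int toy_is_dirty(void **l4, unsigned long pfn)
{
    void **l3, **l2;
    uint8_t *l1;

    if ( l4 == NULL || (l3 = l4[IDX(pfn, 4)]) == NULL )
        return 0;
    if ( (l2 = l3[IDX(pfn, 3)]) == NULL )
        return 0;
    if ( (l1 = l2[IDX(pfn, 2)]) == NULL )
        return 0;

    return (l1[(pfn & ((1 << LEAF_BITS) - 1)) >> 3] >> (pfn & 7)) & 1;
}
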
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
