To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 07 of 13] x86/mm: Fix memory-sharing code's locking discipline
From: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Date: Fri, 13 May 2011 17:28:49 +0100
In-reply-to: <patchbomb.1305304122@xxxxxxxxxxxxxxxxxxxxxxx>
References: <patchbomb.1305304122@xxxxxxxxxxxxxxxxxxxxxxx>
User-agent: Mercurial-patchbomb/1.6.4
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1305302438 -3600
# Node ID 078133a7063061dba94cea4ec207d6c60add5633
# Parent  fdcdfbf227bc11494ecdd820ba2fe6d1a4223b99
x86/mm: Fix memory-sharing code's locking discipline.

mem_sharing_audit() is sometimes called with the shr_lock held and
sometimes without it.  Make every caller hold the lock, and have the
audit assert that instead of taking the lock itself.
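
(Editorial sketch, not part of the patch: a minimal, compilable
illustration of the "assert the caller holds the lock" discipline,
using pthreads in place of Xen's shr_lock wrappers.  All names below
are stand-ins, not Xen's API.)

    #include <assert.h>
    #include <pthread.h>

    static pthread_mutex_t shr_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_t shr_lock_owner;   /* valid only while shr_lock_held */
    static int shr_lock_held;

    static void shr_lock_acquire(void)
    {
        pthread_mutex_lock(&shr_lock);
        shr_lock_owner = pthread_self();
        shr_lock_held = 1;
    }

    static void shr_lock_release(void)
    {
        shr_lock_held = 0;
        pthread_mutex_unlock(&shr_lock);
    }

    /* Safe for the owner to call: it set owner/held under the lock. */
    static int shr_locked_by_me(void)
    {
        return shr_lock_held && pthread_equal(shr_lock_owner, pthread_self());
    }

    /* Before: the audit took the lock itself, so a caller that already
     * held it would deadlock.  After: it only asserts the caller's
     * responsibility, so it can be called from inside locked sections. */
    static void audit(void)
    {
        assert(shr_locked_by_me());
        /* ... walk the shared-page hash table ... */
    }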

Move the unsharing loop in p2m_teardown() out of the p2m_lock: the
unsharing path takes the shr_lock, so invoking it with the p2m_lock
held risks a lock-order deadlock.
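
(A second editorial sketch, assuming the lock-order inversion the
commit message implies; the functions below are hypothetical
analogues, not the real Xen code.)

    #include <pthread.h>

    static pthread_mutex_t shr_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t p2m_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Analogue of mem_sharing_unshare_page(): shr_lock first, then p2m. */
    static void unshare_page(void)
    {
        pthread_mutex_lock(&shr_lock);
        pthread_mutex_lock(&p2m_lock);
        /* ... replace the shared p2m entry with a private copy ... */
        pthread_mutex_unlock(&p2m_lock);
        pthread_mutex_unlock(&shr_lock);
    }

    /* Analogue of p2m_teardown().  Before the patch the unsharing loop
     * ran inside the p2m_lock, i.e. p2m_lock-then-shr_lock -- the
     * reverse of unshare_page()'s order, so two threads could each hold
     * one lock and wait forever on the other.  Unsharing before taking
     * the p2m_lock removes the inversion. */
    static void teardown(void)
    {
        unshare_page();              /* loop elided: one page stands in */
        pthread_mutex_lock(&p2m_lock);
        /* ... null the phys_table and free the p2m pages ... */
        pthread_mutex_unlock(&p2m_lock);
    }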

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>

diff -r fdcdfbf227bc -r 078133a70630 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Fri May 13 17:00:38 2011 +0100
+++ b/xen/arch/x86/mm/mem_sharing.c     Fri May 13 17:00:38 2011 +0100
@@ -224,7 +224,7 @@ static void mem_sharing_audit(void)
     int bucket;
     struct page_info *pg;
 
-    shr_lock();
+    ASSERT(shr_locked_by_me());
 
     for(bucket=0; bucket < SHR_HASH_LENGTH; bucket++)
     {
@@ -284,8 +284,6 @@ static void mem_sharing_audit(void)
             e = e->next;
         }
     }
-
-    shr_unlock();
 }
 #endif
 
@@ -633,10 +631,10 @@ int mem_sharing_unshare_page(struct doma
     shr_handle_t handle;
     struct list_head *le;
 
+    shr_lock();
     mem_sharing_audit();
+    
     /* Remove the gfn_info from the list */
-    shr_lock();
-    
     mfn = gfn_to_mfn(d, gfn, &p2mt);
     
     /* Has someone already unshared it? */
@@ -736,7 +734,6 @@ int mem_sharing_domctl(struct domain *d,
         case XEN_DOMCTL_MEM_SHARING_OP_CONTROL:
         {
             d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
-            mem_sharing_audit();
             rc = 0;
         }
         break;
@@ -749,7 +746,6 @@ int mem_sharing_domctl(struct domain *d,
                 return -EINVAL;
             rc = mem_sharing_nominate_page(d, gfn, 0, &handle);
             mec->u.nominate.handle = handle;
-            mem_sharing_audit();
         }
         break;
 
@@ -765,7 +761,6 @@ int mem_sharing_domctl(struct domain *d,
                 return -EINVAL;
             rc = mem_sharing_nominate_page(d, gfn, 3, &handle);
             mec->u.nominate.handle = handle;
-            mem_sharing_audit();
         }
         break;
 
@@ -774,7 +769,6 @@ int mem_sharing_domctl(struct domain *d,
             shr_handle_t sh = mec->u.share.source_handle;
             shr_handle_t ch = mec->u.share.client_handle;
             rc = mem_sharing_share_pages(sh, ch); 
-            mem_sharing_audit();
         }
         break;
 
@@ -782,7 +776,6 @@ int mem_sharing_domctl(struct domain *d,
         {
             if(!mem_sharing_enabled(d))
                 return -EINVAL;
-            mem_sharing_audit();
             rc = mem_sharing_sharing_resume(d);
         }
         break;
@@ -791,7 +784,6 @@ int mem_sharing_domctl(struct domain *d,
         {
             unsigned long gfn = mec->u.debug.u.gfn;
             rc = mem_sharing_debug_gfn(d, gfn);
-            mem_sharing_audit();
         }
         break;
 
@@ -799,7 +791,6 @@ int mem_sharing_domctl(struct domain *d,
         {
             unsigned long mfn = mec->u.debug.u.mfn;
             rc = mem_sharing_debug_mfn(mfn);
-            mem_sharing_audit();
         }
         break;
 
@@ -807,7 +798,6 @@ int mem_sharing_domctl(struct domain *d,
         {
             grant_ref_t gref = mec->u.debug.u.gref;
             rc = mem_sharing_debug_gref(d, gref);
-            mem_sharing_audit();
         }
         break;
 
@@ -816,6 +806,10 @@ int mem_sharing_domctl(struct domain *d,
             break;
     }
 
+    shr_lock();
+    mem_sharing_audit();
+    shr_unlock();
+
     return rc;
 }
 
diff -r fdcdfbf227bc -r 078133a70630 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri May 13 17:00:38 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Fri May 13 17:00:38 2011 +0100
@@ -304,12 +304,10 @@ void p2m_teardown(struct p2m_domain *p2m
     if (p2m == NULL)
         return;
 
-    p2m_lock(p2m);
-
 #ifdef __x86_64__
     for ( gfn=0; gfn < p2m->max_mapped_pfn; gfn++ )
     {
-        mfn = p2m->get_entry(p2m, gfn, &t, &a, p2m_query);
+        mfn = gfn_to_mfn_type_p2m(p2m, gfn, &t, &a, p2m_query);
         if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
         {
             ASSERT(!p2m_is_nestedp2m(p2m));
@@ -319,6 +317,8 @@ void p2m_teardown(struct p2m_domain *p2m
     }
 #endif
 
+    p2m_lock(p2m);
+
     p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel