
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/mm: Enforce ordering constraints for the page alloc lock in the PoD code
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Fri, 11 Nov 2011 04:33:40 +0000
Delivery-date: Thu, 10 Nov 2011 20:40:23 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <tim@xxxxxxx>
# Date 1320923555 0
# Node ID 3e726d259039e31eed07ff0ad63b77ab419125ac
# Parent  ff5c8383eb9fcc57e2eba8c3fa254568f594a7f2
x86/mm: Enforce ordering constraints for the page alloc lock in the PoD code

The page alloc lock is sometimes used in the PoD code, with an
explicit expectation of ordering. Use our ordering constructs in the
mm layer to enforce this.

The additional book-keeping variables are kept in the arch_domain
sub-struct, as they are x86-specific and apply to the domain as a whole.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---
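For context, the ordering machinery in mm-locks.h that this patch builds on
can be modelled in a few lines. The following is a minimal standalone sketch,
not the Xen source: the per-thread variable stands in for Xen's per-CPU
record of the deepest lock level held, and the function names mirror the
helpers that the page_alloc_mm_* macros below expand into.

#include <assert.h>

/* Minimal model of mm-locks.h order enforcement (assumed shape).  Each
 * lock site carries a static ordering level; mm_lock_level tracks the
 * deepest level currently held by this thread. */
static __thread int mm_lock_level;

static void mm_enforce_order_lock_pre(int level)
{
    /* Refuse to take a lock that sits below the deepest level already
     * held: doing so would invert the documented order and risk
     * deadlock. */
    assert(mm_lock_level <= level);
}

static void mm_enforce_order_lock_post(int level, int *unlock_level,
                                       unsigned short *recurse_count)
{
    if ( recurse_count == NULL || (*recurse_count)++ == 0 )
        *unlock_level = mm_lock_level;   /* save for the unlock path */
    mm_lock_level = level;
}

static void mm_enforce_order_unlock(int unlock_level,
                                    unsigned short *recurse_count)
{
    if ( recurse_count == NULL || --(*recurse_count) == 0 )
        mm_lock_level = unlock_level;    /* restore the previous level */
}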


diff -r ff5c8383eb9f -r 3e726d259039 xen/arch/x86/mm/mm-locks.h
--- a/xen/arch/x86/mm/mm-locks.h        Thu Nov 10 11:12:35 2011 +0000
+++ b/xen/arch/x86/mm/mm-locks.h        Thu Nov 10 11:12:35 2011 +0000
@@ -174,6 +174,18 @@
 #define p2m_unlock(p)         mm_unlock(&(p)->lock)
 #define p2m_locked_by_me(p)   mm_locked_by_me(&(p)->lock)
 
+/* Page alloc lock (per-domain)
+ *
+ * This is an external lock, not represented by an mm_lock_t. However,
+ * the PoD code uses it in conjunction with the p2m lock, and relies on
+ * the ordering which we enforce here.
+ * The lock is not recursive. */
+
+declare_mm_order_constraint(page_alloc)
+#define page_alloc_mm_pre_lock()   mm_enforce_order_lock_pre_page_alloc()
+#define page_alloc_mm_post_lock(l) mm_enforce_order_lock_post_page_alloc(&(l), NULL)
+#define page_alloc_mm_unlock(l)    mm_enforce_order_unlock((l), NULL)
+
 /* Paging lock (per-domain)
  *
  * For shadow pagetables, this lock protects
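The declare_mm_order_constraint() generator used in this hunk is defined
earlier in mm-locks.h and is not part of the diff. Its likely shape,
inferred from the call sites and hedged as an assumption, is:

/* Assumed definition (lives elsewhere in mm-locks.h): each constraint
 * is stamped with a unique ordering level -- plausibly the same __LINE__
 * trick declare_mm_lock() uses -- and per-name pre/post helpers are
 * emitted for the wrapper macros above to call. */
#define declare_mm_order_constraint(name)                                 \
    static inline void mm_enforce_order_lock_pre_##name(void)             \
    { _mm_enforce_order_lock_pre(__LINE__); }                             \
    static inline void mm_enforce_order_lock_post_##name(                 \
                        int *unlock_level, unsigned short *recurse_count) \
    { _mm_enforce_order_lock_post(__LINE__, unlock_level, recurse_count); }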
diff -r ff5c8383eb9f -r 3e726d259039 xen/arch/x86/mm/p2m-pod.c
--- a/xen/arch/x86/mm/p2m-pod.c Thu Nov 10 11:12:35 2011 +0000
+++ b/xen/arch/x86/mm/p2m-pod.c Thu Nov 10 11:12:35 2011 +0000
@@ -45,6 +45,20 @@
 
 #define superpage_aligned(_x)  (((_x)&(SUPERPAGE_PAGES-1))==0)
 
+/* Enforce lock ordering when grabbing the "external" page_alloc lock */
+static inline void lock_page_alloc(struct p2m_domain *p2m)
+{
+    page_alloc_mm_pre_lock();
+    spin_lock(&(p2m->domain->page_alloc_lock));
+    page_alloc_mm_post_lock(p2m->domain->arch.page_alloc_unlock_level);
+}
+
+static inline void unlock_page_alloc(struct p2m_domain *p2m)
+{
+    page_alloc_mm_unlock(p2m->domain->arch.page_alloc_unlock_level);
+    spin_unlock(&(p2m->domain->page_alloc_lock));
+}
+
 /*
  * Populate-on-demand functionality
  */
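These wrappers mean an ordering violation now fails loudly at acquisition
time instead of surfacing as a rare deadlock. A hypothetical misuse, for
illustration only (not code from this patch):

/* Hypothetical misuse.  The page_alloc constraint is declared after the
 * p2m lock in mm-locks.h, so it sits at a higher ordering level; nesting
 * the two locks the other way round trips the pre-lock level check. */
static void bad_nesting(struct p2m_domain *p2m)
{
    lock_page_alloc(p2m);   /* current level := page_alloc (high)   */
    p2m_lock(p2m);          /* p2m level < current level: the check
                             * in mm-locks.h fires here             */
    p2m_unlock(p2m);
    unlock_page_alloc(p2m);
}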
@@ -100,7 +114,7 @@
         unmap_domain_page(b);
     }
 
-    spin_lock(&d->page_alloc_lock);
+    lock_page_alloc(p2m);
 
     /* First, take all pages off the domain list */
     for(i=0; i < 1 << order ; i++)
@@ -128,7 +142,7 @@
      * This may cause "zombie domains" since the page will never be freed. */
     BUG_ON( d->arch.relmem != RELMEM_not_started );
 
-    spin_unlock(&d->page_alloc_lock);
+    unlock_page_alloc(p2m);
 
     return 0;
 }
@@ -245,7 +259,7 @@
 
         /* Grab the lock before checking that pod.super is empty, or the last
          * entries may disappear before we grab the lock. */
-        spin_lock(&d->page_alloc_lock);
+        lock_page_alloc(p2m);
 
         if ( (p2m->pod.count - pod_target) > SUPERPAGE_PAGES
              && !page_list_empty(&p2m->pod.super) )
@@ -257,7 +271,7 @@
 
         ASSERT(page != NULL);
 
-        spin_unlock(&d->page_alloc_lock);
+        unlock_page_alloc(p2m);
 
         /* Then free them */
         for ( i = 0 ; i < (1 << order) ; i++ )
@@ -378,7 +392,7 @@
     BUG_ON(!d->is_dying);
     spin_barrier(&p2m->lock.lock);
 
-    spin_lock(&d->page_alloc_lock);
+    lock_page_alloc(p2m);
 
     while ( (page = page_list_remove_head(&p2m->pod.super)) )
     {
@@ -403,7 +417,7 @@
 
     BUG_ON(p2m->pod.count != 0);
 
-    spin_unlock(&d->page_alloc_lock);
+    unlock_page_alloc(p2m);
 }
 
 int
@@ -417,7 +431,7 @@
     if ( !(d = page_get_owner(p)) || !(p2m = p2m_get_hostp2m(d)) )
         return 0;
 
-    spin_lock(&d->page_alloc_lock);
+    lock_page_alloc(p2m);
     bmfn = mfn_x(page_to_mfn(p));
     page_list_for_each_safe(q, tmp, &p2m->pod.super)
     {
@@ -448,12 +462,12 @@
         }
     }
 
-    spin_unlock(&d->page_alloc_lock);
+    unlock_page_alloc(p2m);
     return 0;
 
 pod_hit:
     page_list_add_tail(p, &d->arch.relmem_list);
-    spin_unlock(&d->page_alloc_lock);
+    unlock_page_alloc(p2m);
     return 1;
 }
 
@@ -994,7 +1008,7 @@
     if ( q == p2m_guest && gfn > p2m->pod.max_guest )
         p2m->pod.max_guest = gfn;
 
-    spin_lock(&d->page_alloc_lock);
+    lock_page_alloc(p2m);
 
     if ( p2m->pod.count == 0 )
         goto out_of_memory;
@@ -1008,7 +1022,7 @@
 
     BUG_ON((mfn_x(mfn) & ((1 << order)-1)) != 0);
 
-    spin_unlock(&d->page_alloc_lock);
+    unlock_page_alloc(p2m);
 
     gfn_aligned = (gfn >> order) << order;
 
@@ -1040,7 +1054,7 @@
 
     return 0;
 out_of_memory:
-    spin_unlock(&d->page_alloc_lock);
+    unlock_page_alloc(p2m);
 
     printk("%s: Out of populate-on-demand memory! tot_pages %" PRIu32 " pod_entries %" PRIi32 "\n",
            __func__, d->tot_pages, p2m->pod.entry_count);
@@ -1049,7 +1063,7 @@
     return -1;
 remap_and_retry:
     BUG_ON(order != PAGE_ORDER_2M);
-    spin_unlock(&d->page_alloc_lock);
+    unlock_page_alloc(p2m);
 
     /* Remap this 2-meg region in singleton chunks */
     gfn_aligned = (gfn>>order)<<order;
diff -r ff5c8383eb9f -r 3e726d259039 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      Thu Nov 10 11:12:35 2011 +0000
+++ b/xen/include/asm-x86/domain.h      Thu Nov 10 11:12:35 2011 +0000
@@ -269,6 +269,9 @@
 
     struct paging_domain paging;
     struct p2m_domain *p2m;
+    /* To enforce lock ordering in the PoD code wrt the
+     * page_alloc lock */
+    int page_alloc_unlock_level;
 
     /* nestedhvm: translate l2 guest physical to host physical */
     struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
diff -r ff5c8383eb9f -r 3e726d259039 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Thu Nov 10 11:12:35 2011 +0000
+++ b/xen/include/asm-x86/p2m.h Thu Nov 10 11:12:35 2011 +0000
@@ -270,6 +270,11 @@
      * + p2m_pod_demand_populate() grabs both; the p2m lock to avoid
      *   double-demand-populating of pages, the page_alloc lock to
      *   protect moving stuff from the PoD cache to the domain page list.
+     *
+     * We enforce this lock ordering through a construct in mm-locks.h.
+     * This demands, however, that we store the previous lock-ordering
+     * level in effect before grabbing the page_alloc lock. The unlock
+     * level is stored in the arch section of the domain struct.
      */
     struct {
         struct page_list_head super,   /* List of superpages                */
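The discipline described in the comment above reduces to the following
pattern in p2m_pod_demand_populate(). This is an abbreviated sketch of the
nesting, not the literal function body:

/* Abbreviated sketch of the nesting p2m_pod_demand_populate() follows;
 * not the literal function body. */
static void pod_nesting_pattern(struct p2m_domain *p2m)
{
    p2m_lock(p2m);           /* outer lock: lower ordering level          */
    lock_page_alloc(p2m);    /* inner lock: pre-check passes; previous
                              * level saved in
                              * p2m->domain->arch.page_alloc_unlock_level */

    /* ... move pages from the PoD cache to the domain page list ... */

    unlock_page_alloc(p2m);  /* restores the p2m lock's level */
    p2m_unlock(p2m);         /* back to the base level        */
}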

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
