xen-changelog

[Xen-changelog] Make shadow_lock() acquire the recursive per-domain BIGL

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Make shadow_lock() acquire the recursive per-domain BIGLOCK. This change
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Thu, 12 May 2005 10:29:11 +0000
Delivery-date: Thu, 12 May 2005 11:02:56 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
ChangeSet 1.1451, 2005/05/12 11:29:11+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Make shadow_lock() acquire the recursive per-domain BIGLOCK. This change
        is easily reverted at the top of shadow.h. It also fixes a problem with
        nested shadow_lock() calls -- such nesting is safe because BIGLOCK is
        recursive.
        Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
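
For context, the locking scheme this changeset adopts -- a nestable outer
lock plus a per-domain depth counter -- can be sketched as a small
standalone C program. This is a minimal hypothetical illustration, not the
real Xen code: lock_biglock()/unlock_biglock() and the plain counters below
stand in for the recursive per-domain BIGLOCK and d->arch.shadow_nest.

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the recursive per-domain BIGLOCK: a real
     * implementation would record the owning CPU and spin; bare depth
     * counters are enough to show why nested acquisition is safe. */
    static int biglock_depth;
    static int shadow_nest;   /* mirrors d->arch.shadow_nest in the patch */

    static void lock_biglock(void)   { biglock_depth++; }  /* recursive: always succeeds */
    static void unlock_biglock(void) { assert(biglock_depth > 0); biglock_depth--; }

    static void shadow_lock(void)   { lock_biglock(); shadow_nest++; }
    static void shadow_unlock(void) { assert(shadow_nest > 0); shadow_nest--; unlock_biglock(); }

    int main(void)
    {
        shadow_lock();        /* outer code path takes the shadow lock   */
        shadow_lock();        /* nested path re-enters: depth increments */
        shadow_unlock();
        shadow_unlock();
        assert(biglock_depth == 0 && shadow_nest == 0);
        printf("nested shadow_lock()/shadow_unlock() balanced cleanly\n");
        return 0;
    }

Note that the patch's shadow_lock_is_acquired() checks both that BIGLOCK is
held and that shadow_nest != 0, since holding BIGLOCK alone does not imply
the caller is inside a shadow_lock() region.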



 arch/x86/shadow.c        |   14 ++++++------
 include/asm-x86/domain.h |    2 +-
 include/asm-x86/shadow.h |   52 +++++++++++++++++++++++++++++++++++++++--------
 3 files changed, 52 insertions(+), 16 deletions(-)


diff -Nru a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
--- a/xen/arch/x86/shadow.c     2005-05-12 07:03:26 -04:00
+++ b/xen/arch/x86/shadow.c     2005-05-12 07:03:26 -04:00
@@ -1217,7 +1217,7 @@
     int               i, rc = 0;
     struct exec_domain *ed;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     SH_VLOG("shadow mode table op %lx %lx count %d",
             pagetable_val(d->exec_domain[0]->arch.guest_table),  /* XXX SMP */
@@ -1813,7 +1813,7 @@
     struct pfn_info *page = &frame_table[mfn];
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(pfn_valid(mfn));
 
 #ifndef NDEBUG
@@ -1943,7 +1943,7 @@
     l2_pgentry_t l2e;
     unsigned long l1pfn, l1mfn;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_M2P(l2pfn));
 
     perfc_incrc(shadow_out_of_sync_calls);
@@ -2127,7 +2127,7 @@
     u32 found = 0, fixups, write_refs;
     unsigned long prediction, predicted_gpfn, predicted_smfn;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_MFN(readonly_gmfn));
 
     perfc_incrc(remove_write_access);
@@ -2245,7 +2245,7 @@
     if ( unlikely(!shadow_mode_enabled(d)) )
         return 0;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     perfc_incrc(remove_all_access);
 
     for (i = 0; i < shadow_ht_buckets; i++)
@@ -2287,7 +2287,7 @@
     int unshadow;
     int changed;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     for ( entry = d->arch.out_of_sync; entry; entry = entry->next)
     {
@@ -2485,7 +2485,7 @@
 
     perfc_incrc(shadow_sync_all);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     // First, remove all write permissions to the page tables
     //
diff -Nru a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h      2005-05-12 07:03:25 -04:00
+++ b/xen/include/asm-x86/domain.h      2005-05-12 07:03:25 -04:00
@@ -30,7 +30,7 @@
 
     /* Shadow mode status and controls. */
     unsigned int shadow_mode;  /* flags to control shadow table operation */
-    spinlock_t   shadow_lock;
+    unsigned int shadow_nest;  /* Recursive depth of shadow_lock() nesting */
     /* Shadow mode has tainted page reference counts? */
     unsigned int shadow_tainted_refcnts;
 
diff -Nru a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      2005-05-12 07:03:25 -04:00
+++ b/xen/include/asm-x86/shadow.h      2005-05-12 07:03:25 -04:00
@@ -60,9 +60,45 @@
 #define __linear_hl2_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START + \
      (PERDOMAIN_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
 
-#define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
-#define shadow_lock(_d)      do { ASSERT(!spin_is_locked(&(_d)->arch.shadow_lock)); spin_lock(&(_d)->arch.shadow_lock); } while (0)
-#define shadow_unlock(_d)    spin_unlock(&(_d)->arch.shadow_lock)
+/*
+ * For now we use the per-domain BIGLOCK rather than a shadow-specific lock.
+ * We usually have the BIGLOCK already acquired anyway, so this is unlikely
+ * to cause much unnecessary extra serialisation. Also it's a recursive
+ * lock, and there are some code paths containing nested shadow_lock().
+ * The #if0'ed code below is therefore broken until such nesting is removed.
+ */
+#if 0
+#define shadow_lock_init(_d)                    \
+    spin_lock_init(&(_d)->arch.shadow_lock)
+#define shadow_lock_is_acquired(_d)             \
+    spin_is_locked(&(_d)->arch.shadow_lock)
+#define shadow_lock(_d)                         \
+do {                                            \
+    ASSERT(!shadow_lock_is_acquired(_d));       \
+    spin_lock(&(_d)->arch.shadow_lock);         \
+} while (0)
+#define shadow_unlock(_d)                       \
+do {                                            \
+    ASSERT(shadow_lock_is_acquired(_d));        \
+    spin_unlock(&(_d)->arch.shadow_lock);       \
+} while (0)
+#else
+#define shadow_lock_init(_d)                    \
+    ((_d)->arch.shadow_nest = 0)
+#define shadow_lock_is_acquired(_d)             \
+    (spin_is_locked(&(_d)->big_lock) && ((_d)->arch.shadow_nest != 0))
+#define shadow_lock(_d)                         \
+do {                                            \
+    LOCK_BIGLOCK(_d);                           \
+    (_d)->arch.shadow_nest++;                   \
+} while (0)
+#define shadow_unlock(_d)                       \
+do {                                            \
+    ASSERT(shadow_lock_is_acquired(_d));        \
+    (_d)->arch.shadow_nest--;                   \
+    UNLOCK_BIGLOCK(_d);                         \
+} while (0)
+#endif
 
 #define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
 #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
@@ -403,7 +439,7 @@
     unsigned long pfn;
     int           rc = 0;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(d->arch.shadow_dirty_bitmap != NULL);
 
     if ( !VALID_MFN(mfn) )
@@ -1137,7 +1173,7 @@
                           ? __gpfn_to_mfn(d, gpfn)
                           : INVALID_MFN);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(gpfn == (gpfn & PGT_mfn_mask));
     ASSERT(stype && !(stype & ~PGT_type_mask));
 
@@ -1186,7 +1222,7 @@
     struct shadow_status *x;
     u32 pttype = PGT_none, type;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(gpfn == (gpfn & PGT_mfn_mask));
 
     perfc_incrc(shadow_max_type);
@@ -1280,7 +1316,7 @@
     struct shadow_status *p, *x, *n, *head;
     unsigned long key = gpfn | stype;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(!(gpfn & ~PGT_mfn_mask));
     ASSERT(stype && !(stype & ~PGT_type_mask));
 
@@ -1362,7 +1398,7 @@
 
     SH_VVLOG("set gpfn=%lx gmfn=%lx smfn=%lx t=%lx", gpfn, gmfn, smfn, stype);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     ASSERT(shadow_mode_translate(d) || gpfn);
     ASSERT(!(gpfn & ~PGT_mfn_mask));
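
To see concretely why the #if 0 variant above remains broken while nested
shadow_lock() paths exist: a plain (non-recursive) spinlock self-deadlocks
when re-acquired by its holder, whereas a recursive lock simply increments
its depth. The contrast can be demonstrated with POSIX mutexes -- a
hypothetical stand-in, since pthread mutexes are not Xen spinlocks (build
with cc -pthread):

    #include <pthread.h>
    #include <stdio.h>

    /* A recursive mutex plays the role of BIGLOCK; a default
     * (non-recursive) mutex would play the role of the old per-domain
     * shadow_lock spinlock. Re-locking the recursive mutex from the same
     * thread succeeds, which is exactly the property the nested
     * shadow_lock() call paths rely on. */
    int main(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutex_t biglock;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&biglock, &attr);

        pthread_mutex_lock(&biglock);    /* outer acquisition            */
        pthread_mutex_lock(&biglock);    /* nested acquisition: succeeds */
        pthread_mutex_unlock(&biglock);
        pthread_mutex_unlock(&biglock);

        /* With a default mutex the second lock above would deadlock the
         * thread -- the analogue of nested shadow_lock() on a spinlock. */
        printf("recursive mutex tolerated nested locking\n");
        return 0;
    }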

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
