[Xen-devel] [PATCH 2 of 5] credit2: Flexible cpu-to-schedule-spinlock mappings

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 2 of 5] credit2: Flexible cpu-to-schedule-spinlock mappings
From: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Date: Wed, 14 Apr 2010 11:26:20 +0100
Cc: george.dunlap@xxxxxxxxxxxxx
Delivery-date: Wed, 14 Apr 2010 03:35:17 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1271240778@silas>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1271240778@silas>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
4 files changed, 40 insertions(+), 19 deletions(-)
xen/arch/ia64/vmx/vmmu.c   |    6 +++---
xen/common/sched_credit.c  |    8 ++++----
xen/common/schedule.c      |   18 ++++++++++--------
xen/include/xen/sched-if.h |   27 +++++++++++++++++++++++----


Credit2 shares a runqueue among several cpus.  Rather than introduce
double locking and deal with cpu-to-runqueue races, allow the
scheduler to redefine the cpu-to-schedule-lock mapping.
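
For illustration (this sketch is not part of the patch): once the
schedule lock is reached through a per-cpu pointer, a scheduler that
shares one runqueue among several cpus could remap the locks roughly
as follows, where runq_lock stands for a hypothetical per-runqueue
lock the scheduler allocates itself:

    /* Hypothetical sketch, not from this patch: make every cpu in a
     * runqueue serialize on a single shared lock.  The pre-allocated
     * per-cpu _lock is simply left unused for those cpus. */
    static void runq_assign_cpu(unsigned int cpu, spinlock_t *runq_lock)
    {
        per_cpu(schedule_data, cpu).schedule_lock = runq_lock;
    }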

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>

diff -r 2631707c54b3 -r 21d0f640b0c0 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Wed Apr 14 11:16:58 2010 +0100
+++ b/xen/arch/ia64/vmx/vmmu.c  Wed Apr 14 11:16:58 2010 +0100
@@ -394,7 +394,7 @@
     if (cpu != current->processor)
         return;
     local_irq_save(flags);
-    if (!spin_trylock(&per_cpu(schedule_data, cpu).schedule_lock))
+    if (!spin_trylock(per_cpu(schedule_data, cpu).schedule_lock))
         goto bail2;
     if (v->processor != cpu)
         goto bail1;
@@ -416,7 +416,7 @@
     ia64_dv_serialize_data();
     args->vcpu = NULL;
 bail1:
-    spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
+    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
 bail2:
     local_irq_restore(flags);
 }
@@ -446,7 +446,7 @@
         do {
             cpu = v->processor;
             if (cpu != current->processor) {
-                spin_barrier(&per_cpu(schedule_data, cpu).schedule_lock);
+                spin_barrier(per_cpu(schedule_data, cpu).schedule_lock);
                 /* Flush VHPT on remote processors. */
                 smp_call_function_single(cpu, &ptc_ga_remote_func, &args, 1);
             } else {
diff -r 2631707c54b3 -r 21d0f640b0c0 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Wed Apr 14 11:16:58 2010 +0100
+++ b/xen/common/sched_credit.c Wed Apr 14 11:16:58 2010 +0100
@@ -789,7 +789,7 @@
 
     spc->runq_sort_last = sort_epoch;
 
-    spin_lock_irqsave(&per_cpu(schedule_data, cpu).schedule_lock, flags);
+    spin_lock_irqsave(per_cpu(schedule_data, cpu).schedule_lock, flags);
 
     runq = &spc->runq;
     elem = runq->next;
@@ -814,7 +814,7 @@
         elem = next;
     }
 
-    spin_unlock_irqrestore(&per_cpu(schedule_data, cpu).schedule_lock, flags);
+    spin_unlock_irqrestore(per_cpu(schedule_data, cpu).schedule_lock, flags);
 }
 
 static void
@@ -1130,7 +1130,7 @@
          * cause a deadlock if the peer CPU is also load balancing and trying
          * to lock this CPU.
          */
-        if ( !spin_trylock(&per_cpu(schedule_data, peer_cpu).schedule_lock) )
+        if ( !spin_trylock(per_cpu(schedule_data, peer_cpu).schedule_lock) )
         {
             CSCHED_STAT_CRANK(steal_trylock_failed);
             continue;
@@ -1140,7 +1140,7 @@
          * Any work over there to steal?
          */
         speer = csched_runq_steal(peer_cpu, cpu, snext->pri);
-        spin_unlock(&per_cpu(schedule_data, peer_cpu).schedule_lock);
+        spin_unlock(per_cpu(schedule_data, peer_cpu).schedule_lock);
         if ( speer != NULL )
             return speer;
     }
diff -r 2631707c54b3 -r 21d0f640b0c0 xen/common/schedule.c
--- a/xen/common/schedule.c     Wed Apr 14 11:16:58 2010 +0100
+++ b/xen/common/schedule.c     Wed Apr 14 11:16:58 2010 +0100
@@ -131,7 +131,7 @@
     s_time_t delta;
 
     ASSERT(v->runstate.state != new_state);
-    ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock));
+    ASSERT(spin_is_locked(per_cpu(schedule_data,v->processor).schedule_lock));
 
     vcpu_urgent_count_update(v);
 
@@ -340,7 +340,7 @@
     /* Switch to new CPU, then unlock old CPU. */
     v->processor = new_cpu;
     spin_unlock_irqrestore(
-        &per_cpu(schedule_data, old_cpu).schedule_lock, flags);
+        per_cpu(schedule_data, old_cpu).schedule_lock, flags);
 
     /* Wake on new CPU. */
     vcpu_wake(v);
@@ -846,7 +846,7 @@
 
     sd = &this_cpu(schedule_data);
 
-    spin_lock_irq(&sd->schedule_lock);
+    spin_lock_irq(sd->schedule_lock);
 
     stop_timer(&sd->s_timer);
     
@@ -862,7 +862,7 @@
 
     if ( unlikely(prev == next) )
     {
-        spin_unlock_irq(&sd->schedule_lock);
+        spin_unlock_irq(sd->schedule_lock);
         trace_continue_running(next);
         return continue_running(prev);
     }
@@ -900,7 +900,7 @@
     ASSERT(!next->is_running);
     next->is_running = 1;
 
-    spin_unlock_irq(&sd->schedule_lock);
+    spin_unlock_irq(sd->schedule_lock);
 
     perfc_incr(sched_ctx);
 
@@ -968,7 +968,9 @@
 
     for_each_possible_cpu ( i )
     {
-        spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
+        spin_lock_init(&per_cpu(schedule_data, i)._lock);
+        per_cpu(schedule_data, i).schedule_lock
+            = &per_cpu(schedule_data, i)._lock;
         init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
     }
 
@@ -1005,10 +1007,10 @@
 
     for_each_online_cpu ( i )
     {
-        spin_lock(&per_cpu(schedule_data, i).schedule_lock);
+        spin_lock(per_cpu(schedule_data, i).schedule_lock);
         printk("CPU[%02d] ", i);
         SCHED_OP(dump_cpu_state, i);
-        spin_unlock(&per_cpu(schedule_data, i).schedule_lock);
+        spin_unlock(per_cpu(schedule_data, i).schedule_lock);
     }
 
     local_irq_restore(flags);
diff -r 2631707c54b3 -r 21d0f640b0c0 xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h        Wed Apr 14 11:16:58 2010 +0100
+++ b/xen/include/xen/sched-if.h        Wed Apr 14 11:16:58 2010 +0100
@@ -10,8 +10,19 @@
 
 #include <xen/percpu.h>
 
+/*
+ * In order to allow a scheduler to remap the cpu-to-lock mapping, we
+ * have a per-cpu pointer along with a pre-allocated set of locks.  The
+ * generic scheduler init code points each cpu's schedule_lock pointer
+ * at that cpu's pre-allocated lock; a scheduler that wants to remap
+ * them simply modifies the pointers.
+ *
+ * For better cache behaviour, keep the actual lock in the same cache
+ * area as the rest of the struct: the scheduler just points at the one
+ * it wants (which may well be the one right in front of it). */
 struct schedule_data {
-    spinlock_t          schedule_lock;  /* spinlock protecting curr        */
+    spinlock_t         *schedule_lock,  /* protects curr; may be remapped  */
+                       _lock;           /* pre-allocated default lock      */
     struct vcpu        *curr;           /* current task                    */
     struct vcpu        *idle;           /* idle task for this cpu          */
     void               *sched_priv;
@@ -27,11 +38,19 @@
 
     for ( ; ; )
     {
+        /* NB: For schedulers with multiple cores per runqueue,
+         * a vcpu may change processor without changing runqueues,
+         * so we may release a lock only to grab the same one again.
+         *
+         * If that is measured to be an issue, the check should be
+         * changed to test whether the locks pointed to by cpu and
+         * v->processor are still the same.
+         */
         cpu = v->processor;
-        spin_lock(&per_cpu(schedule_data, cpu).schedule_lock);
+        spin_lock(per_cpu(schedule_data, cpu).schedule_lock);
         if ( likely(v->processor == cpu) )
             break;
-        spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
+        spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
     }
 }
 
@@ -42,7 +61,7 @@
 
 static inline void vcpu_schedule_unlock(struct vcpu *v)
 {
-    spin_unlock(&per_cpu(schedule_data, v->processor).schedule_lock);
+    spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
 }
 
 #define vcpu_schedule_unlock_irq(v) \
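
Callers are unaffected by the indirection, since it stays hidden
behind the vcpu_schedule_lock helpers.  A usage sketch (the work
between lock and unlock is illustrative only):

    /* Take the (possibly remapped) lock for v's current cpu.  The
     * lock/re-check loop in vcpu_schedule_lock() copes with
     * v->processor changing before the lock is acquired. */
    vcpu_schedule_lock_irq(v);
    /* ... examine or modify v's scheduling state here ... */
    vcpu_schedule_unlock_irq(v);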

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel