# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1281368167 -3600
# Node ID fc29e13f669df8a78e9bcd35de56ca07e1c6a637
# Parent 93074767205e33fc1351a61343a5c5a0fde4f9d5
scheduler: Implement yield for credit1
This patch implements 'yield' for credit1. It does this by attempting
to put the yielding vcpu behind a single lower-priority vcpu on the
runqueue. If there are no lower-priority vcpus in the queue, it goes at
the back (which, if the queue is empty, is also the front).
Runqueues are sorted every 30ms, so that is the longest this priority
inversion can last.
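As a rough standalone sketch of the policy above (a toy model, not the
patch itself: the runqueue is reduced to an array of priorities sorted
highest-first, and PRI_IDLE stands in for CSCHED_PRI_IDLE; all names
here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define PRI_IDLE (-64)

    /* Return the index at which a vcpu of priority 'pri' would be
     * inserted into runq[0..n), optionally stepping behind one
     * lower-priority entry when the vcpu has yielded. */
    static int insert_pos(const int *runq, int n, int pri, bool yielded)
    {
        int i = 0;

        /* Normal rule: keep the queue sorted by priority; the new vcpu
         * goes after any existing vcpus of equal or higher priority. */
        while ( i < n && runq[i] >= pri )
            i++;

        /* Yield rule: step behind one runnable (non-idle) lower-priority
         * vcpu, if there is one; the periodic runq_sort bounds the
         * resulting priority inversion to roughly 30ms. */
        if ( yielded && i < n && runq[i] > PRI_IDLE )
            i++;

        return i;
    }

    int main(void)
    {
        int runq[] = { -1, -1, -2, PRI_IDLE };  /* UNDER, UNDER, OVER, idle */

        printf("%d\n", insert_pos(runq, 4, -1, false));  /* 2: ahead of OVER */
        printf("%d\n", insert_pos(runq, 4, -1, true));   /* 3: behind OVER */
        return 0;
    }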
For workloads with a heavy concurrency hazard, and guests which
implement yield-on-spinlock, this patch significantly increases
performance and total system throughput.
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
xen/common/sched_credit.c | 40 +++++++++++++++++++++++++++++++++++++++-
1 files changed, 39 insertions(+), 1 deletion(-)
diff -r 93074767205e -r fc29e13f669d xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Mon Aug 09 16:35:27 2010 +0100
+++ b/xen/common/sched_credit.c Mon Aug 09 16:36:07 2010 +0100
@@ -64,7 +64,8 @@
/*
* Flags
*/
-#define CSCHED_FLAG_VCPU_PARKED 0x0001 /* VCPU over capped credits */
+#define CSCHED_FLAG_VCPU_PARKED 0x0001 /* VCPU over capped credits */
+#define CSCHED_FLAG_VCPU_YIELD 0x0002 /* VCPU yielding */
/*
@@ -106,6 +107,12 @@
#endif /* CSCHED_STATS */
+
+/*
+ * Boot parameters
+ */
+int sched_credit_default_yield = 0;
+boolean_param("sched_credit_default_yield", sched_credit_default_yield);
/*
* Physical CPU
@@ -202,6 +209,18 @@ __runq_insert(unsigned int cpu, struct c
            break;
    }

+    /* If the vcpu yielded, try to put it behind one lower-priority
+     * runnable vcpu if we can. The next runq_sort will bring it forward
+     * within 30ms if the queue is too long. */
+    if ( svc->flags & CSCHED_FLAG_VCPU_YIELD
+         && __runq_elem(iter)->pri > CSCHED_PRI_IDLE )
+    {
+        iter = iter->next;
+
+        /* Some sanity checks */
+        BUG_ON(iter == runq);
+    }
+
    list_add_tail(&svc->runq_elem, iter);
}
@@ -748,6 +767,18 @@ csched_vcpu_wake(const struct scheduler
    __runq_tickle(cpu, svc);
}

+static void
+csched_vcpu_yield(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched_vcpu * const sv = CSCHED_VCPU(vc);
+
+    if ( !sched_credit_default_yield )
+    {
+        /* Let the scheduler know that this vcpu is trying to yield */
+        sv->flags |= CSCHED_FLAG_VCPU_YIELD;
+    }
+}
+
static int
csched_dom_cntl(
    const struct scheduler *ops,
@@ -1280,6 +1311,12 @@ csched_schedule(
        snext = CSCHED_VCPU(idle_vcpu[cpu]);
        snext->pri = CSCHED_PRI_TS_BOOST;
    }
+
+    /*
+     * Clear YIELD flag before scheduling out
+     */
+    if ( scurr->flags & CSCHED_FLAG_VCPU_YIELD )
+        scurr->flags &= ~(CSCHED_FLAG_VCPU_YIELD);

    /*
     * SMP Load balance:
@@ -1509,6 +1546,7 @@ const struct scheduler sched_credit_def
    .sleep = csched_vcpu_sleep,
    .wake = csched_vcpu_wake,
+    .yield = csched_vcpu_yield,
    .adjust = csched_dom_cntl,
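As a usage note (not part of the patch): the new yield behaviour is
active by default, and the boolean_param() hunk above lets an
administrator fall back to the previous default yield behaviour from
the hypervisor command line. A minimal sketch, assuming the usual Xen
boolean boot-parameter syntax:

    # Xen command line (e.g. in the bootloader entry):
    sched_credit_default_yield=true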