# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1281684799 -3600
# Node ID 378d3cf5d7a1a71ff8bf7c88d02ac08563218e61
# Parent 31d200e5b922f342bd6649ecbbab4ccbc190bf4b
credit1: Make weight per-vcpu

Change the meaning of credit1's "weight" parameter to be per-vcpu,
rather than per-VM.

At the moment, the "weight" parameter for a VM is set on a per-VM
basis. This means that when cpu time is scarce, two VMs with the same
weight will be given the same amount of total cpu time, no matter how
many vcpus each has. I.e., if a VM has 1 vcpu, that vcpu will get x%
of the cpu time; if a VM has 2 vcpus, each vcpu will get (x/2)% of the
cpu time.
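
To make the old behaviour concrete, here is a toy calculation (a
standalone illustration, not scheduler code; the helper name is
invented, and 256 is credit1's default weight):

    #include <stdio.h>

    /* Old, per-VM semantics: a VM's share of cpu time is
     * weight/total_weight, divided evenly among its vcpus. */
    static double old_vcpu_share(int weight, int total_weight, int nr_vcpus)
    {
        return ((double)weight / total_weight) / nr_vcpus;
    }

    int main(void)
    {
        /* Two VMs, both weight 256; VM A has 1 vcpu, VM B has 2. */
        int total = 256 + 256;
        printf("VM A vcpu: %.0f%%\n", 100 * old_vcpu_share(256, total, 1));
        printf("VM B vcpu: %.0f%%\n", 100 * old_vcpu_share(256, total, 2));
        return 0;   /* prints 50% and 25% */
    }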

I believe this is a counter-intuitive interface. Users often choose
to add vcpus, and when they do so, it is with the expectation that the
VM will need and use more cpu time. In my experience, however, users
rarely change the weight parameter. So the normal course of events is
for a user to decide that a VM needs more processing power and add
more vcpus, but leave the weight unchanged. The VM then still gets the
same total amount of cpu time, only allocated less efficiently
(because it is divided among more vcpus).

The attached patch changes the meaning of the "weight" parameter to
be per-vcpu: each vcpu carries the full weight. So if you add an
extra vcpu, your VM will get more cpu time as well.
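
Under the new semantics the same scenario looks like this (again a
toy sketch with an invented helper name; each vcpu now competes with
the VM's full weight, so a VM's effective weight is weight * nr_vcpus):

    #include <stdio.h>

    /* New, per-vcpu semantics: each vcpu's share is
     * weight / sum-over-all-VMs(weight * nr_vcpus). */
    static double new_vcpu_share(int weight, int total_effective_weight)
    {
        return (double)weight / total_effective_weight;
    }

    int main(void)
    {
        /* VM A: weight 256, 1 vcpu; VM B: weight 256, 2 vcpus. */
        int total = 256 * 1 + 256 * 2;
        printf("VM A vcpu: %.0f%%\n", 100 * new_vcpu_share(256, total));
        printf("VM B vcpu: %.0f%%\n", 100 * new_vcpu_share(256, total));
        return 0;   /* each vcpu now gets ~33% */
    }
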
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
xen/common/sched_credit.c | 27 +++++++++++++++++----------
1 files changed, 17 insertions(+), 10 deletions(-)
diff -r 31d200e5b922 -r 378d3cf5d7a1 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Fri Aug 13 08:31:49 2010 +0100
+++ b/xen/common/sched_credit.c Fri Aug 13 08:33:19 2010 +0100
@@ -555,10 +555,11 @@ __csched_vcpu_acct_start(struct csched_p
         sdom->active_vcpu_count++;
         list_add(&svc->active_vcpu_elem, &sdom->active_vcpu);
 
+        /* Make weight per-vcpu */
+        prv->weight += sdom->weight;
         if ( list_empty(&sdom->active_sdom_elem) )
         {
             list_add(&sdom->active_sdom_elem, &prv->active_sdom);
-            prv->weight += sdom->weight;
         }
     }
 
@@ -576,13 +577,13 @@ __csched_vcpu_acct_stop_locked(struct cs
     CSCHED_VCPU_STAT_CRANK(svc, state_idle);
     CSCHED_STAT_CRANK(acct_vcpu_idle);
 
+    BUG_ON( prv->weight < sdom->weight );
     sdom->active_vcpu_count--;
     list_del_init(&svc->active_vcpu_elem);
+    prv->weight -= sdom->weight;
     if ( list_empty(&sdom->active_vcpu) )
     {
-        BUG_ON( prv->weight < sdom->weight );
         list_del_init(&sdom->active_sdom_elem);
-        prv->weight -= sdom->weight;
     }
 }
 
@@ -804,8 +805,8 @@ csched_dom_cntl(
         {
             if ( !list_empty(&sdom->active_sdom_elem) )
             {
-                prv->weight -= sdom->weight;
-                prv->weight += op->u.credit.weight;
+                prv->weight -= sdom->weight * sdom->active_vcpu_count;
+                prv->weight += op->u.credit.weight * sdom->active_vcpu_count;
             }
             sdom->weight = op->u.credit.weight;
         }
@@ -976,9 +977,9 @@ csched_acct(void* dummy)
         BUG_ON( is_idle_domain(sdom->dom) );
         BUG_ON( sdom->active_vcpu_count == 0 );
         BUG_ON( sdom->weight == 0 );
-        BUG_ON( sdom->weight > weight_left );
-
-        weight_left -= sdom->weight;
+        BUG_ON( (sdom->weight * sdom->active_vcpu_count) > weight_left );
+
+        weight_left -= ( sdom->weight * sdom->active_vcpu_count );
 
         /*
          * A domain's fair share is computed using its weight in competition
@@ -991,7 +992,9 @@ csched_acct(void* dummy)
         credit_peak = sdom->active_vcpu_count * CSCHED_CREDITS_PER_ACCT;
         if ( prv->credit_balance < 0 )
         {
-            credit_peak += ( ( -prv->credit_balance * sdom->weight) +
+            credit_peak += ( ( -prv->credit_balance
+                               * sdom->weight
+                               * sdom->active_vcpu_count) +
                              (weight_total - 1)
                            ) / weight_total;
         }
@@ -1002,11 +1005,15 @@ csched_acct(void* dummy)
             if ( credit_cap < credit_peak )
                 credit_peak = credit_cap;
 
+            /* FIXME -- set cap per-vcpu as well...? */
             credit_cap = ( credit_cap + ( sdom->active_vcpu_count - 1 )
                          ) / sdom->active_vcpu_count;
         }
 
-        credit_fair = ( ( credit_total * sdom->weight) + (weight_total - 1)
+        credit_fair = ( ( credit_total
+                          * sdom->weight
+                          * sdom->active_vcpu_count )
+                        + (weight_total - 1)
                       ) / weight_total;
 
         if ( credit_fair < credit_peak )
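
To illustrate the modified fair-share arithmetic in csched_acct()
above (a standalone sketch; the values are invented, and the
rounding-up trick of adding weight_total - 1 before dividing is kept
from the original code):

    #include <stdio.h>

    int main(void)
    {
        int credit_total = 3000;        /* credits handed out per period */
        int weight = 256, nr_vcpus = 2; /* one domain's weight and vcpus */
        int weight_total = 768;         /* sum of weight * nr_vcpus over all domains */

        /* New formula: the domain's weight is scaled by its active
         * vcpu count before competing for the total. */
        int credit_fair = ( ( credit_total * weight * nr_vcpus )
                            + (weight_total - 1)
                          ) / weight_total;
        printf("credit_fair = %d\n", credit_fair); /* 2000 */
        return 0;
    }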