The two attached patches require the previous patches (1-3).
credit_boost_xen.patch : modification to xen hypervisor
credit_boost_tools.patch: modification to tools
By applying these two patches, boost credit is introduced to the credit
scheduler. The credit scheduler comes to be able to give priority to
latency-sensitive domain.
To make a domain latency-sensitive, you enable boost
credit for that domain. There are two methods.
1. Using the xm command, set the upper bound of the domain's boost
credit. It is specified not as a credit value but in milliseconds.
It is named the max boost period.
e.g. domain:0, max boost period:100ms
xm sched-bcredit -d 0 -m 100
2. Using the xm command, set the upper bound of the domain's boost
credit and also set the boost ratio. The boost ratio is the percentage of
one CPU that is used for distributing boost credit. Boost credit
corresponding to the boost ratio is distributed in place of ordinary credit.
e.g. domain:0, max boost period:500ms, boost ratio:80(80% to one CPU)
xm sched-bcredit -d 0 -m 500 -r 80
Best regards,
Naoki
diff -r e0e26c5c0218 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Mon Dec 08 09:46:24 2008 +0900
+++ b/xen/common/sched_credit.c Mon Dec 08 16:36:46 2008 +0900
@@ -47,6 +47,7 @@
(CSCHED_CREDITS_PER_TICK * CSCHED_TICKS_PER_TSLICE)
#define CSCHED_CREDITS_PER_ACCT \
(CSCHED_CREDITS_PER_TICK * CSCHED_TICKS_PER_ACCT)
+#define CSCHED_MSECS_PER_BOOST_TSLICE 2
/*
@@ -200,6 +201,7 @@ struct csched_vcpu {
struct csched_dom *sdom;
struct vcpu *vcpu;
atomic_t credit;
+ atomic_t boost_credit;
uint16_t flags;
int16_t pri;
s_time_t start_time;
@@ -225,6 +227,8 @@ struct csched_dom {
uint16_t active_vcpu_count;
uint16_t weight;
uint16_t cap;
+ uint16_t boost_ratio;
+ uint16_t max_boost_period;
};
/*
@@ -239,6 +243,8 @@ struct csched_private {
cpumask_t idlers;
uint32_t weight;
uint32_t credit;
+ uint32_t boost_credit;
+ uint16_t total_boost_ratio;
int credit_balance;
uint32_t runq_sort;
CSCHED_STATS_DEFINE()
@@ -249,6 +255,10 @@ struct csched_private {
* Global variables
*/
static struct csched_private csched_priv;
+
+/* opt_credit_tslice: time slice for BOOST priority */
+static unsigned int opt_credit_tslice = CSCHED_MSECS_PER_BOOST_TSLICE;
+integer_param("credit_tslice", opt_credit_tslice);
static void csched_tick(void *_cpu);
@@ -340,6 +350,14 @@ __runq_tickle(unsigned int cpu, struct c
cpus_or(mask, mask, csched_priv.idlers);
cpus_and(mask, mask, new->vcpu->cpu_affinity);
}
+ }
+
+ /* If new VCPU has boost credit, signal the CPU. */
+ if ( new->pri == CSCHED_PRI_TS_BOOST &&
+ new->sdom->max_boost_period && cpus_empty(mask) )
+ {
+ CSCHED_STAT_CRANK(tickle_local_other);
+ cpu_set(cpu, mask);
}
/* Send scheduler interrupts to designated CPUs */
@@ -503,6 +521,8 @@ __csched_vcpu_acct_start_locked(struct c
{
list_add(&sdom->active_sdom_elem, &csched_priv.active_sdom);
csched_priv.weight += sdom->weight;
+ csched_priv.boost_credit += (sdom->boost_ratio *
+ CSCHED_CREDITS_PER_TSLICE) / 100;
}
}
}
@@ -535,6 +555,8 @@ __csched_vcpu_acct_stop_locked(struct cs
BUG_ON( csched_priv.weight < sdom->weight );
list_del_init(&sdom->active_sdom_elem);
csched_priv.weight -= sdom->weight;
+ csched_priv.boost_credit -= (sdom->boost_ratio *
+ CSCHED_CREDITS_PER_TSLICE) / 100;
}
}
@@ -545,14 +567,6 @@ csched_vcpu_acct(unsigned int cpu)
ASSERT( current->processor == cpu );
ASSERT( svc->sdom != NULL );
-
- /*
- * If this VCPU's priority was boosted when it last awoke, reset it.
- * If the VCPU is found here, then it's consuming a non-negligeable
- * amount of CPU resources and should no longer be boosted.
- */
- if ( svc->pri == CSCHED_PRI_TS_BOOST )
- svc->pri = CSCHED_PRI_TS_UNDER;
/*
* Put this VCPU and domain back on the active list if it was
@@ -595,6 +609,7 @@ csched_vcpu_init(struct vcpu *vc)
svc->sdom = sdom;
svc->vcpu = vc;
atomic_set(&svc->credit, 0);
+ atomic_set(&svc->boost_credit, 0);
svc->flags = 0U;
svc->pri = is_idle_domain(dom) ? CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER;
CSCHED_VCPU_STATS_RESET(svc);
@@ -718,25 +733,73 @@ csched_dom_cntl(
{
op->u.credit.weight = sdom->weight;
op->u.credit.cap = sdom->cap;
+ op->u.credit.max_boost_period = sdom->max_boost_period;
+ op->u.credit.boost_ratio = sdom->boost_ratio;
}
else
{
+ uint16_t weight = (uint16_t)~0U;
+
ASSERT(op->cmd == XEN_DOMCTL_SCHEDOP_putinfo);
spin_lock_irqsave(&csched_priv.lock, flags);
- if ( op->u.credit.weight != 0 )
+ if ( (op->u.credit.weight != 0) &&
+ (sdom->boost_ratio == 0 || op->u.credit.boost_ratio == 0) )
+ {
+ weight = op->u.credit.weight;
+ }
+
+ if ( op->u.credit.cap != (uint16_t)~0U )
+ sdom->cap = op->u.credit.cap;
+
+ if ( (op->u.credit.max_boost_period != (uint16_t)~0U) &&
+ (op->u.credit.max_boost_period >= CSCHED_MSECS_PER_TICK ||
+ op->u.credit.max_boost_period == 0) )
+ {
+ sdom->max_boost_period = op->u.credit.max_boost_period;
+ }
+
+ if ( (op->u.credit.boost_ratio != (uint16_t)~0U) &&
+ ((csched_priv.total_boost_ratio - sdom->boost_ratio +
+ op->u.credit.boost_ratio) <= 100 * csched_priv.ncpus) &&
+ (sdom->max_boost_period || op->u.credit.boost_ratio == 0) )
+ {
+ uint16_t new_bc, old_bc;
+
+ new_bc = (op->u.credit.boost_ratio *
+ CSCHED_CREDITS_PER_TSLICE) / 100;
+ old_bc = (sdom->boost_ratio *
+ CSCHED_CREDITS_PER_TSLICE) / 100;
+
+ csched_priv.total_boost_ratio -= sdom->boost_ratio;
+ csched_priv.total_boost_ratio += op->u.credit.boost_ratio;
+
+ sdom->boost_ratio = op->u.credit.boost_ratio;
+
+ if ( !list_empty(&sdom->active_sdom_elem) )
+ {
+ csched_priv.boost_credit -= old_bc;
+ csched_priv.boost_credit += new_bc;
+ }
+ if ( new_bc == 0 )
+ {
+ if ( sdom->weight == 0 )
+ weight = CSCHED_DEFAULT_WEIGHT;
+ }
+ else
+ weight = 0;
+ }
+
+ if ( weight != (uint16_t)~0U )
{
if ( !list_empty(&sdom->active_sdom_elem) )
{
csched_priv.weight -= sdom->weight;
- csched_priv.weight += op->u.credit.weight;
+ csched_priv.weight += weight;
}
- sdom->weight = op->u.credit.weight;
- }
-
- if ( op->u.credit.cap != (uint16_t)~0U )
- sdom->cap = op->u.credit.cap;
+ sdom->weight = weight;
+ }
spin_unlock_irqrestore(&csched_priv.lock, flags);
}
@@ -765,6 +828,8 @@ csched_dom_init(struct domain *dom)
sdom->dom = dom;
sdom->weight = CSCHED_DEFAULT_WEIGHT;
sdom->cap = 0U;
+ sdom->boost_ratio = 0U;
+ sdom->max_boost_period = 0U;
dom->sched_priv = sdom;
return 0;
@@ -780,15 +845,16 @@ csched_dom_destroy(struct domain *dom)
/*
* This is a O(n) optimized sort of the runq.
*
- * Time-share VCPUs can only be one of two priorities, UNDER or OVER. We walk
- * through the runq and move up any UNDERs that are preceded by OVERS. We
- * remember the last UNDER to make the move up operation O(1).
+ * Time-share VCPUs can only be one of three priorities, BOOST, UNDER or OVER.
+ * We walk through the runq and move up any BOOSTs that are preceded by UNDERs
+ * or OVERs, and any UNDERs that are preceded by OVERS. We remember the last
+ * BOOST and UNDER to make the move up operation O(1).
*/
static void
csched_runq_sort(unsigned int cpu)
{
struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
- struct list_head *runq, *elem, *next, *last_under;
+ struct list_head *runq, *elem, *next, *last_boost, *last_under;
struct csched_vcpu *svc_elem;
unsigned long flags;
int sort_epoch;
@@ -803,14 +869,26 @@ csched_runq_sort(unsigned int cpu)
runq = &spc->runq;
elem = runq->next;
- last_under = runq;
+ last_boost = last_under = runq;
while ( elem != runq )
{
next = elem->next;
svc_elem = __runq_elem(elem);
- if ( svc_elem->pri >= CSCHED_PRI_TS_UNDER )
+ if ( svc_elem->pri == CSCHED_PRI_TS_BOOST )
+ {
+ /* does elem need to move up the runq? */
+ if ( elem->prev != last_boost )
+ {
+ list_del(elem);
+ list_add(elem, last_boost);
+ }
+ if ( last_boost == last_under )
+ last_under = elem;
+ last_boost = elem;
+ }
+ else if ( svc_elem->pri == CSCHED_PRI_TS_UNDER )
{
/* does elem need to move up the runq? */
if ( elem->prev != last_under )
@@ -846,6 +924,14 @@ csched_acct(void)
int credit;
int64_t credit_sum;
int credit_average;
+ /* for boost credit */
+ uint32_t bc_total;
+ uint32_t bc_fair;
+ int boost_credit;
+ int max_boost_credit;
+ int64_t bc_sum;
+ int bc_average;
+
spin_lock_irqsave(&csched_priv.lock, flags);
@@ -854,8 +940,12 @@ csched_acct(void)
{
svc = list_entry(iter_vcpu, struct csched_vcpu, inactive_vcpu_elem);
- if ( atomic_read(&svc->credit)
- <= CSCHED_CREDITS_PER_TICK * (CSCHED_TICKS_PER_ACCT - 1) )
+ max_boost_credit = svc->sdom->max_boost_period *
+ (CSCHED_CREDITS_PER_TSLICE/CSCHED_MSECS_PER_TSLICE);
+ if ( (atomic_read(&svc->credit)
+ <= CSCHED_CREDITS_PER_TICK * (CSCHED_TICKS_PER_ACCT - 1)) ||
+ (atomic_read(&svc->boost_credit)
+ <= (max_boost_credit - CSCHED_CREDITS_PER_TICK)) )
{
__csched_vcpu_acct_start_locked(svc);
}
@@ -863,6 +953,7 @@ csched_acct(void)
weight_total = csched_priv.weight;
credit_total = csched_priv.credit;
+ bc_total = csched_priv.boost_credit;
/* Converge balance towards 0 when it drops negative */
if ( csched_priv.credit_balance < 0 )
@@ -871,7 +962,7 @@ csched_acct(void)
CSCHED_STAT_CRANK(acct_balance);
}
- if ( unlikely(weight_total == 0) )
+ if ( unlikely(weight_total == 0 && bc_total == 0) )
{
csched_priv.credit_balance = 0;
spin_unlock_irqrestore(&csched_priv.lock, flags);
@@ -886,26 +977,43 @@ csched_acct(void)
credit_xtra = 0;
credit_cap = 0U;
+ /* Firstly, subtract boost credits from credit_total. */
+ if ( bc_total != 0 )
+ {
+ credit_total -= bc_total;
+ credit_balance += bc_total;
+ }
+
+ /* Avoid 0 divide error */
+ if ( weight_total == 0 )
+ weight_total = 1;
+
list_for_each_safe( iter_sdom, next_sdom, &csched_priv.active_sdom )
{
sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem);
BUG_ON( is_idle_domain(sdom->dom) );
BUG_ON( sdom->active_vcpu_count == 0 );
- BUG_ON( sdom->weight == 0 );
BUG_ON( sdom->weight > weight_left );
+
+        max_boost_credit = sdom->max_boost_period *
+                           (CSCHED_CREDITS_PER_TSLICE /
+                            CSCHED_MSECS_PER_TSLICE);
/* Compute the average of active VCPUs. */
credit_sum = 0;
+ bc_sum = 0;
list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
{
svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem);
BUG_ON( sdom != svc->sdom );
credit_sum += atomic_read(&svc->credit);
+ bc_sum += atomic_read(&svc->boost_credit);
}
credit_average = ( credit_sum + (sdom->active_vcpu_count - 1)
) / sdom->active_vcpu_count;
+ bc_average = ( bc_sum + (sdom->active_vcpu_count - 1)
+ ) / sdom->active_vcpu_count;
weight_left -= sdom->weight;
@@ -940,7 +1048,9 @@ csched_acct(void)
if ( credit_fair < credit_peak )
{
- credit_xtra = 1;
+ /* credit_fair is 0 if weight is 0. */
+ if ( sdom->weight != 0 )
+ credit_xtra = 1;
}
else
{
@@ -972,6 +1082,10 @@ csched_acct(void)
credit_fair = ( credit_fair + ( sdom->active_vcpu_count - 1 )
) / sdom->active_vcpu_count;
+ /* Compute fair share of boost credit per VCPU */
+ bc_fair = ( ((sdom->boost_ratio * CSCHED_CREDITS_PER_ACCT)/100) +
+ (sdom->active_vcpu_count - 1)
+ ) / sdom->active_vcpu_count;
list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
{
@@ -982,6 +1096,42 @@ csched_acct(void)
credit = atomic_read(&svc->credit);
atomic_add(credit_average - credit + credit_fair, &svc->credit);
credit = atomic_read(&svc->credit);
+
+ /* Balance and increment boost credit */
+ boost_credit = atomic_read(&svc->boost_credit);
+            atomic_add(bc_average - boost_credit + bc_fair,
+                       &svc->boost_credit);
+ boost_credit = atomic_read(&svc->boost_credit);
+
+ /*
+ * Upper bound on boost credits.
+ * Add excess to credit.
+ */
+ if ( boost_credit > max_boost_credit )
+ {
+ atomic_add(boost_credit - max_boost_credit, &svc->credit);
+ credit = atomic_read(&svc->credit);
+ atomic_set(&svc->boost_credit, max_boost_credit);
+ boost_credit = atomic_read(&svc->boost_credit);
+ }
+ /*
+ * If credit is negative,
+ * boost credits compensate credit.
+ */
+ if ( credit < 0 && boost_credit > 0 )
+ {
+ if ( boost_credit > -credit )
+ {
+ atomic_sub(-credit, &svc->boost_credit);
+ atomic_add(-credit, &svc->credit);
+ }
+ else
+ {
+ atomic_sub(boost_credit, &svc->boost_credit);
+ atomic_add(boost_credit, &svc->credit);
+ }
+ credit = atomic_read(&svc->credit);
+ boost_credit = atomic_read(&svc->boost_credit);
+ }
/*
* Recompute priority or, if VCPU is idling, remove it from
@@ -1011,7 +1161,10 @@ csched_acct(void)
}
else
{
- svc->pri = CSCHED_PRI_TS_UNDER;
+ if ( boost_credit > 0 )
+ svc->pri = CSCHED_PRI_TS_BOOST;
+ else
+ svc->pri = CSCHED_PRI_TS_UNDER;
/* Unpark any capped domains whose credits go positive */
if ( svc->flags & CSCHED_FLAG_VCPU_PARKED)
@@ -1026,18 +1179,37 @@ csched_acct(void)
svc->flags &= ~CSCHED_FLAG_VCPU_PARKED;
}
- /* Upper bound on credits means VCPU stops earning */
+ /*
+ * Upper bound on credits and boost credits means VCPU stops
+ * earning
+ */
if ( credit > CSCHED_CREDITS_PER_TSLICE )
{
- __csched_vcpu_acct_stop_locked(svc);
+ atomic_add(credit - CSCHED_CREDITS_PER_TSLICE,
+ &svc->boost_credit);
+ boost_credit = atomic_read(&svc->boost_credit);
credit = CSCHED_CREDITS_PER_TSLICE;
atomic_set(&svc->credit, credit);
+
+ if ( boost_credit > max_boost_credit )
+ {
+ atomic_set(&svc->boost_credit, max_boost_credit);
+ __csched_vcpu_acct_stop_locked(svc);
+ }
}
}
- CSCHED_VCPU_STAT_SET(svc, credit_last, credit);
- CSCHED_VCPU_STAT_SET(svc, credit_incr, credit_fair);
- credit_balance += credit;
+ if ( sdom->boost_ratio == 0 )
+ {
+ CSCHED_VCPU_STAT_SET(svc, credit_last, credit);
+ CSCHED_VCPU_STAT_SET(svc, credit_incr, credit_fair);
+ credit_balance += credit;
+ }
+ else
+ {
+ CSCHED_VCPU_STAT_SET(svc, credit_last, boost_credit);
+ CSCHED_VCPU_STAT_SET(svc, credit_incr, bc_fair);
+ }
}
}
@@ -1221,6 +1393,22 @@ csched_schedule(s_time_t now)
) /
( MILLISECS(CSCHED_MSECS_PER_TSLICE) /
CSCHED_CREDITS_PER_TSLICE );
+ if ( scurr->pri == CSCHED_PRI_TS_BOOST )
+ {
+ int boost_credit = atomic_read(&scurr->boost_credit);
+
+ if ( boost_credit > consumed )
+ {
+ atomic_sub(consumed, &scurr->boost_credit);
+ consumed = 0;
+ }
+ else
+ {
+ atomic_sub(boost_credit, &scurr->boost_credit);
+ consumed -= boost_credit;
+ scurr->pri = CSCHED_PRI_TS_UNDER;
+ }
+ }
if ( consumed > 0 && !is_idle_vcpu(current) )
atomic_sub(consumed, &scurr->credit);
@@ -1264,7 +1452,17 @@ csched_schedule(s_time_t now)
/*
* Return task to run next...
*/
- ret.time = MILLISECS(CSCHED_MSECS_PER_TSLICE);
+ if ( snext->pri == CSCHED_PRI_TS_BOOST )
+ {
+ struct csched_vcpu * const svc = __runq_elem(runq->next);
+
+ if ( svc->pri == CSCHED_PRI_TS_BOOST )
+ ret.time = MILLISECS(opt_credit_tslice);
+ else
+ ret.time = MILLISECS(CSCHED_MSECS_PER_TICK);
+ }
+ else
+ ret.time = MILLISECS(CSCHED_MSECS_PER_TSLICE);
ret.task = snext->vcpu;
snext->start_time = now;
@@ -1287,7 +1485,11 @@ csched_dump_vcpu(struct csched_vcpu *svc
if ( sdom )
{
- printk(" credit=%i [w=%u]", atomic_read(&svc->credit), sdom->weight);
+ printk(" credit=%i bc=%i [w=%u,bc=%i]",
+ atomic_read(&svc->credit),
+ atomic_read(&svc->boost_credit),
+ sdom->weight,
+ (sdom->boost_ratio * CSCHED_CREDITS_PER_TSLICE)/100);
#ifdef CSCHED_STATS
printk(" (%d+%u) {a/i=%u/%u m=%u+%u}",
svc->stats.credit_last,
@@ -1353,6 +1555,8 @@ csched_dump(void)
"\tcredit balance = %d\n"
"\tweight = %u\n"
"\trunq_sort = %u\n"
+ "\tboost_credit = %u\n"
+ "\ttotal_boost_ratio = %u\n"
"\tdefault-weight = %d\n"
"\tmsecs per tick = %dms\n"
"\tcredits per tick = %d\n"
@@ -1364,6 +1568,8 @@ csched_dump(void)
csched_priv.credit_balance,
csched_priv.weight,
csched_priv.runq_sort,
+ csched_priv.boost_credit,
+ csched_priv.total_boost_ratio,
CSCHED_DEFAULT_WEIGHT,
CSCHED_MSECS_PER_TICK,
CSCHED_CREDITS_PER_TICK,
@@ -1417,6 +1623,8 @@ csched_init(void)
csched_priv.credit = 0U;
csched_priv.credit_balance = 0;
csched_priv.runq_sort = 0U;
+ csched_priv.boost_credit = 0;
+ csched_priv.total_boost_ratio = 0;
CSCHED_STATS_RESET();
}
diff -r e0e26c5c0218 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h Mon Dec 08 09:46:24 2008 +0900
+++ b/xen/include/public/domctl.h Mon Dec 08 16:36:46 2008 +0900
@@ -311,6 +311,8 @@ struct xen_domctl_scheduler_op {
struct xen_domctl_sched_credit {
uint16_t weight;
uint16_t cap;
+ uint16_t max_boost_period;
+ uint16_t boost_ratio;
} credit;
} u;
};
diff -r e0e26c5c0218 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Mon Dec 08 09:46:24 2008 +0900
+++ b/tools/python/xen/lowlevel/xc/xc.c Mon Dec 08 16:36:45 2008 +0900
@@ -1281,18 +1281,26 @@ static PyObject *pyxc_sched_credit_domai
uint32_t domid;
uint16_t weight;
uint16_t cap;
- static char *kwd_list[] = { "domid", "weight", "cap", NULL };
- static char kwd_type[] = "I|HH";
+ uint16_t max_boost_period;
+ uint16_t boost_ratio;
+ static char *kwd_list[] = { "domid", "weight", "cap",
+ "max_boost_period", "boost_ratio", NULL };
+ static char kwd_type[] = "I|HHhh";
struct xen_domctl_sched_credit sdom;
weight = 0;
cap = (uint16_t)~0U;
+ max_boost_period = (uint16_t)~0U;
+ boost_ratio = (uint16_t)~0U;
if( !PyArg_ParseTupleAndKeywords(args, kwds, kwd_type, kwd_list,
- &domid, &weight, &cap) )
+ &domid, &weight, &cap,
+ &max_boost_period, &boost_ratio) )
return NULL;
sdom.weight = weight;
sdom.cap = cap;
+ sdom.max_boost_period = max_boost_period;
+ sdom.boost_ratio = boost_ratio;
if ( xc_sched_credit_domain_set(self->xc_handle, domid, &sdom) != 0 )
return pyxc_error_to_exception();
@@ -1312,9 +1320,11 @@ static PyObject *pyxc_sched_credit_domai
if ( xc_sched_credit_domain_get(self->xc_handle, domid, &sdom) != 0 )
return pyxc_error_to_exception();
- return Py_BuildValue("{s:H,s:H}",
- "weight", sdom.weight,
- "cap", sdom.cap);
+ return Py_BuildValue("{s:H,s:H,s:i,s:i}",
+ "weight", sdom.weight,
+ "cap", sdom.cap,
+ "max_boost_period", sdom.max_boost_period,
+ "boost_ratio", sdom.boost_ratio);
}
static PyObject *pyxc_domain_setmaxmem(XcObject *self, PyObject *args)
@@ -1720,8 +1730,11 @@ static PyMethodDef pyxc_methods[] = {
METH_KEYWORDS, "\n"
"Set the scheduling parameters for a domain when running with the\n"
"SMP credit scheduler.\n"
- " domid [int]: domain id to set\n"
- " weight [short]: domain's scheduling weight\n"
+ " domid [int]: domain id to set\n"
+ " weight [short]: domain's scheduling weight\n"
+ " cap [short]: cap\n"
+ " max_boost_period [short]: upper limit in BOOST priority\n"
+ " boost_ratio [short]; domain's boost ratio per a cpu\n"
"Returns: [int] 0 on success; -1 on error.\n" },
{ "sched_credit_domain_get",
@@ -1729,9 +1742,12 @@ static PyMethodDef pyxc_methods[] = {
METH_VARARGS, "\n"
"Get the scheduling parameters for a domain when running with the\n"
"SMP credit scheduler.\n"
- " domid [int]: domain id to get\n"
+ " domid [int]: domain id to get\n"
"Returns: [dict]\n"
- " weight [short]: domain's scheduling weight\n"},
+ " weight [short]: domain's scheduling weight\n"
+ " cap [short]: cap\n"
+ " max_boost_period [short]: upper limit in BOOST priority\n"
+ " boost_ratio [short]: domain's boost ratio per a cpu\n"},
{ "evtchn_alloc_unbound",
(PyCFunction)pyxc_evtchn_alloc_unbound,
diff -r e0e26c5c0218 tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py Mon Dec 08 09:46:24 2008 +0900
+++ b/tools/python/xen/xend/XendAPI.py Mon Dec 08 16:36:45 2008 +0900
@@ -1505,10 +1505,14 @@ class XendAPI(object):
#need to update sched params aswell
if 'weight' in xeninfo.info['vcpus_params'] \
- and 'cap' in xeninfo.info['vcpus_params']:
+ and 'cap' in xeninfo.info['vcpus_params'] \
+ and 'max_boost_period' in xeninfo.info['vcpus_params'] \
+ and 'boost_ratio' in xeninfo.info['vcpus_params']:
weight = xeninfo.info['vcpus_params']['weight']
cap = xeninfo.info['vcpus_params']['cap']
- xendom.domain_sched_credit_set(xeninfo.getDomid(), weight, cap)
+ max_boost_period = xeninfo.info['vcpus_params']['max_boost_period']
+ boost_ratio = xeninfo.info['vcpus_params']['boost_ratio']
+            xendom.domain_sched_credit_set(xeninfo.getDomid(), weight, cap,
+                                           max_boost_period, boost_ratio)
def VM_set_VCPUs_number_live(self, _, vm_ref, num):
dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
diff -r e0e26c5c0218 tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py Mon Dec 08 09:46:24 2008 +0900
+++ b/tools/python/xen/xend/XendConfig.py Mon Dec 08 16:36:45 2008 +0900
@@ -585,6 +585,10 @@ class XendConfig(dict):
int(sxp.child_value(sxp_cfg, "cpu_weight", 256))
cfg["vcpus_params"]["cap"] = \
int(sxp.child_value(sxp_cfg, "cpu_cap", 0))
+ cfg["vcpus_params"]["max_boost_period"] = \
+ int(sxp.child_value(sxp_cfg, "cpu_max_boost_period", 0))
+ cfg["vcpus_params"]["boost_ratio"] = \
+ int(sxp.child_value(sxp_cfg, "cpu_boost_ratio", 0))
# Only extract options we know about.
extract_keys = LEGACY_UNSUPPORTED_BY_XENAPI_CFG + \
diff -r e0e26c5c0218 tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py Mon Dec 08 09:46:24 2008 +0900
+++ b/tools/python/xen/xend/XendDomain.py Mon Dec 08 16:36:45 2008 +0900
@@ -1536,7 +1536,7 @@ class XendDomain:
@param domid: Domain ID or Name
@type domid: int or string.
- @rtype: dict with keys 'weight' and 'cap'
+        @rtype: dict with keys 'weight' and 'cap' and 'max_boost_period' and
+                'boost_ratio'
@return: credit scheduler parameters
"""
dominfo = self.domain_lookup_nr(domid)
@@ -1549,20 +1549,26 @@ class XendDomain:
except Exception, ex:
raise XendError(str(ex))
else:
- return {'weight' : dominfo.getWeight(),
- 'cap' : dominfo.getCap()}
+ return {'weight' : dominfo.getWeight(),
+ 'cap' : dominfo.getCap(),
+ 'max_boost_period': dominfo.getMaxBoostPeriod(),
+ 'boost_ratio' : dominfo.getBoostRatio()}
- def domain_sched_credit_set(self, domid, weight = None, cap = None):
+    def domain_sched_credit_set(self, domid, weight = None, cap = None,
+                                max_boost_period = None, boost_ratio = None):
"""Set credit scheduler parameters for a domain.
@param domid: Domain ID or Name
@type domid: int or string.
@type weight: int
@type cap: int
+ @type max_boost_period: int
+ @type boost_ratio: int
@rtype: 0
"""
set_weight = False
set_cap = False
+ set_max_boost_period = False
+ set_boost_ratio = False
dominfo = self.domain_lookup_nr(domid)
if not dominfo:
raise XendInvalidDomain(str(domid))
@@ -1581,17 +1587,37 @@ class XendDomain:
else:
set_cap = True
+ if max_boost_period is None:
+ max_boost_period = int(~0)
+ elif max_boost_period < 0:
+ raise XendError("max_boost_period is out of range")
+ else:
+ set_max_boost_period = True
+
+ if boost_ratio is None:
+ boost_ratio = int(~0)
+ elif boost_ratio < 0:
+ raise XendError("boost_ratio is out of range")
+ else:
+ set_boost_ratio = True
+
assert type(weight) == int
assert type(cap) == int
+ assert type(max_boost_period) == int
+ assert type(boost_ratio) == int
rc = 0
if dominfo._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
-                rc = xc.sched_credit_domain_set(dominfo.getDomid(), weight,
-                                                cap)
+                rc = xc.sched_credit_domain_set(dominfo.getDomid(), weight,
+                                                cap, max_boost_period,
+                                                boost_ratio)
if rc == 0:
if set_weight:
dominfo.setWeight(weight)
if set_cap:
dominfo.setCap(cap)
+ if set_max_boost_period:
+ dominfo.setMaxBoostPeriod(max_boost_period)
+ if set_boost_ratio:
+ dominfo.setBoostRatio(boost_ratio)
self.managed_config_save(dominfo)
return rc
except Exception, ex:
diff -r e0e26c5c0218 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py Mon Dec 08 09:46:24 2008 +0900
+++ b/tools/python/xen/xend/XendDomainInfo.py Mon Dec 08 16:36:45 2008 +0900
@@ -465,7 +465,9 @@ class XendDomainInfo:
if xennode.xenschedinfo() == 'credit':
xendomains.domain_sched_credit_set(self.getDomid(),
self.getWeight(),
- self.getCap())
+ self.getCap(),
+                                                   self.getMaxBoostPeriod(),
+                                                   self.getBoostRatio())
except:
log.exception('VM start failed')
self.destroy()
@@ -1606,6 +1608,18 @@ class XendDomainInfo:
def setWeight(self, cpu_weight):
self.info['vcpus_params']['weight'] = cpu_weight
+ def getMaxBoostPeriod(self):
+ return self.info['vcpus_params']['max_boost_period']
+
+ def setMaxBoostPeriod(self, cpu_max_boost_period):
+ self.info['vcpus_params']['max_boost_period'] = cpu_max_boost_period
+
+ def getBoostRatio(self):
+ return self.info['vcpus_params']['boost_ratio']
+
+ def setBoostRatio(self, cpu_boost_ratio):
+ self.info['vcpus_params']['boost_ratio'] = cpu_boost_ratio
+
def getRestartCount(self):
return self._readVm('xend/restart_count')
diff -r e0e26c5c0218 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py Mon Dec 08 09:46:24 2008 +0900
+++ b/tools/python/xen/xm/main.py Mon Dec 08 16:36:45 2008 +0900
@@ -150,7 +150,7 @@ SUBCOMMAND_HELP = {
'log' : ('', 'Print Xend log'),
'rename' : ('<Domain> <NewDomainName>', 'Rename a domain.'),
'sched-sedf' : ('<Domain> [options]', 'Get/set EDF parameters.'),
- 'sched-credit': ('[-d <Domain> [-w[=WEIGHT]|-c[=CAP]]]',
+    'sched-credit': ('[-d <Domain> [-w[=WEIGHT]|-c[=CAP]|-m[=MAXBOOSTPERIOD]|-r[=BOOSTRATIO]]]',
'Get/set credit scheduler parameters.'),
'sysrq' : ('<Domain> <letter>', 'Send a sysrq to a domain.'),
'debug-keys' : ('<Keys>', 'Send debug keys to Xen.'),
@@ -240,6 +240,8 @@ SUBCOMMAND_OPTIONS = {
('-d DOMAIN', '--domain=DOMAIN', 'Domain to modify'),
('-w WEIGHT', '--weight=WEIGHT', 'Weight (int)'),
('-c CAP', '--cap=CAP', 'Cap (int)'),
+    ('-m MAXBOOSTPERIOD', '--maxboostperiod=MAXBOOSTPERIOD',
+     'Upper limit of boost period (ms)'),
+ ('-r BOOSTRATIO', '--ratio=BOOSTRATIO', 'Boost ratio per a cpu (int)'),
),
'list': (
('-l', '--long', 'Output all VM details in SXP'),
@@ -1578,8 +1580,8 @@ def xm_sched_credit(args):
check_sched_type('credit')
try:
- opts, params = getopt.getopt(args, "d:w:c:",
- ["domain=", "weight=", "cap="])
+ opts, params = getopt.getopt(args, "d:w:c:m:r:",
+ ["domain=", "weight=", "cap=", "maxboostperiod=", "ratio="])
except getopt.GetoptError, opterr:
err(opterr)
usage('sched-credit')
@@ -1587,6 +1589,8 @@ def xm_sched_credit(args):
domid = None
weight = None
cap = None
+ max_boost_period = None
+ boost_ratio = None
for o, a in opts:
if o in ["-d", "--domain"]:
@@ -1594,18 +1598,22 @@ def xm_sched_credit(args):
elif o in ["-w", "--weight"]:
weight = int(a)
elif o in ["-c", "--cap"]:
- cap = int(a);
+ cap = int(a)
+ elif o in ["-m", "--maxboostperiod"]:
+ max_boost_period = int(a)
+ elif o in ["-r", "--ratio"]:
+ boost_ratio = int(a);
doms = filter(lambda x : domid_match(domid, x),
[parse_doms_info(dom)
for dom in getDomains(None, 'all')])
- if weight is None and cap is None:
-    if weight is None and cap is None:
+    if weight is None and cap is None and max_boost_period is None and \
+       boost_ratio is None:
if domid is not None and doms == []:
err("Domain '%s' does not exist." % domid)
usage('sched-credit')
# print header if we aren't setting any parameters
- print '%-33s %4s %6s %4s' % ('Name','ID','Weight','Cap')
+        print '%-33s %4s %6s %4s %8s %5s' % \
+              ('Name','ID','Weight','Cap','Max(ms)','Ratio')
for d in doms:
try:
@@ -1618,16 +1626,18 @@ def xm_sched_credit(args):
except xmlrpclib.Fault:
pass
- if 'weight' not in info or 'cap' not in info:
+            if 'weight' not in info or 'cap' not in info or \
+               'max_boost_period' not in info or 'boost_ratio' not in info:
# domain does not support sched-credit?
- info = {'weight': -1, 'cap': -1}
+                info = {'weight': -1, 'cap': -1, 'max_boost_period': -1,
+                        'boost_ratio': -1}
info['weight'] = int(info['weight'])
info['cap'] = int(info['cap'])
+ info['max_boost_period'] = int(info['max_boost_period'])
+ info['boost_ratio'] = int(info['boost_ratio'])
info['name'] = d['name']
info['domid'] = str(d['domid'])
- print( ("%(name)-32s %(domid)5s %(weight)6d %(cap)4d") % info)
+            print( ("%(name)-32s %(domid)5s %(weight)6d %(cap)4d "
+                    "%(max_boost_period)8d %(boost_ratio)5d") % info)
else:
if domid is None:
# place holder for system-wide scheduler parameters
@@ -1644,6 +1654,14 @@ def xm_sched_credit(args):
get_single_vm(domid),
"cap",
cap)
+ server.xenapi.VM.add_to_VCPUs_params_live(
+ get_single_vm(domid),
+ "max_boost_period",
+ max_boost_period)
+ server.xenapi.VM.add_to_VCPUs_params_live(
+ get_single_vm(domid),
+ "boost_ratio",
+ boost_ratio)
else:
server.xenapi.VM.add_to_VCPUs_params(
get_single_vm(domid),
@@ -1653,8 +1671,16 @@ def xm_sched_credit(args):
get_single_vm(domid),
"cap",
cap)
+ server.xenapi.VM.add_to_VCPUs_params(
+ get_single_vm(domid),
+ "max_boost_period",
+ max_boost_period)
+ server.xenapi.VM.add_to_VCPUs_params(
+ get_single_vm(domid),
+ "boost_ratio",
+ boost_ratio)
else:
- result = server.xend.domain.sched_credit_set(domid, weight, cap)
-            result = server.xend.domain.sched_credit_set(domid, weight, cap)
+            result = server.xend.domain.sched_credit_set(domid, weight, cap,
+                         max_boost_period, boost_ratio)
if result != 0:
err(str(result))
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|