To: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [RFC][PATCH 4/4] sched: introduce boost credit for latency-sensitive domain
From: NISHIGUCHI Naoki <nisiguti@xxxxxxxxxxxxxx>
Date: Thu, 18 Dec 2008 12:06:03 +0900
Cc: Ian.Pratt@xxxxxxxxxxxxx, disheng.su@xxxxxxxxx, aviv@xxxxxxxxxxxx, keir.fraser@xxxxxxxxxxxxx, sakaia@xxxxxxxxxxxxxx
Delivery-date: Wed, 17 Dec 2008 19:06:49 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <4949BC2C.4060302@xxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <4949BC2C.4060302@xxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Thunderbird 2.0.0.18 (Windows/20081105)
I have attached the following two patches:
  credit_rev2_4_boost_xen.patch  : modifications to the xen hypervisor
  credit_rev2_4_boost_tools.patch: modifications to the tools

Applying these two patches introduces boost credit to the credit scheduler,
allowing it to give priority to latency-sensitive domains.
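
As a usage sketch (not part of the patches themselves): with the tools changes
below applied, a latency-sensitive domain could be configured through the
extended libxc python binding. The domain id 5 and the values 10 and 20 are
arbitrary example values.

  # Illustration only, assuming the patched python bindings are installed.
  import xen.lowlevel.xc

  xc = xen.lowlevel.xc.xc()
  # Let domain 5 run in BOOST priority for at most 10ms at a time and give it
  # 20% of one physical CPU's credits as boost credit.
  xc.sched_credit_domain_set(5, max_boost_period=10, boost_ratio=20)
  print xc.sched_credit_domain_get(5)

The same parameters can also be set with the new xm options, e.g.
"xm sched-credit -d <domain> -m 10 -r 20".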

The differences between these patches and the previously posted patches are as follows.
- When a vcpu is woken up and set to BOOST state, add CSCHED_CREDITS_PER_TICK to boost_credit and subtract CSCHED_CREDITS_PER_TICK from credit. This prevents the vcpu from returning to UNDER state immediately; dom0 in particular is affected by this. (A toy model of this point follows the list.)
- Even if the vcpu has boost credit, do not send a scheduler interrupt if the current time slice is already 2ms.
- If more than CSCHED_CREDITS_PER_TSLICE has been subtracted from a vcpu's credit, adjust the credit.
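
A small toy model of the first point (plain Python, illustration only, not the
hypervisor code). The constants assume the stock credit scheduler values of
100 credits per 10ms tick and 3 ticks per 30ms time slice; the 10ms
max_boost_period is just an example value.

  # Toy model mirroring csched_vcpu_wake() and the max boost credit
  # derivation in csched_acct() from the patch below.
  CSCHED_CREDITS_PER_TICK   = 100   # assumed stock value
  CSCHED_CREDITS_PER_TSLICE = 300   # assumed stock value
  CSCHED_MSECS_PER_TSLICE   = 30    # assumed stock value

  class VcpuModel(object):
      def __init__(self, max_boost_period_ms):
          self.credit = 0
          self.boost_credit = 0
          # Upper bound on boost credit, derived as in csched_acct().
          self.max_boost_credit = max_boost_period_ms * \
              (CSCHED_CREDITS_PER_TSLICE / CSCHED_MSECS_PER_TSLICE)

      def wake(self):
          # On wake-up, one tick's worth of credit moves into boost credit, so
          # consuming the boost does not drop the vcpu straight back to UNDER.
          self.boost_credit += CSCHED_CREDITS_PER_TICK
          self.credit -= CSCHED_CREDITS_PER_TICK

  v = VcpuModel(max_boost_period_ms=10)
  v.wake()
  print v.credit, v.boost_credit, v.max_boost_credit   # -100 100 100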

Best regards,
Naoki Nishiguchi
diff -r 8bc795246b5b xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Wed Dec 17 16:02:47 2008 +0900
+++ b/xen/common/sched_credit.c Wed Dec 17 16:04:43 2008 +0900
@@ -47,6 +47,7 @@
     (CSCHED_CREDITS_PER_TICK * CSCHED_TICKS_PER_TSLICE)
 #define CSCHED_CREDITS_PER_ACCT     \
     (CSCHED_CREDITS_PER_TICK * CSCHED_TICKS_PER_ACCT)
+#define CSCHED_MSECS_PER_BOOST_TSLICE 2
 
 
 /*
@@ -189,6 +190,7 @@ struct csched_pcpu {
     struct timer ticker;
     unsigned int tick;
     s_time_t start_time;
+    s_time_t time_slice;
 };
 
 /*
@@ -201,6 +203,8 @@ struct csched_vcpu {
     struct csched_dom *sdom;
     struct vcpu *vcpu;
     atomic_t credit;
+    atomic_t boost_credit;
+    int prev_credit;
     uint16_t flags;
     int16_t pri;
 #ifdef CSCHED_STATS
@@ -225,6 +229,8 @@ struct csched_dom {
     uint16_t active_vcpu_count;
     uint16_t weight;
     uint16_t cap;
+    uint16_t boost_ratio;
+    uint16_t max_boost_period;
 };
 
 /*
@@ -239,8 +245,11 @@ struct csched_private {
     cpumask_t idlers;
     uint32_t weight;
     uint32_t credit;
+    uint32_t boost_credit;
+    uint16_t total_boost_ratio;
     int credit_balance;
     uint32_t runq_sort;
+    s_time_t boost_tslice;
     CSCHED_STATS_DEFINE()
 };
 
@@ -250,6 +259,10 @@ struct csched_private {
  */
 static struct csched_private csched_priv;
 
+/* opt_credit_tslice: time slice for BOOST priority */
+static unsigned int opt_credit_tslice = CSCHED_MSECS_PER_BOOST_TSLICE;
+integer_param("credit_tslice", opt_credit_tslice);
+
 static void csched_tick(void *_cpu);
 
 static inline int
@@ -304,6 +317,7 @@ __runq_tickle(unsigned int cpu, struct c
 {
     struct csched_vcpu * const cur =
         CSCHED_VCPU(per_cpu(schedule_data, cpu).curr);
+    struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
     cpumask_t mask;
 
     ASSERT(cur);
@@ -340,6 +354,16 @@ __runq_tickle(unsigned int cpu, struct c
             cpus_or(mask, mask, csched_priv.idlers);
             cpus_and(mask, mask, new->vcpu->cpu_affinity);
         }
+    }
+
+    /* If new VCPU has boost credit, signal the CPU. */
+    if ( cpus_empty(mask) &&
+         new->pri == CSCHED_PRI_TS_BOOST &&
+         spc->time_slice != csched_priv.boost_tslice &&
+         new->sdom->max_boost_period )
+    {
+        CSCHED_STAT_CRANK(tickle_local_other);
+        cpu_set(cpu, mask);
     }
 
     /* Send scheduler interrupts to designated CPUs */
@@ -503,6 +527,8 @@ __csched_vcpu_acct_start_locked(struct c
         {
             list_add(&sdom->active_sdom_elem, &csched_priv.active_sdom);
             csched_priv.weight += sdom->weight;
+            csched_priv.boost_credit += (sdom->boost_ratio *
+                                         CSCHED_CREDITS_PER_TSLICE) / 100;
         }
     }
 }
@@ -525,6 +551,8 @@ __csched_vcpu_acct_stop_locked(struct cs
         BUG_ON( csched_priv.weight < sdom->weight );
         list_del_init(&sdom->active_sdom_elem);
         csched_priv.weight -= sdom->weight;
+        csched_priv.boost_credit -= (sdom->boost_ratio *
+                                     CSCHED_CREDITS_PER_TSLICE) / 100;
     }
 }
 
@@ -535,14 +563,6 @@ csched_vcpu_acct(unsigned int cpu)
 
     ASSERT( current->processor == cpu );
     ASSERT( svc->sdom != NULL );
-
-    /*
-     * If this VCPU's priority was boosted when it last awoke, reset it.
-     * If the VCPU is found here, then it's consuming a non-negligeable
-     * amount of CPU resources and should no longer be boosted.
-     */
-    if ( svc->pri == CSCHED_PRI_TS_BOOST )
-        svc->pri = CSCHED_PRI_TS_UNDER;
 
     /*
      * If it's been active a while, check if we'd be better off
@@ -579,6 +599,8 @@ csched_vcpu_init(struct vcpu *vc)
     svc->sdom = sdom;
     svc->vcpu = vc;
     atomic_set(&svc->credit, 0);
+    atomic_set(&svc->boost_credit, 0);
+    svc->prev_credit = 0;
     svc->flags = 0U;
     svc->pri = is_idle_domain(dom) ? CSCHED_PRI_IDLE : CSCHED_PRI_TS_UNDER;
     CSCHED_VCPU_STATS_RESET(svc);
@@ -693,6 +715,8 @@ csched_vcpu_wake(struct vcpu *vc)
          !(svc->flags & CSCHED_FLAG_VCPU_PARKED) )
     {
         svc->pri = CSCHED_PRI_TS_BOOST;
+        atomic_add(CSCHED_CREDITS_PER_TICK, &svc->boost_credit);
+        atomic_sub(CSCHED_CREDITS_PER_TICK, &svc->credit);
     }
 
     /* Put the VCPU on the runq and tickle CPUs */
@@ -712,25 +736,73 @@ csched_dom_cntl(
     {
         op->u.credit.weight = sdom->weight;
         op->u.credit.cap = sdom->cap;
+        op->u.credit.max_boost_period = sdom->max_boost_period;
+        op->u.credit.boost_ratio = sdom->boost_ratio;
     }
     else
     {
+        uint16_t weight = (uint16_t)~0U;
+
         ASSERT(op->cmd == XEN_DOMCTL_SCHEDOP_putinfo);
 
         spin_lock_irqsave(&csched_priv.lock, flags);
 
-        if ( op->u.credit.weight != 0 )
+        if ( (op->u.credit.weight != 0) &&
+             (sdom->boost_ratio == 0 || op->u.credit.boost_ratio == 0) )
+        {
+            weight = op->u.credit.weight;
+        }
+
+        if ( op->u.credit.cap != (uint16_t)~0U )
+            sdom->cap = op->u.credit.cap;
+
+        if ( (op->u.credit.max_boost_period != (uint16_t)~0U) &&
+             (op->u.credit.max_boost_period >= CSCHED_MSECS_PER_TICK ||
+              op->u.credit.max_boost_period == 0) )
+        {
+            sdom->max_boost_period = op->u.credit.max_boost_period;
+        }
+
+        if ( (op->u.credit.boost_ratio != (uint16_t)~0U) &&
+             ((csched_priv.total_boost_ratio - sdom->boost_ratio +
+               op->u.credit.boost_ratio) <= 100 * csched_priv.ncpus) &&
+             (sdom->max_boost_period || op->u.credit.boost_ratio == 0) )
+        {
+            uint16_t new_bc, old_bc;
+
+            new_bc = (op->u.credit.boost_ratio *
+                      CSCHED_CREDITS_PER_TSLICE) / 100;
+            old_bc = (sdom->boost_ratio *
+                      CSCHED_CREDITS_PER_TSLICE) / 100;
+
+            csched_priv.total_boost_ratio -= sdom->boost_ratio;
+            csched_priv.total_boost_ratio += op->u.credit.boost_ratio;
+
+            sdom->boost_ratio = op->u.credit.boost_ratio;
+
+            if ( !list_empty(&sdom->active_sdom_elem) )
+            {
+                csched_priv.boost_credit -= old_bc;
+                csched_priv.boost_credit += new_bc;
+            }
+            if ( new_bc == 0 )
+            {
+                if ( sdom->weight == 0 )
+                    weight = CSCHED_DEFAULT_WEIGHT;
+            }
+            else
+                weight = 0;
+        }
+
+        if ( weight != (uint16_t)~0U )
         {
             if ( !list_empty(&sdom->active_sdom_elem) )
             {
                 csched_priv.weight -= sdom->weight;
-                csched_priv.weight += op->u.credit.weight;
+                csched_priv.weight += weight;
             }
-            sdom->weight = op->u.credit.weight;
-        }
-
-        if ( op->u.credit.cap != (uint16_t)~0U )
-            sdom->cap = op->u.credit.cap;
+            sdom->weight = weight;
+        }
 
         spin_unlock_irqrestore(&csched_priv.lock, flags);
     }
@@ -759,6 +831,8 @@ csched_dom_init(struct domain *dom)
     sdom->dom = dom;
     sdom->weight = CSCHED_DEFAULT_WEIGHT;
     sdom->cap = 0U;
+    sdom->boost_ratio = 0U;
+    sdom->max_boost_period = 0U;
     dom->sched_priv = sdom;
 
     return 0;
@@ -774,15 +848,16 @@ csched_dom_destroy(struct domain *dom)
 /*
  * This is a O(n) optimized sort of the runq.
  *
- * Time-share VCPUs can only be one of two priorities, UNDER or OVER. We walk
- * through the runq and move up any UNDERs that are preceded by OVERS. We
- * remember the last UNDER to make the move up operation O(1).
+ * Time-share VCPUs can only be one of three priorities, BOOST, UNDER or OVER.
+ * We walk through the runq and move up any BOOSTs that are preceded by UNDERs
+ * or OVERs, and any UNDERs that are preceded by OVERS. We remember the last
+ * BOOST and UNDER to make the move up operation O(1).
  */
 static void
 csched_runq_sort(unsigned int cpu)
 {
     struct csched_pcpu * const spc = CSCHED_PCPU(cpu);
-    struct list_head *runq, *elem, *next, *last_under;
+    struct list_head *runq, *elem, *next, *last_boost, *last_under;
     struct csched_vcpu *svc_elem;
     unsigned long flags;
     int sort_epoch;
@@ -797,14 +872,26 @@ csched_runq_sort(unsigned int cpu)
 
     runq = &spc->runq;
     elem = runq->next;
-    last_under = runq;
+    last_boost = last_under = runq;
 
     while ( elem != runq )
     {
         next = elem->next;
         svc_elem = __runq_elem(elem);
 
-        if ( svc_elem->pri >= CSCHED_PRI_TS_UNDER )
+        if ( svc_elem->pri == CSCHED_PRI_TS_BOOST )
+        {
+            /* does elem need to move up the runq? */
+            if ( elem->prev != last_boost )
+            {
+                list_del(elem);
+                list_add(elem, last_boost);
+            }
+            if ( last_boost == last_under )
+                last_under = elem;
+            last_boost = elem;
+        }
+        else if ( svc_elem->pri == CSCHED_PRI_TS_UNDER )
         {
             /* does elem need to move up the runq? */
             if ( elem->prev != last_under )
@@ -840,6 +927,14 @@ csched_acct(void)
     int credit;
     int64_t credit_sum;
     int credit_average;
+    /* for boost credit */
+    uint32_t bc_total;
+    uint32_t bc_fair;
+    int boost_credit;
+    int max_boost_credit;
+    int64_t bc_sum;
+    int bc_average;
+
 
     spin_lock_irqsave(&csched_priv.lock, flags);
 
@@ -848,8 +943,12 @@ csched_acct(void)
     {
         svc = list_entry(iter_vcpu, struct csched_vcpu, inactive_vcpu_elem);
 
-        if ( atomic_read(&svc->credit)
-             <= CSCHED_CREDITS_PER_TICK * (CSCHED_TICKS_PER_ACCT - 1) )
+        max_boost_credit = svc->sdom->max_boost_period *
+                           (CSCHED_CREDITS_PER_TSLICE/CSCHED_MSECS_PER_TSLICE);
+        if ( (atomic_read(&svc->credit)
+              <= CSCHED_CREDITS_PER_TICK * (CSCHED_TICKS_PER_ACCT - 1)) ||
+             (atomic_read(&svc->boost_credit)
+              <= (max_boost_credit - CSCHED_CREDITS_PER_TICK)) )
         {
             __csched_vcpu_acct_start_locked(svc);
         }
@@ -857,6 +956,7 @@ csched_acct(void)
 
     weight_total = csched_priv.weight;
     credit_total = csched_priv.credit;
+    bc_total = csched_priv.boost_credit;
 
     /* Converge balance towards 0 when it drops negative */
     if ( csched_priv.credit_balance < 0 )
@@ -865,7 +965,7 @@ csched_acct(void)
         CSCHED_STAT_CRANK(acct_balance);
     }
 
-    if ( unlikely(weight_total == 0) )
+    if ( unlikely(weight_total == 0 && bc_total == 0) )
     {
         csched_priv.credit_balance = 0;
         spin_unlock_irqrestore(&csched_priv.lock, flags);
@@ -880,26 +980,59 @@ csched_acct(void)
     credit_xtra = 0;
     credit_cap = 0U;
 
+    /* Firstly, subtract boost credits from credit_total. */
+    if ( bc_total != 0 )
+    {
+        credit_total -= bc_total;
+        credit_balance += bc_total;
+    }
+
+    /* Avoid 0 divide error */
+    if ( weight_total == 0 )
+        weight_total = 1;
+
     list_for_each_safe( iter_sdom, next_sdom, &csched_priv.active_sdom )
     {
         sdom = list_entry(iter_sdom, struct csched_dom, active_sdom_elem);
 
         BUG_ON( is_idle_domain(sdom->dom) );
         BUG_ON( sdom->active_vcpu_count == 0 );
-        BUG_ON( sdom->weight == 0 );
         BUG_ON( sdom->weight > weight_left );
 
-        /* Compute the average of active VCPUs. */
+        max_boost_credit = sdom->max_boost_period *
+                           (CSCHED_CREDITS_PER_TSLICE / CSCHED_MSECS_PER_TSLICE);
+
+        /*
+         *  Compute the average of active VCPUs
+         *  and adjust credit for excessive consumption.
+         */
         credit_sum = 0;
+        bc_sum = 0;
         list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
         {
+            int adjust;
+
             svc = list_entry(iter_vcpu, struct csched_vcpu, active_vcpu_elem);
             BUG_ON( sdom != svc->sdom );
 
+            credit = atomic_read(&svc->credit);
+            boost_credit = atomic_read(&svc->boost_credit);
+            adjust = svc->prev_credit - (credit + boost_credit)
+                   - CSCHED_CREDITS_PER_TSLICE;
+            if ( adjust > 0 )
+            {
+                if ( max_boost_credit != 0 )
+                    atomic_add(adjust, &svc->boost_credit);
+                else
+                    atomic_add(adjust, &svc->credit);
+            }
             credit_sum += atomic_read(&svc->credit);
+            bc_sum += atomic_read(&svc->boost_credit);
         }
         credit_average = ( credit_sum + (sdom->active_vcpu_count - 1)
                          ) / sdom->active_vcpu_count;
+        bc_average = ( bc_sum + (sdom->active_vcpu_count - 1)
+                     ) / sdom->active_vcpu_count;
 
         weight_left -= sdom->weight;
 
@@ -934,7 +1067,9 @@ csched_acct(void)
 
         if ( credit_fair < credit_peak )
         {
-            credit_xtra = 1;
+            /* credit_fair is 0 if weight is 0. */
+            if ( sdom->weight != 0 )
+                credit_xtra = 1;
         }
         else
         {
@@ -966,6 +1101,10 @@ csched_acct(void)
         credit_fair = ( credit_fair + ( sdom->active_vcpu_count - 1 )
                       ) / sdom->active_vcpu_count;
 
+        /* Compute fair share of boost credit per VCPU */
+        bc_fair = ( ((sdom->boost_ratio * CSCHED_CREDITS_PER_ACCT)/100) +
+                    (sdom->active_vcpu_count - 1)
+                  ) / sdom->active_vcpu_count;
 
         list_for_each_safe( iter_vcpu, next_vcpu, &sdom->active_vcpu )
         {
@@ -976,6 +1115,54 @@ csched_acct(void)
             credit = atomic_read(&svc->credit);
             atomic_add(credit_average - credit + credit_fair, &svc->credit);
             credit = atomic_read(&svc->credit);
+
+            /* Balance and increment boost credit */
+            boost_credit = atomic_read(&svc->boost_credit);
+            atomic_add(bc_average - boost_credit + bc_fair, &svc->boost_credit);
+            boost_credit = atomic_read(&svc->boost_credit);
+
+            /*
+             * Upper bound on credits.
+             * Add excess to boost credit.
+             */
+            if ( credit > CSCHED_CREDITS_PER_TSLICE )
+            {
+                atomic_add(credit - CSCHED_CREDITS_PER_TSLICE,
+                           &svc->boost_credit);
+                boost_credit = atomic_read(&svc->boost_credit);
+                credit = CSCHED_CREDITS_PER_TSLICE;
+                atomic_set(&svc->credit, credit);
+            }
+            /*
+             * Upper bound on boost credits.
+             * Add excess to credit.
+             */
+            if ( boost_credit > max_boost_credit )
+            {
+                atomic_add(boost_credit - max_boost_credit, &svc->credit);
+                credit = atomic_read(&svc->credit);
+                boost_credit = max_boost_credit;
+                atomic_set(&svc->boost_credit, boost_credit);
+            }
+            /*
+             * If credit is negative,
+             * boost credits compensate credit.
+             */
+            if ( credit < 0 && boost_credit > 0 )
+            {
+                if ( boost_credit > -credit )
+                {
+                    atomic_sub(-credit, &svc->boost_credit);
+                    atomic_add(-credit, &svc->credit);
+                }
+                else
+                {
+                    atomic_sub(boost_credit, &svc->boost_credit);
+                    atomic_add(boost_credit, &svc->credit);
+                }
+                credit = atomic_read(&svc->credit);
+                boost_credit = atomic_read(&svc->boost_credit);
+            }
 
             /*
              * Recompute priority or, if VCPU is idling, remove it from
@@ -1005,7 +1192,10 @@ csched_acct(void)
             }
             else
             {
-                svc->pri = CSCHED_PRI_TS_UNDER;
+                if ( boost_credit > 0 )
+                    svc->pri = CSCHED_PRI_TS_BOOST;
+                else
+                    svc->pri = CSCHED_PRI_TS_UNDER;
 
                 /* Unpark any capped domains whose credits go positive */
                 if ( svc->flags & CSCHED_FLAG_VCPU_PARKED)
@@ -1020,18 +1210,36 @@ csched_acct(void)
                     svc->flags &= ~CSCHED_FLAG_VCPU_PARKED;
                 }
 
-                /* Upper bound on credits means VCPU stops earning */
+                /*
+                 * Upper bound on credits and boost credits means VCPU stops
+                 * earning
+                 */
                 if ( credit > CSCHED_CREDITS_PER_TSLICE )
                 {
-                    __csched_vcpu_acct_stop_locked(svc);
                     credit = CSCHED_CREDITS_PER_TSLICE;
                     atomic_set(&svc->credit, credit);
+
+                    if ( boost_credit >= max_boost_credit )
+                    {
+                        __csched_vcpu_acct_stop_locked(svc);
+                    }
                 }
             }
 
-            CSCHED_VCPU_STAT_SET(svc, credit_last, credit);
-            CSCHED_VCPU_STAT_SET(svc, credit_incr, credit_fair);
-            credit_balance += credit;
+            /* save credit for adjustment */
+            svc->prev_credit = credit + boost_credit;
+
+            if ( sdom->boost_ratio == 0 )
+            {
+                CSCHED_VCPU_STAT_SET(svc, credit_last, credit);
+                CSCHED_VCPU_STAT_SET(svc, credit_incr, credit_fair);
+                credit_balance += credit;
+            }
+            else
+            {
+                CSCHED_VCPU_STAT_SET(svc, credit_last, boost_credit);
+                CSCHED_VCPU_STAT_SET(svc, credit_incr, bc_fair);
+            }
         }
     }
 
@@ -1216,6 +1424,22 @@ csched_schedule(s_time_t now)
                ) /
                ( MILLISECS(CSCHED_MSECS_PER_TSLICE) /
                  CSCHED_CREDITS_PER_TSLICE );
+    if ( scurr->pri == CSCHED_PRI_TS_BOOST )
+    {
+        int boost_credit = atomic_read(&scurr->boost_credit);
+
+        if ( boost_credit > consumed )
+        {
+            atomic_sub(consumed, &scurr->boost_credit);
+            consumed = 0;
+        }
+        else
+        {
+            atomic_sub(boost_credit, &scurr->boost_credit);
+            consumed -= boost_credit;
+            scurr->pri = CSCHED_PRI_TS_UNDER;
+        }
+    }
     if ( consumed > 0 && !is_idle_vcpu(current) )
         atomic_sub(consumed, &scurr->credit);
 
@@ -1259,9 +1483,20 @@ csched_schedule(s_time_t now)
     /*
      * Return task to run next...
      */
-    ret.time = MILLISECS(CSCHED_MSECS_PER_TSLICE);
+    if ( snext->pri == CSCHED_PRI_TS_BOOST )
+    {
+        struct csched_vcpu * const svc = __runq_elem(runq->next);
+
+        if ( svc->pri == CSCHED_PRI_TS_BOOST )
+            ret.time = csched_priv.boost_tslice;
+        else
+            ret.time = MILLISECS(CSCHED_MSECS_PER_TICK);
+    }
+    else
+        ret.time = MILLISECS(CSCHED_MSECS_PER_TSLICE);
     ret.task = snext->vcpu;
 
+    spc->time_slice  = ret.time;
     spc->start_time = now;
 
     CSCHED_VCPU_CHECK(ret.task);
@@ -1282,7 +1517,11 @@ csched_dump_vcpu(struct csched_vcpu *svc
 
     if ( sdom )
     {
-        printk(" credit=%i [w=%u]", atomic_read(&svc->credit), sdom->weight);
+        printk(" credit=%i bc=%i [w=%u,bc=%i]",
+               atomic_read(&svc->credit),
+               atomic_read(&svc->boost_credit),
+               sdom->weight,
+               (sdom->boost_ratio * CSCHED_CREDITS_PER_TSLICE)/100);
 #ifdef CSCHED_STATS
         printk(" (%d+%u) {a/i=%u/%u m=%u+%u}",
                 svc->stats.credit_last,
@@ -1348,6 +1587,8 @@ csched_dump(void)
            "\tcredit balance     = %d\n"
            "\tweight             = %u\n"
            "\trunq_sort          = %u\n"
+           "\tboost_credit       = %u\n"
+           "\ttotal_boost_ratio  = %u\n"
            "\tdefault-weight     = %d\n"
            "\tmsecs per tick     = %dms\n"
            "\tcredits per tick   = %d\n"
@@ -1359,6 +1600,8 @@ csched_dump(void)
            csched_priv.credit_balance,
            csched_priv.weight,
            csched_priv.runq_sort,
+           csched_priv.boost_credit,
+           csched_priv.total_boost_ratio,
            CSCHED_DEFAULT_WEIGHT,
            CSCHED_MSECS_PER_TICK,
            CSCHED_CREDITS_PER_TICK,
@@ -1412,6 +1655,9 @@ csched_init(void)
     csched_priv.credit = 0U;
     csched_priv.credit_balance = 0;
     csched_priv.runq_sort = 0U;
+    csched_priv.boost_credit = 0;
+    csched_priv.total_boost_ratio = 0;
+    csched_priv.boost_tslice = MILLISECS(opt_credit_tslice);
     CSCHED_STATS_RESET();
 }
 
diff -r 8bc795246b5b xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Wed Dec 17 16:02:47 2008 +0900
+++ b/xen/include/public/domctl.h       Wed Dec 17 16:04:43 2008 +0900
@@ -311,6 +311,8 @@ struct xen_domctl_scheduler_op {
         struct xen_domctl_sched_credit {
             uint16_t weight;
             uint16_t cap;
+            uint16_t max_boost_period;
+            uint16_t boost_ratio;
         } credit;
     } u;
 };
diff -r 9dfd98cac0cc tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Wed Dec 17 16:05:11 2008 +0900
+++ b/tools/python/xen/lowlevel/xc/xc.c Wed Dec 17 16:05:42 2008 +0900
@@ -1284,18 +1284,26 @@ static PyObject *pyxc_sched_credit_domai
     uint32_t domid;
     uint16_t weight;
     uint16_t cap;
-    static char *kwd_list[] = { "domid", "weight", "cap", NULL };
-    static char kwd_type[] = "I|HH";
+    uint16_t max_boost_period;
+    uint16_t boost_ratio;
+    static char *kwd_list[] = { "domid", "weight", "cap",
+                                "max_boost_period", "boost_ratio", NULL };
+    static char kwd_type[] = "I|HHhh";
     struct xen_domctl_sched_credit sdom;
     
     weight = 0;
     cap = (uint16_t)~0U;
+    max_boost_period = (uint16_t)~0U;
+    boost_ratio = (uint16_t)~0U;
     if( !PyArg_ParseTupleAndKeywords(args, kwds, kwd_type, kwd_list, 
-                                     &domid, &weight, &cap) )
+                                     &domid, &weight, &cap,
+                                     &max_boost_period, &boost_ratio) )
         return NULL;
 
     sdom.weight = weight;
     sdom.cap = cap;
+    sdom.max_boost_period = max_boost_period;
+    sdom.boost_ratio = boost_ratio;
 
     if ( xc_sched_credit_domain_set(self->xc_handle, domid, &sdom) != 0 )
         return pyxc_error_to_exception();
@@ -1315,9 +1323,11 @@ static PyObject *pyxc_sched_credit_domai
     if ( xc_sched_credit_domain_get(self->xc_handle, domid, &sdom) != 0 )
         return pyxc_error_to_exception();
 
-    return Py_BuildValue("{s:H,s:H}",
-                         "weight",  sdom.weight,
-                         "cap",     sdom.cap);
+    return Py_BuildValue("{s:H,s:H,s:i,s:i}",
+                         "weight",           sdom.weight,
+                         "cap",              sdom.cap,
+                         "max_boost_period", sdom.max_boost_period,
+                         "boost_ratio",      sdom.boost_ratio);
 }
 
 static PyObject *pyxc_domain_setmaxmem(XcObject *self, PyObject *args)
@@ -1723,8 +1733,11 @@ static PyMethodDef pyxc_methods[] = {
       METH_KEYWORDS, "\n"
       "Set the scheduling parameters for a domain when running with the\n"
       "SMP credit scheduler.\n"
-      " domid     [int]:   domain id to set\n"
-      " weight    [short]: domain's scheduling weight\n"
+      " domid            [int]:   domain id to set\n"
+      " weight           [short]: domain's scheduling weight\n"
+      " cap              [short]: cap\n"
+      " max_boost_period [short]: upper limit in BOOST priority\n"
+      " boost_ratio      [short]; domain's boost ratio per a cpu\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
     { "sched_credit_domain_get",
@@ -1732,9 +1745,12 @@ static PyMethodDef pyxc_methods[] = {
       METH_VARARGS, "\n"
       "Get the scheduling parameters for a domain when running with the\n"
       "SMP credit scheduler.\n"
-      " domid     [int]:   domain id to get\n"
+      " domid            [int]:   domain id to get\n"
       "Returns:   [dict]\n"
-      " weight    [short]: domain's scheduling weight\n"},
+      " weight           [short]: domain's scheduling weight\n"
+      " cap              [short]: cap\n"
+      " max_boost_period [short]: upper limit in BOOST priority\n"
+      " boost_ratio      [short]: domain's boost ratio per a cpu\n"},
 
     { "evtchn_alloc_unbound", 
       (PyCFunction)pyxc_evtchn_alloc_unbound,
diff -r 9dfd98cac0cc tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py  Wed Dec 17 16:05:11 2008 +0900
+++ b/tools/python/xen/xend/XendAPI.py  Wed Dec 17 16:05:42 2008 +0900
@@ -1505,10 +1505,14 @@ class XendAPI(object):
 
         #need to update sched params aswell
         if 'weight' in xeninfo.info['vcpus_params'] \
-           and 'cap' in xeninfo.info['vcpus_params']:
+           and 'cap' in xeninfo.info['vcpus_params'] \
+           and 'max_boost_period' in xeninfo.info['vcpus_params'] \
+           and 'boost_ratio' in xeninfo.info['vcpus_params']:
             weight = xeninfo.info['vcpus_params']['weight']
             cap = xeninfo.info['vcpus_params']['cap']
-            xendom.domain_sched_credit_set(xeninfo.getDomid(), weight, cap)
+            max_boost_period = xeninfo.info['vcpus_params']['max_boost_period']
+            boost_ratio = xeninfo.info['vcpus_params']['boost_ratio']
+            xendom.domain_sched_credit_set(xeninfo.getDomid(), weight, cap, max_boost_period, boost_ratio)
 
     def VM_set_VCPUs_number_live(self, _, vm_ref, num):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
diff -r 9dfd98cac0cc tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py       Wed Dec 17 16:05:11 2008 +0900
+++ b/tools/python/xen/xend/XendConfig.py       Wed Dec 17 16:05:42 2008 +0900
@@ -589,6 +589,10 @@ class XendConfig(dict):
             int(sxp.child_value(sxp_cfg, "cpu_weight", 256))
         cfg["vcpus_params"]["cap"] = \
             int(sxp.child_value(sxp_cfg, "cpu_cap", 0))
+        cfg["vcpus_params"]["max_boost_period"] = \
+            int(sxp.child_value(sxp_cfg, "cpu_max_boost_period", 0))
+        cfg["vcpus_params"]["boost_ratio"] = \
+            int(sxp.child_value(sxp_cfg, "cpu_boost_ratio", 0))
 
         # Only extract options we know about.
         extract_keys = LEGACY_UNSUPPORTED_BY_XENAPI_CFG + \
diff -r 9dfd98cac0cc tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py       Wed Dec 17 16:05:11 2008 +0900
+++ b/tools/python/xen/xend/XendDomain.py       Wed Dec 17 16:05:42 2008 +0900
@@ -1536,7 +1536,7 @@ class XendDomain:
 
         @param domid: Domain ID or Name
         @type domid: int or string.
-        @rtype: dict with keys 'weight' and 'cap'
+        @rtype: dict with keys 'weight' and 'cap' and 'max_boost_period' and 'boost_ratio'
         @return: credit scheduler parameters
         """
         dominfo = self.domain_lookup_nr(domid)
@@ -1549,20 +1549,26 @@ class XendDomain:
             except Exception, ex:
                 raise XendError(str(ex))
         else:
-            return {'weight' : dominfo.getWeight(),
-                    'cap'    : dominfo.getCap()} 
+            return {'weight'          : dominfo.getWeight(),
+                    'cap'             : dominfo.getCap(),
+                    'max_boost_period': dominfo.getMaxBoostPeriod(),
+                    'boost_ratio'    : dominfo.getBoostRatio()} 
     
-    def domain_sched_credit_set(self, domid, weight = None, cap = None):
+    def domain_sched_credit_set(self, domid, weight = None, cap = None, max_boost_period = None, boost_ratio = None):
         """Set credit scheduler parameters for a domain.
 
         @param domid: Domain ID or Name
         @type domid: int or string.
         @type weight: int
         @type cap: int
+        @type max_boost_period: int
+        @type boost_ratio: int
         @rtype: 0
         """
         set_weight = False
         set_cap = False
+        set_max_boost_period = False
+        set_boost_ratio = False
         dominfo = self.domain_lookup_nr(domid)
         if not dominfo:
             raise XendInvalidDomain(str(domid))
@@ -1581,17 +1587,37 @@ class XendDomain:
             else:
                 set_cap = True
 
+            if max_boost_period is None:
+                max_boost_period = int(~0)
+            elif max_boost_period < 0:
+                raise XendError("max_boost_period is out of range")
+            else:
+                set_max_boost_period = True
+
+            if boost_ratio is None:
+                boost_ratio = int(~0)
+            elif boost_ratio < 0:
+                raise XendError("boost_ratio is out of range")
+            else:
+                set_boost_ratio = True
+
             assert type(weight) == int
             assert type(cap) == int
+            assert type(max_boost_period) == int
+            assert type(boost_ratio) == int
 
             rc = 0
             if dominfo._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
-                rc = xc.sched_credit_domain_set(dominfo.getDomid(), weight, cap)
+                rc = xc.sched_credit_domain_set(dominfo.getDomid(), weight, cap, max_boost_period, boost_ratio)
             if rc == 0:
                 if set_weight:
                     dominfo.setWeight(weight)
                 if set_cap:
                     dominfo.setCap(cap)
+                if set_max_boost_period:
+                    dominfo.setMaxBoostPeriod(max_boost_period)
+                if set_boost_ratio:
+                    dominfo.setBoostRatio(boost_ratio)
                 self.managed_config_save(dominfo)
             return rc
         except Exception, ex:
diff -r 9dfd98cac0cc tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Wed Dec 17 16:05:11 2008 +0900
+++ b/tools/python/xen/xend/XendDomainInfo.py   Wed Dec 17 16:05:42 2008 +0900
@@ -465,7 +465,9 @@ class XendDomainInfo:
                 if xennode.xenschedinfo() == 'credit':
                     xendomains.domain_sched_credit_set(self.getDomid(),
                                                        self.getWeight(),
-                                                       self.getCap())
+                                                       self.getCap(),
+                                                       self.getMaxBoostPeriod(),
+                                                       self.getBoostRatio())
             except:
                 log.exception('VM start failed')
                 self.destroy()
@@ -1618,6 +1620,18 @@ class XendDomainInfo:
     def setWeight(self, cpu_weight):
         self.info['vcpus_params']['weight'] = cpu_weight
 
+    def getMaxBoostPeriod(self):
+        return self.info['vcpus_params']['max_boost_period']
+
+    def setMaxBoostPeriod(self, cpu_max_boost_period):
+        self.info['vcpus_params']['max_boost_period'] = cpu_max_boost_period
+
+    def getBoostRatio(self):
+        return self.info['vcpus_params']['boost_ratio']
+
+    def setBoostRatio(self, cpu_boost_ratio):
+        self.info['vcpus_params']['boost_ratio'] = cpu_boost_ratio
+
     def getRestartCount(self):
         return self._readVm('xend/restart_count')
 
diff -r 9dfd98cac0cc tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Wed Dec 17 16:05:11 2008 +0900
+++ b/tools/python/xen/xm/main.py       Wed Dec 17 16:05:42 2008 +0900
@@ -150,7 +150,7 @@ SUBCOMMAND_HELP = {
     'log'         : ('', 'Print Xend log'),
     'rename'      : ('<Domain> <NewDomainName>', 'Rename a domain.'),
     'sched-sedf'  : ('<Domain> [options]', 'Get/set EDF parameters.'),
-    'sched-credit': ('[-d <Domain> [-w[=WEIGHT]|-c[=CAP]]]',
+    'sched-credit': ('[-d <Domain> [-w[=WEIGHT]|-c[=CAP]|-m[=MAXBOOSTPERIOD]|-r[=BOOSTRATIO]]]',
                      'Get/set credit scheduler parameters.'),
     'sysrq'       : ('<Domain> <letter>', 'Send a sysrq to a domain.'),
     'debug-keys'  : ('<Keys>', 'Send debug keys to Xen.'),
@@ -240,6 +240,8 @@ SUBCOMMAND_OPTIONS = {
        ('-d DOMAIN', '--domain=DOMAIN', 'Domain to modify'),
        ('-w WEIGHT', '--weight=WEIGHT', 'Weight (int)'),
        ('-c CAP',    '--cap=CAP',       'Cap (int)'),
+       ('-m MAXBOOSTPERIOD', '--maxboostperiod=MAXBOOSTPERIOD', 'Upper limit of boost period (ms)'),
+       ('-r BOOSTRATIO', '--ratio=BOOSTRATIO', 'Boost ratio per cpu (int)'),
     ),
     'list': (
        ('-l', '--long',         'Output all VM details in SXP'),
@@ -1578,8 +1580,8 @@ def xm_sched_credit(args):
     check_sched_type('credit')
 
     try:
-        opts, params = getopt.getopt(args, "d:w:c:",
-            ["domain=", "weight=", "cap="])
+        opts, params = getopt.getopt(args, "d:w:c:m:r:",
+            ["domain=", "weight=", "cap=", "maxboostperiod=", "ratio="])
     except getopt.GetoptError, opterr:
         err(opterr)
         usage('sched-credit')
@@ -1587,6 +1589,8 @@ def xm_sched_credit(args):
     domid = None
     weight = None
     cap = None
+    max_boost_period = None
+    boost_ratio = None
 
     for o, a in opts:
         if o in ["-d", "--domain"]:
@@ -1594,18 +1598,22 @@ def xm_sched_credit(args):
         elif o in ["-w", "--weight"]:
             weight = int(a)
         elif o in ["-c", "--cap"]:
-            cap = int(a);
+            cap = int(a)
+        elif o in ["-m", "--maxboostperiod"]:
+            max_boost_period = int(a)
+        elif o in ["-r", "--ratio"]:
+            boost_ratio = int(a)
 
     doms = filter(lambda x : domid_match(domid, x),
                   [parse_doms_info(dom)
                   for dom in getDomains(None, 'all')])
 
-    if weight is None and cap is None:
+    if weight is None and cap is None and max_boost_period is None and boost_ratio is None:
         if domid is not None and doms == []: 
             err("Domain '%s' does not exist." % domid)
             usage('sched-credit')
         # print header if we aren't setting any parameters
-        print '%-33s %4s %6s %4s' % ('Name','ID','Weight','Cap')
+        print '%-33s %4s %6s %4s %8s %5s' % ('Name','ID','Weight','Cap','Max(ms)','Ratio')
         
         for d in doms:
             try:
@@ -1618,16 +1626,18 @@ def xm_sched_credit(args):
             except xmlrpclib.Fault:
                 pass
 
-            if 'weight' not in info or 'cap' not in info:
+            if 'weight' not in info or 'cap' not in info or 'max_boost_period' not in info or 'boost_ratio' not in info:
                 # domain does not support sched-credit?
-                info = {'weight': -1, 'cap': -1}
+                info = {'weight': -1, 'cap': -1, 'max_boost_period':-1, 'boost_ratio':-1}
 
             info['weight'] = int(info['weight'])
             info['cap']    = int(info['cap'])
+            info['max_boost_period'] = int(info['max_boost_period'])
+            info['boost_ratio'] = int(info['boost_ratio'])
             
             info['name']  = d['name']
             info['domid'] = str(d['domid'])
-            print( ("%(name)-32s %(domid)5s %(weight)6d %(cap)4d") % info)
+            print( ("%(name)-32s %(domid)5s %(weight)6d %(cap)4d 
%(max_boost_period)8d %(boost_ratio)5d") % info)
     else:
         if domid is None:
             # place holder for system-wide scheduler parameters
@@ -1644,6 +1654,14 @@ def xm_sched_credit(args):
                     get_single_vm(domid),
                     "cap",
                     cap)
+                server.xenapi.VM.add_to_VCPUs_params_live(
+                    get_single_vm(domid),
+                    "max_boost_period",
+                    max_boost_period)
+                server.xenapi.VM.add_to_VCPUs_params_live(
+                    get_single_vm(domid),
+                    "boost_ratio",
+                    boost_ratio)
             else:
                 server.xenapi.VM.add_to_VCPUs_params(
                     get_single_vm(domid),
@@ -1653,8 +1671,16 @@ def xm_sched_credit(args):
                     get_single_vm(domid),
                     "cap",
                     cap)
+                server.xenapi.VM.add_to_VCPUs_params(
+                    get_single_vm(domid),
+                    "max_boost_period",
+                    max_boost_period)
+                server.xenapi.VM.add_to_VCPUs_params(
+                    get_single_vm(domid),
+                    "boost_ratio",
+                    boost_ratio)
         else:
-            result = server.xend.domain.sched_credit_set(domid, weight, cap)
+            result = server.xend.domain.sched_credit_set(domid, weight, cap, max_boost_period, boost_ratio)
             if result != 0:
                 err(str(result))
 
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel