To: George Dunlap <George.Dunlap@xxxxxxxxxxxxx>, xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: Re: [Xen-devel] [RFC][PATCH 2/4] sched: change the handling of credits over upper bound
From: NISHIGUCHI Naoki <nisiguti@xxxxxxxxxxxxxx>
Date: Fri, 05 Dec 2008 19:09:45 +0900
Cc: Ian.Pratt@xxxxxxxxxxxxx, disheng.su@xxxxxxxxx, keir.fraser@xxxxxxxxxxxxx
Delivery-date: Fri, 05 Dec 2008 02:10:23 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <4938FCFF.5060503@xxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <4938FC06.9080008@xxxxxxxxxxxxxx> <4938FCFF.5060503@xxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Thunderbird 2.0.0.18 (Windows/20081105)
I forgot to attach.

NISHIGUCHI Naoki wrote:
> With this patch applied, the credit scheduler no longer resets a vcpu's
> credit to 0 when the credit would exceed the upper bound; the credit is
> clamped to the bound instead. This prevents a vcpu from failing to
> become active again.
> 
> Best regards,
> Naoki Nishiguchi
> 
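
To summarise the behavioural change for reviewers, here is a minimal
standalone sketch (not part of the patch; the constant values are
assumptions based on the defaults in sched_credit.c):

/* Sketch only: how the upper bound on credit is handled in
 * csched_acct() with this patch.  Previously a vcpu whose credit grew
 * past the cap was parked and its credit reset to 0; now the credit is
 * clamped to the cap instead, so only one tick's worth of credit needs
 * to be consumed before the vcpu can become active again. */
#define CSCHED_CREDITS_PER_TICK     100  /* assumed default */
#define CSCHED_TICKS_PER_TSLICE       3  /* assumed default */
#define CSCHED_CREDITS_PER_TSLICE   (CSCHED_CREDITS_PER_TICK * \
                                     CSCHED_TICKS_PER_TSLICE)

static int clamp_credit(int credit)
{
    if ( credit > CSCHED_CREDITS_PER_TSLICE )
        credit = CSCHED_CREDITS_PER_TSLICE;  /* was: credit = 0; */
    return credit;
}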

diff -r a00eb6595d3c xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Sat Nov 29 09:07:52 2008 +0000
+++ b/xen/common/sched_credit.c Fri Dec 05 17:49:44 2008 +0900
@@ -196,6 +196,7 @@ struct csched_vcpu {
 struct csched_vcpu {
     struct list_head runq_elem;
     struct list_head active_vcpu_elem;
+    struct list_head inactive_vcpu_elem;
     struct csched_dom *sdom;
     struct vcpu *vcpu;
     atomic_t credit;
@@ -231,6 +232,7 @@ struct csched_private {
 struct csched_private {
     spinlock_t lock;
     struct list_head active_sdom;
+    struct list_head inactive_vcpu;
     uint32_t ncpus;
     unsigned int master;
     cpumask_t idlers;
@@ -484,12 +486,9 @@ csched_cpu_pick(struct vcpu *vc)
 }
 
 static inline void
-__csched_vcpu_acct_start(struct csched_vcpu *svc)
+__csched_vcpu_acct_start_locked(struct csched_vcpu *svc)
 {
     struct csched_dom * const sdom = svc->sdom;
-    unsigned long flags;
-
-    spin_lock_irqsave(&csched_priv.lock, flags);
 
     if ( list_empty(&svc->active_vcpu_elem) )
     {
@@ -498,13 +497,22 @@ __csched_vcpu_acct_start(struct csched_v
 
         sdom->active_vcpu_count++;
         list_add(&svc->active_vcpu_elem, &sdom->active_vcpu);
+        list_del_init(&svc->inactive_vcpu_elem);
         if ( list_empty(&sdom->active_sdom_elem) )
         {
             list_add(&sdom->active_sdom_elem, &csched_priv.active_sdom);
             csched_priv.weight += sdom->weight;
         }
     }
-
+}
+
+static inline void
+__csched_vcpu_acct_start(struct csched_vcpu *svc)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&csched_priv.lock, flags);
+    __csched_vcpu_acct_start_locked(svc);
     spin_unlock_irqrestore(&csched_priv.lock, flags);
 }
 
@@ -520,6 +528,7 @@ __csched_vcpu_acct_stop_locked(struct cs
 
     sdom->active_vcpu_count--;
     list_del_init(&svc->active_vcpu_elem);
+    list_add(&svc->inactive_vcpu_elem, &csched_priv.inactive_vcpu);
     if ( list_empty(&sdom->active_vcpu) )
     {
         BUG_ON( csched_priv.weight < sdom->weight );
@@ -586,6 +595,7 @@ csched_vcpu_init(struct vcpu *vc)
 
     INIT_LIST_HEAD(&svc->runq_elem);
     INIT_LIST_HEAD(&svc->active_vcpu_elem);
+    INIT_LIST_HEAD(&svc->inactive_vcpu_elem);
     svc->sdom = sdom;
     svc->vcpu = vc;
     atomic_set(&svc->credit, 0);
@@ -621,6 +631,9 @@ csched_vcpu_destroy(struct vcpu *vc)
 
     if ( !list_empty(&svc->active_vcpu_elem) )
         __csched_vcpu_acct_stop_locked(svc);
+
+    if ( !list_empty(&svc->inactive_vcpu_elem) )
+        list_del_init(&svc->inactive_vcpu_elem);
 
     spin_unlock_irqrestore(&csched_priv.lock, flags);
 
@@ -839,6 +852,18 @@ csched_acct(void)
 
     spin_lock_irqsave(&csched_priv.lock, flags);
 
+    /* Add a vcpu to the active list once it has consumed a tick's worth of credit. */
+    list_for_each_safe( iter_vcpu, next_vcpu, &csched_priv.inactive_vcpu )
+    {
+        svc = list_entry(iter_vcpu, struct csched_vcpu, inactive_vcpu_elem);
+
+        if ( atomic_read(&svc->credit)
+             <= CSCHED_CREDITS_PER_TICK * (CSCHED_TICKS_PER_ACCT - 1) )
+        {
+            __csched_vcpu_acct_start_locked(svc);
+        }
+    }
+
     weight_total = csched_priv.weight;
     credit_total = csched_priv.credit;
 
@@ -995,7 +1020,7 @@ csched_acct(void)
                 if ( credit > CSCHED_CREDITS_PER_TSLICE )
                 {
                     __csched_vcpu_acct_stop_locked(svc);
-                    credit = 0;
+                    credit = CSCHED_CREDITS_PER_TSLICE;
                     atomic_set(&svc->credit, credit);
                 }
             }
@@ -1340,6 +1365,17 @@ csched_dump(void)
             csched_dump_vcpu(svc);
         }
     }
+
+    printk("inactive vcpus:\n");
+    loop = 0;
+    list_for_each( iter_svc, &csched_priv.inactive_vcpu )
+    {
+        struct csched_vcpu *svc;
+        svc = list_entry(iter_svc, struct csched_vcpu, inactive_vcpu_elem);
+
+        printk("\t%3d: ", ++loop);
+        csched_dump_vcpu(svc);
+    }
 }
 
 static void
@@ -1347,6 +1383,7 @@ csched_init(void)
 {
     spin_lock_init(&csched_priv.lock);
     INIT_LIST_HEAD(&csched_priv.active_sdom);
+    INIT_LIST_HEAD(&csched_priv.inactive_vcpu);
     csched_priv.ncpus = 0;
     csched_priv.master = UINT_MAX;
     cpus_clear(csched_priv.idlers);
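
In case it helps review, my reading of the new re-activation check in
csched_acct(), as a standalone sketch (the constant values below are
assumptions based on the defaults in sched_credit.c, not part of the
patch): a vcpu parked at the cap is moved back onto the active list
once at least one tick's worth of credit has been consumed.

/* Sketch only: the condition checked on the inactive_vcpu list above.
 * With the assumed defaults, a vcpu parked at 300 credits becomes
 * active again once its credit has dropped to 200 or below. */
#include <assert.h>

#define CSCHED_CREDITS_PER_TICK  100  /* assumed default */
#define CSCHED_TICKS_PER_ACCT      3  /* assumed default */

static int should_reactivate(int credit)
{
    return credit <= CSCHED_CREDITS_PER_TICK * (CSCHED_TICKS_PER_ACCT - 1);
}

int main(void)
{
    assert( !should_reactivate(300) );  /* just parked at the cap */
    assert( !should_reactivate(201) );  /* less than one tick consumed */
    assert(  should_reactivate(200) );  /* one full tick consumed */
    return 0;
}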
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel