WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-4.1-testing] credit2: remove two nested functions, replacing them with static ones
From: Xen patchbot-4.1-testing <patchbot@xxxxxxx>
Date: Thu, 28 Apr 2011 11:35:12 +0100
Delivery-date: Thu, 28 Apr 2011 03:35:33 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1303907867 -3600
# Node ID e47171dcc56828ef10eaffe315defa33e3c550b4
# Parent  88c46006b8035af73ba904901f45c3602d06be36
credit2: remove two nested functions, replacing them with static ones
and passing the outer scope's variables in explicitly.
This is needed to compile xen with clang.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
xen-unstable changeset:   22982:591c459ee00a
xen-unstable date:        Mon Mar 07 11:21:11 2011 +0000
---
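
Before the diff itself, a note on the pattern it applies: every variable that the nested consider() and migrate() captured from balance_load()'s stack frame either moves into an explicit state struct (balance_state_t) or becomes an extra parameter (ops, now). A minimal sketch of the same transformation, with hypothetical names rather than the scheduler's real ones (the "before" half is GNU C only, so this file as a whole builds with gcc but not clang):

#include <limits.h>

/* Before: consider() is a nested function capturing `best` from the
 * enclosing scope -- valid GNU C, rejected by clang. */
void balance_before(const int *items, int n)
{
    int best = INT_MAX;

    void consider(int candidate)        /* nested function: GNU C extension */
    {
        if ( candidate < best )
            best = candidate;
    }

    for ( int i = 0; i < n; i++ )
        consider(items[i]);
}

/* After: the captured state moves into a struct that is passed in
 * explicitly, so plain ISO C suffices. */
typedef struct { int best; } state_t;

static void consider2(state_t *st, int candidate)
{
    if ( candidate < st->best )
        st->best = candidate;
}

void balance_after(const int *items, int n)
{
    state_t st = { .best = INT_MAX };

    for ( int i = 0; i < n; i++ )
        consider2(&st, items[i]);
}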


diff -r 88c46006b803 -r e47171dcc568 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c        Wed Apr 27 13:30:41 2011 +0100
+++ b/xen/common/sched_credit2.c        Wed Apr 27 13:37:47 2011 +0100
@@ -1108,86 +1108,96 @@
     return new_cpu;
 }
 
+/* Working state of the load-balancing algorithm */
+typedef struct {
+    /* NB: Modified by consider() */
+    s_time_t load_delta;
+    struct csched_vcpu * best_push_svc, *best_pull_svc;
+    /* NB: Read by consider() */
+    struct csched_runqueue_data *lrqd;
+    struct csched_runqueue_data *orqd;                  
+} balance_state_t;
+
+static void consider(balance_state_t *st, 
+                     struct csched_vcpu *push_svc,
+                     struct csched_vcpu *pull_svc)
+{
+    s_time_t l_load, o_load, delta;
+
+    l_load = st->lrqd->b_avgload;
+    o_load = st->orqd->b_avgload;
+    if ( push_svc )
+    {
+        /* What happens to the load on both if we push? */
+        l_load -= push_svc->avgload;
+        o_load += push_svc->avgload;
+    }
+    if ( pull_svc )
+    {
+        /* What happens to the load on both if we pull? */
+        l_load += pull_svc->avgload;
+        o_load -= pull_svc->avgload;
+    }
+
+    delta = l_load - o_load;
+    if ( delta < 0 )
+        delta = -delta;
+
+    if ( delta < st->load_delta )
+    {
+        st->load_delta = delta;
+        st->best_push_svc=push_svc;
+        st->best_pull_svc=pull_svc;
+    }
+}
+
+
+void migrate(const struct scheduler *ops,
+             struct csched_vcpu *svc, 
+             struct csched_runqueue_data *trqd, 
+             s_time_t now)
+{
+    if ( test_bit(__CSFLAG_scheduled, &svc->flags) )
+    {
+        d2printk("d%dv%d %d-%d a\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id,
+                 svc->rqd->id, trqd->id);
+        /* It's running; mark it to migrate. */
+        svc->migrate_rqd = trqd;
+        set_bit(_VPF_migrating, &svc->vcpu->pause_flags);
+        set_bit(__CSFLAG_runq_migrate_request, &svc->flags);
+    }
+    else
+    {
+        int on_runq=0;
+        /* It's not running; just move it */
+        d2printk("d%dv%d %d-%d i\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id,
+                 svc->rqd->id, trqd->id);
+        if ( __vcpu_on_runq(svc) )
+        {
+            __runq_remove(svc);
+            update_load(ops, svc->rqd, svc, -1, now);
+            on_runq=1;
+        }
+        __runq_deassign(svc);
+        svc->vcpu->processor = first_cpu(trqd->active);
+        __runq_assign(svc, trqd);
+        if ( on_runq )
+        {
+            update_load(ops, svc->rqd, svc, 1, now);
+            runq_insert(ops, svc->vcpu->processor, svc);
+            runq_tickle(ops, svc->vcpu->processor, svc, now);
+        }
+    }
+}
+
+
 static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
 {
     struct csched_private *prv = CSCHED_PRIV(ops);
     int i, max_delta_rqi = -1;
     struct list_head *push_iter, *pull_iter;
 
-    /* NB: Modified by consider() */
-    s_time_t load_delta;
-    struct csched_vcpu * best_push_svc=NULL, *best_pull_svc=NULL;
-    /* NB: Read by consider() */
-    struct csched_runqueue_data *lrqd;
-    struct csched_runqueue_data *orqd;
-    
-    void consider(struct csched_vcpu *push_svc,
-                  struct csched_vcpu *pull_svc)
-    {
-        s_time_t l_load, o_load, delta;
-
-        l_load = lrqd->b_avgload;
-        o_load = orqd->b_avgload;
-        if ( push_svc )
-        {
-            /* What happens to the load on both if we push? */
-            l_load -= push_svc->avgload;
-            o_load += push_svc->avgload;
-        }
-        if ( pull_svc )
-        {
-            /* What happens to the load on both if we pull? */
-            l_load += pull_svc->avgload;
-            o_load -= pull_svc->avgload;
-        }
-        
-        delta = l_load - o_load;
-        if ( delta < 0 )
-            delta = -delta;
-
-        if ( delta < load_delta )
-        {
-            load_delta = delta;
-            best_push_svc=push_svc;
-            best_pull_svc=pull_svc;
-        }
-    }
-
-    void migrate(struct csched_vcpu *svc, struct csched_runqueue_data *trqd)
-    {
-        if ( test_bit(__CSFLAG_scheduled, &svc->flags) )
-        {
-            d2printk("d%dv%d %d-%d a\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id,
-                     svc->rqd->id, trqd->id);
-            /* It's running; mark it to migrate. */
-            svc->migrate_rqd = trqd;
-            set_bit(_VPF_migrating, &svc->vcpu->pause_flags);
-            set_bit(__CSFLAG_runq_migrate_request, &svc->flags);
-        }
-        else
-        {
-            int on_runq=0;
-            /* It's not running; just move it */
-            d2printk("d%dv%d %d-%d i\n", svc->vcpu->domain->domain_id, svc->vcpu->vcpu_id,
-                     svc->rqd->id, trqd->id);
-            if ( __vcpu_on_runq(svc) )
-            {
-                __runq_remove(svc);
-                update_load(ops, svc->rqd, svc, -1, now);
-                on_runq=1;
-            }
-            __runq_deassign(svc);
-            svc->vcpu->processor = first_cpu(trqd->active);
-            __runq_assign(svc, trqd);
-            if ( on_runq )
-            {
-                update_load(ops, svc->rqd, svc, 1, now);
-                runq_insert(ops, svc->vcpu->processor, svc);
-                runq_tickle(ops, svc->vcpu->processor, svc, now);
-            }
-        }
-    }
-                  
+    balance_state_t st = { .best_push_svc = NULL, .best_pull_svc = NULL };
     
     /*
      * Basic algorithm: Push, pull, or swap.
@@ -1199,39 +1209,39 @@
     /* Locking:
      * - pcpu schedule lock should be already locked
      */
-    lrqd = RQD(ops, cpu);
+    st.lrqd = RQD(ops, cpu);
 
-    __update_runq_load(ops, lrqd, 0, now);
+    __update_runq_load(ops, st.lrqd, 0, now);
 
 retry:
     if ( !spin_trylock(&prv->lock) )
         return;
 
-    load_delta = 0;
+    st.load_delta = 0;
 
     for_each_cpu_mask(i, prv->active_queues)
     {
         s_time_t delta;
         
-        orqd = prv->rqd + i;
+        st.orqd = prv->rqd + i;
 
-        if ( orqd == lrqd
-             || !spin_trylock(&orqd->lock) )
+        if ( st.orqd == st.lrqd
+             || !spin_trylock(&st.orqd->lock) )
             continue;
 
-        __update_runq_load(ops, orqd, 0, now);
+        __update_runq_load(ops, st.orqd, 0, now);
     
-        delta = lrqd->b_avgload - orqd->b_avgload;
+        delta = st.lrqd->b_avgload - st.orqd->b_avgload;
         if ( delta < 0 )
             delta = -delta;
 
-        if ( delta > load_delta )
+        if ( delta > st.load_delta )
         {
-            load_delta = delta;
+            st.load_delta = delta;
             max_delta_rqi = i;
         }
 
-        spin_unlock(&orqd->lock);
+        spin_unlock(&st.orqd->lock);
     }
 
     /* Minimize holding the big lock */
@@ -1244,23 +1254,23 @@
         int cpus_max;
 
         
-        load_max = lrqd->b_avgload;
-        if ( orqd->b_avgload > load_max )
-            load_max = orqd->b_avgload;
+        load_max = st.lrqd->b_avgload;
+        if ( st.orqd->b_avgload > load_max )
+            load_max = st.orqd->b_avgload;
 
-        cpus_max=cpus_weight(lrqd->active);
-        if ( cpus_weight(orqd->active) > cpus_max )
-            cpus_max = cpus_weight(orqd->active);
+        cpus_max=cpus_weight(st.lrqd->active);
+        if ( cpus_weight(st.orqd->active) > cpus_max )
+            cpus_max = cpus_weight(st.orqd->active);
 
         /* If we're under 100% capacaty, only shift if load difference
          * is > 1.  otherwise, shift if under 12.5% */
         if ( load_max < (1ULL<<(prv->load_window_shift))*cpus_max )
         {
-            if ( load_delta < (1ULL<<(prv->load_window_shift+opt_underload_balance_tolerance) ) )
+            if ( st.load_delta < (1ULL<<(prv->load_window_shift+opt_underload_balance_tolerance) ) )
                  goto out;
         }
         else
-            if ( load_delta < (1ULL<<(prv->load_window_shift+opt_overload_balance_tolerance)) )
+            if ( st.load_delta < (1ULL<<(prv->load_window_shift+opt_overload_balance_tolerance)) )
                 goto out;
     }
              
@@ -1268,19 +1278,19 @@
      * meantime, try the process over again.  This can't deadlock
      * because if it doesn't get any other rqd locks, it will simply
      * give up and return. */
-    orqd = prv->rqd + max_delta_rqi;
-    if ( !spin_trylock(&orqd->lock) )
+    st.orqd = prv->rqd + max_delta_rqi;
+    if ( !spin_trylock(&st.orqd->lock) )
         goto retry;
 
     /* Make sure the runqueue hasn't been deactivated since we released prv->lock */
-    if ( unlikely(orqd->id < 0) )
+    if ( unlikely(st.orqd->id < 0) )
         goto out_up;
 
     /* Look for "swap" which gives the best load average
      * FIXME: O(n^2)! */
 
     /* Reuse load delta (as we're trying to minimize it) */
-    list_for_each( push_iter, &lrqd->svc )
+    list_for_each( push_iter, &st.lrqd->svc )
     {
         int inner_load_updated = 0;
         struct csched_vcpu * push_svc = list_entry(push_iter, struct csched_vcpu, rqd_elem);
@@ -1291,7 +1301,7 @@
         if ( test_bit(__CSFLAG_runq_migrate_request, &push_svc->flags) )
             continue;
 
-        list_for_each( pull_iter, &orqd->svc )
+        list_for_each( pull_iter, &st.orqd->svc )
         {
             struct csched_vcpu * pull_svc = list_entry(pull_iter, struct csched_vcpu, rqd_elem);
             
@@ -1304,16 +1314,16 @@
             if ( test_bit(__CSFLAG_runq_migrate_request, &pull_svc->flags) )
                 continue;
 
-            consider(push_svc, pull_svc);
+            consider(&st, push_svc, pull_svc);
         }
 
         inner_load_updated = 1;
 
         /* Consider push only */
-        consider(push_svc, NULL);
+        consider(&st, push_svc, NULL);
     }
 
-    list_for_each( pull_iter, &orqd->svc )
+    list_for_each( pull_iter, &st.orqd->svc )
     {
         struct csched_vcpu * pull_svc = list_entry(pull_iter, struct csched_vcpu, rqd_elem);
         
@@ -1322,17 +1332,17 @@
             continue;
 
         /* Consider pull only */
-        consider(NULL, pull_svc);
+        consider(&st, NULL, pull_svc);
     }
 
     /* OK, now we have some candidates; do the moving */
-    if ( best_push_svc )
-        migrate(best_push_svc, orqd);
-    if ( best_pull_svc )
-        migrate(best_pull_svc, lrqd);
+    if ( st.best_push_svc )
+        migrate(ops, st.best_push_svc, st.orqd, now);
+    if ( st.best_pull_svc )
+        migrate(ops, st.best_pull_svc, st.lrqd, now);
 
 out_up:
-    spin_unlock(&orqd->lock);
+    spin_unlock(&st.orqd->lock);
 
 out:
     return;
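
For context on the compile failure the commit message cites: nested functions are a GNU C extension that clang deliberately does not implement, so gcc accepts the construct while clang stops with "function definition is not allowed here". A standalone reproduction, not taken from the Xen tree:

/* nested.c: `gcc -c nested.c` succeeds; `clang -c nested.c` fails with
 * "error: function definition is not allowed here". */
int outer(int x)
{
    int inner(int y) { return x + y; }  /* nested function capturing x */
    return inner(1);
}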

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
