To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Fix cpu selection at the time of vCPU allocation
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 07 Mar 2009 06:35:21 -0800
Delivery-date: Sat, 07 Mar 2009 06:35:47 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1236365649 0
# Node ID a44751edcb7637103258d043e92490d561aec186
# Parent  3fd8f9b349413c5a04d0e3f93e43463f1021c9dc
Fix cpu selection at the time of vCPU allocation

After cpu online/offline operations, the set bits in cpu_online_map may
not be contiguous, so the old "(processor + 1) % num_online_cpus()"
arithmetic can land on an offline cpu. Use cycle_cpu() to pick the next
online cpu from the map instead (a standalone sketch of this follows the
diffstat below).

Signed-off-by: Xiaowei Yang <xiaowei.yang@xxxxxxxxx>
---
 xen/common/domctl.c       |    2 +-
 xen/common/sched_credit.c |   15 +++------------
 xen/include/xen/cpumask.h |   17 +++++++++++++++--
 3 files changed, 19 insertions(+), 15 deletions(-)
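
For illustration, a minimal standalone sketch of the wrap-around selection
that cycle_cpu() provides. This is not Xen code: a hypothetical 64-bit mask
and the helpers next_bit()/cycle_bit() stand in for cpu_online_map and the
cpumask macros.

#include <stdio.h>

#define NBITS 64

/* Lowest set bit at or above 'start', or NBITS if there is none. */
static int next_bit(unsigned long mask, int start)
{
    int i;
    for (i = start; i < NBITS; i++)
        if (mask & (1UL << i))
            return i;
    return NBITS;
}

/* Next set bit strictly after 'cpu', wrapping round to the first set bit. */
static int cycle_bit(unsigned long mask, int cpu)
{
    int nxt = next_bit(mask, cpu + 1);
    if (nxt == NBITS)
        nxt = next_bit(mask, 0);
    return nxt;
}

int main(void)
{
    /* CPUs 0, 2 and 5 online: a map left non-contiguous by cpu offlining. */
    unsigned long online = (1UL << 0) | (1UL << 2) | (1UL << 5);

    /* The old arithmetic (0 + 1) % num_online_cpus() == 1 picks CPU 1,
     * which is offline; cycling over the mask yields CPU 2 instead. */
    printf("after 0 -> %d\n", cycle_bit(online, 0));  /* 2 */
    printf("after 5 -> %d\n", cycle_bit(online, 5));  /* 0 (wraps) */
    return 0;
}

The cycle_cpu() macro added to xen/include/xen/cpumask.h below applies the
same idea to cpumask_t, returning NR_CPUS only when the mask is empty.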

diff -r 3fd8f9b34941 -r a44751edcb76 xen/common/domctl.c
--- a/xen/common/domctl.c       Fri Mar 06 14:28:27 2009 +0000
+++ b/xen/common/domctl.c       Fri Mar 06 18:54:09 2009 +0000
@@ -433,7 +433,7 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
 
             cpu = (i == 0) ?
                 default_vcpu0_location() :
-                (d->vcpu[i-1]->processor + 1) % num_online_cpus();
+                cycle_cpu(d->vcpu[i-1]->processor, cpu_online_map);
 
             if ( alloc_vcpu(d, i, cpu) == NULL )
                 goto maxvcpu_out;
diff -r 3fd8f9b34941 -r a44751edcb76 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Fri Mar 06 14:28:27 2009 +0000
+++ b/xen/common/sched_credit.c Fri Mar 06 18:54:09 2009 +0000
@@ -250,15 +250,6 @@ static void csched_tick(void *_cpu);
 static void csched_tick(void *_cpu);
 
 static inline int
-__cycle_cpu(int cpu, const cpumask_t *mask)
-{
-    int nxt = next_cpu(cpu, *mask);
-    if (nxt == NR_CPUS)
-        nxt = first_cpu(*mask);
-    return nxt;
-}
-
-static inline int
 __vcpu_on_runq(struct csched_vcpu *svc)
 {
     return !list_empty(&svc->runq_elem);
@@ -428,7 +419,7 @@ csched_cpu_pick(struct vcpu *vc)
     cpus_and(cpus, cpu_online_map, vc->cpu_affinity);
     cpu = cpu_isset(vc->processor, cpus)
             ? vc->processor
-            : __cycle_cpu(vc->processor, &cpus);
+            : cycle_cpu(vc->processor, cpus);
     ASSERT( !cpus_empty(cpus) && cpu_isset(cpu, cpus) );
 
     /*
@@ -454,7 +445,7 @@ csched_cpu_pick(struct vcpu *vc)
         cpumask_t nxt_idlers;
         int nxt;
 
-        nxt = __cycle_cpu(cpu, &cpus);
+        nxt = cycle_cpu(cpu, cpus);
 
         if ( cpu_isset(cpu, cpu_core_map[nxt]) )
         {
@@ -1128,7 +1119,7 @@ csched_load_balance(int cpu, struct csch
 
     while ( !cpus_empty(workers) )
     {
-        peer_cpu = __cycle_cpu(peer_cpu, &workers);
+        peer_cpu = cycle_cpu(peer_cpu, workers);
         cpu_clear(peer_cpu, workers);
 
         /*
diff -r 3fd8f9b34941 -r a44751edcb76 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Fri Mar 06 14:28:27 2009 +0000
+++ b/xen/include/xen/cpumask.h Fri Mar 06 18:54:09 2009 +0000
@@ -38,6 +38,8 @@
  *
  * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
  * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
+ * int last_cpu(mask)                  Number highest set bit, or NR_CPUS
+ * int cycle_cpu(cpu, mask)            Next cpu cycling from 'cpu', or NR_CPUS
  *
  * cpumask_t cpumask_of_cpu(cpu)       Return cpumask with bit 'cpu' set
  * CPU_MASK_ALL                                Initializer - all bits set
@@ -225,10 +227,21 @@ static inline int __next_cpu(int n, cons
 #define last_cpu(src) __last_cpu(&(src), NR_CPUS)
 static inline int __last_cpu(const cpumask_t *srcp, int nbits)
 {
-       int cpu, pcpu = NR_CPUS;
-       for (cpu = first_cpu(*srcp); cpu < NR_CPUS; cpu = next_cpu(cpu, *srcp))
+       int cpu, pcpu = nbits;
+       for (cpu = __first_cpu(srcp, nbits);
+            cpu < nbits;
+            cpu = __next_cpu(cpu, srcp, nbits))
                pcpu = cpu;
        return pcpu;
+}
+
+#define cycle_cpu(n, src) __cycle_cpu((n), &(src), NR_CPUS)
+static inline int __cycle_cpu(int n, const cpumask_t *srcp, int nbits)
+{
+    int nxt = __next_cpu(n, srcp, nbits);
+    if (nxt == nbits)
+        nxt = __first_cpu(srcp, nbits);
+    return nxt;
 }
 
 #define cpumask_of_cpu(cpu)                                            \
