To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] eliminate direct assignments of CPU masks
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Tue, 25 Oct 2011 01:55:11 +0100
Delivery-date: Mon, 24 Oct 2011 17:55:42 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1319181669 -7200
# Node ID 07d303ff27576d803bb1ca92f31314426133da54
# Parent  1a4223c62ee7868e52be7640543af88483d1f5f3
eliminate direct assignments of CPU masks

Use cpumask_copy() instead of direct variable assignments for copying
CPU masks. While direct assignments are not a problem when both sides
are variables actually defined as cpumask_t (except for possibly
copying *much* more than would actually need to be copied), they must
not happen when the source variable is of type cpumask_var_t (which
may have less space allocated to it than a full cpumask_t). Eliminate
as many such assignments as possible (in several cases it's even
possible to collapse two operations [copy, then clear one bit] into
one [cpumask_andnot()]), and thus pave the way for reducing the
allocation size in alloc_cpumask_var().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---
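
For readers skimming the hunks below, here is a minimal standalone sketch
(plain C, not Xen code; every my_*/MY_* name is an illustrative stand-in for
the corresponding cpumask helper, not Xen's actual API) of the pattern the
patch applies: copy only the bits actually in use rather than assigning whole
cpumask_t structures, and collapse "copy, then clear one bit" into a single
andnot.

/*
 * Standalone sketch (not Xen code).  A struct assignment always copies
 * sizeof(struct) bytes, while the source may be a heap allocation that
 * only holds the bits actually in use -- hence the helpers below.
 */
#include <stdio.h>
#include <string.h>

#define MY_NR_CPUS    16                         /* bits actually in use  */
#define MY_MASK_LONGS ((MY_NR_CPUS + 63) / 64)   /* longs needed for them */

typedef struct { unsigned long bits[MY_MASK_LONGS]; } my_cpumask_t;

/* copy only the live bits -- the analogue of cpumask_copy() */
static void my_cpumask_copy(my_cpumask_t *dst, const my_cpumask_t *src)
{
    memcpy(dst->bits, src->bits, sizeof(dst->bits));
}

/* dst = src & ~excl -- collapses "copy, then clear one bit" into one op */
static void my_cpumask_andnot(my_cpumask_t *dst, const my_cpumask_t *src,
                              const my_cpumask_t *excl)
{
    for (unsigned int i = 0; i < MY_MASK_LONGS; i++)
        dst->bits[i] = src->bits[i] & ~excl->bits[i];
}

/* mask with exactly one CPU set -- the analogue of cpumask_of() */
static my_cpumask_t my_cpumask_of(unsigned int cpu)
{
    my_cpumask_t m = { { 0 } };
    m.bits[cpu / 64] |= 1UL << (cpu % 64);
    return m;
}

int main(void)
{
    my_cpumask_t online = { { 0xffff } };          /* CPUs 0-15 online     */
    my_cpumask_t self = my_cpumask_of(3);          /* pretend we are CPU 3 */
    my_cpumask_t oldway, newway;

    /* old pattern: whole-mask copy, then clear our own bit (two steps) */
    my_cpumask_copy(&oldway, &online);
    oldway.bits[0] &= ~(1UL << 3);

    /* new pattern used throughout the patch: one andnot, no struct copy */
    my_cpumask_andnot(&newway, &online, &self);

    printf("%#lx %#lx\n", oldway.bits[0], newway.bits[0]);   /* both 0xfff7 */
    return 0;
}

Once alloc_cpumask_var() allocates only as many bytes as there are bits in
use (the final cpumask.h hunk), a direct assignment whose source is a
cpumask_var_t would read past the end of that smaller allocation, which is
exactly the situation the description above warns about.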


diff -r 1a4223c62ee7 -r 07d303ff2757 xen/arch/x86/acpi/cpufreq/powernow.c
--- a/xen/arch/x86/acpi/cpufreq/powernow.c      Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c      Fri Oct 21 09:21:09 2011 +0200
@@ -107,7 +107,7 @@
     if (unlikely(result))
         return -ENODEV;
 
-    online_policy_cpus = policy->cpus;
+    cpumask_and(&online_policy_cpus, &policy->cpus, &cpu_online_map);
 
     next_perf_state = data->freq_table[next_state].index;
     if (perf->state == next_perf_state) {
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c      Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/arch/x86/cpu/mtrr/main.c      Fri Oct 21 09:21:09 2011 +0200
@@ -231,9 +231,9 @@
        struct set_mtrr_data data;
        unsigned long flags;
 
-       allbutself = cpu_online_map;
-       cpu_clear(smp_processor_id(), allbutself);
-       nr_cpus = cpus_weight(allbutself);
+       cpumask_andnot(&allbutself, &cpu_online_map,
+                      cpumask_of(smp_processor_id()));
+       nr_cpus = cpumask_weight(&allbutself);
 
        data.smp_reg = reg;
        data.smp_base = base;
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/arch/x86/irq.c        Fri Oct 21 09:21:09 2011 +0200
@@ -1040,7 +1040,7 @@
             desc->handler->end(desc, 0);
         break;
     case ACKTYPE_EOI:
-        cpu_eoi_map = action->cpu_eoi_map;
+        cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
         spin_unlock_irq(&desc->lock);
         on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
         spin_lock_irq(&desc->lock);
@@ -1366,9 +1366,9 @@
 
     ASSERT(action->ack_type == ACKTYPE_EOI);
         
-    cpu_eoi_map = action->cpu_eoi_map;
+    cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
 
-    if ( cpu_test_and_clear(smp_processor_id(), cpu_eoi_map) )
+    if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
     {
         __set_eoi_ready(desc);
         spin_unlock(&desc->lock);
@@ -1380,7 +1380,7 @@
         spin_unlock_irq(&desc->lock);
     }
 
-    if ( !cpus_empty(cpu_eoi_map) )
+    if ( !cpumask_empty(&cpu_eoi_map) )
         on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
 }
 
@@ -1619,7 +1619,7 @@
              (--action->in_flight == 0) &&
              (action->nr_guests != 0) )
         {
-            cpu_eoi_map = action->cpu_eoi_map;
+            cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
             spin_unlock_irq(&desc->lock);
             on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
             spin_lock_irq(&desc->lock);
@@ -1649,8 +1649,8 @@
      * would need to flush all ready EOIs before returning as otherwise the
      * desc->handler could change and we would call the wrong 'end' hook.
      */
-    cpu_eoi_map = action->cpu_eoi_map;
-    if ( !cpus_empty(cpu_eoi_map) )
+    cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
+    if ( !cpumask_empty(&cpu_eoi_map) )
     {
         BUG_ON(action->ack_type != ACKTYPE_EOI);
         spin_unlock_irq(&desc->lock);
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/arch/x86/numa.c
--- a/xen/arch/x86/numa.c       Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/arch/x86/numa.c       Fri Oct 21 09:21:09 2011 +0200
@@ -282,7 +282,7 @@
        node_set_online(0);
        for (i = 0; i < nr_cpu_ids; i++)
                numa_set_node(i, 0);
-       node_to_cpumask[0] = cpumask_of_cpu(0);
+       cpumask_copy(&node_to_cpumask[0], cpumask_of(0));
        setup_node_bootmem(0, (u64)start_pfn << PAGE_SHIFT, (u64)end_pfn << PAGE_SHIFT);
 }
 
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/arch/x86/smp.c        Fri Oct 21 09:21:09 2011 +0200
@@ -257,8 +257,8 @@
     cpumask_t allbutself;
 
     /* Flush everyone else. We definitely flushed just before entry. */
-    allbutself = cpu_online_map;
-    cpu_clear(smp_processor_id(), allbutself);
+    cpumask_andnot(&allbutself, &cpu_online_map,
+                   cpumask_of(smp_processor_id()));
     flush_mask(&allbutself, FLUSH_TLB);
 
     /* No need for atomicity: we are the only possible updater. */
@@ -289,8 +289,10 @@
     void *info,
     int wait)
 {
-    cpumask_t allbutself = cpu_online_map;
-    cpu_clear(smp_processor_id(), allbutself);
+    cpumask_t allbutself;
+
+    cpumask_andnot(&allbutself, &cpu_online_map,
+                   cpumask_of(smp_processor_id()));
     on_selected_cpus(&allbutself, func, info, wait);
 }
 
@@ -306,9 +308,9 @@
 
     spin_lock(&call_lock);
 
-    call_data.selected = *selected;
+    cpumask_copy(&call_data.selected, selected);
 
-    nr_cpus = cpus_weight(call_data.selected);
+    nr_cpus = cpumask_weight(&call_data.selected);
     if ( nr_cpus == 0 )
         goto out;
 
@@ -318,14 +320,14 @@
 
     send_IPI_mask(&call_data.selected, CALL_FUNCTION_VECTOR);
 
-    if ( cpu_isset(smp_processor_id(), call_data.selected) )
+    if ( cpumask_test_cpu(smp_processor_id(), &call_data.selected) )
     {
         local_irq_disable();
         __smp_call_function_interrupt();
         local_irq_enable();
     }
 
-    while ( !cpus_empty(call_data.selected) )
+    while ( !cpumask_empty(&call_data.selected) )
         cpu_relax();
 
  out:
@@ -396,7 +398,7 @@
     void *info = call_data.info;
     unsigned int cpu = smp_processor_id();
 
-    if ( !cpu_isset(cpu, call_data.selected) )
+    if ( !cpumask_test_cpu(cpu, &call_data.selected) )
         return;
 
     irq_enter();
@@ -405,12 +407,12 @@
     {
         (*func)(info);
         mb();
-        cpu_clear(cpu, call_data.selected);
+        cpumask_clear_cpu(cpu, &call_data.selected);
     }
     else
     {
         mb();
-        cpu_clear(cpu, call_data.selected);
+        cpumask_clear_cpu(cpu, &call_data.selected);
         (*func)(info);
     }
 
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/arch/x86/smpboot.c    Fri Oct 21 09:21:09 2011 +0200
@@ -267,7 +267,8 @@
 
     if ( c[cpu].x86_max_cores == 1 )
     {
-        per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+        cpumask_copy(&per_cpu(cpu_core_map, cpu),
+                     &per_cpu(cpu_sibling_map, cpu));
         c[cpu].booted_cores = 1;
         return;
     }
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/arch/x86/time.c       Fri Oct 21 09:21:09 2011 +0200
@@ -1232,10 +1232,10 @@
 {
     unsigned int cpu = smp_processor_id();
     local_irq_disable();
-    while ( !cpu_isset(cpu, tsc_check_cpumask) )
+    while ( !cpumask_test_cpu(cpu, &tsc_check_cpumask) )
         mb();
     check_tsc_warp(cpu_khz, &tsc_max_warp);
-    cpu_clear(cpu, tsc_check_cpumask);
+    cpumask_clear_cpu(cpu, &tsc_check_cpumask);
     local_irq_enable();
 }
 
@@ -1248,12 +1248,11 @@
 
     tsc_check_count++;
     smp_call_function(tsc_check_slave, NULL, 0);
-    tsc_check_cpumask = cpu_online_map;
+    cpumask_andnot(&tsc_check_cpumask, &cpu_online_map, cpumask_of(cpu));
     local_irq_disable();
     check_tsc_warp(cpu_khz, &tsc_max_warp);
-    cpu_clear(cpu, tsc_check_cpumask);
     local_irq_enable();
-    while ( !cpus_empty(tsc_check_cpumask) )
+    while ( !cpumask_empty(&tsc_check_cpumask) )
         cpu_relax();
 
     spin_unlock(&lock);
@@ -1280,7 +1279,7 @@
     int i;
     struct cpu_calibration *c = &this_cpu(cpu_calibration);
     struct calibration_rendezvous *r = _r;
-    unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
+    unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
 
     /* Loop to get rid of cache effects on TSC skew. */
     for ( i = 4; i >= 0; i-- )
@@ -1331,7 +1330,7 @@
 {
     struct cpu_calibration *c = &this_cpu(cpu_calibration);
     struct calibration_rendezvous *r = _r;
-    unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
+    unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
 
     if ( smp_processor_id() == 0 )
     {
@@ -1362,10 +1361,11 @@
 static void time_calibration(void *unused)
 {
     struct calibration_rendezvous r = {
-        .cpu_calibration_map = cpu_online_map,
         .semaphore = ATOMIC_INIT(0)
     };
 
+    cpumask_copy(&r.cpu_calibration_map, &cpu_online_map);
+
     /* @wait=1 because we must wait for all cpus before freeing @r. */
     on_selected_cpus(&r.cpu_calibration_map,
                      time_calibration_rendezvous_fn,
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/common/keyhandler.c   Fri Oct 21 09:21:09 2011 +0200
@@ -93,11 +93,11 @@
         printk("\n");
     }
 
-    cpu_clear(cpu, dump_execstate_mask);
+    cpumask_clear_cpu(cpu, &dump_execstate_mask);
     if ( !alt_key_handling )
         return;
 
-    cpu = cycle_cpu(cpu, dump_execstate_mask);
+    cpu = cpumask_cycle(cpu, &dump_execstate_mask);
     if ( cpu < nr_cpu_ids )
     {
         smp_send_state_dump(cpu);
@@ -118,7 +118,7 @@
 
     printk("'%c' pressed -> dumping registers\n\n", key);
 
-    dump_execstate_mask = cpu_online_map;
+    cpumask_copy(&dump_execstate_mask, &cpu_online_map);
 
     /* Get local execution state out immediately, in case we get stuck. */
     dump_execstate(regs);
@@ -131,7 +131,7 @@
     for_each_cpu_mask ( cpu, dump_execstate_mask )
     {
         smp_send_state_dump(cpu);
-        while ( cpu_isset(cpu, dump_execstate_mask) )
+        while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
             cpu_relax();
     }
 
@@ -324,11 +324,11 @@
 {
     unsigned int cpu = smp_processor_id();
     local_irq_disable();
-    while ( !cpu_isset(cpu, read_clocks_cpumask) )
+    while ( !cpumask_test_cpu(cpu, &read_clocks_cpumask) )
         cpu_relax();
     per_cpu(read_clocks_time, cpu) = NOW();
     per_cpu(read_cycles_time, cpu) = get_cycles();
-    cpu_clear(cpu, read_clocks_cpumask);
+    cpumask_clear_cpu(cpu, &read_clocks_cpumask);
     local_irq_enable();
 }
 
@@ -348,13 +348,12 @@
     smp_call_function(read_clocks_slave, NULL, 0);
 
     local_irq_disable();
-    read_clocks_cpumask = cpu_online_map;
+    cpumask_andnot(&read_clocks_cpumask, &cpu_online_map, cpumask_of(cpu));
     per_cpu(read_clocks_time, cpu) = NOW();
     per_cpu(read_cycles_time, cpu) = get_cycles();
-    cpu_clear(cpu, read_clocks_cpumask);
     local_irq_enable();
 
-    while ( !cpus_empty(read_clocks_cpumask) )
+    while ( !cpumask_empty(&read_clocks_cpumask) )
         cpu_relax();
 
     min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu;
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/common/rcupdate.c
--- a/xen/common/rcupdate.c     Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/common/rcupdate.c     Fri Oct 21 09:21:09 2011 +0200
@@ -161,8 +161,7 @@
          * Don't send IPI to itself. With irqs disabled,
          * rdp->cpu is the current cpu.
          */
-        cpumask = rcp->cpumask;
-        cpu_clear(rdp->cpu, cpumask);
+        cpumask_andnot(&cpumask, &rcp->cpumask, cpumask_of(rdp->cpu));
         cpumask_raise_softirq(&cpumask, SCHEDULE_SOFTIRQ);
     }
 }
@@ -258,7 +257,7 @@
         smp_wmb();
         rcp->cur++;
 
-        rcp->cpumask = cpu_online_map;
+        cpumask_copy(&rcp->cpumask, &cpu_online_map);
     }
 }
 
@@ -269,8 +268,8 @@
  */
 static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
 {
-    cpu_clear(cpu, rcp->cpumask);
-    if (cpus_empty(rcp->cpumask)) {
+    cpumask_clear_cpu(cpu, &rcp->cpumask);
+    if (cpumask_empty(&rcp->cpumask)) {
         /* batch completed ! */
         rcp->completed = rcp->cur;
         rcu_start_batch(rcp);
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/common/stop_machine.c
--- a/xen/common/stop_machine.c Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/common/stop_machine.c Fri Oct 21 09:21:09 2011 +0200
@@ -81,9 +81,9 @@
     if ( !get_cpu_maps() )
         return -EBUSY;
 
-    allbutself = cpu_online_map;
-    cpu_clear(smp_processor_id(), allbutself);
-    nr_cpus = cpus_weight(allbutself);
+    cpumask_andnot(&allbutself, &cpu_online_map,
+                   cpumask_of(smp_processor_id()));
+    nr_cpus = cpumask_weight(&allbutself);
 
     /* Must not spin here as the holder will expect us to be descheduled. */
     if ( !spin_trylock(&stopmachine_lock) )
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/include/xen/cpumask.h Fri Oct 21 09:21:09 2011 +0200
@@ -359,11 +359,14 @@
 
 static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
 {
-       /*
-        * Once all direct cpumask assignments are gone, we could use
-        * nr_cpumask_bits to determine the allocation size here.
-        */
-       return (*mask = xmalloc(cpumask_t)) != NULL;
+       *(void **)mask = _xmalloc(nr_cpumask_bits / 8, sizeof(long));
+       return *mask != NULL;
+}
+
+static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
+{
+       *(void **)mask = _xzalloc(nr_cpumask_bits / 8, sizeof(long));
+       return *mask != NULL;
 }
 
 static inline void free_cpumask_var(cpumask_var_t mask)
@@ -378,19 +381,17 @@
        return 1;
 }
 
+static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
+{
+       cpumask_clear(*mask);
+       return 1;
+}
+
 static inline void free_cpumask_var(cpumask_var_t mask)
 {
 }
 #endif
 
-static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
-{
-       if (!alloc_cpumask_var(mask))
-               return 0;
-       cpumask_clear(*mask);
-       return 1;
-}
-
 #if NR_CPUS > 1
 #define for_each_cpu_mask(cpu, mask)           \
        for ((cpu) = first_cpu(mask);           \
diff -r 1a4223c62ee7 -r 07d303ff2757 xen/include/xen/irq.h
--- a/xen/include/xen/irq.h     Fri Oct 21 09:19:44 2011 +0200
+++ b/xen/include/xen/irq.h     Fri Oct 21 09:21:09 2011 +0200
@@ -153,7 +153,7 @@
 
 static inline void set_native_irq_info(unsigned int irq, const cpumask_t *mask)
 {
-    irq_desc[irq].affinity = *mask;
+    cpumask_copy(&irq_desc[irq].affinity, mask);
 }
 
 unsigned int set_desc_affinity(struct irq_desc *, const cpumask_t *);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
