[Xen-changelog] [xen-unstable] eliminate cpumask accessors referencing NR_CPUS

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] eliminate cpumask accessors referencing NR_CPUS
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Tue, 25 Oct 2011 01:55:10 +0100
Delivery-date: Mon, 24 Oct 2011 17:56:31 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1319181584 -7200
# Node ID 1a4223c62ee7868e52be7640543af88483d1f5f3
# Parent  511d5e65a30231fab56b47ffe7b2f1483b023700
eliminate cpumask accessors referencing NR_CPUS

... in favor of using the new, nr_cpumask_bits-based ones.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---


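[Note below the patch separator: the conversion applied throughout this changeset is mechanical. The old accessors took cpumask_t values and always operated on NR_CPUS bits; the new inlines take cpumask_t pointers and operate on nr_cpumask_bits. A minimal sketch of the substitution, assuming the declarations from xen/include/xen/cpumask.h and xen/include/xen/softirq.h; 'mask', 'tmp' and 'cpu' are placeholder names, not code from this patch.]

    cpumask_t mask, tmp;
    unsigned int cpu = smp_processor_id();

    /* Old style: by-value macros, scanning all NR_CPUS bits. */
    cpus_clear(mask);
    cpu_set(cpu, mask);
    cpus_and(tmp, mask, cpu_online_map);
    if ( !cpus_empty(tmp) )
        cpumask_raise_softirq(&tmp, TIMER_SOFTIRQ);

    /* New style: pointer-based inlines, scanning nr_cpumask_bits only. */
    cpumask_clear(&mask);
    cpumask_set_cpu(cpu, &mask);
    cpumask_and(&tmp, &mask, &cpu_online_map);
    if ( !cpumask_empty(&tmp) )
        cpumask_raise_softirq(&tmp, TIMER_SOFTIRQ);
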
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/ia64/linux-xen/acpi.c
--- a/xen/arch/ia64/linux-xen/acpi.c    Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/ia64/linux-xen/acpi.c    Fri Oct 21 09:19:44 2011 +0200
@@ -957,14 +957,14 @@
        buffer.length = ACPI_ALLOCATE_BUFFER;
        buffer.pointer = NULL;
 
-       cpus_complement(tmp_map, cpu_present_map);
-       cpu = first_cpu(tmp_map);
-       if (cpu >= NR_CPUS)
+       cpumask_complement(&tmp_map, &cpu_present_map);
+       cpu = cpumask_first(&tmp_map);
+       if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
        acpi_map_cpu2node(handle, cpu, physid);
 
-       cpu_set(cpu, cpu_present_map);
+       cpumask_set_cpu(cpu, &cpu_present_map);
        ia64_cpu_to_sapicid[cpu] = physid;
 
        *pcpu = cpu;
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/ia64/linux-xen/numa.c
--- a/xen/arch/ia64/linux-xen/numa.c    Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/ia64/linux-xen/numa.c    Fri Oct 21 09:19:44 2011 +0200
@@ -51,7 +51,7 @@
        int cpu, i, node;
 
        for(node=0; node < MAX_NUMNODES; node++)
-               cpus_clear(node_to_cpu_mask[node]);
+               cpumask_clear(&node_to_cpu_mask[node]);
 
        for(cpu = 0; cpu < NR_CPUS; ++cpu) {
                node = -1;
@@ -62,6 +62,6 @@
                        }
                cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
                if (node >= 0)
-                       cpu_set(cpu, node_to_cpu_mask[node]);
+                       cpumask_set_cpu(cpu, &node_to_cpu_mask[node]);
        }
 }
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/ia64/linux-xen/smpboot.c Fri Oct 21 09:19:44 2011 +0200
@@ -594,15 +594,15 @@
        }
 
        ia64_cpu_to_sapicid[0] = boot_cpu_id;
-       cpus_clear(cpu_present_map);
-       cpu_set(0, cpu_present_map);
-       cpu_set(0, cpu_possible_map);
+       cpumask_clear(&cpu_present_map);
+       cpumask_set_cpu(0, &cpu_present_map);
+       cpumask_set_cpu(0, &cpu_possible_map);
        for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
                sapicid = smp_boot_data.cpu_phys_id[i];
                if (sapicid == boot_cpu_id)
                        continue;
-               cpu_set(cpu, cpu_present_map);
-               cpu_set(cpu, cpu_possible_map);
+               cpumask_set_cpu(cpu, &cpu_present_map);
+               cpumask_set_cpu(cpu, &cpu_possible_map);
                ia64_cpu_to_sapicid[cpu] = sapicid;
                cpu++;
        }
@@ -640,12 +640,12 @@
         */
        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated.\n");
-               cpus_clear(cpu_online_map);
-               cpus_clear(cpu_present_map);
-               cpus_clear(cpu_possible_map);
-               cpu_set(0, cpu_online_map);
-               cpu_set(0, cpu_present_map);
-               cpu_set(0, cpu_possible_map);
+               cpumask_clear(&cpu_online_map);
+               cpumask_clear(&cpu_present_map);
+               cpumask_clear(&cpu_possible_map);
+               cpumask_set_cpu(0, &cpu_online_map);
+               cpumask_set_cpu(0, &cpu_present_map);
+               cpumask_set_cpu(0, &cpu_possible_map);
                return;
        }
 }
@@ -688,12 +688,12 @@
        int i;
 
        for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
+               cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
        for_each_cpu_mask(i, per_cpu(cpu_core_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_core_map, i));
+               cpumask_clear_cpu(cpu, &per_cpu(cpu_core_map, i));
 
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
-       cpus_clear(per_cpu(cpu_core_map, cpu));
+       cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
+       cpumask_clear(&per_cpu(cpu_core_map, cpu));
 }
 
 static void
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
--- a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c       Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c       Fri Oct 21 09:19:44 2011 +0200
@@ -206,7 +206,7 @@
        static DEFINE_SPINLOCK(sn2_ptcg_lock2);
 
        nodes_clear(nodes_flushed);
-       cpus_clear(selected_cpus);
+       cpumask_clear(&selected_cpus);
 
        spin_lock(&sn2_ptcg_lock2);
        node_set(cpu_to_node(smp_processor_id()), nodes_flushed);
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/ia64/xen/fw_emul.c
--- a/xen/arch/ia64/xen/fw_emul.c       Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/ia64/xen/fw_emul.c       Fri Oct 21 09:19:44 2011 +0200
@@ -866,10 +866,10 @@
                                     "status %lx", status);
 
                if (in1 == PAL_CACHE_TYPE_COHERENT) {
-                       cpus_setall(current->arch.cache_coherent_map);
-                       cpu_clear(processor, current->arch.cache_coherent_map);
-                       cpus_setall(cpu_cache_coherent_map);
-                       cpu_clear(processor, cpu_cache_coherent_map);
+                       cpumask_complement(&current->arch.cache_coherent_map,
+                                          cpumask_of(processor));
+                       cpumask_complement(&cpu_cache_coherent_map,
+                                          cpumask_of(processor));
                }
                break;
            case PAL_PERF_MON_INFO:
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/ia64/xen/tlb_track.c
--- a/xen/arch/ia64/xen/tlb_track.c     Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/ia64/xen/tlb_track.c     Fri Oct 21 09:19:44 2011 +0200
@@ -374,7 +374,7 @@
         entry->pte_val = old_pte;
         entry->vaddr = vaddr;
         entry->rid = rid;
-        cpus_clear(entry->pcpu_dirty_mask);
+        cpumask_clear(&entry->pcpu_dirty_mask);
         vcpus_clear(entry->vcpu_dirty_mask);
         list_add(&entry->list, head);
 
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/ia64/xen/xensetup.c      Fri Oct 21 09:19:44 2011 +0200
@@ -592,7 +592,7 @@
     smp_prepare_cpus(max_cpus);
 
     /* We aren't hotplug-capable yet. */
-    cpus_or(cpu_present_map, cpu_present_map, cpu_possible_map);
+    cpumask_or(&cpu_present_map, &cpu_present_map, &cpu_possible_map);
 
     /*  Enable IRQ to receive IPI (needed for ITC sync).  */
     local_irq_enable();
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c      Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/acpi/cpu_idle.c      Fri Oct 21 09:19:44 2011 +0200
@@ -233,13 +233,13 @@
     cpumask_t target;
     unsigned int cpu;
 
-    cpus_and(target, *mask, cpuidle_mwait_flags);
+    cpumask_and(&target, mask, &cpuidle_mwait_flags);
 
     /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
     for_each_cpu_mask(cpu, target)
         mwait_wakeup(cpu) = 0;
 
-    cpus_andnot(*mask, *mask, target);
+    cpumask_andnot(mask, mask, &target);
 }
 
 static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/acpi/cpufreq/cpufreq.c
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c       Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c       Fri Oct 21 09:19:44 2011 +0200
@@ -446,7 +446,7 @@
     if (unlikely(result))
         return -ENODEV;
 
-    cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+    cpumask_and(&online_policy_cpus, &cpu_online_map, &policy->cpus);
 
     next_perf_state = data->freq_table[next_state].index;
     if (perf->state == next_perf_state) {
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c     Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/cpu/mcheck/mce.c     Fri Oct 21 09:19:44 2011 +0200
@@ -1537,20 +1537,19 @@
             return x86_mcerr("do_mca #MC", -ENODEV);
 
         if ( op->u.mc_inject_v2.flags & XEN_MC_INJECT_CPU_BROADCAST )
-            cpus_copy(cpumap, cpu_online_map);
+            cpumask_copy(&cpumap, &cpu_online_map);
         else
         {
             int gcw;
 
-            cpus_clear(cpumap);
             xenctl_cpumap_to_cpumask(&cpumap,
                                      &op->u.mc_inject_v2.cpumap);
-            gcw = cpus_weight(cpumap);
-            cpus_and(cpumap, cpu_online_map, cpumap);
+            gcw = cpumask_weight(&cpumap);
+            cpumask_and(&cpumap, &cpu_online_map, &cpumap);
 
-            if ( cpus_empty(cpumap) )
+            if ( cpumask_empty(&cpumap) )
                 return x86_mcerr("No online CPU passed\n", -EINVAL);
-            else if ( gcw != cpus_weight(cpumap) )
+            else if ( gcw != cpumask_weight(&cpumap) )
                 dprintk(XENLOG_INFO,
                         "Not all required CPUs are online\n");
         }
@@ -1559,7 +1558,7 @@
         {
         case XEN_MC_INJECT_TYPE_MCE:
             if ( mce_broadcast &&
-                 !cpus_equal(cpumap, cpu_online_map) )
+                 !cpumask_equal(&cpumap, &cpu_online_map) )
                 printk("Not trigger MCE on all CPUs, may HANG!\n");
             on_selected_cpus(&cpumap, x86_mc_mceinject, NULL, 1);
             break;
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/hpet.c       Fri Oct 21 09:19:44 2011 +0200
@@ -178,7 +178,7 @@
     spin_unlock_irq(&ch->lock);
 
     next_event = STIME_MAX;
-    mask = (cpumask_t)CPU_MASK_NONE;
+    cpumask_clear(&mask);
     now = NOW();
 
     /* find all expired events */
@@ -189,11 +189,11 @@
         rmb();
         deadline = per_cpu(timer_deadline, cpu);
         rmb();
-        if ( !cpu_isset(cpu, ch->cpumask) )
+        if ( !cpumask_test_cpu(cpu, &ch->cpumask) )
             continue;
 
         if ( deadline <= now )
-            cpu_set(cpu, mask);
+            cpumask_set_cpu(cpu, &mask);
         else if ( deadline < next_event )
             next_event = deadline;
     }
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/hvm/nestedhvm.c
--- a/xen/arch/x86/hvm/nestedhvm.c      Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/hvm/nestedhvm.c      Fri Oct 21 09:19:44 2011 +0200
@@ -116,7 +116,7 @@
 {
     on_selected_cpus(&p2m->p2m_dirty_cpumask, nestedhvm_flushtlb_ipi,
         p2m->domain, 1);
-    cpus_clear(p2m->p2m_dirty_cpumask);
+    cpumask_clear(&p2m->p2m_dirty_cpumask);
 }
 
 bool_t
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/io_apic.c    Fri Oct 21 09:19:44 2011 +0200
@@ -1850,7 +1850,7 @@
     int apic1, pin1, apic2, pin2;
     int vector, ret;
     unsigned long flags;
-    cpumask_t mask_all = CPU_MASK_ALL;
+    cpumask_t mask_all;
 
     local_irq_save(flags);
 
@@ -1861,6 +1861,7 @@
     vector = FIRST_HIPRIORITY_VECTOR;
     clear_irq_vector(0);
 
+    cpumask_setall(&mask_all);
     if ((ret = bind_irq_vector(0, vector, &mask_all)))
         printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", ret);
     
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/irq.c        Fri Oct 21 09:19:44 2011 +0200
@@ -115,10 +115,10 @@
     BUG_ON((unsigned)irq >= nr_irqs);
     BUG_ON((unsigned)vector >= NR_VECTORS);
 
-    cpus_and(online_mask, *cpu_mask, cpu_online_map);
-    if (cpus_empty(online_mask))
+    cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
+    if (cpumask_empty(&online_mask))
         return -EINVAL;
-    if ((cfg->vector == vector) && cpus_equal(cfg->cpu_mask, online_mask))
+    if ((cfg->vector == vector) && cpumask_equal(&cfg->cpu_mask, &online_mask))
         return 0;
     if (cfg->vector != IRQ_VECTOR_UNASSIGNED) 
         return -EBUSY;
@@ -126,7 +126,7 @@
     for_each_cpu_mask(cpu, online_mask)
         per_cpu(vector_irq, cpu)[vector] = irq;
     cfg->vector = vector;
-    cfg->cpu_mask = online_mask;
+    cpumask_copy(&cfg->cpu_mask, &online_mask);
     if ( cfg->used_vectors )
     {
         ASSERT(!test_bit(vector, cfg->used_vectors));
@@ -197,7 +197,7 @@
     desc->msi_desc = NULL;
     desc->handler = &no_irq_type;
     desc->arch.used_vectors = NULL;
-    cpus_setall(desc->affinity);
+    cpumask_setall(&desc->affinity);
     spin_unlock_irqrestore(&desc->lock, flags);
 
     /* Wait to make sure it's not being used on another CPU */
@@ -217,7 +217,7 @@
 
     /* Always clear cfg->vector */
     vector = cfg->vector;
-    cpus_and(tmp_mask, cfg->cpu_mask, cpu_online_map);
+    cpumask_and(&tmp_mask, &cfg->cpu_mask, &cpu_online_map);
 
     for_each_cpu_mask(cpu, tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[vector] == irq );
@@ -225,7 +225,7 @@
     }
 
     cfg->vector = IRQ_VECTOR_UNASSIGNED;
-    cpus_clear(cfg->cpu_mask);
+    cpumask_clear(&cfg->cpu_mask);
 
     if ( cfg->used_vectors )
     {
@@ -242,7 +242,7 @@
 
     /* If we were in motion, also clear cfg->old_vector */
     old_vector = cfg->old_vector;
-    cpus_and(tmp_mask, cfg->old_cpu_mask, cpu_online_map);
+    cpumask_and(&tmp_mask, &cfg->old_cpu_mask, &cpu_online_map);
 
     for_each_cpu_mask(cpu, tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[old_vector] == irq );
@@ -251,7 +251,7 @@
      }
 
     cfg->old_vector = IRQ_VECTOR_UNASSIGNED;
-    cpus_clear(cfg->old_cpu_mask);
+    cpumask_clear(&cfg->old_cpu_mask);
 
     if ( cfg->used_vectors )
     {
@@ -303,7 +303,7 @@
     desc->action  = NULL;
     desc->msi_desc = NULL;
     spin_lock_init(&desc->lock);
-    cpus_setall(desc->affinity);
+    cpumask_setall(&desc->affinity);
     INIT_LIST_HEAD(&desc->rl_link);
 }
 
@@ -311,8 +311,8 @@
 {
     cfg->vector = IRQ_VECTOR_UNASSIGNED;
     cfg->old_vector = IRQ_VECTOR_UNASSIGNED;
-    cpus_clear(cfg->cpu_mask);
-    cpus_clear(cfg->old_cpu_mask);
+    cpumask_clear(&cfg->cpu_mask);
+    cpumask_clear(&cfg->old_cpu_mask);
     cfg->used_vectors = NULL;
     cfg->used = IRQ_UNUSED;
 }
@@ -425,8 +425,8 @@
 
     old_vector = irq_to_vector(irq);
     if (old_vector) {
-        cpus_and(tmp_mask, *mask, cpu_online_map);
-        if (cpus_intersects(tmp_mask, cfg->cpu_mask)) {
+        cpumask_and(&tmp_mask, mask, &cpu_online_map);
+        if (cpumask_intersects(&tmp_mask, &cfg->cpu_mask)) {
             cfg->vector = old_vector;
             return 0;
         }
@@ -455,7 +455,8 @@
         if (!cpu_online(cpu))
             continue;
 
-        cpus_and(tmp_mask, *vector_allocation_cpumask(cpu), cpu_online_map);
+        cpumask_and(&tmp_mask, vector_allocation_cpumask(cpu),
+                    &cpu_online_map);
 
         vector = current_vector;
         offset = current_offset;
@@ -485,14 +486,14 @@
         local_irq_save(flags);
         if (old_vector) {
             cfg->move_in_progress = 1;
-            cpus_copy(cfg->old_cpu_mask, cfg->cpu_mask);
+            cpumask_copy(&cfg->old_cpu_mask, &cfg->cpu_mask);
             cfg->old_vector = cfg->vector;
         }
         trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, &tmp_mask);
         for_each_cpu_mask(new_cpu, tmp_mask)
             per_cpu(vector_irq, new_cpu)[vector] = irq;
         cfg->vector = vector;
-        cpus_copy(cfg->cpu_mask, tmp_mask);
+        cpumask_copy(&cfg->cpu_mask, &tmp_mask);
 
         cfg->used = IRQ_USED;
         ASSERT((cfg->used_vectors == NULL)
@@ -529,7 +530,7 @@
     ret = __assign_irq_vector(irq, cfg, TARGET_CPUS);
     if (!ret) {
         ret = cfg->vector;
-        cpus_copy(desc->affinity, cfg->cpu_mask);
+        cpumask_copy(&desc->affinity, &cfg->cpu_mask);
     }
     spin_unlock_irqrestore(&vector_lock, flags);
     return ret;
@@ -582,7 +583,7 @@
     if (likely(cpus_intersects(desc->pending_mask, cpu_online_map)))
         desc->handler->set_affinity(desc, &desc->pending_mask);
 
-    cpus_clear(desc->pending_mask);
+    cpumask_clear(&desc->pending_mask);
 }
 
 void move_native_irq(struct irq_desc *desc)
@@ -729,7 +730,7 @@
     ASSERT(spin_is_locked(&desc->lock));
     desc->status &= ~IRQ_MOVE_PENDING;
     wmb();
-    cpus_copy(desc->pending_mask, *mask);
+    cpumask_copy(&desc->pending_mask, mask);
     wmb();
     desc->status |= IRQ_MOVE_PENDING;
 }
@@ -1474,7 +1475,6 @@
     struct irq_desc         *desc;
     irq_guest_action_t *action, *newaction = NULL;
     int                 rc = 0;
-    cpumask_t           cpumask = CPU_MASK_NONE;
 
     WARN_ON(!spin_is_locked(&v->domain->event_lock));
     BUG_ON(!local_irq_is_enabled());
@@ -1521,7 +1521,7 @@
         action->in_flight   = 0;
         action->shareable   = will_share;
         action->ack_type    = pirq_acktype(v->domain, pirq->pirq);
-        cpus_clear(action->cpu_eoi_map);
+        cpumask_clear(&action->cpu_eoi_map);
         init_timer(&action->eoi_timer, irq_guest_eoi_timer_fn, desc, 0);
 
         desc->status |= IRQ_GUEST;
@@ -1529,9 +1529,8 @@
         desc->handler->startup(desc);
 
         /* Attempt to bind the interrupt target to the correct CPU. */
-        cpu_set(v->processor, cpumask);
         if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
-            desc->handler->set_affinity(desc, &cpumask);
+            desc->handler->set_affinity(desc, cpumask_of(v->processor));
     }
     else if ( !will_share || !action->shareable )
     {
@@ -2070,18 +2069,18 @@
 
         spin_lock(&desc->lock);
 
-        affinity = desc->affinity;
-        if ( !desc->action || cpus_subset(affinity, cpu_online_map) )
+        cpumask_copy(&affinity, &desc->affinity);
+        if ( !desc->action || cpumask_subset(&affinity, &cpu_online_map) )
         {
             spin_unlock(&desc->lock);
             continue;
         }
 
-        cpus_and(affinity, affinity, cpu_online_map);
-        if ( cpus_empty(affinity) )
+        cpumask_and(&affinity, &affinity, &cpu_online_map);
+        if ( cpumask_empty(&affinity) )
         {
             break_affinity = 1;
-            affinity = cpu_online_map;
+            cpumask_copy(&affinity, &cpu_online_map);
         }
 
         if ( desc->handler->disable )
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/mm.c Fri Oct 21 09:19:44 2011 +0200
@@ -1339,8 +1339,10 @@
 
     if ( unlikely(shadow_mode_enabled(d)) )
     {
-        cpumask_t m = CPU_MASK_NONE;
+        cpumask_t m;
+
         /* Re-shadow this l3 table on any vcpus that are using it */
+        cpumask_clear(&m);
         for_each_vcpu ( d, v )
             if ( pagetable_get_pfn(v->arch.guest_table) == mfn )
             {
@@ -2887,7 +2889,7 @@
     struct vcpu *v;
     bool_t is_native = !is_pv_32on64_domain(d);
 
-    cpus_clear(*pmask);
+    cpumask_clear(pmask);
     for ( vmask = 0, offs = 0; ; ++offs)
     {
         vcpu_bias = offs * (is_native ? BITS_PER_LONG : 32);
@@ -2899,7 +2901,7 @@
                       copy_from_guest_offset((unsigned int *)&vmask, bmap,
                                              offs, 1)) )
         {
-            cpus_clear(*pmask);
+            cpumask_clear(pmask);
             return -EFAULT;
         }
 
@@ -3194,12 +3196,13 @@
             else if ( likely(cache_flush_permitted(d)) )
             {
                 unsigned int cpu;
-                cpumask_t mask = CPU_MASK_NONE;
-
+                cpumask_t mask;
+
+                cpumask_clear(&mask);
                 for_each_online_cpu(cpu)
-                    if ( !cpus_intersects(mask,
-                                          per_cpu(cpu_sibling_map, cpu)) )
-                        cpu_set(cpu, mask);
+                    if ( !cpumask_intersects(&mask,
+                                             &per_cpu(cpu_sibling_map, cpu)) )
+                        cpumask_set_cpu(cpu, &mask);
                 flush_mask(&mask, FLUSH_CACHE);
             }
             else
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/mm/p2m.c     Fri Oct 21 09:19:44 2011 +0200
@@ -81,7 +81,7 @@
     p2m->default_access = p2m_access_rwx;
 
     p2m->cr3 = CR3_EADDR;
-    cpus_clear(p2m->p2m_dirty_cpumask);
+    cpumask_clear(&p2m->p2m_dirty_cpumask);
 
     if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
         ept_p2m_init(p2m);
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/mm/shadow/common.c   Fri Oct 21 09:19:44 2011 +0200
@@ -3459,7 +3459,7 @@
         p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
         if ( p2m_is_valid(p2mt) && mfn_valid(omfn) )
         {
-            cpus_clear(flushmask);
+            cpumask_clear(&flushmask);
 
             /* If we're replacing a superpage with a normal L1 page, map it */
             if ( (l1e_get_flags(new) & _PAGE_PRESENT)
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/smp.c        Fri Oct 21 09:19:44 2011 +0200
@@ -240,12 +240,12 @@
     if ( !cpus_subset(*mask, *cpumask_of(smp_processor_id())) )
     {
         spin_lock(&flush_lock);
-        cpus_and(flush_cpumask, *mask, cpu_online_map);
-        cpu_clear(smp_processor_id(), flush_cpumask);
+        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
+        cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
         flush_va      = va;
         flush_flags   = flags;
         send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
-        while ( !cpus_empty(flush_cpumask) )
+        while ( !cpumask_empty(&flush_cpumask) )
             cpu_relax();
         spin_unlock(&flush_lock);
     }
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/smpboot.c    Fri Oct 21 09:19:44 2011 +0200
@@ -533,8 +533,9 @@
 {
     cpumask_t tmp_map;
     int cpu;
-    cpus_complement(tmp_map, cpu_present_map);
-    cpu = first_cpu(tmp_map);
+
+    cpumask_complement(&tmp_map, &cpu_present_map);
+    cpu = cpumask_first(&tmp_map);
     return (cpu < nr_cpu_ids) ? cpu : -ENODEV;
 }
 
@@ -818,18 +819,18 @@
     {
         cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
         /* Last thread sibling in this cpu core going down. */
-        if ( cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1 )
+        if ( cpumask_weight(&per_cpu(cpu_sibling_map, cpu)) == 1 )
             c[sibling].booted_cores--;
     }
    
     for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-        cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-    cpus_clear(per_cpu(cpu_sibling_map, cpu));
-    cpus_clear(per_cpu(cpu_core_map, cpu));
+        cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, sibling));
+    cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
+    cpumask_clear(&per_cpu(cpu_core_map, cpu));
     c[cpu].phys_proc_id = BAD_APICID;
     c[cpu].cpu_core_id = BAD_APICID;
     c[cpu].compute_unit_id = BAD_APICID;
-    cpu_clear(cpu, cpu_sibling_setup_map);
+    cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
 }
 
 void __cpu_disable(void)
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/arch/x86/time.c       Fri Oct 21 09:19:44 2011 +0200
@@ -185,15 +185,15 @@
     int cpu = smp_processor_id();
     cpumask_t mask;
 
-    cpus_and(mask, cpu_online_map, pit_broadcast_mask);
+    cpumask_and(&mask, &cpu_online_map, &pit_broadcast_mask);
 
-    if ( cpu_isset(cpu, mask) )
+    if ( cpumask_test_cpu(cpu, &mask) )
     {
-        cpu_clear(cpu, mask);
+        cpumask_clear_cpu(cpu, &mask);
         raise_softirq(TIMER_SOFTIRQ);
     }
 
-    if ( !cpus_empty(mask) )
+    if ( !cpumask_empty(&mask) )
     {
         cpumask_raise_softirq(&mask, TIMER_SOFTIRQ);
     }
@@ -1226,7 +1226,7 @@
 }
 
 static unsigned long tsc_max_warp, tsc_check_count;
-static cpumask_t tsc_check_cpumask = CPU_MASK_NONE;
+static cpumask_t tsc_check_cpumask;
 
 static void tsc_check_slave(void *unused)
 {
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/cpu.c
--- a/xen/common/cpu.c  Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/cpu.c  Fri Oct 21 09:19:44 2011 +0200
@@ -176,7 +176,7 @@
 
     BUG_ON(smp_processor_id() != 0);
 
-    cpus_clear(frozen_cpus);
+    cpumask_clear(&frozen_cpus);
 
     printk("Disabling non-boot CPUs ...\n");
 
@@ -192,7 +192,7 @@
             break;
         }
 
-        cpu_set(cpu, frozen_cpus);
+        cpumask_set_cpu(cpu, &frozen_cpus);
     }
 
     BUG_ON(!error && (num_online_cpus() != 1));
@@ -214,5 +214,5 @@
         }
     }
 
-    cpus_clear(frozen_cpus);
+    cpumask_clear(&frozen_cpus);
 }
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/cpupool.c
--- a/xen/common/cpupool.c      Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/cpupool.c      Fri Oct 21 09:19:44 2011 +0200
@@ -29,7 +29,7 @@
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
-static cpumask_t cpupool_locked_cpus = CPU_MASK_NONE;
+static cpumask_t cpupool_locked_cpus;
 
 static DEFINE_SPINLOCK(cpupool_lock);
 
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/domain.c
--- a/xen/common/domain.c       Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/domain.c       Fri Oct 21 09:19:44 2011 +0200
@@ -359,11 +359,12 @@
 
 void domain_update_node_affinity(struct domain *d)
 {
-    cpumask_t cpumask = CPU_MASK_NONE;
+    cpumask_t cpumask;
     nodemask_t nodemask = NODE_MASK_NONE;
     struct vcpu *v;
     unsigned int node;
 
+    cpumask_clear(&cpumask);
     spin_lock(&d->node_affinity_lock);
 
     for_each_vcpu ( d, v )
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/domctl.c
--- a/xen/common/domctl.c       Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/domctl.c       Fri Oct 21 09:19:44 2011 +0200
@@ -37,9 +37,9 @@
     uint8_t bytemap[(NR_CPUS + 7) / 8];
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
-    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));
+    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
 
-    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);
+    bitmap_long_to_byte(bytemap, cpumask_bits(cpumask), nr_cpu_ids);
 
     if ( copy_bytes != 0 )
         if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
@@ -59,7 +59,7 @@
     uint8_t bytemap[(NR_CPUS + 7) / 8];
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
-    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));
+    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
 
     memset(bytemap, 0, sizeof(bytemap));
 
@@ -71,7 +71,7 @@
             bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
     }
 
-    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
+    bitmap_byte_to_long(cpumask_bits(cpumask), bytemap, nr_cpu_ids);
 
     return 0;
 }
@@ -154,7 +154,7 @@
     cpumask_t      cpu_exclude_map;
 
     /* Do an initial CPU placement. Pick the least-populated CPU. */
-    nr_cpus = last_cpu(cpu_online_map) + 1;
+    nr_cpus = cpumask_last(&cpu_online_map) + 1;
     cnt = xzalloc_array(unsigned int, nr_cpus);
     if ( cnt )
     {
@@ -171,18 +171,19 @@
      * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
      * favour high numbered CPUs in the event of a tie.
      */
-    cpu = first_cpu(per_cpu(cpu_sibling_map, 0));
-    if ( cpus_weight(per_cpu(cpu_sibling_map, 0)) > 1 )
-        cpu = next_cpu(cpu, per_cpu(cpu_sibling_map, 0));
-    cpu_exclude_map = per_cpu(cpu_sibling_map, 0);
+    cpumask_copy(&cpu_exclude_map, &per_cpu(cpu_sibling_map, 0));
+    cpu = cpumask_first(&cpu_exclude_map);
+    if ( cpumask_weight(&cpu_exclude_map) > 1 )
+        cpu = cpumask_next(cpu, &cpu_exclude_map);
     for_each_cpu_mask(i, *online)
     {
-        if ( cpu_isset(i, cpu_exclude_map) )
+        if ( cpumask_test_cpu(i, &cpu_exclude_map) )
             continue;
-        if ( (i == first_cpu(per_cpu(cpu_sibling_map, i))) &&
-             (cpus_weight(per_cpu(cpu_sibling_map, i)) > 1) )
+        if ( (i == cpumask_first(&per_cpu(cpu_sibling_map, i))) &&
+             (cpumask_weight(&per_cpu(cpu_sibling_map, i)) > 1) )
             continue;
-        cpus_or(cpu_exclude_map, cpu_exclude_map, per_cpu(cpu_sibling_map, i));
+        cpumask_or(&cpu_exclude_map, &cpu_exclude_map,
+                   &per_cpu(cpu_sibling_map, i));
         if ( !cnt || cnt[i] <= cnt[cpu] )
             cpu = i;
     }
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/keyhandler.c   Fri Oct 21 09:19:44 2011 +0200
@@ -316,7 +316,7 @@
     .desc = "dump domain (and guest debug) info"
 };
 
-static cpumask_t read_clocks_cpumask = CPU_MASK_NONE;
+static cpumask_t read_clocks_cpumask;
 static DEFINE_PER_CPU(s_time_t, read_clocks_time);
 static DEFINE_PER_CPU(u64, read_cycles_time);
 
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/page_alloc.c   Fri Oct 21 09:19:44 2011 +0200
@@ -304,7 +304,7 @@
     unsigned int first_node, i, j, zone = 0, nodemask_retry = 0;
     unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
     unsigned long request = 1UL << order;
-    cpumask_t extra_cpus_mask, mask;
+    cpumask_t mask;
     struct page_info *pg;
     nodemask_t nodemask = (d != NULL ) ? d->node_affinity : node_online_map;
 
@@ -418,7 +418,7 @@
     if ( d != NULL )
         d->last_alloc_node = node;
 
-    cpus_clear(mask);
+    cpumask_clear(&mask);
 
     for ( i = 0; i < (1 << order); i++ )
     {
@@ -429,9 +429,11 @@
         if ( pg[i].u.free.need_tlbflush )
         {
             /* Add in extra CPUs that need flushing because of this page. */
-            cpus_andnot(extra_cpus_mask, cpu_online_map, mask);
+            static cpumask_t extra_cpus_mask;
+
+            cpumask_andnot(&extra_cpus_mask, &cpu_online_map, &mask);
             tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
-            cpus_or(mask, mask, extra_cpus_mask);
+            cpumask_or(&mask, &mask, &extra_cpus_mask);
         }
 
         /* Initialise fields which have other uses for free pages. */
@@ -441,7 +443,7 @@
 
     spin_unlock(&heap_lock);
 
-    if ( unlikely(!cpus_empty(mask)) )
+    if ( unlikely(!cpumask_empty(&mask)) )
     {
         perfc_incr(need_flush_tlb_flush);
         flush_tlb_mask(&mask);
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/rcupdate.c
--- a/xen/common/rcupdate.c     Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/rcupdate.c     Fri Oct 21 09:19:44 2011 +0200
@@ -59,7 +59,6 @@
     .cur = -300,
     .completed = -300,
     .lock = SPIN_LOCK_UNLOCKED,
-    .cpumask = CPU_MASK_NONE,
 };
 
 /*
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/sched_credit.c Fri Oct 21 09:19:44 2011 +0200
@@ -260,7 +260,7 @@
     cpumask_t mask;
 
     ASSERT(cur);
-    cpus_clear(mask);
+    cpumask_clear(&mask);
 
     /* If strictly higher priority than current VCPU, signal the CPU */
     if ( new->pri > cur->pri )
@@ -274,7 +274,7 @@
         else
             CSCHED_STAT_CRANK(tickle_local_other);
 
-        cpu_set(cpu, mask);
+        cpumask_set_cpu(cpu, &mask);
     }
 
     /*
@@ -283,7 +283,7 @@
      */
     if ( cur->pri > CSCHED_PRI_IDLE )
     {
-        if ( cpus_empty(prv->idlers) )
+        if ( cpumask_empty(&prv->idlers) )
         {
             CSCHED_STAT_CRANK(tickle_idlers_none);
         }
@@ -292,24 +292,24 @@
             cpumask_t idle_mask;
 
             cpumask_and(&idle_mask, &prv->idlers, new->vcpu->cpu_affinity);
-            if ( !cpus_empty(idle_mask) )
+            if ( !cpumask_empty(&idle_mask) )
             {
                 CSCHED_STAT_CRANK(tickle_idlers_some);
                 if ( opt_tickle_one_idle )
                 {
                     this_cpu(last_tickle_cpu) = 
-                        cycle_cpu(this_cpu(last_tickle_cpu), idle_mask);
-                    cpu_set(this_cpu(last_tickle_cpu), mask);
+                        cpumask_cycle(this_cpu(last_tickle_cpu), &idle_mask);
+                    cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
                 }
                 else
-                    cpus_or(mask, mask, idle_mask);
+                    cpumask_or(&mask, &mask, &idle_mask);
             }
             cpumask_and(&mask, &mask, new->vcpu->cpu_affinity);
         }
     }
 
     /* Send scheduler interrupts to designated CPUs */
-    if ( !cpus_empty(mask) )
+    if ( !cpumask_empty(&mask) )
         cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ);
 }
 
@@ -471,10 +471,10 @@
      */
     online = CSCHED_CPUONLINE(vc->domain->cpupool);
     cpumask_and(&cpus, online, vc->cpu_affinity);
-    cpu = cpu_isset(vc->processor, cpus)
+    cpu = cpumask_test_cpu(vc->processor, &cpus)
             ? vc->processor
-            : cycle_cpu(vc->processor, cpus);
-    ASSERT( !cpus_empty(cpus) && cpu_isset(cpu, cpus) );
+            : cpumask_cycle(vc->processor, &cpus);
+    ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
 
     /*
      * Try to find an idle processor within the above constraints.
@@ -488,54 +488,54 @@
      * like run two VCPUs on co-hyperthreads while there are idle cores
      * or sockets.
      */
-    cpus_and(idlers, cpu_online_map, CSCHED_PRIV(ops)->idlers);
-    cpu_set(cpu, idlers);
-    cpus_and(cpus, cpus, idlers);
-    cpu_clear(cpu, cpus);
+    cpumask_and(&idlers, &cpu_online_map, &CSCHED_PRIV(ops)->idlers);
+    cpumask_set_cpu(cpu, &idlers);
+    cpumask_and(&cpus, &cpus, &idlers);
+    cpumask_clear_cpu(cpu, &cpus);
 
-    while ( !cpus_empty(cpus) )
+    while ( !cpumask_empty(&cpus) )
     {
         cpumask_t cpu_idlers;
         cpumask_t nxt_idlers;
         int nxt, weight_cpu, weight_nxt;
         int migrate_factor;
 
-        nxt = cycle_cpu(cpu, cpus);
+        nxt = cpumask_cycle(cpu, &cpus);
 
-        if ( cpu_isset(cpu, per_cpu(cpu_core_map, nxt)) )
+        if ( cpumask_test_cpu(cpu, &per_cpu(cpu_core_map, nxt)) )
         {
             /* We're on the same socket, so check the busy-ness of threads.
              * Migrate if # of idlers is less at all */
-            ASSERT( cpu_isset(nxt, per_cpu(cpu_core_map, cpu)) );
+            ASSERT( cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
             migrate_factor = 1;
-            cpus_and(cpu_idlers, idlers, per_cpu(cpu_sibling_map, cpu));
-            cpus_and(nxt_idlers, idlers, per_cpu(cpu_sibling_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_sibling_map, cpu));
+            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_sibling_map, nxt));
         }
         else
         {
             /* We're on different sockets, so check the busy-ness of cores.
              * Migrate only if the other core is twice as idle */
-            ASSERT( !cpu_isset(nxt, per_cpu(cpu_core_map, cpu)) );
+            ASSERT( !cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
             migrate_factor = 2;
-            cpus_and(cpu_idlers, idlers, per_cpu(cpu_core_map, cpu));
-            cpus_and(nxt_idlers, idlers, per_cpu(cpu_core_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_core_map, cpu));
+            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_core_map, nxt));
         }
 
-        weight_cpu = cpus_weight(cpu_idlers);
-        weight_nxt = cpus_weight(nxt_idlers);
+        weight_cpu = cpumask_weight(&cpu_idlers);
+        weight_nxt = cpumask_weight(&nxt_idlers);
         /* smt_power_savings: consolidate work rather than spreading it */
         if ( sched_smt_power_savings ?
              weight_cpu > weight_nxt :
              weight_cpu * migrate_factor < weight_nxt )
         {
-            cpus_and(nxt_idlers, cpus, nxt_idlers);
+            cpumask_and(&nxt_idlers, &cpus, &nxt_idlers);
             spc = CSCHED_PCPU(nxt);
-            cpu = cycle_cpu(spc->idle_bias, nxt_idlers);
-            cpus_andnot(cpus, cpus, per_cpu(cpu_sibling_map, cpu));
+            cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
+            cpumask_andnot(&cpus, &cpus, &per_cpu(cpu_sibling_map, cpu));
         }
         else
         {
-            cpus_andnot(cpus, cpus, nxt_idlers);
+            cpumask_andnot(&cpus, &cpus, &nxt_idlers);
         }
     }
 
@@ -1228,7 +1228,7 @@
     online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
 
     /* If this CPU is going offline we shouldn't steal work. */
-    if ( unlikely(!cpu_isset(cpu, *online)) )
+    if ( unlikely(!cpumask_test_cpu(cpu, online)) )
         goto out;
 
     if ( snext->pri == CSCHED_PRI_IDLE )
@@ -1242,14 +1242,14 @@
      * Peek at non-idling CPUs in the system, starting with our
      * immediate neighbour.
      */
-    cpus_andnot(workers, *online, prv->idlers);
-    cpu_clear(cpu, workers);
+    cpumask_andnot(&workers, online, &prv->idlers);
+    cpumask_clear_cpu(cpu, &workers);
     peer_cpu = cpu;
 
     while ( !cpus_empty(workers) )
     {
-        peer_cpu = cycle_cpu(peer_cpu, workers);
-        cpu_clear(peer_cpu, workers);
+        peer_cpu = cpumask_cycle(peer_cpu, &workers);
+        cpumask_clear_cpu(peer_cpu, &workers);
 
         /*
          * Get ahold of the scheduler lock for this peer CPU.
@@ -1267,7 +1267,7 @@
         /*
          * Any work over there to steal?
          */
-        speer = cpu_isset(peer_cpu, *online) ?
+        speer = cpumask_test_cpu(peer_cpu, online) ?
             csched_runq_steal(peer_cpu, cpu, snext->pri) : NULL;
         pcpu_schedule_unlock(peer_cpu);
         if ( speer != NULL )
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c        Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/sched_credit2.c        Fri Oct 21 09:19:44 2011 +0200
@@ -507,19 +507,19 @@
     }
     
     /* Get a mask of idle, but not tickled */
-    cpus_andnot(mask, rqd->idle, rqd->tickled);
+    cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
     
     /* If it's not empty, choose one */
-    if ( !cpus_empty(mask) )
+    if ( !cpumask_empty(&mask) )
     {
-        ipid=first_cpu(mask);
+        ipid = cpumask_first(&mask);
         goto tickle;
     }
 
     /* Otherwise, look for the non-idle cpu with the lowest credit,
      * skipping cpus which have been tickled but not scheduled yet */
-    cpus_andnot(mask, rqd->active, rqd->idle);
-    cpus_andnot(mask, mask, rqd->tickled);
+    cpumask_andnot(&mask, &rqd->active, &rqd->idle);
+    cpumask_andnot(&mask, &mask, &rqd->tickled);
 
     for_each_cpu_mask(i, mask)
     {
@@ -573,7 +573,7 @@
                   sizeof(d),
                   (unsigned char *)&d);
     }
-    cpu_set(ipid, rqd->tickled);
+    cpumask_set_cpu(ipid, &rqd->tickled);
     cpu_raise_softirq(ipid, SCHEDULE_SOFTIRQ);
 
 no_tickle:
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/schedule.c
--- a/xen/common/schedule.c     Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/schedule.c     Fri Oct 21 09:19:44 2011 +0200
@@ -595,8 +595,8 @@
     if ( v->domain->is_pinned )
         return -EINVAL;
     online = VCPU2ONLINE(v);
-    cpus_and(online_affinity, *affinity, *online);
-    if ( cpus_empty(online_affinity) )
+    cpumask_and(&online_affinity, affinity, online);
+    if ( cpumask_empty(&online_affinity) )
         return -EINVAL;
 
     vcpu_schedule_lock_irq(v);
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/softirq.c
--- a/xen/common/softirq.c      Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/softirq.c      Fri Oct 21 09:19:44 2011 +0200
@@ -71,11 +71,12 @@
 void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
 {
     int cpu;
-    cpumask_t send_mask = CPU_MASK_NONE;
+    cpumask_t send_mask;
 
+    cpumask_clear(&send_mask);
     for_each_cpu_mask(cpu, *mask)
         if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
-            cpu_set(cpu, send_mask);
+            cpumask_set_cpu(cpu, &send_mask);
 
     smp_send_event_check_mask(&send_mask);
 }
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/common/trace.c
--- a/xen/common/trace.c        Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/common/trace.c        Fri Oct 21 09:19:44 2011 +0200
@@ -70,7 +70,7 @@
 int tb_init_done __read_mostly;
 
 /* which CPUs tracing is enabled on */
-static cpumask_t tb_cpu_mask = CPU_MASK_ALL;
+static cpumask_t tb_cpu_mask;
 
 /* which tracing events are enabled */
 static u32 tb_event_mask = TRC_ALL;
@@ -338,6 +338,7 @@
  */
 void __init init_trace_bufs(void)
 {
+    cpumask_setall(&tb_cpu_mask);
     register_cpu_notifier(&cpu_nfb);
 
     if ( opt_tbuf_size )
diff -r 511d5e65a302 -r 1a4223c62ee7 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Fri Oct 21 09:17:42 2011 +0200
+++ b/xen/include/xen/cpumask.h Fri Oct 21 09:19:44 2011 +0200
@@ -13,18 +13,18 @@
  *
  * The available cpumask operations are:
  *
- * void cpu_set(cpu, mask)             turn on bit 'cpu' in mask
- * void cpu_clear(cpu, mask)           turn off bit 'cpu' in mask
- * void cpus_setall(mask)              set all bits
- * void cpus_clear(mask)               clear all bits
- * int cpu_isset(cpu, mask)            true iff bit 'cpu' set in mask
- * int cpu_test_and_set(cpu, mask)     test and set bit 'cpu' in mask
+ * void cpumask_set_cpu(cpu, mask)     turn on bit 'cpu' in mask
+ * void cpumask_clear_cpu(cpu, mask)   turn off bit 'cpu' in mask
+ * void cpumask_setall(mask)           set all bits
+ * void cpumask_clear(mask)            clear all bits
+ * int cpumask_test_cpu(cpu, mask)     true iff bit 'cpu' set in mask
+ * int cpumask_test_and_set_cpu(cpu, mask) test and set bit 'cpu' in mask
  *
- * void cpus_and(dst, src1, src2)      dst = src1 & src2  [intersection]
- * void cpus_or(dst, src1, src2)       dst = src1 | src2  [union]
- * void cpus_xor(dst, src1, src2)      dst = src1 ^ src2
- * void cpus_andnot(dst, src1, src2)   dst = src1 & ~src2
- * void cpus_complement(dst, src)      dst = ~src
+ * void cpumask_and(dst, src1, src2)   dst = src1 & src2  [intersection]
+ * void cpumask_or(dst, src1, src2)    dst = src1 | src2  [union]
+ * void cpumask_xor(dst, src1, src2)   dst = src1 ^ src2
+ * void cpumask_andnot(dst, src1, src2)        dst = src1 & ~src2
+ * void cpumask_complement(dst, src)   dst = ~src
  *
  * int cpus_equal(mask1, mask2)                Does mask1 == mask2?
  * int cpus_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
@@ -33,8 +33,8 @@
  * int cpus_full(mask)                 Is mask full (all bits sets)?
  * int cpus_weight(mask)               Hamming weigh - number of set bits
  *
- * void cpus_shift_right(dst, src, n)  Shift right
- * void cpus_shift_left(dst, src, n)   Shift left
+ * void cpumask_shift_right(dst, src, n) Shift right
+ * void cpumask_shift_left(dst, src, n)        Shift left
  *
  * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
  * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
@@ -110,18 +110,14 @@
        clear_bit(cpumask_check(cpu), dstp->bits);
 }
 
-#define cpumask_setall(dst) __cpus_setall(dst, nr_cpumask_bits)
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
+static inline void cpumask_setall(cpumask_t *dstp)
 {
-       bitmap_fill(dstp->bits, nbits);
+       bitmap_fill(dstp->bits, nr_cpumask_bits);
 }
 
-#define cpumask_clear(dst) __cpus_clear(dst, nr_cpumask_bits)
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
+static inline void cpumask_clear(cpumask_t *dstp)
 {
-       bitmap_zero(dstp->bits, nbits);
+       bitmap_zero(dstp->bits, nr_cpumask_bits);
 }
 
 /* No static inline type checking - see Subtlety (1) above. */
@@ -143,50 +139,33 @@
        return test_and_clear_bit(cpumask_check(cpu), addr->bits);
 }
 
-#define cpumask_and(dst, src1, src2) \
-       __cpus_and(dst, src1, src2, nr_cpumask_bits)
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline void cpumask_and(cpumask_t *dstp, const cpumask_t *src1p,
+                              const cpumask_t *src2p)
 {
-       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
 }
 
-#define cpumask_or(dst, src1, src2) \
-       __cpus_or(dst, src1, src2, nr_cpumask_bits)
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline void cpumask_or(cpumask_t *dstp, const cpumask_t *src1p,
+                             const cpumask_t *src2p)
 {
-       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
 }
 
-#define cpumask_xor(dst, src1, src2) \
-       __cpus_xor(dst, src1, src2, nr_cpumask_bits)
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline void cpumask_xor(cpumask_t *dstp, const cpumask_t *src1p,
+                              const cpumask_t *src2p)
 {
-       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
 }
 
-#define cpumask_andnot(dst, src1, src2) \
-       __cpus_andnot(dst, src1, src2, nr_cpumask_bits)
-#define cpus_andnot(dst, src1, src2) \
-                               __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline void cpumask_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+                                 const cpumask_t *src2p)
 {
-       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
 }
 
-#define cpumask_complement(dst, src) \
-       __cpus_complement(dst, src, nr_cpumask_bits)
-#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
-static inline void __cpus_complement(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int nbits)
+static inline void cpumask_complement(cpumask_t *dstp, const cpumask_t *srcp)
 {
-       bitmap_complement(dstp->bits, srcp->bits, nbits);
+       bitmap_complement(dstp->bits, srcp->bits, nr_cpumask_bits);
 }
 
 #define cpumask_equal(src1, src2) __cpus_equal(src1, src2, nr_cpu_ids)
@@ -236,31 +215,21 @@
        return bitmap_weight(srcp->bits, nbits);
 }
 
-#define cpumask_copy(dest, src) __cpus_copy(dest, src, nr_cpumask_bits)
-#define cpus_copy(dest, src) __cpus_copy(&(dest), &(src), NR_CPUS)
-static inline void __cpus_copy(cpumask_t *dstp, const cpumask_t *srcp, int nbits)
+static inline void cpumask_copy(cpumask_t *dstp, const cpumask_t *srcp)
 {
-       bitmap_copy(dstp->bits, srcp->bits, nbits);
+       bitmap_copy(dstp->bits, srcp->bits, nr_cpumask_bits);
 }
 
-#define cpumask_shift_right(dst, src, n) \
-       __cpus_shift_right(dst, src, n, nr_cpumask_bits)
-#define cpus_shift_right(dst, src, n) \
-                       __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_right(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
+static inline void cpumask_shift_right(cpumask_t *dstp,
+                                      const cpumask_t *srcp, int n)
 {
-       bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+       bitmap_shift_right(dstp->bits, srcp->bits, n, nr_cpumask_bits);
 }
 
-#define cpumask_shift_left(dst, src, n) \
-       __cpus_shift_left(dst, src, n, nr_cpumask_bits)
-#define cpus_shift_left(dst, src, n) \
-                       __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
+static inline void cpumask_shift_left(cpumask_t *dstp,
+                                     const cpumask_t *srcp, int n)
 {
-       bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+       bitmap_shift_left(dstp->bits, srcp->bits, n, nr_cpumask_bits);
 }
 
 #define cpumask_first(src) __first_cpu(src, nr_cpu_ids)
@@ -317,6 +286,7 @@
 
 #define cpumask_of_cpu(cpu) (*cpumask_of(cpu))
 
+#if defined(__ia64__) /* XXX needs cleanup */
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
 #if NR_CPUS <= BITS_PER_LONG
@@ -345,8 +315,10 @@
 /*(cpumask_t)*/ { {                                                    \
        [0] =  1UL                                                      \
 } }
+#endif /* __ia64__ */
 
 #define cpus_addr(src) ((src).bits)
+#define cpumask_bits(maskp) ((maskp)->bits)
 
 #define cpumask_scnprintf(buf, len, src) \
        __cpumask_scnprintf((buf), (len), &(src), nr_cpu_ids)
@@ -388,9 +360,8 @@
 static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
 {
        /*
-        * Once all direct cpumask assignments and all cpus_*() accessors
-        * still referencing NR_CPUS are gone, we could use nr_cpumask_bits
-        * to determine the allocation size here.
+        * Once all direct cpumask assignments are gone, we could use
+        * nr_cpumask_bits to determine the allocation size here.
         */
        return (*mask = xmalloc(cpumask_t)) != NULL;
 }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
