[Xen-devel] [PATCH 02/12] eliminate cpumask accessors referencing NR_CPUS

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 02/12] eliminate cpumask accessors referencing NR_CPUS
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Thu, 20 Oct 2011 14:37:45 +0100
Delivery-date: Thu, 20 Oct 2011 06:40:11 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
... in favor of using the new, nr_cpumask_bits-based ones.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
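
For readers less familiar with the two families: the old cpus_*() helpers
took cpumask_t operands by value and always operated on all NR_CPUS bits,
whereas the new cpumask_*() helpers take pointers and operate on
nr_cpumask_bits (bounded by nr_cpu_ids) only. Below is a minimal standalone
sketch of that calling convention - illustrative only, not Xen code;
NR_CPUS, nr_cpu_ids and the bit arithmetic are simplified stand-ins:

/* Illustrative stand-in, not Xen code. */
#include <stdio.h>
#include <string.h>

#define NR_CPUS 256                      /* compile-time upper bound */
static unsigned int nr_cpu_ids = 8;      /* run-time CPU count (assumed) */
#define nr_cpumask_bits nr_cpu_ids       /* what the new accessors walk */

#define BITS_PER_LONG (8 * sizeof(unsigned long))

typedef struct {
    unsigned long bits[NR_CPUS / BITS_PER_LONG];
} cpumask_t;

/* New-style accessors: pointer arguments, nr_cpumask_bits iteration. */
static void cpumask_clear(cpumask_t *dstp)
{
    memset(dstp->bits, 0, sizeof(dstp->bits));
}

static void cpumask_set_cpu(unsigned int cpu, cpumask_t *dstp)
{
    dstp->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int cpumask_test_cpu(unsigned int cpu, const cpumask_t *srcp)
{
    return !!(srcp->bits[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG)));
}

static unsigned int cpumask_weight(const cpumask_t *srcp)
{
    unsigned int cpu, w = 0;

    for (cpu = 0; cpu < nr_cpumask_bits; cpu++)  /* not NR_CPUS */
        w += cpumask_test_cpu(cpu, srcp);
    return w;
}

int main(void)
{
    cpumask_t mask;

    /* Replaces "cpumask_t mask = CPU_MASK_NONE;" with a run-time clear. */
    cpumask_clear(&mask);
    cpumask_set_cpu(3, &mask);
    printf("weight=%u cpu3=%d\n", cpumask_weight(&mask),
           cpumask_test_cpu(3, &mask));
    return 0;
}

The hunks below apply exactly this mechanical substitution, and additionally
replace static CPU_MASK_NONE/CPU_MASK_ALL initializers with run-time
cpumask_clear()/cpumask_setall() calls.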

--- 2011-10-18.orig/xen/arch/ia64/linux-xen/acpi.c      2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/linux-xen/acpi.c   2011-10-11 17:51:40.000000000 +0200
@@ -957,14 +957,14 @@ int acpi_map_lsapic(acpi_handle handle, 
        buffer.length = ACPI_ALLOCATE_BUFFER;
        buffer.pointer = NULL;
 
-       cpus_complement(tmp_map, cpu_present_map);
-       cpu = first_cpu(tmp_map);
-       if (cpu >= NR_CPUS)
+       cpumask_complement(&tmp_map, &cpu_present_map);
+       cpu = cpumask_first(&tmp_map);
+       if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
        acpi_map_cpu2node(handle, cpu, physid);
 
-       cpu_set(cpu, cpu_present_map);
+       cpumask_set_cpu(cpu, &cpu_present_map);
        ia64_cpu_to_sapicid[cpu] = physid;
 
        *pcpu = cpu;
--- 2011-10-18.orig/xen/arch/ia64/linux-xen/numa.c      2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/linux-xen/numa.c   2011-10-11 17:02:13.000000000 +0200
@@ -51,7 +51,7 @@ void __init build_cpu_to_node_map(void)
        int cpu, i, node;
 
        for(node=0; node < MAX_NUMNODES; node++)
-               cpus_clear(node_to_cpu_mask[node]);
+               cpumask_clear(&node_to_cpu_mask[node]);
 
        for(cpu = 0; cpu < NR_CPUS; ++cpu) {
                node = -1;
@@ -62,6 +62,6 @@ void __init build_cpu_to_node_map(void)
                        }
                cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
                if (node >= 0)
-                       cpu_set(cpu, node_to_cpu_mask[node]);
+                       cpumask_set_cpu(cpu, &node_to_cpu_mask[node]);
        }
 }
--- 2011-10-18.orig/xen/arch/ia64/linux-xen/smpboot.c   2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/linux-xen/smpboot.c        2011-10-11 17:00:51.000000000 +0200
@@ -594,15 +594,15 @@ smp_build_cpu_map (void)
        }
 
        ia64_cpu_to_sapicid[0] = boot_cpu_id;
-       cpus_clear(cpu_present_map);
-       cpu_set(0, cpu_present_map);
-       cpu_set(0, cpu_possible_map);
+       cpumask_clear(&cpu_present_map);
+       cpumask_set_cpu(0, &cpu_present_map);
+       cpumask_set_cpu(0, &cpu_possible_map);
        for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
                sapicid = smp_boot_data.cpu_phys_id[i];
                if (sapicid == boot_cpu_id)
                        continue;
-               cpu_set(cpu, cpu_present_map);
-               cpu_set(cpu, cpu_possible_map);
+               cpumask_set_cpu(cpu, &cpu_present_map);
+               cpumask_set_cpu(cpu, &cpu_possible_map);
                ia64_cpu_to_sapicid[cpu] = sapicid;
                cpu++;
        }
@@ -640,12 +640,12 @@ smp_prepare_cpus (unsigned int max_cpus)
         */
        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated.\n");
-               cpus_clear(cpu_online_map);
-               cpus_clear(cpu_present_map);
-               cpus_clear(cpu_possible_map);
-               cpu_set(0, cpu_online_map);
-               cpu_set(0, cpu_present_map);
-               cpu_set(0, cpu_possible_map);
+               cpumask_clear(&cpu_online_map);
+               cpumask_clear(&cpu_present_map);
+               cpumask_clear(&cpu_possible_map);
+               cpumask_set_cpu(0, &cpu_online_map);
+               cpumask_set_cpu(0, &cpu_present_map);
+               cpumask_set_cpu(0, &cpu_possible_map);
                return;
        }
 }
@@ -688,12 +688,12 @@ clear_cpu_sibling_map(int cpu)
        int i;
 
        for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
+               cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
        for_each_cpu_mask(i, per_cpu(cpu_core_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_core_map, i));
+               cpumask_clear_cpu(cpu, &per_cpu(cpu_core_map, i));
 
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
-       cpus_clear(per_cpu(cpu_core_map, cpu));
+       cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
+       cpumask_clear(&per_cpu(cpu_core_map, cpu));
 }
 
 static void
--- 2011-10-18.orig/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c 2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c      2011-10-11 16:58:35.000000000 +0200
@@ -206,7 +206,7 @@ sn2_global_tlb_purge(unsigned long start
        static DEFINE_SPINLOCK(sn2_ptcg_lock2);
 
        nodes_clear(nodes_flushed);
-       cpus_clear(selected_cpus);
+       cpumask_clear(&selected_cpus);
 
        spin_lock(&sn2_ptcg_lock2);
        node_set(cpu_to_node(smp_processor_id()), nodes_flushed);
--- 2011-10-18.orig/xen/arch/ia64/xen/fw_emul.c 2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/xen/fw_emul.c      2011-10-11 16:43:45.000000000 +0200
@@ -866,10 +866,10 @@ xen_pal_emulator(unsigned long index, u6
                                     "status %lx", status);
 
                if (in1 == PAL_CACHE_TYPE_COHERENT) {
-                       cpus_setall(current->arch.cache_coherent_map);
-                       cpu_clear(processor, current->arch.cache_coherent_map);
-                       cpus_setall(cpu_cache_coherent_map);
-                       cpu_clear(processor, cpu_cache_coherent_map);
+                       cpumask_complement(&current->arch.cache_coherent_map,
+                                          cpumask_of(processor));
+                       cpumask_complement(&cpu_cache_coherent_map,
+                                          cpumask_of(processor));
                }
                break;
            case PAL_PERF_MON_INFO:
--- 2011-10-18.orig/xen/arch/ia64/xen/tlb_track.c       2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/xen/tlb_track.c    2011-10-11 16:58:01.000000000 +0200
@@ -374,7 +374,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
         entry->pte_val = old_pte;
         entry->vaddr = vaddr;
         entry->rid = rid;
-        cpus_clear(entry->pcpu_dirty_mask);
+        cpumask_clear(&entry->pcpu_dirty_mask);
         vcpus_clear(entry->vcpu_dirty_mask);
         list_add(&entry->list, head);
 
--- 2011-10-18.orig/xen/arch/ia64/xen/xensetup.c        2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/ia64/xen/xensetup.c     2011-10-12 08:44:31.000000000 +0200
@@ -592,7 +592,7 @@ skip_move:
     smp_prepare_cpus(max_cpus);
 
     /* We aren't hotplug-capable yet. */
-    cpus_or(cpu_present_map, cpu_present_map, cpu_possible_map);
+    cpumask_or(&cpu_present_map, &cpu_present_map, &cpu_possible_map);
 
     /*  Enable IRQ to receive IPI (needed for ITC sync).  */
     local_irq_enable();
--- 2011-10-18.orig/xen/arch/x86/acpi/cpu_idle.c        2011-10-07 09:18:28.000000000 +0200
+++ 2011-10-18/xen/arch/x86/acpi/cpu_idle.c     2011-10-12 08:34:33.000000000 +0200
@@ -233,13 +233,13 @@ void cpuidle_wakeup_mwait(cpumask_t *mas
     cpumask_t target;
     unsigned int cpu;
 
-    cpus_and(target, *mask, cpuidle_mwait_flags);
+    cpumask_and(&target, mask, &cpuidle_mwait_flags);
 
     /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
     for_each_cpu_mask(cpu, target)
         mwait_wakeup(cpu) = 0;
 
-    cpus_andnot(*mask, *mask, target);
+    cpumask_andnot(mask, mask, &target);
 }
 
 static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
--- 2011-10-18.orig/xen/arch/x86/acpi/cpufreq/cpufreq.c 2011-09-21 16:40:02.000000000 +0200
+++ 2011-10-18/xen/arch/x86/acpi/cpufreq/cpufreq.c      2011-10-12 08:35:12.000000000 +0200
@@ -446,7 +446,7 @@ static int acpi_cpufreq_target(struct cp
     if (unlikely(result))
         return -ENODEV;
 
-    cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+    cpumask_and(&online_policy_cpus, &cpu_online_map, &policy->cpus);
 
     next_perf_state = data->freq_table[next_state].index;
     if (perf->state == next_perf_state) {
--- 2011-10-18.orig/xen/arch/x86/cpu/mcheck/mce.c       2011-10-07 09:18:39.000000000 +0200
+++ 2011-10-18/xen/arch/x86/cpu/mcheck/mce.c    2011-10-11 17:22:32.000000000 +0200
@@ -1537,20 +1537,19 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
             return x86_mcerr("do_mca #MC", -ENODEV);
 
         if ( op->u.mc_inject_v2.flags & XEN_MC_INJECT_CPU_BROADCAST )
-            cpus_copy(cpumap, cpu_online_map);
+            cpumask_copy(&cpumap, &cpu_online_map);
         else
         {
             int gcw;
 
-            cpus_clear(cpumap);
             xenctl_cpumap_to_cpumask(&cpumap,
                                      &op->u.mc_inject_v2.cpumap);
-            gcw = cpus_weight(cpumap);
-            cpus_and(cpumap, cpu_online_map, cpumap);
+            gcw = cpumask_weight(&cpumap);
+            cpumask_and(&cpumap, &cpu_online_map, &cpumap);
 
-            if ( cpus_empty(cpumap) )
+            if ( cpumask_empty(&cpumap) )
                 return x86_mcerr("No online CPU passed\n", -EINVAL);
-            else if ( gcw != cpus_weight(cpumap) )
+            else if ( gcw != cpumask_weight(&cpumap) )
                 dprintk(XENLOG_INFO,
                         "Not all required CPUs are online\n");
         }
@@ -1559,7 +1558,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
         {
         case XEN_MC_INJECT_TYPE_MCE:
             if ( mce_broadcast &&
-                 !cpus_equal(cpumap, cpu_online_map) )
+                 !cpumask_equal(&cpumap, &cpu_online_map) )
                 printk("Not trigger MCE on all CPUs, may HANG!\n");
             on_selected_cpus(&cpumap, x86_mc_mceinject, NULL, 1);
             break;
--- 2011-10-18.orig/xen/arch/x86/hpet.c 2011-10-18 11:14:42.000000000 +0200
+++ 2011-10-18/xen/arch/x86/hpet.c      2011-10-12 09:00:51.000000000 +0200
@@ -178,7 +178,7 @@ again:
     spin_unlock_irq(&ch->lock);
 
     next_event = STIME_MAX;
-    mask = (cpumask_t)CPU_MASK_NONE;
+    cpumask_clear(&mask);
     now = NOW();
 
     /* find all expired events */
@@ -189,11 +189,11 @@ again:
         rmb();
         deadline = per_cpu(timer_deadline, cpu);
         rmb();
-        if ( !cpu_isset(cpu, ch->cpumask) )
+        if ( !cpumask_test_cpu(cpu, &ch->cpumask) )
             continue;
 
         if ( deadline <= now )
-            cpu_set(cpu, mask);
+            cpumask_set_cpu(cpu, &mask);
         else if ( deadline < next_event )
             next_event = deadline;
     }
--- 2011-10-18.orig/xen/arch/x86/hvm/nestedhvm.c        2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/x86/hvm/nestedhvm.c     2011-10-11 17:24:46.000000000 +0200
@@ -116,7 +116,7 @@ nestedhvm_vmcx_flushtlb(struct p2m_domai
 {
     on_selected_cpus(&p2m->p2m_dirty_cpumask, nestedhvm_flushtlb_ipi,
         p2m->domain, 1);
-    cpus_clear(p2m->p2m_dirty_cpumask);
+    cpumask_clear(&p2m->p2m_dirty_cpumask);
 }
 
 bool_t
--- 2011-10-18.orig/xen/arch/x86/io_apic.c      2011-10-18 12:27:30.000000000 +0200
+++ 2011-10-18/xen/arch/x86/io_apic.c   2011-10-18 13:32:32.000000000 +0200
@@ -1850,7 +1850,7 @@ static void __init check_timer(void)
     int apic1, pin1, apic2, pin2;
     int vector, ret;
     unsigned long flags;
-    cpumask_t mask_all = CPU_MASK_ALL;
+    cpumask_t mask_all;
 
     local_irq_save(flags);
 
@@ -1861,6 +1861,7 @@ static void __init check_timer(void)
     vector = FIRST_HIPRIORITY_VECTOR;
     clear_irq_vector(0);
 
+    cpumask_setall(&mask_all);
     if ((ret = bind_irq_vector(0, vector, &mask_all)))
         printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", ret);
     
--- 2011-10-18.orig/xen/arch/x86/irq.c  2011-10-18 12:37:35.000000000 +0200
+++ 2011-10-18/xen/arch/x86/irq.c       2011-10-18 13:32:30.000000000 +0200
@@ -115,10 +115,10 @@ static int __init __bind_irq_vector(int 
     BUG_ON((unsigned)irq >= nr_irqs);
     BUG_ON((unsigned)vector >= NR_VECTORS);
 
-    cpus_and(online_mask, *cpu_mask, cpu_online_map);
-    if (cpus_empty(online_mask))
+    cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
+    if (cpumask_empty(&online_mask))
         return -EINVAL;
-    if ((cfg->vector == vector) && cpus_equal(cfg->cpu_mask, online_mask))
+    if ((cfg->vector == vector) && cpumask_equal(&cfg->cpu_mask, &online_mask))
         return 0;
     if (cfg->vector != IRQ_VECTOR_UNASSIGNED) 
         return -EBUSY;
@@ -126,7 +126,7 @@ static int __init __bind_irq_vector(int 
     for_each_cpu_mask(cpu, online_mask)
         per_cpu(vector_irq, cpu)[vector] = irq;
     cfg->vector = vector;
-    cfg->cpu_mask = online_mask;
+    cpumask_copy(&cfg->cpu_mask, &online_mask);
     if ( cfg->used_vectors )
     {
         ASSERT(!test_bit(vector, cfg->used_vectors));
@@ -197,7 +197,7 @@ static void dynamic_irq_cleanup(unsigned
     desc->msi_desc = NULL;
     desc->handler = &no_irq_type;
     desc->arch.used_vectors = NULL;
-    cpus_setall(desc->affinity);
+    cpumask_setall(&desc->affinity);
     spin_unlock_irqrestore(&desc->lock, flags);
 
     /* Wait to make sure it's not being used on another CPU */
@@ -217,7 +217,7 @@ static void __clear_irq_vector(int irq)
 
     /* Always clear cfg->vector */
     vector = cfg->vector;
-    cpus_and(tmp_mask, cfg->cpu_mask, cpu_online_map);
+    cpumask_and(&tmp_mask, &cfg->cpu_mask, &cpu_online_map);
 
     for_each_cpu_mask(cpu, tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[vector] == irq );
@@ -225,7 +225,7 @@ static void __clear_irq_vector(int irq)
     }
 
     cfg->vector = IRQ_VECTOR_UNASSIGNED;
-    cpus_clear(cfg->cpu_mask);
+    cpumask_clear(&cfg->cpu_mask);
 
     if ( cfg->used_vectors )
     {
@@ -242,7 +242,7 @@ static void __clear_irq_vector(int irq)
 
     /* If we were in motion, also clear cfg->old_vector */
     old_vector = cfg->old_vector;
-    cpus_and(tmp_mask, cfg->old_cpu_mask, cpu_online_map);
+    cpumask_and(&tmp_mask, &cfg->old_cpu_mask, &cpu_online_map);
 
     for_each_cpu_mask(cpu, tmp_mask) {
         ASSERT( per_cpu(vector_irq, cpu)[old_vector] == irq );
@@ -251,7 +251,7 @@ static void __clear_irq_vector(int irq)
      }
 
     cfg->old_vector = IRQ_VECTOR_UNASSIGNED;
-    cpus_clear(cfg->old_cpu_mask);
+    cpumask_clear(&cfg->old_cpu_mask);
 
     if ( cfg->used_vectors )
     {
@@ -303,7 +303,7 @@ static void __init init_one_irq_desc(str
     desc->action  = NULL;
     desc->msi_desc = NULL;
     spin_lock_init(&desc->lock);
-    cpus_setall(desc->affinity);
+    cpumask_setall(&desc->affinity);
     INIT_LIST_HEAD(&desc->rl_link);
 }
 
@@ -311,8 +311,8 @@ static void __init init_one_irq_cfg(stru
 {
     cfg->vector = IRQ_VECTOR_UNASSIGNED;
     cfg->old_vector = IRQ_VECTOR_UNASSIGNED;
-    cpus_clear(cfg->cpu_mask);
-    cpus_clear(cfg->old_cpu_mask);
+    cpumask_clear(&cfg->cpu_mask);
+    cpumask_clear(&cfg->old_cpu_mask);
     cfg->used_vectors = NULL;
     cfg->used = IRQ_UNUSED;
 }
@@ -425,8 +425,8 @@ int __assign_irq_vector(int irq, struct 
 
     old_vector = irq_to_vector(irq);
     if (old_vector) {
-        cpus_and(tmp_mask, *mask, cpu_online_map);
-        if (cpus_intersects(tmp_mask, cfg->cpu_mask)) {
+        cpumask_and(&tmp_mask, mask, &cpu_online_map);
+        if (cpumask_intersects(&tmp_mask, &cfg->cpu_mask)) {
             cfg->vector = old_vector;
             return 0;
         }
@@ -455,7 +455,8 @@ int __assign_irq_vector(int irq, struct 
         if (!cpu_online(cpu))
             continue;
 
-        cpus_and(tmp_mask, *vector_allocation_cpumask(cpu), cpu_online_map);
+        cpumask_and(&tmp_mask, vector_allocation_cpumask(cpu),
+                    &cpu_online_map);
 
         vector = current_vector;
         offset = current_offset;
@@ -485,14 +486,14 @@ next:
         local_irq_save(flags);
         if (old_vector) {
             cfg->move_in_progress = 1;
-            cpus_copy(cfg->old_cpu_mask, cfg->cpu_mask);
+            cpumask_copy(&cfg->old_cpu_mask, &cfg->cpu_mask);
             cfg->old_vector = cfg->vector;
         }
         trace_irq_mask(TRC_HW_IRQ_ASSIGN_VECTOR, irq, vector, &tmp_mask);
         for_each_cpu_mask(new_cpu, tmp_mask)
             per_cpu(vector_irq, new_cpu)[vector] = irq;
         cfg->vector = vector;
-        cpus_copy(cfg->cpu_mask, tmp_mask);
+        cpumask_copy(&cfg->cpu_mask, &tmp_mask);
 
         cfg->used = IRQ_USED;
         ASSERT((cfg->used_vectors == NULL)
@@ -529,7 +530,7 @@ int assign_irq_vector(int irq)
     ret = __assign_irq_vector(irq, cfg, TARGET_CPUS);
     if (!ret) {
         ret = cfg->vector;
-        cpus_copy(desc->affinity, cfg->cpu_mask);
+        cpumask_copy(&desc->affinity, &cfg->cpu_mask);
     }
     spin_unlock_irqrestore(&vector_lock, flags);
     return ret;
@@ -582,7 +583,7 @@ void move_masked_irq(struct irq_desc *de
     if (likely(cpus_intersects(desc->pending_mask, cpu_online_map)))
         desc->handler->set_affinity(desc, &desc->pending_mask);
 
-    cpus_clear(desc->pending_mask);
+    cpumask_clear(&desc->pending_mask);
 }
 
 void move_native_irq(struct irq_desc *desc)
@@ -729,7 +730,7 @@ void irq_set_affinity(struct irq_desc *d
     ASSERT(spin_is_locked(&desc->lock));
     desc->status &= ~IRQ_MOVE_PENDING;
     wmb();
-    cpus_copy(desc->pending_mask, *mask);
+    cpumask_copy(&desc->pending_mask, mask);
     wmb();
     desc->status |= IRQ_MOVE_PENDING;
 }
@@ -1474,7 +1475,6 @@ int pirq_guest_bind(struct vcpu *v, stru
     struct irq_desc         *desc;
     irq_guest_action_t *action, *newaction = NULL;
     int                 rc = 0;
-    cpumask_t           cpumask = CPU_MASK_NONE;
 
     WARN_ON(!spin_is_locked(&v->domain->event_lock));
     BUG_ON(!local_irq_is_enabled());
@@ -1521,7 +1521,7 @@ int pirq_guest_bind(struct vcpu *v, stru
         action->in_flight   = 0;
         action->shareable   = will_share;
         action->ack_type    = pirq_acktype(v->domain, pirq->pirq);
-        cpus_clear(action->cpu_eoi_map);
+        cpumask_clear(&action->cpu_eoi_map);
         init_timer(&action->eoi_timer, irq_guest_eoi_timer_fn, desc, 0);
 
         desc->status |= IRQ_GUEST;
@@ -1529,9 +1529,8 @@ int pirq_guest_bind(struct vcpu *v, stru
         desc->handler->startup(desc);
 
         /* Attempt to bind the interrupt target to the correct CPU. */
-        cpu_set(v->processor, cpumask);
         if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
-            desc->handler->set_affinity(desc, &cpumask);
+            desc->handler->set_affinity(desc, cpumask_of(v->processor));
     }
     else if ( !will_share || !action->shareable )
     {
@@ -2070,18 +2069,18 @@ void fixup_irqs(void)
 
         spin_lock(&desc->lock);
 
-        affinity = desc->affinity;
-        if ( !desc->action || cpus_subset(affinity, cpu_online_map) )
+        cpumask_copy(&affinity, &desc->affinity);
+        if ( !desc->action || cpumask_subset(&affinity, &cpu_online_map) )
         {
             spin_unlock(&desc->lock);
             continue;
         }
 
-        cpus_and(affinity, affinity, cpu_online_map);
-        if ( cpus_empty(affinity) )
+        cpumask_and(&affinity, &affinity, &cpu_online_map);
+        if ( cpumask_empty(&affinity) )
         {
             break_affinity = 1;
-            affinity = cpu_online_map;
+            cpumask_copy(&affinity, &cpu_online_map);
         }
 
         if ( desc->handler->disable )
--- 2011-10-18.orig/xen/arch/x86/mm.c   2011-10-20 14:46:20.000000000 +0200
+++ 2011-10-18/xen/arch/x86/mm.c        2011-10-20 14:47:26.000000000 +0200
@@ -1339,8 +1339,10 @@ static void pae_flush_pgd(
 
     if ( unlikely(shadow_mode_enabled(d)) )
     {
-        cpumask_t m = CPU_MASK_NONE;
+        cpumask_t m;
+
         /* Re-shadow this l3 table on any vcpus that are using it */
+        cpumask_clear(&m);
         for_each_vcpu ( d, v )
             if ( pagetable_get_pfn(v->arch.guest_table) == mfn )
             {
@@ -2887,7 +2889,7 @@ static inline int vcpumask_to_pcpumask(
     struct vcpu *v;
     bool_t is_native = !is_pv_32on64_domain(d);
 
-    cpus_clear(*pmask);
+    cpumask_clear(pmask);
     for ( vmask = 0, offs = 0; ; ++offs)
     {
         vcpu_bias = offs * (is_native ? BITS_PER_LONG : 32);
@@ -2899,7 +2901,7 @@ static inline int vcpumask_to_pcpumask(
                       copy_from_guest_offset((unsigned int *)&vmask, bmap,
                                              offs, 1)) )
         {
-            cpus_clear(*pmask);
+            cpumask_clear(pmask);
             return -EFAULT;
         }
 
@@ -3194,12 +3196,13 @@ int do_mmuext_op(
             else if ( likely(cache_flush_permitted(d)) )
             {
                 unsigned int cpu;
-                cpumask_t mask = CPU_MASK_NONE;
+                cpumask_t mask;
 
+                cpumask_clear(&mask);
                 for_each_online_cpu(cpu)
-                    if ( !cpus_intersects(mask,
-                                          per_cpu(cpu_sibling_map, cpu)) )
-                        cpu_set(cpu, mask);
+                    if ( !cpumask_intersects(&mask,
+                                             &per_cpu(cpu_sibling_map, cpu)) )
+                        cpumask_set_cpu(cpu, &mask);
                 flush_mask(&mask, FLUSH_CACHE);
             }
             else
--- 2011-10-18.orig/xen/arch/x86/mm/p2m.c       2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/x86/mm/p2m.c    2011-10-14 09:47:46.000000000 +0200
@@ -81,7 +81,7 @@ static void p2m_initialise(struct domain
     p2m->default_access = p2m_access_rwx;
 
     p2m->cr3 = CR3_EADDR;
-    cpus_clear(p2m->p2m_dirty_cpumask);
+    cpumask_clear(&p2m->p2m_dirty_cpumask);
 
     if ( hap_enabled(d) && (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
         ept_p2m_init(p2m);
--- 2011-10-18.orig/xen/arch/x86/mm/shadow/common.c     2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/x86/mm/shadow/common.c  2011-10-11 17:14:11.000000000 +0200
@@ -3459,7 +3459,7 @@ static void sh_unshadow_for_p2m_change(s
         p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
         if ( p2m_is_valid(p2mt) && mfn_valid(omfn) )
         {
-            cpus_clear(flushmask);
+            cpumask_clear(&flushmask);
 
             /* If we're replacing a superpage with a normal L1 page, map it */
             if ( (l1e_get_flags(new) & _PAGE_PRESENT)
--- 2011-10-18.orig/xen/arch/x86/smp.c  2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/x86/smp.c       2011-10-12 08:33:44.000000000 +0200
@@ -240,12 +240,12 @@ void flush_area_mask(const cpumask_t *ma
     if ( !cpus_subset(*mask, *cpumask_of(smp_processor_id())) )
     {
         spin_lock(&flush_lock);
-        cpus_and(flush_cpumask, *mask, cpu_online_map);
-        cpu_clear(smp_processor_id(), flush_cpumask);
+        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
+        cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
         flush_va      = va;
         flush_flags   = flags;
         send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
-        while ( !cpus_empty(flush_cpumask) )
+        while ( !cpumask_empty(&flush_cpumask) )
             cpu_relax();
         spin_unlock(&flush_lock);
     }
--- 2011-10-18.orig/xen/arch/x86/smpboot.c      2011-10-12 13:59:39.000000000 +0200
+++ 2011-10-18/xen/arch/x86/smpboot.c   2011-10-14 15:08:39.000000000 +0200
@@ -533,8 +533,9 @@ int alloc_cpu_id(void)
 {
     cpumask_t tmp_map;
     int cpu;
-    cpus_complement(tmp_map, cpu_present_map);
-    cpu = first_cpu(tmp_map);
+
+    cpumask_complement(&tmp_map, &cpu_present_map);
+    cpu = cpumask_first(&tmp_map);
     return (cpu < nr_cpu_ids) ? cpu : -ENODEV;
 }
 
@@ -818,18 +819,18 @@ remove_siblinginfo(int cpu)
     {
         cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
         /* Last thread sibling in this cpu core going down. */
-        if ( cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1 )
+        if ( cpumask_weight(&per_cpu(cpu_sibling_map, cpu)) == 1 )
             c[sibling].booted_cores--;
     }
    
     for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-        cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-    cpus_clear(per_cpu(cpu_sibling_map, cpu));
-    cpus_clear(per_cpu(cpu_core_map, cpu));
+        cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, sibling));
+    cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
+    cpumask_clear(&per_cpu(cpu_core_map, cpu));
     c[cpu].phys_proc_id = BAD_APICID;
     c[cpu].cpu_core_id = BAD_APICID;
     c[cpu].compute_unit_id = BAD_APICID;
-    cpu_clear(cpu, cpu_sibling_setup_map);
+    cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
 }
 
 void __cpu_disable(void)
--- 2011-10-18.orig/xen/arch/x86/time.c 2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/arch/x86/time.c      2011-10-12 09:01:16.000000000 +0200
@@ -185,15 +185,15 @@ static void smp_send_timer_broadcast_ipi
     int cpu = smp_processor_id();
     cpumask_t mask;
 
-    cpus_and(mask, cpu_online_map, pit_broadcast_mask);
+    cpumask_and(&mask, &cpu_online_map, &pit_broadcast_mask);
 
-    if ( cpu_isset(cpu, mask) )
+    if ( cpumask_test_cpu(cpu, &mask) )
     {
-        cpu_clear(cpu, mask);
+        cpumask_clear_cpu(cpu, &mask);
         raise_softirq(TIMER_SOFTIRQ);
     }
 
-    if ( !cpus_empty(mask) )
+    if ( !cpumask_empty(&mask) )
     {
         cpumask_raise_softirq(&mask, TIMER_SOFTIRQ);
     }
@@ -1226,7 +1226,7 @@ void check_tsc_warp(unsigned long tsc_kh
 }
 
 static unsigned long tsc_max_warp, tsc_check_count;
-static cpumask_t tsc_check_cpumask = CPU_MASK_NONE;
+static cpumask_t tsc_check_cpumask;
 
 static void tsc_check_slave(void *unused)
 {
--- 2011-10-18.orig/xen/common/cpu.c    2011-10-17 08:39:11.000000000 +0200
+++ 2011-10-18/xen/common/cpu.c 2011-10-17 08:39:39.000000000 +0200
@@ -176,7 +176,7 @@ int disable_nonboot_cpus(void)
 
     BUG_ON(smp_processor_id() != 0);
 
-    cpus_clear(frozen_cpus);
+    cpumask_clear(&frozen_cpus);
 
     printk("Disabling non-boot CPUs ...\n");
 
@@ -192,7 +192,7 @@ int disable_nonboot_cpus(void)
             break;
         }
 
-        cpu_set(cpu, frozen_cpus);
+        cpumask_set_cpu(cpu, &frozen_cpus);
     }
 
     BUG_ON(!error && (num_online_cpus() != 1));
@@ -214,5 +214,5 @@ void enable_nonboot_cpus(void)
         }
     }
 
-    cpus_clear(frozen_cpus);
+    cpumask_clear(&frozen_cpus);
 }
--- 2011-10-18.orig/xen/common/cpupool.c        2011-10-07 09:17:45.000000000 +0200
+++ 2011-10-18/xen/common/cpupool.c     2011-10-12 09:02:23.000000000 +0200
@@ -29,7 +29,7 @@ static struct cpupool *cpupool_list;    
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
-static cpumask_t cpupool_locked_cpus = CPU_MASK_NONE;
+static cpumask_t cpupool_locked_cpus;
 
 static DEFINE_SPINLOCK(cpupool_lock);
 
--- 2011-10-18.orig/xen/common/domain.c 2011-09-21 16:33:10.000000000 +0200
+++ 2011-10-18/xen/common/domain.c      2011-10-12 09:02:08.000000000 +0200
@@ -359,11 +359,12 @@ struct domain *domain_create(
 
 void domain_update_node_affinity(struct domain *d)
 {
-    cpumask_t cpumask = CPU_MASK_NONE;
+    cpumask_t cpumask;
     nodemask_t nodemask = NODE_MASK_NONE;
     struct vcpu *v;
     unsigned int node;
 
+    cpumask_clear(&cpumask);
     spin_lock(&d->node_affinity_lock);
 
     for_each_vcpu ( d, v )
--- 2011-10-18.orig/xen/common/domctl.c 2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/common/domctl.c      2011-10-18 13:32:43.000000000 +0200
@@ -37,9 +37,9 @@ int cpumask_to_xenctl_cpumap(
     uint8_t bytemap[(NR_CPUS + 7) / 8];
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
-    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));
+    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
 
-    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);
+    bitmap_long_to_byte(bytemap, cpumask_bits(cpumask), nr_cpu_ids);
 
     if ( copy_bytes != 0 )
         if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
@@ -59,7 +59,7 @@ int xenctl_cpumap_to_cpumask(
     uint8_t bytemap[(NR_CPUS + 7) / 8];
 
     guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
-    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));
+    copy_bytes  = min_t(unsigned int, guest_bytes, (nr_cpu_ids + 7) / 8);
 
     memset(bytemap, 0, sizeof(bytemap));
 
@@ -71,7 +71,7 @@ int xenctl_cpumap_to_cpumask(
             bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
     }
 
-    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
+    bitmap_byte_to_long(cpumask_bits(cpumask), bytemap, nr_cpu_ids);
 
     return 0;
 }
@@ -154,7 +154,7 @@ static unsigned int default_vcpu0_locati
     cpumask_t      cpu_exclude_map;
 
     /* Do an initial CPU placement. Pick the least-populated CPU. */
-    nr_cpus = last_cpu(cpu_online_map) + 1;
+    nr_cpus = cpumask_last(&cpu_online_map) + 1;
     cnt = xzalloc_array(unsigned int, nr_cpus);
     if ( cnt )
     {
@@ -171,18 +171,19 @@ static unsigned int default_vcpu0_locati
      * If we're on a HT system, we only auto-allocate to a non-primary HT. We 
      * favour high numbered CPUs in the event of a tie.
      */
-    cpu = first_cpu(per_cpu(cpu_sibling_map, 0));
-    if ( cpus_weight(per_cpu(cpu_sibling_map, 0)) > 1 )
-        cpu = next_cpu(cpu, per_cpu(cpu_sibling_map, 0));
-    cpu_exclude_map = per_cpu(cpu_sibling_map, 0);
+    cpumask_copy(&cpu_exclude_map, &per_cpu(cpu_sibling_map, 0));
+    cpu = cpumask_first(&cpu_exclude_map);
+    if ( cpumask_weight(&cpu_exclude_map) > 1 )
+        cpu = cpumask_next(cpu, &cpu_exclude_map);
     for_each_cpu_mask(i, *online)
     {
-        if ( cpu_isset(i, cpu_exclude_map) )
+        if ( cpumask_test_cpu(i, &cpu_exclude_map) )
             continue;
-        if ( (i == first_cpu(per_cpu(cpu_sibling_map, i))) &&
-             (cpus_weight(per_cpu(cpu_sibling_map, i)) > 1) )
+        if ( (i == cpumask_first(&per_cpu(cpu_sibling_map, i))) &&
+             (cpumask_weight(&per_cpu(cpu_sibling_map, i)) > 1) )
             continue;
-        cpus_or(cpu_exclude_map, cpu_exclude_map, per_cpu(cpu_sibling_map, i));
+        cpumask_or(&cpu_exclude_map, &cpu_exclude_map,
+                   &per_cpu(cpu_sibling_map, i));
         if ( !cnt || cnt[i] <= cnt[cpu] )
             cpu = i;
     }
--- 2011-10-18.orig/xen/common/keyhandler.c     2011-09-21 16:32:28.000000000 +0200
+++ 2011-10-18/xen/common/keyhandler.c  2011-10-12 09:07:34.000000000 +0200
@@ -316,7 +316,7 @@ static struct keyhandler dump_domains_ke
     .desc = "dump domain (and guest debug) info"
 };
 
-static cpumask_t read_clocks_cpumask = CPU_MASK_NONE;
+static cpumask_t read_clocks_cpumask;
 static DEFINE_PER_CPU(s_time_t, read_clocks_time);
 static DEFINE_PER_CPU(u64, read_cycles_time);
 
--- 2011-10-18.orig/xen/common/page_alloc.c     2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/common/page_alloc.c  2011-10-18 13:32:47.000000000 +0200
@@ -304,7 +304,7 @@ static struct page_info *alloc_heap_page
     unsigned int first_node, i, j, zone = 0, nodemask_retry = 0;
     unsigned int node = (uint8_t)((memflags >> _MEMF_node) - 1);
     unsigned long request = 1UL << order;
-    cpumask_t extra_cpus_mask, mask;
+    cpumask_t mask;
     struct page_info *pg;
     nodemask_t nodemask = (d != NULL ) ? d->node_affinity : node_online_map;
 
@@ -418,7 +418,7 @@ static struct page_info *alloc_heap_page
     if ( d != NULL )
         d->last_alloc_node = node;
 
-    cpus_clear(mask);
+    cpumask_clear(&mask);
 
     for ( i = 0; i < (1 << order); i++ )
     {
@@ -429,9 +429,11 @@ static struct page_info *alloc_heap_page
         if ( pg[i].u.free.need_tlbflush )
         {
             /* Add in extra CPUs that need flushing because of this page. */
-            cpus_andnot(extra_cpus_mask, cpu_online_map, mask);
+            static cpumask_t extra_cpus_mask;
+
+            cpumask_andnot(&extra_cpus_mask, &cpu_online_map, &mask);
             tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
-            cpus_or(mask, mask, extra_cpus_mask);
+            cpumask_or(&mask, &mask, &extra_cpus_mask);
         }
 
         /* Initialise fields which have other uses for free pages. */
@@ -441,7 +443,7 @@ static struct page_info *alloc_heap_page
 
     spin_unlock(&heap_lock);
 
-    if ( unlikely(!cpus_empty(mask)) )
+    if ( unlikely(!cpumask_empty(&mask)) )
     {
         perfc_incr(need_flush_tlb_flush);
         flush_tlb_mask(&mask);
--- 2011-10-18.orig/xen/common/rcupdate.c       2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/common/rcupdate.c    2011-10-14 09:47:53.000000000 +0200
@@ -59,7 +59,6 @@ static struct rcu_ctrlblk {
     .cur = -300,
     .completed = -300,
     .lock = SPIN_LOCK_UNLOCKED,
-    .cpumask = CPU_MASK_NONE,
 };
 
 /*
--- 2011-10-18.orig/xen/common/sched_credit.c   2011-10-07 09:18:05.000000000 +0200
+++ 2011-10-18/xen/common/sched_credit.c        2011-10-12 08:38:35.000000000 +0200
@@ -260,7 +260,7 @@ __runq_tickle(unsigned int cpu, struct c
     cpumask_t mask;
 
     ASSERT(cur);
-    cpus_clear(mask);
+    cpumask_clear(&mask);
 
     /* If strictly higher priority than current VCPU, signal the CPU */
     if ( new->pri > cur->pri )
@@ -274,7 +274,7 @@ __runq_tickle(unsigned int cpu, struct c
         else
             CSCHED_STAT_CRANK(tickle_local_other);
 
-        cpu_set(cpu, mask);
+        cpumask_set_cpu(cpu, &mask);
     }
 
     /*
@@ -283,7 +283,7 @@ __runq_tickle(unsigned int cpu, struct c
      */
     if ( cur->pri > CSCHED_PRI_IDLE )
     {
-        if ( cpus_empty(prv->idlers) )
+        if ( cpumask_empty(&prv->idlers) )
         {
             CSCHED_STAT_CRANK(tickle_idlers_none);
         }
@@ -292,24 +292,24 @@ __runq_tickle(unsigned int cpu, struct c
             cpumask_t idle_mask;
 
             cpumask_and(&idle_mask, &prv->idlers, new->vcpu->cpu_affinity);
-            if ( !cpus_empty(idle_mask) )
+            if ( !cpumask_empty(&idle_mask) )
             {
                 CSCHED_STAT_CRANK(tickle_idlers_some);
                 if ( opt_tickle_one_idle )
                 {
                     this_cpu(last_tickle_cpu) = 
-                        cycle_cpu(this_cpu(last_tickle_cpu), idle_mask);
-                    cpu_set(this_cpu(last_tickle_cpu), mask);
+                        cpumask_cycle(this_cpu(last_tickle_cpu), &idle_mask);
+                    cpumask_set_cpu(this_cpu(last_tickle_cpu), &mask);
                 }
                 else
-                    cpus_or(mask, mask, idle_mask);
+                    cpumask_or(&mask, &mask, &idle_mask);
             }
             cpumask_and(&mask, &mask, new->vcpu->cpu_affinity);
         }
     }
 
     /* Send scheduler interrupts to designated CPUs */
-    if ( !cpus_empty(mask) )
+    if ( !cpumask_empty(&mask) )
         cpumask_raise_softirq(&mask, SCHEDULE_SOFTIRQ);
 }
 
@@ -471,10 +471,10 @@ _csched_cpu_pick(const struct scheduler 
      */
     online = CSCHED_CPUONLINE(vc->domain->cpupool);
     cpumask_and(&cpus, online, vc->cpu_affinity);
-    cpu = cpu_isset(vc->processor, cpus)
+    cpu = cpumask_test_cpu(vc->processor, &cpus)
             ? vc->processor
-            : cycle_cpu(vc->processor, cpus);
-    ASSERT( !cpus_empty(cpus) && cpu_isset(cpu, cpus) );
+            : cpumask_cycle(vc->processor, &cpus);
+    ASSERT( !cpumask_empty(&cpus) && cpumask_test_cpu(cpu, &cpus) );
 
     /*
      * Try to find an idle processor within the above constraints.
@@ -488,54 +488,54 @@ _csched_cpu_pick(const struct scheduler 
      * like run two VCPUs on co-hyperthreads while there are idle cores
      * or sockets.
      */
-    cpus_and(idlers, cpu_online_map, CSCHED_PRIV(ops)->idlers);
-    cpu_set(cpu, idlers);
-    cpus_and(cpus, cpus, idlers);
-    cpu_clear(cpu, cpus);
+    cpumask_and(&idlers, &cpu_online_map, &CSCHED_PRIV(ops)->idlers);
+    cpumask_set_cpu(cpu, &idlers);
+    cpumask_and(&cpus, &cpus, &idlers);
+    cpumask_clear_cpu(cpu, &cpus);
 
-    while ( !cpus_empty(cpus) )
+    while ( !cpumask_empty(&cpus) )
     {
         cpumask_t cpu_idlers;
         cpumask_t nxt_idlers;
         int nxt, weight_cpu, weight_nxt;
         int migrate_factor;
 
-        nxt = cycle_cpu(cpu, cpus);
+        nxt = cpumask_cycle(cpu, &cpus);
 
-        if ( cpu_isset(cpu, per_cpu(cpu_core_map, nxt)) )
+        if ( cpumask_test_cpu(cpu, &per_cpu(cpu_core_map, nxt)) )
         {
             /* We're on the same socket, so check the busy-ness of threads.
              * Migrate if # of idlers is less at all */
-            ASSERT( cpu_isset(nxt, per_cpu(cpu_core_map, cpu)) );
+            ASSERT( cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
             migrate_factor = 1;
-            cpus_and(cpu_idlers, idlers, per_cpu(cpu_sibling_map, cpu));
-            cpus_and(nxt_idlers, idlers, per_cpu(cpu_sibling_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_sibling_map, cpu));
+            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_sibling_map, nxt));
         }
         else
         {
             /* We're on different sockets, so check the busy-ness of cores.
              * Migrate only if the other core is twice as idle */
-            ASSERT( !cpu_isset(nxt, per_cpu(cpu_core_map, cpu)) );
+            ASSERT( !cpumask_test_cpu(nxt, &per_cpu(cpu_core_map, cpu)) );
             migrate_factor = 2;
-            cpus_and(cpu_idlers, idlers, per_cpu(cpu_core_map, cpu));
-            cpus_and(nxt_idlers, idlers, per_cpu(cpu_core_map, nxt));
+            cpumask_and(&cpu_idlers, &idlers, &per_cpu(cpu_core_map, cpu));
+            cpumask_and(&nxt_idlers, &idlers, &per_cpu(cpu_core_map, nxt));
         }
 
-        weight_cpu = cpus_weight(cpu_idlers);
-        weight_nxt = cpus_weight(nxt_idlers);
+        weight_cpu = cpumask_weight(&cpu_idlers);
+        weight_nxt = cpumask_weight(&nxt_idlers);
         /* smt_power_savings: consolidate work rather than spreading it */
         if ( sched_smt_power_savings ?
              weight_cpu > weight_nxt :
              weight_cpu * migrate_factor < weight_nxt )
         {
-            cpus_and(nxt_idlers, cpus, nxt_idlers);
+            cpumask_and(&nxt_idlers, &cpus, &nxt_idlers);
             spc = CSCHED_PCPU(nxt);
-            cpu = cycle_cpu(spc->idle_bias, nxt_idlers);
-            cpus_andnot(cpus, cpus, per_cpu(cpu_sibling_map, cpu));
+            cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
+            cpumask_andnot(&cpus, &cpus, &per_cpu(cpu_sibling_map, cpu));
         }
         else
         {
-            cpus_andnot(cpus, cpus, nxt_idlers);
+            cpumask_andnot(&cpus, &cpus, &nxt_idlers);
         }
     }
 
@@ -1228,7 +1228,7 @@ csched_load_balance(struct csched_privat
     online = CSCHED_CPUONLINE(per_cpu(cpupool, cpu));
 
     /* If this CPU is going offline we shouldn't steal work. */
-    if ( unlikely(!cpu_isset(cpu, *online)) )
+    if ( unlikely(!cpumask_test_cpu(cpu, online)) )
         goto out;
 
     if ( snext->pri == CSCHED_PRI_IDLE )
@@ -1242,14 +1242,14 @@ csched_load_balance(struct csched_privat
      * Peek at non-idling CPUs in the system, starting with our
      * immediate neighbour.
      */
-    cpus_andnot(workers, *online, prv->idlers);
-    cpu_clear(cpu, workers);
+    cpumask_andnot(&workers, online, &prv->idlers);
+    cpumask_clear_cpu(cpu, &workers);
     peer_cpu = cpu;
 
     while ( !cpus_empty(workers) )
     {
-        peer_cpu = cycle_cpu(peer_cpu, workers);
-        cpu_clear(peer_cpu, workers);
+        peer_cpu = cpumask_cycle(peer_cpu, &workers);
+        cpumask_clear_cpu(peer_cpu, &workers);
 
         /*
          * Get ahold of the scheduler lock for this peer CPU.
@@ -1267,7 +1267,7 @@ csched_load_balance(struct csched_privat
         /*
          * Any work over there to steal?
          */
-        speer = cpu_isset(peer_cpu, *online) ?
+        speer = cpumask_test_cpu(peer_cpu, online) ?
             csched_runq_steal(peer_cpu, cpu, snext->pri) : NULL;
         pcpu_schedule_unlock(peer_cpu);
         if ( speer != NULL )
--- 2011-10-18.orig/xen/common/sched_credit2.c  2011-10-07 09:18:09.000000000 +0200
+++ 2011-10-18/xen/common/sched_credit2.c       2011-10-11 18:03:39.000000000 +0200
@@ -507,19 +507,19 @@ runq_tickle(const struct scheduler *ops,
     }
     
     /* Get a mask of idle, but not tickled */
-    cpus_andnot(mask, rqd->idle, rqd->tickled);
+    cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
     
     /* If it's not empty, choose one */
-    if ( !cpus_empty(mask) )
+    if ( !cpumask_empty(&mask) )
     {
-        ipid=first_cpu(mask);
+        ipid = cpumask_first(&mask);
         goto tickle;
     }
 
     /* Otherwise, look for the non-idle cpu with the lowest credit,
      * skipping cpus which have been tickled but not scheduled yet */
-    cpus_andnot(mask, rqd->active, rqd->idle);
-    cpus_andnot(mask, mask, rqd->tickled);
+    cpumask_andnot(&mask, &rqd->active, &rqd->idle);
+    cpumask_andnot(&mask, &mask, &rqd->tickled);
 
     for_each_cpu_mask(i, mask)
     {
@@ -573,7 +573,7 @@ tickle:
                   sizeof(d),
                   (unsigned char *)&d);
     }
-    cpu_set(ipid, rqd->tickled);
+    cpumask_set_cpu(ipid, &rqd->tickled);
     cpu_raise_softirq(ipid, SCHEDULE_SOFTIRQ);
 
 no_tickle:
--- 2011-10-18.orig/xen/common/schedule.c       2011-10-12 13:59:19.000000000 +0200
+++ 2011-10-18/xen/common/schedule.c    2011-10-11 17:56:50.000000000 +0200
@@ -595,8 +595,8 @@ int vcpu_set_affinity(struct vcpu *v, co
     if ( v->domain->is_pinned )
         return -EINVAL;
     online = VCPU2ONLINE(v);
-    cpus_and(online_affinity, *affinity, *online);
-    if ( cpus_empty(online_affinity) )
+    cpumask_and(&online_affinity, affinity, online);
+    if ( cpumask_empty(&online_affinity) )
         return -EINVAL;
 
     vcpu_schedule_lock_irq(v);
--- 2011-10-18.orig/xen/common/softirq.c        2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/common/softirq.c     2011-10-12 09:04:04.000000000 +0200
@@ -71,11 +71,12 @@ void open_softirq(int nr, softirq_handle
 void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
 {
     int cpu;
-    cpumask_t send_mask = CPU_MASK_NONE;
+    cpumask_t send_mask;
 
+    cpumask_clear(&send_mask);
     for_each_cpu_mask(cpu, *mask)
         if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
-            cpu_set(cpu, send_mask);
+            cpumask_set_cpu(cpu, &send_mask);
 
     smp_send_event_check_mask(&send_mask);
 }
--- 2011-10-18.orig/xen/common/trace.c  2011-10-20 14:46:19.000000000 +0200
+++ 2011-10-18/xen/common/trace.c       2011-10-12 09:06:27.000000000 +0200
@@ -70,7 +70,7 @@ static DEFINE_PER_CPU(unsigned long, los
 int tb_init_done __read_mostly;
 
 /* which CPUs tracing is enabled on */
-static cpumask_t tb_cpu_mask = CPU_MASK_ALL;
+static cpumask_t tb_cpu_mask;
 
 /* which tracing events are enabled */
 static u32 tb_event_mask = TRC_ALL;
@@ -338,6 +338,7 @@ int trace_will_trace_event(u32 event)
  */
 void __init init_trace_bufs(void)
 {
+    cpumask_setall(&tb_cpu_mask);
     register_cpu_notifier(&cpu_nfb);
 
     if ( opt_tbuf_size )
--- 2011-10-18.orig/xen/include/xen/cpumask.h   2011-10-17 08:43:34.000000000 +0200
+++ 2011-10-18/xen/include/xen/cpumask.h        2011-10-19 17:29:46.000000000 +0200
@@ -13,18 +13,18 @@
  *
  * The available cpumask operations are:
  *
- * void cpu_set(cpu, mask)             turn on bit 'cpu' in mask
- * void cpu_clear(cpu, mask)           turn off bit 'cpu' in mask
- * void cpus_setall(mask)              set all bits
- * void cpus_clear(mask)               clear all bits
- * int cpu_isset(cpu, mask)            true iff bit 'cpu' set in mask
- * int cpu_test_and_set(cpu, mask)     test and set bit 'cpu' in mask
- *
- * void cpus_and(dst, src1, src2)      dst = src1 & src2  [intersection]
- * void cpus_or(dst, src1, src2)       dst = src1 | src2  [union]
- * void cpus_xor(dst, src1, src2)      dst = src1 ^ src2
- * void cpus_andnot(dst, src1, src2)   dst = src1 & ~src2
- * void cpus_complement(dst, src)      dst = ~src
+ * void cpumask_set_cpu(cpu, mask)     turn on bit 'cpu' in mask
+ * void cpumask_clear_cpu(cpu, mask)   turn off bit 'cpu' in mask
+ * void cpumask_setall(mask)           set all bits
+ * void cpumask_clear(mask)            clear all bits
+ * int cpumask_test_cpu(cpu, mask)     true iff bit 'cpu' set in mask
+ * int cpumask_test_and_set_cpu(cpu, mask) test and set bit 'cpu' in mask
+ *
+ * void cpumask_and(dst, src1, src2)   dst = src1 & src2  [intersection]
+ * void cpumask_or(dst, src1, src2)    dst = src1 | src2  [union]
+ * void cpumask_xor(dst, src1, src2)   dst = src1 ^ src2
+ * void cpumask_andnot(dst, src1, src2)        dst = src1 & ~src2
+ * void cpumask_complement(dst, src)   dst = ~src
  *
  * int cpus_equal(mask1, mask2)                Does mask1 == mask2?
  * int cpus_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
@@ -33,8 +33,8 @@
  * int cpus_full(mask)                 Is mask full (all bits sets)?
  * int cpus_weight(mask)               Hamming weigh - number of set bits
  *
- * void cpus_shift_right(dst, src, n)  Shift right
- * void cpus_shift_left(dst, src, n)   Shift left
+ * void cpumask_shift_right(dst, src, n) Shift right
+ * void cpumask_shift_left(dst, src, n)        Shift left
  *
  * int first_cpu(mask)                 Number lowest set bit, or NR_CPUS
  * int next_cpu(cpu, mask)             Next cpu past 'cpu', or NR_CPUS
@@ -110,18 +110,14 @@ static inline void cpumask_clear_cpu(int
        clear_bit(cpumask_check(cpu), dstp->bits);
 }
 
-#define cpumask_setall(dst) __cpus_setall(dst, nr_cpumask_bits)
-#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
-static inline void __cpus_setall(cpumask_t *dstp, int nbits)
+static inline void cpumask_setall(cpumask_t *dstp)
 {
-       bitmap_fill(dstp->bits, nbits);
+       bitmap_fill(dstp->bits, nr_cpumask_bits);
 }
 
-#define cpumask_clear(dst) __cpus_clear(dst, nr_cpumask_bits)
-#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
-static inline void __cpus_clear(cpumask_t *dstp, int nbits)
+static inline void cpumask_clear(cpumask_t *dstp)
 {
-       bitmap_zero(dstp->bits, nbits);
+       bitmap_zero(dstp->bits, nr_cpumask_bits);
 }
 
 /* No static inline type checking - see Subtlety (1) above. */
@@ -143,50 +139,33 @@ static inline int cpumask_test_and_clear
        return test_and_clear_bit(cpumask_check(cpu), addr->bits);
 }
 
-#define cpumask_and(dst, src1, src2) \
-       __cpus_and(dst, src1, src2, nr_cpumask_bits)
-#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline void cpumask_and(cpumask_t *dstp, const cpumask_t *src1p,
+                              const cpumask_t *src2p)
 {
-       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_and(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
 }
 
-#define cpumask_or(dst, src1, src2) \
-       __cpus_or(dst, src1, src2, nr_cpumask_bits)
-#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline void cpumask_or(cpumask_t *dstp, const cpumask_t *src1p,
+                             const cpumask_t *src2p)
 {
-       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_or(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
 }
 
-#define cpumask_xor(dst, src1, src2) \
-       __cpus_xor(dst, src1, src2, nr_cpumask_bits)
-#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline void cpumask_xor(cpumask_t *dstp, const cpumask_t *src1p,
+                              const cpumask_t *src2p)
 {
-       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
 }
 
-#define cpumask_andnot(dst, src1, src2) \
-       __cpus_andnot(dst, src1, src2, nr_cpumask_bits)
-#define cpus_andnot(dst, src1, src2) \
-                               __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
-                                       const cpumask_t *src2p, int nbits)
+static inline void cpumask_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+                                 const cpumask_t *src2p)
 {
-       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+       bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
 }
 
-#define cpumask_complement(dst, src) \
-       __cpus_complement(dst, src, nr_cpumask_bits)
-#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
-static inline void __cpus_complement(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int nbits)
+static inline void cpumask_complement(cpumask_t *dstp, const cpumask_t *srcp)
 {
-       bitmap_complement(dstp->bits, srcp->bits, nbits);
+       bitmap_complement(dstp->bits, srcp->bits, nr_cpumask_bits);
 }
 
 #define cpumask_equal(src1, src2) __cpus_equal(src1, src2, nr_cpu_ids)
@@ -236,31 +215,21 @@ static inline int __cpus_weight(const cp
        return bitmap_weight(srcp->bits, nbits);
 }
 
-#define cpumask_copy(dest, src) __cpus_copy(dest, src, nr_cpumask_bits)
-#define cpus_copy(dest, src) __cpus_copy(&(dest), &(src), NR_CPUS)
-static inline void __cpus_copy(cpumask_t *dstp, const cpumask_t *srcp, int nbits)
+static inline void cpumask_copy(cpumask_t *dstp, const cpumask_t *srcp)
 {
-       bitmap_copy(dstp->bits, srcp->bits, nbits);
+       bitmap_copy(dstp->bits, srcp->bits, nr_cpumask_bits);
 }
 
-#define cpumask_shift_right(dst, src, n) \
-       __cpus_shift_right(dst, src, n, nr_cpumask_bits)
-#define cpus_shift_right(dst, src, n) \
-                       __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_right(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
+static inline void cpumask_shift_right(cpumask_t *dstp,
+                                      const cpumask_t *srcp, int n)
 {
-       bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+       bitmap_shift_right(dstp->bits, srcp->bits, n, nr_cpumask_bits);
 }
 
-#define cpumask_shift_left(dst, src, n) \
-       __cpus_shift_left(dst, src, n, nr_cpumask_bits)
-#define cpus_shift_left(dst, src, n) \
-                       __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
-static inline void __cpus_shift_left(cpumask_t *dstp,
-                                       const cpumask_t *srcp, int n, int nbits)
+static inline void cpumask_shift_left(cpumask_t *dstp,
+                                     const cpumask_t *srcp, int n)
 {
-       bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+       bitmap_shift_left(dstp->bits, srcp->bits, n, nr_cpumask_bits);
 }
 
 #define cpumask_first(src) __first_cpu(src, nr_cpu_ids)
@@ -317,6 +286,7 @@ static inline const cpumask_t *cpumask_o
 
 #define cpumask_of_cpu(cpu) (*cpumask_of(cpu))
 
+#if defined(__ia64__) /* XXX needs cleanup */
 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
 #if NR_CPUS <= BITS_PER_LONG
@@ -345,8 +315,10 @@ static inline const cpumask_t *cpumask_o
 /*(cpumask_t)*/ { {                                                    \
        [0] =  1UL                                                      \
 } }
+#endif /* __ia64__ */
 
 #define cpus_addr(src) ((src).bits)
+#define cpumask_bits(maskp) ((maskp)->bits)
 
 #define cpumask_scnprintf(buf, len, src) \
        __cpumask_scnprintf((buf), (len), &(src), nr_cpu_ids)
@@ -388,9 +360,8 @@ typedef cpumask_t *cpumask_var_t;
 static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
 {
        /*
-        * Once all direct cpumask assignments and all cpus_*() accessors
-        * still referencing NR_CPUS are gone, we could use nr_cpumask_bits
-        * to determine the allocation size here.
+        * Once all direct cpumask assignments are gone, we could use
+        * nr_cpumask_bits to determine the allocation size here.
         */
        return (*mask = xmalloc(cpumask_t)) != NULL;
 }
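
A second pattern several hunks rely on (the fw_emul.c change, and the
pirq_guest_bind() change in irq.c) is cpumask_of(cpu): it yields a pointer
to a canonical single-CPU mask, so callers no longer need an on-stack
CPU_MASK_NONE variable plus cpu_set(). Again a simplified, hypothetical
stand-in rather than the real implementation (Xen derives these masks from
a shared bitmap table):

#include <stdio.h>

#define NR_CPUS 32
typedef struct { unsigned long bits[1]; } cpumask_t;

/* One single-bit mask per CPU; filled on demand for this sketch. */
static cpumask_t single_cpu_mask[NR_CPUS];

static const cpumask_t *cpumask_of(unsigned int cpu)
{
    single_cpu_mask[cpu].bits[0] = 1UL << cpu;
    return &single_cpu_mask[cpu];
}

/* Hypothetical consumer standing in for desc->handler->set_affinity(). */
static void set_affinity(const cpumask_t *mask)
{
    printf("affinity word: %#lx\n", mask->bits[0]);
}

int main(void)
{
    /* Old: cpumask_t m = CPU_MASK_NONE; cpu_set(5, m); set_affinity(&m);
     * New: one call, no local mask needed. */
    set_affinity(cpumask_of(5));
    return 0;
}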


Attachment: cpumask-NR_CPUS-accessors.patch
Description: Text document
