# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1320744707 -3600
# Node ID 67defeb4baa649f70883bcfbb642db0488a4727c
# Parent 4e13729a0adf61f31dcb4e2614a7bc9cac56605c
eliminate cpus_xyz()

Convert the remaining users of the old by-value cpus_*() accessors
(and cpus_addr()) to their pointer-based cpumask_*() counterparts, and
drop the old definitions from xen/include/xen/cpumask.h.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
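For illustration, a minimal sketch (not part of the changeset) of what
the conversion means at a call site: the removed cpus_*() helpers took
a cpumask_t by value, while their cpumask_*() replacements take a
pointer, so callers switch to passing the mask's address:

#include <xen/cpumask.h>
#include <xen/lib.h>

/* Hypothetical helper, for illustration only. */
static void report_dirty(const cpumask_t *dirty)
{
    cpumask_t mask;

    cpumask_copy(&mask, dirty);

    /* Before: if ( cpus_empty(mask) )  -- mask passed by value. */
    if ( cpumask_empty(&mask) )         /* After: pass a pointer. */
        return;

    /* Before: cpus_weight(mask); after: */
    printk("%d dirty CPU(s)\n", cpumask_weight(&mask));
}
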
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/ia64/linux-xen/iosapic.c
--- a/xen/arch/ia64/linux-xen/iosapic.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/ia64/linux-xen/iosapic.c Tue Nov 08 10:31:47 2011 +0100
@@ -709,7 +709,7 @@
cpu_clear(numa_cpu, cpu_mask);
}
- num_cpus = cpus_weight(cpu_mask);
+ num_cpus = cpumask_weight(&cpu_mask);
if (!num_cpus)
goto skip_numa_setup;
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/ia64/linux-xen/smp.c Tue Nov 08 10:31:47 2011 +0100
@@ -62,7 +62,7 @@
int cpu;
/* Not for me. */
- if (cpus_subset(*mask, *cpumask_of(smp_processor_id())))
+ if (cpumask_subset(mask, cpumask_of(smp_processor_id())))
return;
//printf("smp_send_event_check_mask called\n");
@@ -444,7 +444,7 @@
void *info, int wait)
{
struct call_data_struct data;
- unsigned int cpu, nr_cpus = cpus_weight(*selected);
+ unsigned int cpu, nr_cpus = cpumask_weight(selected);
ASSERT(local_irq_is_enabled());
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/ia64/xen/mm.c
--- a/xen/arch/ia64/xen/mm.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/ia64/xen/mm.c Tue Nov 08 10:31:47 2011 +0100
@@ -3196,7 +3196,7 @@
cpumask_copy(&mask,
page_get_owner(page)->domain_dirty_cpumask);
tlbflush_filter(mask, page->tlbflush_timestamp);
- if ( unlikely(!cpus_empty(mask)) )
+ if ( unlikely(!cpumask_empty(&mask)) )
{
perfc_incr(need_flush_tlb_flush);
flush_tlb_mask(&mask);
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/ia64/xen/vhpt.c Tue Nov 08 10:31:47 2011 +0100
@@ -556,7 +556,7 @@
if (cpu_isset(cpu, *mask))
flush_tlb_vhpt_all (NULL);
- if (cpus_subset(*mask, *cpumask_of(cpu)))
+ if (cpumask_subset(mask, cpumask_of(cpu)))
return;
for_each_cpu_mask (cpu, *mask)
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c Tue Nov 08 10:31:47 2011 +0100
@@ -863,7 +863,7 @@
*/
if (atomic_read(&found_error) == 0)
mc_panic("MCE: No CPU found valid MCE, need reset\n");
- if (!cpus_empty(mce_fatal_cpus))
+ if (!cpumask_empty(&mce_fatal_cpus))
{
char *ebufp, ebuf[96] = "MCE: Fatal error happened on CPUs ";
ebufp = ebuf + strlen(ebuf);
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/x86/domain.c Tue Nov 08 10:31:47 2011 +0100
@@ -1611,8 +1611,9 @@
cpumask_copy(&dirty_mask, next->vcpu_dirty_cpumask);
/* Allow at most one CPU at a time to be dirty. */
- ASSERT(cpus_weight(dirty_mask) <= 1);
- if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) )
+ ASSERT(cpumask_weight(&dirty_mask) <= 1);
+ if ( unlikely(!cpumask_test_cpu(cpu, &dirty_mask) &&
+ !cpumask_empty(&dirty_mask)) )
{
/* Other cpus call __sync_local_execstate from flush ipi handler. */
flush_tlb_mask(&dirty_mask);
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/x86/genapic/delivery.c
--- a/xen/arch/x86/genapic/delivery.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/x86/genapic/delivery.c Tue Nov 08 10:31:47 2011 +0100
@@ -38,7 +38,7 @@
unsigned int cpu_mask_to_apicid_flat(const cpumask_t *cpumask)
{
- return cpus_addr(*cpumask)[0]&0xFF;
+ return cpumask_bits(cpumask)[0]&0xFF;
}
/*
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/x86/irq.c Tue Nov 08 10:31:47 2011 +0100
@@ -707,7 +707,7 @@
unsigned long flags;
cpumask_t dest_mask;
- if (!cpus_intersects(*mask, cpu_online_map))
+ if (!cpumask_intersects(mask, &cpu_online_map))
return BAD_APICID;
irq = desc->irq;
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/x86/mm.c Tue Nov 08 10:31:47 2011 +0100
@@ -2436,7 +2436,7 @@
/* Don't flush if the timestamp is old enough */
tlbflush_filter(mask, page->tlbflush_timestamp);
- if ( unlikely(!cpus_empty(mask)) &&
+ if ( unlikely(!cpumask_empty(&mask)) &&
/* Shadow mode: track only writable pages. */
(!shadow_mode_enabled(page_get_owner(page)) ||
((nx & PGT_type_mask) == PGT_writable_page)) )
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/x86/mm/shadow/common.c Tue Nov 08 10:31:47 2011 +0100
@@ -1524,7 +1524,7 @@
* we need to be sure that no TLB holds a pointer to it. */
cpumask_copy(&mask, d->domain_dirty_cpumask);
tlbflush_filter(mask, sp->tlbflush_timestamp);
- if ( unlikely(!cpus_empty(mask)) )
+ if ( unlikely(!cpumask_empty(&mask)) )
{
perfc_incr(shadow_alloc_tlbflush);
flush_tlb_mask(&mask);
diff -r 4e13729a0adf -r 67defeb4baa6 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/arch/x86/smp.c Tue Nov 08 10:31:47 2011 +0100
@@ -140,11 +140,11 @@
void send_IPI_mask_flat(const cpumask_t *cpumask, int vector)
{
- unsigned long mask = cpus_addr(*cpumask)[0];
+ unsigned long mask = cpumask_bits(cpumask)[0];
unsigned long cfg;
unsigned long flags;
- mask &= cpus_addr(cpu_online_map)[0];
+ mask &= cpumask_bits(&cpu_online_map)[0];
mask &= ~(1UL << smp_processor_id());
if ( mask == 0 )
return;
@@ -237,7 +237,7 @@
if ( cpu_isset(smp_processor_id(), *mask) )
flush_area_local(va, flags);
- if ( !cpus_subset(*mask, *cpumask_of(smp_processor_id())) )
+ if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
{
spin_lock(&flush_lock);
cpumask_and(&flush_cpumask, mask, &cpu_online_map);
diff -r 4e13729a0adf -r 67defeb4baa6 xen/common/domain.c
--- a/xen/common/domain.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/common/domain.c Tue Nov 08 10:31:47 2011 +0100
@@ -371,7 +371,7 @@
cpumask_or(&cpumask, &cpumask, v->cpu_affinity);
for_each_online_node ( node )
- if ( cpus_intersects(node_to_cpumask(node), cpumask) )
+ if ( cpumask_intersects(&node_to_cpumask(node), &cpumask) )
node_set(node, nodemask);
d->node_affinity = nodemask;
diff -r 4e13729a0adf -r 67defeb4baa6 xen/common/perfc.c
--- a/xen/common/perfc.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/common/perfc.c Tue Nov 08 10:31:47 2011 +0100
@@ -163,11 +163,11 @@
unsigned int i, j, v;
/* We only copy the name and array-size information once. */
- if ( !cpus_equal(cpu_online_map, perfc_cpumap) )
+ if ( !cpumask_equal(&cpu_online_map, &perfc_cpumap) )
{
unsigned int nr_cpus;
perfc_cpumap = cpu_online_map;
- nr_cpus = cpus_weight(perfc_cpumap);
+ nr_cpus = cpumask_weight(&perfc_cpumap);
perfc_nbr_vals = 0;
diff -r 4e13729a0adf -r 67defeb4baa6 xen/common/rcupdate.c
--- a/xen/common/rcupdate.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/common/rcupdate.c Tue Nov 08 10:31:47 2011 +0100
@@ -121,7 +121,7 @@
*/
call_rcu(&data.head, rcu_barrier_callback);
- while ( atomic_read(data.cpu_count) != cpus_weight(cpu_online_map) )
+ while ( atomic_read(data.cpu_count) != num_online_cpus() )
{
process_pending_softirqs();
cpu_relax();
diff -r 4e13729a0adf -r 67defeb4baa6 xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/common/sched_credit.c Tue Nov 08 10:31:47 2011 +0100
@@ -1246,7 +1246,7 @@
cpumask_clear_cpu(cpu, &workers);
peer_cpu = cpu;
- while ( !cpus_empty(workers) )
+ while ( !cpumask_empty(&workers) )
{
peer_cpu = cpumask_cycle(peer_cpu, &workers);
cpumask_clear_cpu(peer_cpu, &workers);
diff -r 4e13729a0adf -r 67defeb4baa6 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/common/sched_credit2.c Tue Nov 08 10:31:47 2011 +0100
@@ -1009,7 +1009,7 @@
struct csched_vcpu *svc = CSCHED_VCPU(vc);
s_time_t min_avgload;
- BUG_ON(cpus_empty(prv->active_queues));
+ BUG_ON(cpumask_empty(&prv->active_queues));
/* Locking:
* - vc->processor is already locked
@@ -1098,7 +1098,7 @@
new_cpu = vc->processor;
else
{
- BUG_ON(cpus_empty(prv->rqd[min_rqi].active));
+ BUG_ON(cpumask_empty(&prv->rqd[min_rqi].active));
new_cpu = first_cpu(prv->rqd[min_rqi].active);
}
@@ -1258,9 +1258,9 @@
if ( st.orqd->b_avgload > load_max )
load_max = st.orqd->b_avgload;
- cpus_max=cpus_weight(st.lrqd->active);
- if ( cpus_weight(st.orqd->active) > cpus_max )
- cpus_max = cpus_weight(st.orqd->active);
+ cpus_max = cpumask_weight(&st.lrqd->active);
+ if ( cpumask_weight(&st.orqd->active) > cpus_max )
+ cpus_max = cpumask_weight(&st.orqd->active);
/* If we're under 100% capacity, only shift if load difference
* is > 1. Otherwise, shift if under 12.5%. */
@@ -1801,7 +1801,7 @@
printk("Active queues: %d\n"
"\tdefault-weight = %d\n",
- cpus_weight(prv->active_queues),
+ cpumask_weight(&prv->active_queues),
CSCHED_DEFAULT_WEIGHT);
for_each_cpu_mask(i, prv->active_queues)
{
@@ -1815,7 +1815,7 @@
"\tinstload = %d\n"
"\taveload = %3"PRI_stime"\n",
i,
- cpus_weight(prv->rqd[i].active),
+ cpumask_weight(&prv->rqd[i].active),
prv->rqd[i].max_weight,
prv->rqd[i].load,
fraction);
@@ -1852,7 +1852,7 @@
rqd = prv->rqd + rqi;
- BUG_ON(!cpus_empty(rqd->active));
+ BUG_ON(!cpumask_empty(&rqd->active));
rqd->max_weight = 1;
rqd->id = rqi;
@@ -1869,7 +1869,7 @@
rqd = prv->rqd + rqi;
- BUG_ON(!cpus_empty(rqd->active));
+ BUG_ON(!cpumask_empty(&rqd->active));
rqd->id = -1;
@@ -1980,7 +1980,7 @@
cpu_clear(cpu, rqd->idle);
cpu_clear(cpu, rqd->active);
- if ( cpus_empty(rqd->active) )
+ if ( cpumask_empty(&rqd->active) )
{
printk(" No cpus left on runqueue, disabling\n");
deactivate_runqueue(prv, rqi);
diff -r 4e13729a0adf -r 67defeb4baa6 xen/common/schedule.c
--- a/xen/common/schedule.c Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/common/schedule.c Tue Nov 08 10:31:47 2011 +0100
@@ -550,7 +550,7 @@
vcpu_schedule_lock_irq(v);
cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
- if ( cpus_empty(online_affinity) &&
+ if ( cpumask_empty(&online_affinity) &&
cpumask_test_cpu(cpu, v->cpu_affinity) )
{
printk("Breaking vcpu affinity for domain %d vcpu %d\n",
diff -r 4e13729a0adf -r 67defeb4baa6 xen/include/asm-ia64/linux-xen/asm/acpi.h
--- a/xen/include/asm-ia64/linux-xen/asm/acpi.h Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/acpi.h Tue Nov 08 10:31:47 2011 +0100
@@ -147,7 +147,7 @@
int cpu;
int next_nid = 0;
- low_cpu = cpus_weight(early_cpu_possible_map);
+ low_cpu = cpumask_weight(&early_cpu_possible_map);
high_cpu = max(low_cpu, min_cpus);
high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
diff -r 4e13729a0adf -r 67defeb4baa6 xen/include/asm-ia64/linux/topology.h
--- a/xen/include/asm-ia64/linux/topology.h Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/include/asm-ia64/linux/topology.h Tue Nov 08 10:31:47 2011 +0100
@@ -42,7 +42,7 @@
({						\
	cpumask_t __tmp__;			\
	__tmp__ = node_to_cpumask(node);	\
-	cpus_weight(__tmp__);			\
+	cpumask_weight(&__tmp__);		\
})
#endif
diff -r 4e13729a0adf -r 67defeb4baa6 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Mon Nov 07 16:28:26 2011 +0100
+++ b/xen/include/xen/cpumask.h Tue Nov 08 10:31:47 2011 +0100
@@ -26,12 +26,12 @@
* void cpumask_andnot(dst, src1, src2) dst = src1 & ~src2
* void cpumask_complement(dst, src) dst = ~src
*
- * int cpus_equal(mask1, mask2) Does mask1 == mask2?
- * int cpus_intersects(mask1, mask2) Do mask1 and mask2 intersect?
- * int cpus_subset(mask1, mask2) Is mask1 a subset of mask2?
- * int cpus_empty(mask) Is mask empty (no bits set)?
- * int cpus_full(mask) Is mask full (all bits set)?
- * int cpus_weight(mask) Hamming weight - number of set bits
+ * int cpumask_equal(mask1, mask2) Does mask1 == mask2?
+ * int cpumask_intersects(mask1, mask2) Do mask1 and mask2 intersect?
+ * int cpumask_subset(mask1, mask2) Is mask1 a subset of mask2?
+ * int cpumask_empty(mask) Is mask empty (no bits set)?
+ * int cpumask_full(mask) Is mask full (all bits set)?
+ * int cpumask_weight(mask) Hamming weight - number of set bits
*
* void cpumask_shift_right(dst, src, n) Shift right
* void cpumask_shift_left(dst, src, n) Shift left
@@ -42,9 +42,7 @@
* int cycle_cpu(cpu, mask) Next cpu cycling from 'cpu', or NR_CPUS
*
* cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
- * CPU_MASK_ALL Initializer - all bits set
- * CPU_MASK_NONE Initializer - no bits set
- * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
+ * unsigned long *cpumask_bits(mask) Array of unsigned long's in mask
*
* int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
* int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
@@ -168,51 +166,37 @@
bitmap_complement(dstp->bits, srcp->bits, nr_cpumask_bits);
}
-#define cpumask_equal(src1, src2) __cpus_equal(src1, src2, nr_cpu_ids)
-#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), nr_cpu_ids)
-static inline int __cpus_equal(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+static inline int cpumask_equal(const cpumask_t *src1p,
+ const cpumask_t *src2p)
{
- return bitmap_equal(src1p->bits, src2p->bits, nbits);
+ return bitmap_equal(src1p->bits, src2p->bits, nr_cpu_ids);
}
-#define cpumask_intersects(src1, src2) \
- __cpus_intersects(src1, src2, nr_cpu_ids)
-#define cpus_intersects(src1, src2) \
- __cpus_intersects(&(src1), &(src2), nr_cpu_ids)
-static inline int __cpus_intersects(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+static inline int cpumask_intersects(const cpumask_t *src1p,
+ const cpumask_t *src2p)
{
- return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+ return bitmap_intersects(src1p->bits, src2p->bits, nr_cpu_ids);
}
-#define cpumask_subset(src1, src2) __cpus_subset(src1, src2, nr_cpu_ids)
-#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), nr_cpu_ids)
-static inline int __cpus_subset(const cpumask_t *src1p,
- const cpumask_t *src2p, int nbits)
+static inline int cpumask_subset(const cpumask_t *src1p,
+ const cpumask_t *src2p)
{
- return bitmap_subset(src1p->bits, src2p->bits, nbits);
+ return bitmap_subset(src1p->bits, src2p->bits, nr_cpu_ids);
}
-#define cpumask_empty(src) __cpus_empty(src, nr_cpu_ids)
-#define cpus_empty(src) __cpus_empty(&(src), nr_cpu_ids)
-static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
+static inline int cpumask_empty(const cpumask_t *srcp)
{
- return bitmap_empty(srcp->bits, nbits);
+ return bitmap_empty(srcp->bits, nr_cpu_ids);
}
-#define cpumask_full(cpumask) __cpus_full(cpumask, nr_cpu_ids)
-#define cpus_full(cpumask) __cpus_full(&(cpumask), nr_cpu_ids)
-static inline int __cpus_full(const cpumask_t *srcp, int nbits)
+static inline int cpumask_full(const cpumask_t *srcp)
{
- return bitmap_full(srcp->bits, nbits);
+ return bitmap_full(srcp->bits, nr_cpu_ids);
}
-#define cpumask_weight(cpumask) __cpus_weight(cpumask, nr_cpu_ids)
-#define cpus_weight(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
-static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
+static inline int cpumask_weight(const cpumask_t *srcp)
{
- return bitmap_weight(srcp->bits, nbits);
+ return bitmap_weight(srcp->bits, nr_cpu_ids);
}
static inline void cpumask_copy(cpumask_t *dstp, const cpumask_t *srcp)
@@ -317,7 +301,6 @@
} }
#endif /* __ia64__ */
-#define cpus_addr(src) ((src).bits)
#define cpumask_bits(maskp) ((maskp)->bits)
static inline int cpumask_scnprintf(char *buf, int len,
@@ -458,9 +441,9 @@
extern cpumask_t cpu_present_map;
#if NR_CPUS > 1
-#define num_online_cpus() cpus_weight(cpu_online_map)
-#define num_possible_cpus() cpus_weight(cpu_possible_map)
-#define num_present_cpus() cpus_weight(cpu_present_map)
+#define num_online_cpus() cpumask_weight(&cpu_online_map)
+#define num_possible_cpus() cpumask_weight(&cpu_possible_map)
+#define num_present_cpus() cpumask_weight(&cpu_present_map)
#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
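
As a usage sketch of the pointer-based predicates kept by this header
(illustrative only, not part of the patch; audit_mask() is a made-up
helper):

#include <xen/cpumask.h>
#include <xen/lib.h>

/* Check a caller-supplied mask against the global CPU maps. */
static void audit_mask(const cpumask_t *mask)
{
    if ( !cpumask_subset(mask, &cpu_online_map) )
        printk("mask contains offline CPUs\n");

    if ( cpumask_intersects(mask, &cpu_present_map) )
        printk("mask overlaps some of the %d present CPU(s)\n",
               num_present_cpus());
}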