# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1320744782 -3600
# Node ID f9c4494e77c812e9da1d0576a486dfdca5919904
# Parent 67defeb4baa649f70883bcfbb642db0488a4727c
eliminate cpu_test_xyz()
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/arch/ia64/linux-xen/smp.c Tue Nov 08 10:33:02 2011 +0100
@@ -68,7 +68,7 @@
//printf("smp_send_event_check_mask called\n");
for (cpu = 0; cpu < NR_CPUS; ++cpu)
- if (cpu_isset(cpu, *mask) && cpu != smp_processor_id())
+ if (cpumask_test_cpu(cpu, mask) && cpu != smp_processor_id())
platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
#endif
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/arch/ia64/linux-xen/smpboot.c Tue Nov 08 10:33:02 2011 +0100
@@ -548,13 +548,13 @@
*/
Dprintk("Waiting on callin_map ...");
for (timeout = 0; timeout < 100000; timeout++) {
- if (cpu_isset(cpu, cpu_callin_map))
+ if (cpumask_test_cpu(cpu, &cpu_callin_map))
break; /* It has booted */
udelay(100);
}
Dprintk("\n");
- if (!cpu_isset(cpu, cpu_callin_map)) {
+ if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu,
sapicid);
ia64_cpu_to_sapicid[cpu] = -1;
cpu_clear(cpu, cpu_online_map); /* was set in smp_callin() */
@@ -818,7 +818,7 @@
* Already booted cpu? not valid anymore since we dont
* do idle loop tightspin anymore.
*/
- if (cpu_isset(cpu, cpu_callin_map))
+ if (cpumask_test_cpu(cpu, &cpu_callin_map))
return -EINVAL;
if (!per_cpu(cpu_sibling_mask, cpu) &&
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/arch/ia64/xen/domain.c Tue Nov 08 10:33:02 2011 +0100
@@ -140,7 +140,7 @@
if (is_idle_vcpu(next) ||
__test_and_clear_bit(cpu, &next->arch.cache_coherent_map)) {
- if (cpu_test_and_clear(cpu, cpu_cache_coherent_map)) {
+ if (cpumask_test_and_clear_cpu(cpu, &cpu_cache_coherent_map)) {
unsigned long flags;
u64 progress = 0;
s64 status;
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/arch/ia64/xen/regionreg.c
--- a/xen/arch/ia64/xen/regionreg.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/arch/ia64/xen/regionreg.c Tue Nov 08 10:33:02 2011 +0100
@@ -320,8 +320,8 @@
else {
if (current && VMX_DOMAIN(current))
vpd = __get_cpu_var(inserted_vpd);
- ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
- percpu_set), vpd);
+ ia64_new_rr7_efi(val, cpumask_test_cpu(smp_processor_id(),
+ &percpu_set), vpd);
}
return 1;
@@ -342,8 +342,8 @@
and strcut domain are initialized. */
if (unlikely(current == NULL || current->domain == NULL ||
is_idle_vcpu(current)))
- ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
- percpu_set),
+ ia64_new_rr7_efi(val,
cpumask_test_cpu(smp_processor_id(),
+ &percpu_set),
0UL);
else if (VMX_DOMAIN(current))
__vmx_switch_rr7_vcpu(current, val);
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/arch/ia64/xen/vhpt.c Tue Nov 08 10:33:02 2011 +0100
@@ -553,7 +553,7 @@
int cpu;
cpu = smp_processor_id();
- if (cpu_isset(cpu, *mask))
+ if (cpumask_test_cpu(cpu, mask))
flush_tlb_vhpt_all (NULL);
if (cpumask_subset(mask, cpumask_of(cpu)))
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/arch/x86/cpu/common.c Tue Nov 08 10:33:02 2011 +0100
@@ -624,7 +624,7 @@
.limit = LAST_RESERVED_GDT_BYTE
};
- if (cpu_test_and_set(cpu, cpu_initialized)) {
+ if (cpumask_test_and_set_cpu(cpu, &cpu_initialized)) {
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
for (;;) local_irq_enable();
}
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/arch/x86/smp.c Tue Nov 08 10:33:02 2011 +0100
@@ -234,7 +234,7 @@
{
ASSERT(local_irq_is_enabled());
- if ( cpu_isset(smp_processor_id(), *mask) )
+ if ( cpumask_test_cpu(smp_processor_id(), mask) )
flush_area_local(va, flags);
if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/arch/x86/smpboot.c Tue Nov 08 10:33:02 2011 +0100
@@ -138,7 +138,7 @@
return;
if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) &&
- !cpu_isset(slave, tsc_sync_cpu_mask) )
+ !cpumask_test_cpu(slave, &tsc_sync_cpu_mask) )
return;
for ( i = 1; i <= 5; i++ )
@@ -162,7 +162,7 @@
return;
if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) &&
- !cpu_isset(slave, tsc_sync_cpu_mask) )
+ !cpumask_test_cpu(slave, &tsc_sync_cpu_mask) )
return;
for ( i = 1; i <= 5; i++ )
@@ -956,7 +956,7 @@
return ret;
set_cpu_state(CPU_STATE_ONLINE);
- while ( !cpu_isset(cpu, cpu_online_map) )
+ while ( !cpu_online(cpu) )
{
cpu_relax();
process_pending_softirqs();
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/arch/x86/time.c Tue Nov 08 10:33:02 2011 +0100
@@ -1580,7 +1580,7 @@
{
int cpu = smp_processor_id();
- if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
+ if ( cpumask_test_and_clear_cpu(cpu, &pit_broadcast_mask) )
reprogram_timer(this_cpu(timer_deadline));
}
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/common/cpupool.c
--- a/xen/common/cpupool.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/common/cpupool.c Tue Nov 08 10:33:02 2011 +0100
@@ -299,7 +299,7 @@
ret = -EBUSY;
if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
goto out;
- if ( cpu_isset(cpu, cpupool_locked_cpus) )
+ if ( cpumask_test_cpu(cpu, &cpupool_locked_cpus) )
goto out;
ret = 0;
@@ -499,7 +499,7 @@
if ( cpu >= nr_cpu_ids )
goto addcpu_out;
ret = -EBUSY;
- if ( !cpu_isset(cpu, cpupool_free_cpus) )
+ if ( !cpumask_test_cpu(cpu, &cpupool_free_cpus) )
goto addcpu_out;
c = cpupool_find_by_id(op->cpupool_id);
ret = -ENOENT;
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/common/kexec.c
--- a/xen/common/kexec.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/common/kexec.c Tue Nov 08 10:33:02 2011 +0100
@@ -169,7 +169,7 @@
ELF_Prstatus *prstatus;
crash_xen_core_t *xencore;
- if ( cpu_test_and_set(cpu, crash_saved_cpus) )
+ if ( cpumask_test_and_set_cpu(cpu, &crash_saved_cpus) )
return;
prstatus = (ELF_Prstatus *)ELFNOTE_DESC(note);
@@ -187,7 +187,7 @@
crash_xen_info_t info;
crash_xen_info_t *out = (crash_xen_info_t *)ELFNOTE_DESC(xen_crash_note);
- BUG_ON(!cpu_test_and_set(cpu, crash_saved_cpus));
+ BUG_ON(!cpumask_test_and_set_cpu(cpu, &crash_saved_cpus));
memset(&info, 0, sizeof(info));
info.xen_major_version = xen_major_version();
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/common/sched_credit2.c Tue Nov 08 10:33:02 2011 +0100
@@ -1366,7 +1366,7 @@
struct csched_runqueue_data *trqd;
/* Check if new_cpu is valid */
- BUG_ON(!cpu_isset(new_cpu, CSCHED_PRIV(ops)->initialized));
+ BUG_ON(!cpumask_test_cpu(new_cpu, &CSCHED_PRIV(ops)->initialized));
trqd = RQD(ops, new_cpu);
@@ -1602,10 +1602,10 @@
scurr->vcpu->vcpu_id,
now);
- BUG_ON(!cpu_isset(cpu, CSCHED_PRIV(ops)->initialized));
+ BUG_ON(!cpumask_test_cpu(cpu, &CSCHED_PRIV(ops)->initialized));
rqd = RQD(ops, cpu);
- BUG_ON(!cpu_isset(cpu, rqd->active));
+ BUG_ON(!cpumask_test_cpu(cpu, &rqd->active));
/* Protected by runqueue lock */
@@ -1637,7 +1637,7 @@
BUG_ON(!is_idle_vcpu(scurr->vcpu) && scurr->rqd != rqd);
/* Clear "tickled" bit now that we've been scheduled */
- if ( cpu_isset(cpu, rqd->tickled) )
+ if ( cpumask_test_cpu(cpu, &rqd->tickled) )
cpu_clear(cpu, rqd->tickled);
/* Update credits */
@@ -1708,7 +1708,7 @@
}
/* Clear the idle mask if necessary */
- if ( cpu_isset(cpu, rqd->idle) )
+ if ( cpumask_test_cpu(cpu, &rqd->idle) )
cpu_clear(cpu, rqd->idle);
snext->start_time = now;
@@ -1724,7 +1724,7 @@
else
{
/* Update the idle mask if necessary */
- if ( !cpu_isset(cpu, rqd->idle) )
+ if ( !cpumask_test_cpu(cpu, &rqd->idle) )
cpu_set(cpu, rqd->idle);
/* Make sure avgload gets updated periodically even
* if there's no activity */
@@ -1885,7 +1885,7 @@
spin_lock_irqsave(&prv->lock, flags);
- if ( cpu_isset(cpu, prv->initialized) )
+ if ( cpumask_test_cpu(cpu, &prv->initialized) )
{
printk("%s: Strange, cpu %d already initialized!\n", __func__, cpu);
spin_unlock_irqrestore(&prv->lock, flags);
@@ -1912,7 +1912,7 @@
rqd=prv->rqd + rqi;
printk("Adding cpu %d to runqueue %d\n", cpu, rqi);
- if ( ! cpu_isset(rqi, prv->active_queues) )
+ if ( ! cpumask_test_cpu(rqi, &prv->active_queues) )
{
printk(" First cpu on runqueue, activating\n");
activate_runqueue(prv, rqi);
@@ -1963,7 +1963,7 @@
spin_lock_irqsave(&prv->lock, flags);
- BUG_ON( !cpu_isset(cpu, prv->initialized));
+ BUG_ON(!cpumask_test_cpu(cpu, &prv->initialized));
/* Find the old runqueue and remove this cpu from it */
rqi = prv->runq_map[cpu];
@@ -1973,7 +1973,7 @@
/* No need to save IRQs here, they're already disabled */
spin_lock(&rqd->lock);
- BUG_ON(!cpu_isset(cpu, rqd->idle));
+ BUG_ON(!cpumask_test_cpu(cpu, &rqd->idle));
printk("Removing cpu %d from runqueue %d\n", cpu, rqi);
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/common/sched_sedf.c Tue Nov 08 10:33:02 2011 +0100
@@ -820,7 +820,7 @@
/* Tasklet work (which runs in idle VCPU context) overrides all else. */
if ( tasklet_work_scheduled ||
(list_empty(runq) && list_empty(waitq)) ||
- unlikely(!cpu_isset(cpu, *SEDF_CPUONLINE(per_cpu(cpupool, cpu)))) )
+ unlikely(!cpumask_test_cpu(cpu, SEDF_CPUONLINE(per_cpu(cpupool,
cpu)))) )
{
ret.task = IDLETASK(cpu);
ret.time = SECONDS(1);
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/common/trace.c
--- a/xen/common/trace.c Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/common/trace.c Tue Nov 08 10:33:02 2011 +0100
@@ -323,7 +323,7 @@
& ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
return 0;
- if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
+ if ( !cpumask_test_cpu(smp_processor_id(), &tb_cpu_mask) )
return 0;
return 1;
@@ -711,7 +711,7 @@
& ((event >> TRC_SUBCLS_SHIFT) & 0xf )) == 0 )
return;
- if ( !cpu_isset(smp_processor_id(), tb_cpu_mask) )
+ if ( !cpumask_test_cpu(smp_processor_id(), &tb_cpu_mask) )
return;
/* Read tb_init_done /before/ t_bufs. */
diff -r 67defeb4baa6 -r f9c4494e77c8 xen/include/xen/cpumask.h
--- a/xen/include/xen/cpumask.h Tue Nov 08 10:31:47 2011 +0100
+++ b/xen/include/xen/cpumask.h Tue Nov 08 10:33:02 2011 +0100
@@ -19,6 +19,7 @@
* void cpumask_clear(mask) clear all bits
* int cpumask_test_cpu(cpu, mask) true iff bit 'cpu' set in mask
* int cpumask_test_and_set_cpu(cpu, mask) test and set bit 'cpu' in mask
+ * int cpumask_test_and_clear_cpu(cpu, mask) test and clear bit 'cpu' in mask
*
* void cpumask_and(dst, src1, src2) dst = src1 & src2 [intersection]
* void cpumask_or(dst, src1, src2) dst = src1 | src2 [union]
@@ -64,12 +65,12 @@
* for_each_present_cpu(cpu) for-loop cpu over cpu_present_map
*
* Subtlety:
- * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
+ * 1) The 'type-checked' form of cpumask_test_cpu() causes gcc (3.3.2, anyway)
* to generate slightly worse code. Note for example the additional
* 40 lines of assembly code compiling the "for each possible cpu"
* loops buried in the disk_stat_read() macros calls when compiling
* drivers/block/genhd.c (arch i386, CONFIG_SMP=y). So use a simple
- * one-line #define for cpu_isset(), instead of wrapping an inline
+ * one-line #define for cpumask_test_cpu(), instead of wrapping an inline
* inside a macro, the way we do the other calls.
*/
@@ -121,17 +122,12 @@
/* No static inline type checking - see Subtlety (1) above. */
#define cpumask_test_cpu(cpu, cpumask) \
test_bit(cpumask_check(cpu), (cpumask)->bits)
-#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
-#define cpu_test_and_set(cpu, cpumask) \
- cpumask_test_and_set_cpu(cpu, &(cpumask))
static inline int cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
{
return test_and_set_bit(cpumask_check(cpu), addr->bits);
}
-#define cpu_test_and_clear(cpu, cpumask) \
- cpumask_test_and_clear_cpu(cpu, &(cpumask))
static inline int cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
{
return test_and_clear_bit(cpumask_check(cpu), addr->bits);
@@ -444,9 +440,9 @@
#define num_online_cpus() cpumask_weight(&cpu_online_map)
#define num_possible_cpus() cpumask_weight(&cpu_possible_map)
#define num_present_cpus() cpumask_weight(&cpu_present_map)
-#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map)
-#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map)
-#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map)
+#define cpu_online(cpu) cpumask_test_cpu(cpu, &cpu_online_map)
+#define cpu_possible(cpu) cpumask_test_cpu(cpu, &cpu_possible_map)
+#define cpu_present(cpu) cpumask_test_cpu(cpu, &cpu_present_map)
#else
#define num_online_cpus() 1
#define num_possible_cpus() 1
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog