# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1273853272 -3600
# Node ID ba2cbbea9a6972d2da9d1e94b66306ab7266c89d
# Parent df955a89b53c0bb4614476eb655538393e48c2a0
Move cpu hotplug routines into common cpu.c file.
Also simplify the locking (reverting to use of spin_trylock, as
returning EBUSY/EAGAIN seems unavoidable after all). In particular
this should continue to ensure that stop_machine_run() does not have
cpu_online_map change under its feet.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/ia64/xen/xensetup.c      |    1
 xen/arch/x86/acpi/power.c         |    9 -
 xen/arch/x86/platform_hypercall.c |    8 -
 xen/arch/x86/setup.c              |    1
 xen/arch/x86/smpboot.c            |  234 +++-----------------------------------
 xen/arch/x86/sysctl.c             |    1
 xen/common/cpu.c                  |  205 +++++++++++++++++++++++++++++----
 xen/common/spinlock.c             |   13 +-
 xen/common/stop_machine.c         |   20 ++-
 xen/include/asm-x86/smp.h         |    4
 xen/include/xen/cpu.h             |   21 ++-
 xen/include/xen/spinlock.h        |    2
 12 files changed, 261 insertions(+), 258 deletions(-)
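
As context for the locking note above: the sketch below is a minimal,
self-contained C11 model (not Xen code; the struct and function names are
invented for illustration) of the recursive-trylock semantics that the new
spin_trylock_recursive()/get_cpu_maps() pair provides. The owning CPU may
re-take the lock, which is what lets stop_machine_run() call get_cpu_maps()
inside a cpu_down() that already holds it, while any other CPU fails
immediately and its caller returns -EBUSY instead of spinning.

    /* Standalone model of a recursive trylock; compile with -std=c11. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct rec_lock {
        atomic_flag raw;          /* underlying non-blocking lock */
        int recurse_cpu;          /* owning CPU, or -1 when free */
        unsigned int recurse_cnt; /* recursion depth */
    };

    static int rec_trylock(struct rec_lock *l, int cpu)
    {
        if ( l->recurse_cpu != cpu )
        {
            /* Not ours yet: a single non-blocking attempt, as in the patch. */
            if ( atomic_flag_test_and_set(&l->raw) )
                return 0;     /* held by another CPU: caller sees -EBUSY */
            l->recurse_cpu = cpu;
        }
        l->recurse_cnt++;
        return 1;
    }

    static void rec_unlock(struct rec_lock *l)
    {
        if ( --l->recurse_cnt == 0 )
        {
            l->recurse_cpu = -1;
            atomic_flag_clear(&l->raw);
        }
    }

    int main(void)
    {
        struct rec_lock l = { ATOMIC_FLAG_INIT, -1, 0 };

        printf("cpu0 take:    %d\n", rec_trylock(&l, 0)); /* 1 */
        printf("cpu0 re-take: %d\n", rec_trylock(&l, 0)); /* 1: recursion */
        printf("cpu1 take:    %d\n", rec_trylock(&l, 1)); /* 0: fail, don't spin */

        rec_unlock(&l);
        rec_unlock(&l);
        return 0;
    }

The real lock additionally asserts a shallow recursion bound
(recurse_cnt < 0xf), per the spinlock.c hunk below.
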
diff -r df955a89b53c -r ba2cbbea9a69 xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c Fri May 14 15:22:48 2010 +0100
+++ b/xen/arch/ia64/xen/xensetup.c Fri May 14 17:07:52 2010 +0100
@@ -32,6 +32,7 @@
#include <xsm/acm/acm_hooks.h>
#include <asm/sn/simulator.h>
#include <asm/sal.h>
+#include <xen/cpu.h>
unsigned long total_pages;
diff -r df955a89b53c -r ba2cbbea9a69 xen/arch/x86/acpi/power.c
--- a/xen/arch/x86/acpi/power.c Fri May 14 15:22:48 2010 +0100
+++ b/xen/arch/x86/acpi/power.c Fri May 14 17:07:52 2010 +0100
@@ -25,6 +25,7 @@
#include <xen/domain.h>
#include <xen/console.h>
#include <xen/iommu.h>
+#include <xen/cpu.h>
#include <public/platform.h>
#include <asm/tboot.h>
@@ -138,12 +139,8 @@ static int enter_state(u32 state)
freeze_domains();
- disable_nonboot_cpus();
- if ( num_online_cpus() != 1 )
- {
- error = -EBUSY;
+ if ( (error = disable_nonboot_cpus()) )
goto enable_cpu;
- }
cpufreq_del_cpu(0);
@@ -207,7 +204,9 @@ static int enter_state(u32 state)
enable_cpu:
cpufreq_add_cpu(0);
microcode_resume_cpu(0);
+ mtrr_aps_sync_begin();
enable_nonboot_cpus();
+ mtrr_aps_sync_end();
thaw_domains();
spin_unlock(&pm_lock);
return error;
diff -r df955a89b53c -r ba2cbbea9a69 xen/arch/x86/platform_hypercall.c
--- a/xen/arch/x86/platform_hypercall.c Fri May 14 15:22:48 2010 +0100
+++ b/xen/arch/x86/platform_hypercall.c Fri May 14 17:07:52 2010 +0100
@@ -410,7 +410,11 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
g_info = &op->u.pcpu_info;
- spin_lock(&cpu_add_remove_lock);
+ if ( !get_cpu_maps() )
+ {
+ ret = -EBUSY;
+ break;
+ }
if ( (g_info->xen_cpuid >= NR_CPUS) ||
(g_info->xen_cpuid < 0) ||
@@ -429,7 +433,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE(xe
g_info->max_present = last_cpu(cpu_present_map);
- spin_unlock(&cpu_add_remove_lock);
+ put_cpu_maps();
ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
}
diff -r df955a89b53c -r ba2cbbea9a69 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c Fri May 14 15:22:48 2010 +0100
+++ b/xen/arch/x86/setup.c Fri May 14 17:07:52 2010 +0100
@@ -43,6 +43,7 @@
#include <asm/bzimage.h> /* for bzimage_headroom */
#include <asm/mach-generic/mach_apic.h> /* for generic_apic_probe */
#include <asm/setup.h>
+#include <xen/cpu.h>
#if defined(CONFIG_X86_64)
#define BOOTSTRAP_DIRECTMAP_END (1UL << 32) /* 4GB */
diff -r df955a89b53c -r ba2cbbea9a69 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c Fri May 14 15:22:48 2010 +0100
+++ b/xen/arch/x86/smpboot.c Fri May 14 17:07:52 2010 +0100
@@ -46,7 +46,6 @@
#include <xen/tasklet.h>
#include <xen/serial.h>
#include <xen/numa.h>
-#include <xen/event.h>
#include <xen/cpu.h>
#include <asm/current.h>
#include <asm/mc146818rtc.h>
@@ -58,7 +57,6 @@
#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
-#include <xen/stop_machine.h>
#include <acpi/cpufreq/processor_perf.h>
#define setup_trampoline() (bootsym_phys(trampoline_realmode_entry))
@@ -1310,169 +1308,9 @@ void __cpu_die(unsigned int cpu)
}
}
-static int take_cpu_down(void *unused)
-{
- void *hcpu = (void *)(long)smp_processor_id();
- int rc;
-
- spin_lock(&cpu_add_remove_lock);
-
- if (cpu_notifier_call_chain(CPU_DYING, hcpu) != NOTIFY_DONE)
- BUG();
-
- rc = __cpu_disable();
-
- spin_unlock(&cpu_add_remove_lock);
-
- return rc;
-}
-
-/*
- * Protects against concurrent offline/online requests for a single CPU.
- * We need this extra protection because cpu_down() cannot continuously hold
- * the cpu_add_remove_lock, as it cannot be held across stop_machine_run().
- */
-static cpumask_t cpu_offlining;
-
-int cpu_down(unsigned int cpu)
-{
- int err, notifier_rc, nr_calls;
- void *hcpu = (void *)(long)cpu;
-
- spin_lock(&cpu_add_remove_lock);
-
- if ((cpu == 0) || !cpu_online(cpu) || cpu_isset(cpu, cpu_offlining)) {
- spin_unlock(&cpu_add_remove_lock);
- return -EINVAL;
- }
-
- cpu_set(cpu, cpu_offlining);
-
- printk("Prepare to bring CPU%d down...\n", cpu);
-
- notifier_rc = __cpu_notifier_call_chain(
- CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
- if (notifier_rc != NOTIFY_DONE) {
- err = notifier_to_errno(notifier_rc);
- nr_calls--;
- notifier_rc = __cpu_notifier_call_chain(
- CPU_DOWN_FAILED, hcpu, nr_calls, NULL);
- BUG_ON(notifier_rc != NOTIFY_DONE);
- goto out;
- }
-
- spin_unlock(&cpu_add_remove_lock);
- err = stop_machine_run(take_cpu_down, NULL, cpu);
- spin_lock(&cpu_add_remove_lock);
-
- if (err < 0) {
- notifier_rc = cpu_notifier_call_chain(CPU_DOWN_FAILED, hcpu);
- BUG_ON(notifier_rc != NOTIFY_DONE);
- goto out;
- }
-
- __cpu_die(cpu);
- BUG_ON(cpu_online(cpu));
-
- notifier_rc = cpu_notifier_call_chain(CPU_DEAD, hcpu);
- BUG_ON(notifier_rc != NOTIFY_DONE);
-
-out:
- if (!err) {
- printk("CPU %u is now offline\n", cpu);
- send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
- } else {
- printk("Failed to take down CPU %u (error %d)\n", cpu, err);
- }
- cpu_clear(cpu, cpu_offlining);
- spin_unlock(&cpu_add_remove_lock);
- return err;
-}
-
-int cpu_up(unsigned int cpu)
-{
- int err = 0;
-
- spin_lock(&cpu_add_remove_lock);
-
- if (cpu_online(cpu) || cpu_isset(cpu, cpu_offlining)) {
- err = -EINVAL;
- goto out;
- }
-
- err = __cpu_up(cpu);
- if (err < 0)
- goto out;
-
-out:
- if (!err)
- send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
- spin_unlock(&cpu_add_remove_lock);
- return err;
-}
-
-/* From kernel/power/main.c */
-/* This is protected by pm_sem semaphore */
-static cpumask_t frozen_cpus;
-
-void disable_nonboot_cpus(void)
-{
- int cpu, error;
-
- error = 0;
- cpus_clear(frozen_cpus);
- printk("Freezing cpus ...\n");
- for_each_online_cpu(cpu) {
- if (cpu == 0)
- continue;
- error = cpu_down(cpu);
- /* No need to check EBUSY here */
- ASSERT(error != -EBUSY);
- if (!error) {
- cpu_set(cpu, frozen_cpus);
- printk("CPU%d is down\n", cpu);
- continue;
- }
- printk("Error taking cpu %d down: %d\n", cpu, error);
- }
- BUG_ON(raw_smp_processor_id() != 0);
- if (error)
- panic("cpus not sleeping");
-}
-
-void enable_nonboot_cpus(void)
-{
- int cpu, error;
-
- printk("Thawing cpus ...\n");
- mtrr_aps_sync_begin();
- for_each_cpu_mask(cpu, frozen_cpus) {
- error = cpu_up(cpu);
- /* No conflict will happen here */
- ASSERT(error != -EBUSY);
- if (!error) {
- printk("CPU%d is up\n", cpu);
- continue;
- }
- printk("Error taking cpu %d up: %d\n", cpu, error);
- panic("Not enough cpus");
- }
- mtrr_aps_sync_end();
- cpus_clear(frozen_cpus);
-
- /*
- * Cleanup possible dangling ends after sleep...
- */
- smpboot_restore_warm_reset_vector();
-}
-
int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm)
{
- int cpu = -1;
-
-#ifndef CONFIG_ACPI
- return -ENOSYS;
-#endif
+ int node, cpu = -1;
dprintk(XENLOG_DEBUG, "cpu_add apic_id %x acpi_id %x pxm %x\n",
apic_id, acpi_id, pxm);
@@ -1480,68 +1318,53 @@ int cpu_add(uint32_t apic_id, uint32_t a
if ( acpi_id > MAX_MADT_ENTRIES || apic_id > MAX_APICS || pxm > 256 )
return -EINVAL;
+ if ( !cpu_hotplug_begin() )
+ return -EBUSY;
+
/* Detect if the cpu has been added before */
- if ( x86_acpiid_to_apicid[acpi_id] != 0xff)
+ if ( x86_acpiid_to_apicid[acpi_id] != 0xff )
{
- if (x86_acpiid_to_apicid[acpi_id] != apic_id)
- return -EINVAL;
- else
- return -EEXIST;
+ cpu = (x86_acpiid_to_apicid[acpi_id] != apic_id)
+ ? -EINVAL : -EEXIST;
+ goto out;
}
if ( physid_isset(apic_id, phys_cpu_present_map) )
- return -EEXIST;
-
- spin_lock(&cpu_add_remove_lock);
-
- cpu = mp_register_lapic(apic_id, 1);
-
- if (cpu < 0)
{
- spin_unlock(&cpu_add_remove_lock);
- return cpu;
- }
+ cpu = -EEXIST;
+ goto out;
+ }
+
+ if ( (cpu = mp_register_lapic(apic_id, 1)) < 0 )
+ goto out;
x86_acpiid_to_apicid[acpi_id] = apic_id;
if ( !srat_disabled() )
{
- int node;
-
- node = setup_node(pxm);
- if (node < 0)
+ if ( (node = setup_node(pxm)) < 0 )
{
dprintk(XENLOG_WARNING,
"Setup node failed for pxm %x\n", pxm);
x86_acpiid_to_apicid[acpi_id] = 0xff;
mp_unregister_lapic(apic_id, cpu);
- spin_unlock(&cpu_add_remove_lock);
- return node;
+ cpu = node;
+ goto out;
}
apicid_to_node[apic_id] = node;
}
srat_detect_node(cpu);
numa_add_cpu(cpu);
- spin_unlock(&cpu_add_remove_lock);
dprintk(XENLOG_INFO, "Add CPU %x with index %x\n", apic_id, cpu);
+ out:
+ cpu_hotplug_done();
return cpu;
}
int __devinit __cpu_up(unsigned int cpu)
{
- int notifier_rc, ret = 0, nr_calls;
- void *hcpu = (void *)(long)cpu;
-
- notifier_rc = __cpu_notifier_call_chain(
- CPU_UP_PREPARE, hcpu, -1, &nr_calls);
- if (notifier_rc != NOTIFY_DONE) {
- ret = notifier_to_errno(notifier_rc);
- nr_calls--;
- goto fail;
- }
-
/*
* We do warm boot only on cpus that had booted earlier
* Otherwise cold boot is all handled from smp_boot_cpus().
@@ -1549,20 +1372,15 @@ int __devinit __cpu_up(unsigned int cpu)
* when a cpu is taken offline from cpu_exit_clear().
*/
if (!cpu_isset(cpu, cpu_callin_map)) {
- ret = __smp_prepare_cpu(cpu);
+ if (__smp_prepare_cpu(cpu))
+ return -EIO;
smpboot_restore_warm_reset_vector();
- }
-
- if (ret) {
- ret = -EIO;
- goto fail;
}
/* In case one didn't come up */
if (!cpu_isset(cpu, cpu_callin_map)) {
printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
- ret = -EIO;
- goto fail;
+ return -EIO;
}
/* Unleash the CPU! */
@@ -1572,15 +1390,7 @@ int __devinit __cpu_up(unsigned int cpu)
process_pending_softirqs();
}
- notifier_rc = cpu_notifier_call_chain(CPU_ONLINE, hcpu);
- BUG_ON(notifier_rc != NOTIFY_DONE);
return 0;
-
- fail:
- notifier_rc = __cpu_notifier_call_chain(
- CPU_UP_CANCELED, hcpu, nr_calls, NULL);
- BUG_ON(notifier_rc != NOTIFY_DONE);
- return ret;
}
diff -r df955a89b53c -r ba2cbbea9a69 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c Fri May 14 15:22:48 2010 +0100
+++ b/xen/arch/x86/sysctl.c Fri May 14 17:07:52 2010 +0100
@@ -25,6 +25,7 @@
#include <asm/processor.h>
#include <asm/numa.h>
#include <xen/nodemask.h>
+#include <xen/cpu.h>
#include <xsm/xsm.h>
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
diff -r df955a89b53c -r ba2cbbea9a69 xen/common/cpu.c
--- a/xen/common/cpu.c Fri May 14 15:22:48 2010 +0100
+++ b/xen/common/cpu.c Fri May 14 17:07:52 2010 +0100
@@ -1,6 +1,9 @@
#include <xen/config.h>
#include <xen/cpumask.h>
#include <xen/cpu.h>
+#include <xen/event.h>
+#include <xen/sched.h>
+#include <xen/stop_machine.h>
/*
* cpu_bit_bitmap[] is a special, "compressed" data structure that
@@ -26,35 +29,195 @@ const unsigned long cpu_bit_bitmap[BITS_
#endif
};
-DEFINE_SPINLOCK(cpu_add_remove_lock);
+static DEFINE_SPINLOCK(cpu_add_remove_lock);
+
+bool_t get_cpu_maps(void)
+{
+ return spin_trylock_recursive(&cpu_add_remove_lock);
+}
+
+void put_cpu_maps(void)
+{
+ spin_unlock_recursive(&cpu_add_remove_lock);
+}
+
+bool_t cpu_hotplug_begin(void)
+{
+ return get_cpu_maps();
+}
+
+void cpu_hotplug_done(void)
+{
+ put_cpu_maps();
+}
static RAW_NOTIFIER_HEAD(cpu_chain);
int register_cpu_notifier(struct notifier_block *nb)
{
int ret;
- spin_lock(&cpu_add_remove_lock);
+ if ( !spin_trylock(&cpu_add_remove_lock) )
+ BUG(); /* Should never fail as we are called only during boot. */
ret = raw_notifier_chain_register(&cpu_chain, nb);
spin_unlock(&cpu_add_remove_lock);
return ret;
}
-void unregister_cpu_notifier(struct notifier_block *nb)
-{
- spin_lock(&cpu_add_remove_lock);
- raw_notifier_chain_unregister(&cpu_chain, nb);
- spin_unlock(&cpu_add_remove_lock);
-}
-
-int cpu_notifier_call_chain(unsigned long val, void *v)
-{
- BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
- return raw_notifier_call_chain(&cpu_chain, val, v);
-}
-
-int __cpu_notifier_call_chain(
- unsigned long val, void *v, int nr_to_call, int *nr_calls)
-{
- BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
- return __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call, nr_calls);
-}
+static int take_cpu_down(void *unused)
+{
+ void *hcpu = (void *)(long)smp_processor_id();
+ if ( raw_notifier_call_chain(&cpu_chain, CPU_DYING, hcpu) != NOTIFY_DONE )
+ BUG();
+ return __cpu_disable();
+}
+
+int cpu_down(unsigned int cpu)
+{
+ int err, notifier_rc, nr_calls;
+ void *hcpu = (void *)(long)cpu;
+
+ if ( !cpu_hotplug_begin() )
+ return -EBUSY;
+
+ if ( (cpu == 0) || !cpu_online(cpu) )
+ {
+ cpu_hotplug_done();
+ return -EINVAL;
+ }
+
+ printk("Prepare to bring CPU%d down...\n", cpu);
+
+ notifier_rc = __raw_notifier_call_chain(
+ &cpu_chain, CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
+ if ( notifier_rc != NOTIFY_DONE )
+ {
+ err = notifier_to_errno(notifier_rc);
+ nr_calls--;
+ notifier_rc = __raw_notifier_call_chain(
+ &cpu_chain, CPU_DOWN_FAILED, hcpu, nr_calls, NULL);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+ goto out;
+ }
+
+ if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
+ {
+ notifier_rc = raw_notifier_call_chain(
+ &cpu_chain, CPU_DOWN_FAILED, hcpu);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+ goto out;
+ }
+
+ __cpu_die(cpu);
+ BUG_ON(cpu_online(cpu));
+
+ notifier_rc = raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+
+ out:
+ if ( !err )
+ {
+ printk("CPU %u is now offline\n", cpu);
+ send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+ }
+ else
+ {
+ printk("Failed to take down CPU %u (error %d)\n", cpu, err);
+ }
+ cpu_hotplug_done();
+ return err;
+}
+
+int cpu_up(unsigned int cpu)
+{
+ int notifier_rc, nr_calls, err = 0;
+ void *hcpu = (void *)(long)cpu;
+
+ if ( !cpu_hotplug_begin() )
+ return -EBUSY;
+
+ if ( cpu_online(cpu) || !cpu_present(cpu) )
+ {
+ cpu_hotplug_done();
+ return -EINVAL;
+ }
+
+ notifier_rc = __raw_notifier_call_chain(
+ &cpu_chain, CPU_UP_PREPARE, hcpu, -1, &nr_calls);
+ if ( notifier_rc != NOTIFY_DONE )
+ {
+ err = notifier_to_errno(notifier_rc);
+ nr_calls--;
+ goto fail;
+ }
+
+ err = __cpu_up(cpu);
+ if ( err < 0 )
+ goto fail;
+
+ notifier_rc = raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+
+ send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+
+ cpu_hotplug_done();
+ return 0;
+
+ fail:
+ notifier_rc = __raw_notifier_call_chain(
+ &cpu_chain, CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+ BUG_ON(notifier_rc != NOTIFY_DONE);
+ cpu_hotplug_done();
+ return err;
+}
+
+static cpumask_t frozen_cpus;
+
+int disable_nonboot_cpus(void)
+{
+ int cpu, error = 0;
+
+ BUG_ON(raw_smp_processor_id() != 0);
+
+ cpus_clear(frozen_cpus);
+
+ printk("Disabling non-boot CPUs ...\n");
+
+ for_each_online_cpu ( cpu )
+ {
+ if ( cpu == 0 )
+ continue;
+
+ if ( (error = cpu_down(cpu)) )
+ {
+ BUG_ON(error == -EBUSY);
+ printk("Error taking CPU%d down: %d\n", cpu, error);
+ break;
+ }
+
+ cpu_set(cpu, frozen_cpus);
+ printk("CPU%d is down\n", cpu);
+ }
+
+ BUG_ON(!error && (num_online_cpus() != 1));
+ return error;
+}
+
+void enable_nonboot_cpus(void)
+{
+ int cpu, error;
+
+ printk("Enabling non-boot CPUs ...\n");
+
+ for_each_cpu_mask ( cpu, frozen_cpus )
+ {
+ if ( (error = cpu_up(cpu)) )
+ {
+ BUG_ON(error == -EBUSY);
+ printk("Error taking CPU%d up: %d\n", cpu, error);
+ continue;
+ }
+ printk("CPU%d is up\n", cpu);
+ }
+
+ cpus_clear(frozen_cpus);
+}
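
A note on the nr_calls bookkeeping used by cpu_down() and cpu_up() above:
__raw_notifier_call_chain() counts the callbacks it actually invokes,
including a failing one, so the error path decrements nr_calls and replays
the DOWN_FAILED/UP_CANCELED event to only those subscribers that had
accepted the PREPARE event. A minimal standalone model of that rollback
convention (invented names, simplified to a function-pointer array):

    #include <stdio.h>

    enum { NOTIFY_DONE = 0, NOTIFY_BAD = 1 };

    typedef int (*notifier_fn)(unsigned long action);

    /* Walk up to nr_to_call entries (negative means all); count every
     * callback invoked, including a failing one, in *nr_calls. */
    static int chain_call(notifier_fn *chain, int len, unsigned long action,
                          int nr_to_call, int *nr_calls)
    {
        int i, rc = NOTIFY_DONE;

        for ( i = 0; i < len && nr_to_call != 0; i++, nr_to_call-- )
        {
            if ( nr_calls )
                (*nr_calls)++;
            if ( (rc = chain[i](action)) != NOTIFY_DONE )
                break;
        }
        return rc;
    }

    #define PREPARE 1UL
    #define FAILED  2UL

    static int ok_cb(unsigned long a)   { printf("cb0 got %lu\n", a);
                                          return NOTIFY_DONE; }
    static int veto_cb(unsigned long a) { return a == PREPARE ? NOTIFY_BAD
                                                              : NOTIFY_DONE; }

    int main(void)
    {
        notifier_fn chain[] = { ok_cb, veto_cb };
        int nr_calls = 0;

        if ( chain_call(chain, 2, PREPARE, -1, &nr_calls) != NOTIFY_DONE )
        {
            /* Roll back: the vetoing callback must not see FAILED. */
            nr_calls--;
            chain_call(chain, 2, FAILED, nr_calls, NULL);
        }
        return 0;
    }

Running this, cb0 sees both PREPARE and FAILED while the vetoing callback
sees only PREPARE, matching the convention the hunks above rely on.
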
diff -r df955a89b53c -r ba2cbbea9a69 xen/common/spinlock.c
--- a/xen/common/spinlock.c Fri May 14 15:22:48 2010 +0100
+++ b/xen/common/spinlock.c Fri May 14 17:07:52 2010 +0100
@@ -186,7 +186,7 @@ void _spin_barrier_irq(spinlock_t *lock)
local_irq_restore(flags);
}
-void _spin_lock_recursive(spinlock_t *lock)
+int _spin_trylock_recursive(spinlock_t *lock)
{
int cpu = smp_processor_id();
@@ -197,13 +197,22 @@ void _spin_lock_recursive(spinlock_t *lo
if ( likely(lock->recurse_cpu != cpu) )
{
- spin_lock(lock);
+ if ( !spin_trylock(lock) )
+ return 0;
lock->recurse_cpu = cpu;
}
/* We support only fairly shallow recursion, else the counter overflows. */
ASSERT(lock->recurse_cnt < 0xfu);
lock->recurse_cnt++;
+
+ return 1;
+}
+
+void _spin_lock_recursive(spinlock_t *lock)
+{
+ while ( !spin_trylock_recursive(lock) )
+ cpu_relax();
}
void _spin_unlock_recursive(spinlock_t *lock)
diff -r df955a89b53c -r ba2cbbea9a69 xen/common/stop_machine.c
--- a/xen/common/stop_machine.c Fri May 14 15:22:48 2010 +0100
+++ b/xen/common/stop_machine.c Fri May 14 17:07:52 2010 +0100
@@ -28,6 +28,7 @@
#include <xen/stop_machine.h>
#include <xen/errno.h>
#include <xen/smp.h>
+#include <xen/cpu.h>
#include <asm/current.h>
#include <asm/processor.h>
@@ -72,19 +73,20 @@ int stop_machine_run(int (*fn)(void *),
BUG_ON(!local_irq_is_enabled());
+ /* cpu_online_map must not change. */
+ if ( !get_cpu_maps() )
+ return -EBUSY;
+
allbutself = cpu_online_map;
cpu_clear(smp_processor_id(), allbutself);
nr_cpus = cpus_weight(allbutself);
- if ( nr_cpus == 0 )
- {
- BUG_ON(cpu != smp_processor_id());
- return (*fn)(data);
- }
-
/* Must not spin here as the holder will expect us to be descheduled. */
if ( !spin_trylock(&stopmachine_lock) )
+ {
+ put_cpu_maps();
return -EBUSY;
+ }
stopmachine_data.fn = fn;
stopmachine_data.fn_data = data;
@@ -113,12 +115,16 @@ int stop_machine_run(int (*fn)(void *),
spin_unlock(&stopmachine_lock);
+ put_cpu_maps();
+
return ret;
}
-static void stopmachine_action(unsigned long unused)
+static void stopmachine_action(unsigned long cpu)
{
enum stopmachine_state state = STOPMACHINE_START;
+
+ BUG_ON(cpu != smp_processor_id());
smp_mb();
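
The entry/exit discipline the stop_machine_run() hunk above establishes,
condensed into a hypothetical sketch (guarded_op is an invented name; the
lock names follow the hunk): every early exit between get_cpu_maps() and
put_cpu_maps() must release the maps, and both acquisitions are
non-blocking because a spinning waiter could deadlock with a holder that
expects it to be descheduled.

    /* Hypothetical sketch of the new locking shape; not a real function. */
    int guarded_op(void)
    {
        if ( !get_cpu_maps() )          /* cpu_online_map may be changing */
            return -EBUSY;

        if ( !spin_trylock(&stopmachine_lock) )
        {
            put_cpu_maps();             /* undo on every early exit */
            return -EBUSY;
        }

        /* ... cpu_online_map is stable here ... */

        spin_unlock(&stopmachine_lock);
        put_cpu_maps();
        return 0;
    }
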
diff -r df955a89b53c -r ba2cbbea9a69 xen/include/asm-x86/smp.h
--- a/xen/include/asm-x86/smp.h Fri May 14 15:22:48 2010 +0100
+++ b/xen/include/asm-x86/smp.h Fri May 14 17:07:52 2010 +0100
@@ -56,12 +56,8 @@ DECLARE_PER_CPU(int, cpu_state);
DECLARE_PER_CPU(int, cpu_state);
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-extern int cpu_down(unsigned int cpu);
-extern int cpu_up(unsigned int cpu);
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
-extern void disable_nonboot_cpus(void);
-extern void enable_nonboot_cpus(void);
int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm);
/*
diff -r df955a89b53c -r ba2cbbea9a69 xen/include/xen/cpu.h
--- a/xen/include/xen/cpu.h Fri May 14 15:22:48 2010 +0100
+++ b/xen/include/xen/cpu.h Fri May 14 17:07:52 2010 +0100
@@ -5,13 +5,16 @@
#include <xen/spinlock.h>
#include <xen/notifier.h>
-extern spinlock_t cpu_add_remove_lock;
+/* Safely access cpu_online_map, cpu_present_map, etc. */
+bool_t get_cpu_maps(void);
+void put_cpu_maps(void);
+/* Safely perform CPU hotplug and update cpu_online_map, etc. */
+bool_t cpu_hotplug_begin(void);
+void cpu_hotplug_done(void);
+
+/* Receive notification of CPU hotplug events. */
int register_cpu_notifier(struct notifier_block *nb);
-void unregister_cpu_notifier(struct notifier_block *nb);
-int cpu_notifier_call_chain(unsigned long val, void *v);
-int __cpu_notifier_call_chain(
- unsigned long val, void *v, int nr_to_call, int *nr_calls);
/*
* Notification actions: note that only CPU_{UP,DOWN}_PREPARE may fail ---
@@ -25,4 +28,12 @@ int __cpu_notifier_call_chain(
#define CPU_DYING 0x0007 /* CPU is nearly dead (in stop_machine ctxt) */
#define CPU_DEAD 0x0008 /* CPU is dead */
+/* Perform CPU hotplug. May return -EAGAIN. */
+int cpu_down(unsigned int cpu);
+int cpu_up(unsigned int cpu);
+
+/* Power management. */
+int disable_nonboot_cpus(void);
+void enable_nonboot_cpus(void);
+
#endif /* __XEN_CPU_H__ */
diff -r df955a89b53c -r ba2cbbea9a69 xen/include/xen/spinlock.h
--- a/xen/include/xen/spinlock.h Fri May 14 15:22:48 2010 +0100
+++ b/xen/include/xen/spinlock.h Fri May 14 17:07:52 2010 +0100
@@ -146,6 +146,7 @@ void _spin_barrier(spinlock_t *lock);
void _spin_barrier(spinlock_t *lock);
void _spin_barrier_irq(spinlock_t *lock);
+int _spin_trylock_recursive(spinlock_t *lock);
void _spin_lock_recursive(spinlock_t *lock);
void _spin_unlock_recursive(spinlock_t *lock);
@@ -191,6 +192,7 @@ int _rw_is_write_locked(rwlock_t *lock);
* are any critical regions that cannot form part of such a set, they can use
* standard spin_[un]lock().
*/
+#define spin_trylock_recursive(l) _spin_trylock_recursive(l)
#define spin_lock_recursive(l) _spin_lock_recursive(l)
#define spin_unlock_recursive(l) _spin_unlock_recursive(l)