# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1172837512 0
# Node ID 3ac19fda0bc256bac20a4decf7e13bb086162220
# Parent bb22c21e1af76b2071e84f25433814ab829e4b39
linux: Support new 'fast suspend' mode which does not require all
auxiliary CPUs to be hot-unplugged.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
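
For reference, a condensed view of the new path is sketched below. This
sketch is not part of the patch: xen_fast_suspend_sketch() is a hypothetical
wrapper that stitches together calls taken from the hunks that follow, and it
reads the feature flag inline although the patch reads it once at
shutdown-watcher setup.

	/*
	 * Sketch only: condensed view of the new suspend flow.  The wrapper
	 * itself is hypothetical; every call it makes appears in the diff.
	 */
	static int xen_fast_suspend_sketch(void)
	{
		int fast_suspend = 0, err;

		/* reboot.c: the toolstack advertises the feature via xenstore. */
		xenbus_scanf(XBT_NIL, "control",
			     "platform-feature-multiprocessor-suspend",
			     "%d", &fast_suspend);

		if (fast_suspend) {
			/*
			 * machine_reboot.c: keep the APs online and freeze
			 * them with stop_machine_run() while CPU0 runs
			 * take_machine_down().
			 */
			xenbus_suspend();
			err = stop_machine_run(take_machine_down,
					       &fast_suspend, 0);
		} else {
			/*
			 * Old path: take_machine_down() offlines the APs via
			 * the smp_suspend() loop before suspending.
			 */
			err = take_machine_down(&fast_suspend);
		}

		return (err < 0) ? err : 0;
	}

On resume, take_machine_down() brings the APs straight back up with VCPUOP_up
in fast mode, and irq_resume() now rebinds per-CPU VIRQs and IPIs on every CPU
via restore_cpu_virqs()/restore_cpu_ipis(), since secondary CPUs retain their
bindings across a fast suspend.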
---
linux-2.6-xen-sparse/drivers/xen/core/evtchn.c | 121 ++++++++-------
linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c | 130 ++++++++++-------
linux-2.6-xen-sparse/drivers/xen/core/reboot.c | 18 +-
linux-2.6-xen-sparse/include/xen/cpu_hotplug.h | 7
4 files changed, 165 insertions(+), 111 deletions(-)
diff -r bb22c21e1af7 -r 3ac19fda0bc2 linux-2.6-xen-sparse/drivers/xen/core/evtchn.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Fri Mar 02 12:11:10 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c	Fri Mar 02 12:11:52 2007 +0000
@@ -888,11 +888,67 @@ void unmask_evtchn(int port)
}
EXPORT_SYMBOL_GPL(unmask_evtchn);
+static void restore_cpu_virqs(int cpu)
+{
+ struct evtchn_bind_virq bind_virq;
+ int virq, irq, evtchn;
+
+ for (virq = 0; virq < NR_VIRQS; virq++) {
+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+ continue;
+
+ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
+
+ /* Get a new binding from Xen. */
+ bind_virq.virq = virq;
+ bind_virq.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq) != 0)
+ BUG();
+ evtchn = bind_virq.port;
+
+ /* Record the new mapping. */
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+ bind_evtchn_to_cpu(evtchn, cpu);
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+ }
+}
+
+static void restore_cpu_ipis(int cpu)
+{
+ struct evtchn_bind_ipi bind_ipi;
+ int ipi, irq, evtchn;
+
+ for (ipi = 0; ipi < NR_IPIS; ipi++) {
+ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+ continue;
+
+ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
+
+ /* Get a new binding from Xen. */
+ bind_ipi.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+ &bind_ipi) != 0)
+ BUG();
+ evtchn = bind_ipi.port;
+
+ /* Record the new mapping. */
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+ bind_evtchn_to_cpu(evtchn, cpu);
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+
+ }
+}
+
void irq_resume(void)
{
- struct evtchn_bind_virq bind_virq;
- struct evtchn_bind_ipi bind_ipi;
- int cpu, pirq, virq, ipi, irq, evtchn;
+ int cpu, pirq, irq, evtchn;
init_evtchn_cpu_bindings();
@@ -903,16 +959,6 @@ void irq_resume(void)
/* Check that no PIRQs are still bound. */
for (pirq = 0; pirq < NR_PIRQS; pirq++)
BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
-
- /* Secondary CPUs must have no VIRQ or IPI bindings. */
- for_each_possible_cpu(cpu) {
- if (cpu == 0)
- continue;
- for (virq = 0; virq < NR_VIRQS; virq++)
- BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
- for (ipi = 0; ipi < NR_IPIS; ipi++)
- BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
- }
/* No IRQ <-> event-channel mappings. */
for (irq = 0; irq < NR_IRQS; irq++)
@@ -920,50 +966,11 @@ void irq_resume(void)
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
evtchn_to_irq[evtchn] = -1;
- /* Primary CPU: rebind VIRQs automatically. */
- for (virq = 0; virq < NR_VIRQS; virq++) {
- if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
- continue;
-
- BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
-
- /* Get a new binding from Xen. */
- bind_virq.virq = virq;
- bind_virq.vcpu = 0;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
- &bind_virq) != 0)
- BUG();
- evtchn = bind_virq.port;
-
- /* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-
- /* Ready for use. */
- unmask_evtchn(evtchn);
- }
-
- /* Primary CPU: rebind IPIs automatically. */
- for (ipi = 0; ipi < NR_IPIS; ipi++) {
- if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
- continue;
-
- BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
-
- /* Get a new binding from Xen. */
- bind_ipi.vcpu = 0;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
- &bind_ipi) != 0)
- BUG();
- evtchn = bind_ipi.port;
-
- /* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-
- /* Ready for use. */
- unmask_evtchn(evtchn);
- }
+ for_each_possible_cpu(cpu) {
+ restore_cpu_virqs(cpu);
+ restore_cpu_ipis(cpu);
+ }
+
}
void __init xen_init_IRQ(void)
diff -r bb22c21e1af7 -r 3ac19fda0bc2 linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c	Fri Mar 02 12:11:10 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/core/machine_reboot.c	Fri Mar 02 12:11:52 2007 +0000
@@ -1,4 +1,3 @@
-#define __KERNEL_SYSCALLS__
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -7,6 +6,7 @@
#include <linux/reboot.h>
#include <linux/sysrq.h>
#include <linux/stringify.h>
+#include <linux/stop_machine.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <xen/evtchn.h>
@@ -18,6 +18,7 @@
#include <xen/gnttab.h>
#include <xen/xencons.h>
#include <xen/cpu_hotplug.h>
+#include <xen/interface/vcpu.h>
#if defined(__i386__) || defined(__x86_64__)
@@ -98,7 +99,6 @@ static void post_suspend(int suspend_can
xen_start_info->console.domU.mfn =
pfn_to_mfn(xen_start_info->console.domU.mfn);
} else {
- extern cpumask_t cpu_initialized_map;
cpu_initialized_map = cpumask_of_cpu(0);
}
@@ -133,11 +133,71 @@ static void post_suspend(int suspend_can
#endif
-int __xen_suspend(void)
+static int take_machine_down(void *p_fast_suspend)
+{
+ int fast_suspend = *(int *)p_fast_suspend;
+ int suspend_cancelled, err, cpu;
+ extern void time_resume(void);
+
+ if (fast_suspend) {
+ preempt_disable();
+ } else {
+ for (;;) {
+ err = smp_suspend();
+ if (err)
+ return err;
+
+ xenbus_suspend();
+ preempt_disable();
+
+ if (num_online_cpus() == 1)
+ break;
+
+ preempt_enable();
+ xenbus_suspend_cancel();
+ }
+ }
+
+ mm_pin_all();
+ local_irq_disable();
+ preempt_enable();
+ gnttab_suspend();
+ pre_suspend();
+
+ /*
+ * This hypercall returns 1 if suspend was cancelled or the domain was
+ * merely checkpointed, and 0 if it is resuming in a new domain.
+ */
+ suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+
+ post_suspend(suspend_cancelled);
+ gnttab_resume();
+ if (!suspend_cancelled)
+ irq_resume();
+ time_resume();
+ switch_idle_mm();
+ local_irq_enable();
+
+ if (fast_suspend && !suspend_cancelled) {
+ /*
+ * In fast-suspend mode the APs may not be brought back online
+ * when we resume. In that case we do it here.
+ */
+ for_each_online_cpu(cpu) {
+ if (cpu == 0)
+ continue;
+ cpu_set_initialized(cpu);
+ err = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+ BUG_ON(err);
+ }
+ }
+
+ return suspend_cancelled;
+}
+
+int __xen_suspend(int fast_suspend)
{
int err, suspend_cancelled;
-
- extern void time_resume(void);
BUG_ON(smp_processor_id() != 0);
BUG_ON(in_interrupt());
@@ -150,48 +210,17 @@ int __xen_suspend(void)
}
#endif
- for (;;) {
- err = smp_suspend();
- if (err)
- return err;
-
+ if (fast_suspend) {
xenbus_suspend();
- preempt_disable();
-
- if (num_online_cpus() == 1)
- break;
-
- preempt_enable();
- xenbus_suspend_cancel();
- }
-
- mm_pin_all();
- local_irq_disable();
- preempt_enable();
-
- gnttab_suspend();
-
- pre_suspend();
-
- /*
- * This hypercall returns 1 if suspend was cancelled or the domain was
- * merely checkpointed, and 0 if it is resuming in a new domain.
- */
- suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
-
- post_suspend(suspend_cancelled);
-
- gnttab_resume();
-
- if (!suspend_cancelled)
- irq_resume();
-
- time_resume();
-
- switch_idle_mm();
-
- local_irq_enable();
-
+ err = stop_machine_run(take_machine_down, &fast_suspend, 0);
+ } else {
+ err = take_machine_down(&fast_suspend);
+ }
+
+ if (err < 0)
+ return err;
+
+ suspend_cancelled = err;
if (!suspend_cancelled) {
xencons_resume();
xenbus_resume();
@@ -199,7 +228,8 @@ int __xen_suspend(void)
xenbus_suspend_cancel();
}
- smp_resume();
-
- return err;
-}
+ if (!fast_suspend)
+ smp_resume();
+
+ return 0;
+}
diff -r bb22c21e1af7 -r 3ac19fda0bc2 linux-2.6-xen-sparse/drivers/xen/core/reboot.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Fri Mar 02 12:11:10 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/core/reboot.c	Fri Mar 02 12:11:52 2007 +0000
@@ -24,13 +24,16 @@ MODULE_LICENSE("Dual BSD/GPL");
/* Ignore multiple shutdown requests. */
static int shutting_down = SHUTDOWN_INVALID;
+/* Can we leave APs online when we suspend? */
+static int fast_suspend;
+
static void __shutdown_handler(void *unused);
static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
#ifdef CONFIG_XEN
-int __xen_suspend(void);
+int __xen_suspend(int fast_suspend);
#else
-#define __xen_suspend() (void)0
+#define __xen_suspend(fast_suspend) 0
#endif
static int shutdown_process(void *__unused)
@@ -44,7 +47,8 @@ static int shutdown_process(void *__unus
if ((shutting_down == SHUTDOWN_POWEROFF) ||
(shutting_down == SHUTDOWN_HALT)) {
- if (call_usermodehelper("/sbin/poweroff", poweroff_argv, envp, 0) < 0) {
+ if (call_usermodehelper("/sbin/poweroff", poweroff_argv,
+ envp, 0) < 0) {
#ifdef CONFIG_XEN
sys_reboot(LINUX_REBOOT_MAGIC1,
LINUX_REBOOT_MAGIC2,
@@ -61,7 +65,9 @@ static int shutdown_process(void *__unus
static int xen_suspend(void *__unused)
{
- __xen_suspend();
+ int err = __xen_suspend(fast_suspend);
+ if (err)
+ printk(KERN_ERR "Xen suspend failed (%d)\n", err);
shutting_down = SHUTDOWN_INVALID;
return 0;
}
@@ -193,6 +199,10 @@ static int setup_shutdown_watcher(struct
{
int err;
+ xenbus_scanf(XBT_NIL, "control",
+ "platform-feature-multiprocessor-suspend",
+ "%d", &fast_suspend);
+
err = register_xenbus_watch(&shutdown_watch);
if (err)
printk(KERN_ERR "Failed to set shutdown watcher\n");
diff -r bb22c21e1af7 -r 3ac19fda0bc2 linux-2.6-xen-sparse/include/xen/cpu_hotplug.h
--- a/linux-2.6-xen-sparse/include/xen/cpu_hotplug.h	Fri Mar 02 12:11:10 2007 +0000
+++ b/linux-2.6-xen-sparse/include/xen/cpu_hotplug.h	Fri Mar 02 12:11:52 2007 +0000
@@ -3,6 +3,13 @@
#include <linux/kernel.h>
#include <linux/cpumask.h>
+
+#if defined(CONFIG_X86)
+extern cpumask_t cpu_initialized_map;
+#define cpu_set_initialized(cpu) cpu_set(cpu, cpu_initialized_map)
+#else
+#define cpu_set_initialized(cpu) ((void)0)
+#endif
#if defined(CONFIG_HOTPLUG_CPU)