Add SMP sleep support to Xen. A new utility is introduced to allow a
vcpu to continue its previous execution after migrating to a new
processor.
Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
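
The utility, self_migrate_and_continue(), temporarily hooks the vcpu's
schedule_tail so that a caller-supplied function runs on the target
processor once the migration completes; the saved schedule_tail and
cpu affinity are restored afterwards. A minimal usage sketch follows
(do_work and arg are hypothetical names, not part of this patch):

    static void do_work(void *arg)
    {
        /* Runs on cpu0 after the current vcpu has migrated there. */
    }

    if (current->processor != 0)
        ret = self_migrate_and_continue(0, do_work, arg);
    else
        do_work(arg);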
diff -r 3de9eb0bc3c1 xen/arch/x86/acpi/power.c
--- a/xen/arch/x86/acpi/power.c Thu Jul 19 14:35:47 2007 +0800
+++ b/xen/arch/x86/acpi/power.c Thu Jul 19 17:26:05 2007 +0800
@@ -119,19 +119,25 @@ int enter_state(u32 state)
if (state <= ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
return -EINVAL;
- /* Sync lazy state on ths cpu */
__sync_lazy_execstate();
pmprintk(XENLOG_INFO, "Flush lazy state\n");
if (!spin_trylock(&pm_lock))
return -EBUSY;
- freeze_domains();
-
- hvm_cpu_down();
-
pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n",
acpi_states[state]);
+
+ freeze_domains();
+
+ disable_nonboot_cpus();
+ if (num_online_cpus() != 1)
+ {
+ error = -EBUSY;
+ goto Enable_cpu;
+ }
+
+ hvm_cpu_down();
acpi_sleep_prepare(state);
@@ -169,9 +175,28 @@ int enter_state(u32 state)
if ( !hvm_cpu_up() )
BUG();
+ Enable_cpu:
+ enable_nonboot_cpus();
+
thaw_domains();
spin_unlock(&pm_lock);
return error;
+}
+
+static void acpi_power_off(void)
+{
+ pmprintk(XENLOG_INFO, "%s called\n", __FUNCTION__);
+ local_irq_disable();
+ /* Some SMP machines can only power off from the boot CPU */
+ acpi_enter_sleep_state(ACPI_STATE_S5);
+}
+
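+/* Completes the sleep request on the target cpu after migration; the
+ * return value is passed back to the calling guest in eax. */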
+static void enter_state_helper(void *data)
+{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+ struct acpi_sleep_info *sinfo = (struct acpi_sleep_info *)data;
+
+ regs->eax = enter_state(sinfo->sleep_state);
}
/*
@@ -187,6 +212,8 @@ int enter_state(u32 state)
*/
int acpi_enter_sleep(struct xenpf_enter_acpi_sleep *sleep)
{
+ int ret;
+
if (!IS_PRIV(current->domain) || !acpi_sinfo.pm1a_cnt)
return -EPERM;
@@ -215,7 +242,26 @@ int acpi_enter_sleep(struct xenpf_enter_
acpi_video_flags = sleep->video_flags;
saved_videomode = sleep->video_mode;
- return enter_state(acpi_sinfo.sleep_state);
+ /* acpi power off method */
+ if (acpi_sinfo.sleep_state == ACPI_STATE_S5)
+ {
+ acpi_power_off();
+ /* Shouldn't return */
+ while(1);
+ }
+
+ if (current->processor == 0)
+ {
+ pmprintk(XENLOG_INFO, "vcpu0 on cpu0, sleep directly\n");
+ ret = enter_state(acpi_sinfo.sleep_state);
+ }
+ else
+ {
+ pmprintk(XENLOG_INFO, "vcpu0 on cpu%d, migrate to cpu0\n",
+ current->processor);
+ ret = self_migrate_and_continue(0, enter_state_helper,
+ &acpi_sinfo);
+ }
+ return ret;
}
static int acpi_get_wake_status(void)
diff -r 3de9eb0bc3c1 xen/arch/x86/cpu/intel_cacheinfo.c
--- a/xen/arch/x86/cpu/intel_cacheinfo.c Thu Jul 19 14:35:47 2007 +0800
+++ b/xen/arch/x86/cpu/intel_cacheinfo.c Thu Jul 19 17:26:14 2007 +0800
@@ -17,7 +17,7 @@ struct _cache_table
};
/* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __initdata =
+static struct _cache_table cache_table[] __devinitdata =
{
{ 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
{ 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -60,7 +60,7 @@ static struct _cache_table cache_table[]
{ 0x00, 0, 0}
};
-unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
diff -r 3de9eb0bc3c1 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Thu Jul 19 14:35:47 2007 +0800
+++ b/xen/arch/x86/domain.c Thu Jul 19 14:35:48 2007 +0800
@@ -82,6 +82,7 @@ static void play_dead(void)
__cpu_disable();
/* This must be done before dead CPU ack */
cpu_exit_clear();
+ hvm_cpu_down();
wbinvd();
mb();
/* Ack it */
@@ -1361,6 +1362,56 @@ void sync_vcpu_execstate(struct vcpu *v)
/* Other cpus call __sync_lazy_execstate from flush ipi handler. */
flush_tlb_mask(v->vcpu_dirty_cpumask);
+}
+
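+/* Context saved across a self-migration so that self_migrate_helper()
+ * can restore it once func() has run on the new cpu. */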
+struct migrate_info {
+ void (*saved_schedule_tail) (struct vcpu *);
+ void (*func) (void *data);
+ void *data;
+ cpumask_t saved_affinity;
+};
+
+/* Helper to continue the previous flow on the new cpu, and to restore
+ * the necessary context on exit.
+ */
+static void self_migrate_helper(struct vcpu *v)
+{
+ struct migrate_info *info = v->arch.continue_info;
+
+ if (info->func)
+ info->func(info->data);
+
+ v->arch.schedule_tail = info->saved_schedule_tail;
+ v->cpu_affinity = info->saved_affinity;
+ xfree(info);
+
+ vcpu_set_affinity(v, &v->cpu_affinity);
+
+ /* In case vcpu_set_affinity() did not trigger another migration */
+ schedule_tail(v);
+}
+
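+/* Migrate the current vcpu to @cpu and continue execution there by
+ * calling func(data); the original affinity is restored on exit. */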
+int self_migrate_and_continue(int cpu,
+ void (*func)(void *data),
+ void *data)
+{
+ struct vcpu *v = current;
+ struct migrate_info *info;
+ cpumask_t mask = cpumask_of_cpu(cpu);
+
+ info = xmalloc(struct migrate_info);
+ if (!info)
+ return -ENOMEM;
+
+ info->func = func;
+ info->data = data;
+ info->saved_schedule_tail = v->arch.schedule_tail;
+ v->arch.schedule_tail = self_migrate_helper;
+
+ info->saved_affinity = v->cpu_affinity;
+ v->arch.continue_info = info;
+ vcpu_set_affinity(v, &mask);
+ return 0;
}
#define next_arg(fmt, args) ({ \
diff -r 3de9eb0bc3c1 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Thu Jul 19 14:35:47 2007 +0800
+++ b/xen/include/asm-x86/domain.h Thu Jul 19 14:35:48 2007 +0800
@@ -268,6 +268,9 @@ struct arch_vcpu
void (*ctxt_switch_from) (struct vcpu *);
void (*ctxt_switch_to) (struct vcpu *);
+ /* Record information required to continue execution after migration */
+ void *continue_info;
+
/* Bounce information for propagating an exception to guest OS. */
struct trap_bounce trap_bounce;
diff -r 3de9eb0bc3c1 xen/include/asm-x86/smp.h
--- a/xen/include/asm-x86/smp.h Thu Jul 19 14:35:47 2007 +0800
+++ b/xen/include/asm-x86/smp.h Thu Jul 19 14:35:48 2007 +0800
@@ -66,6 +66,8 @@ extern void enable_nonboot_cpus(void);
extern void enable_nonboot_cpus(void);
#else
static inline int cpu_is_offline(int cpu) {return 0;}
+static inline void disable_nonboot_cpus(void) {}
+static inline void enable_nonboot_cpus(void) {}
#endif
/*
diff -r 3de9eb0bc3c1 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Thu Jul 19 14:35:47 2007 +0800
+++ b/xen/include/xen/sched.h Thu Jul 19 14:35:48 2007 +0800
@@ -479,6 +479,10 @@ int vcpu_set_affinity(struct vcpu *v, cp
void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
+int self_migrate_and_continue(int cpu,
+ void (*func)(void *data),
+ void *data);
+
static inline void vcpu_unblock(struct vcpu *v)
{
if ( test_and_clear_bit(_VPF_blocked, &v->pause_flags) )