[Xen-devel] [PATCH 3/10][RESEND] Add HVM support

To: "Keir Fraser" <keir@xxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 3/10][RESEND] Add HVM support
From: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
Date: Wed, 11 Jul 2007 17:50:56 +0800
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
Add HVM hardware-feature suspend/resume for host S3 sleep.

On suspend, every domain except dom0 is paused and hvm_disable() shuts
the virtualization feature down on the CPU; for VMX this first runs the
new suspend_cpu() hook, which VMCLEARs every VMCS still active on that
CPU. On resume, the new resume_cpu() hook re-enables VMX (CR4.VMXE plus
VMXON on the host VMCS) before the paused domains are unpaused. To make
the per-CPU teardown possible, each CPU now tracks its active VMCSs on a
per-CPU list guarded by a spinlock.

Signed-off-by: Ke Yu <ke.yu@xxxxxxxxx>
Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
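
For reviewers, the resulting S3 entry/exit ordering is summarized below.
This is a condensed sketch of the enter_state() changes in this patch
(enter_state_sketch is a hypothetical name; error handling and the ACPI
sleep steps themselves are elided):

    /* Sketch: the suspend/resume ordering this patch establishes. */
    int enter_state_sketch(u32 state)
    {
        if ( !spin_trylock(&pm_lock) )
            return -EBUSY;

        freeze_domains();     /* pause every domain except dom0 */
        hvm_disable();        /* runs hvm_funcs.suspend_cpu() first; on
                                 VMX this VMCLEARs all VMCSs still active
                                 on this CPU, then disables the feature */

        /* ... device_power_down(), ACPI sleep, device_power_up() ... */

        if ( hvm_enabled )
            hvm_resume_cpu(); /* on VMX: set CR4.VMXE and VMXON again on
                                 the host VMCS tracked per CPU */

        thaw_domains();       /* unpause the domains paused above */
        spin_unlock(&pm_lock);
        return 0;
    }

The per-CPU bookkeeping that makes suspend_cpu() possible is the new
cpu_active_vmcs structure: vmx_load_vmcs() links a vCPU's VMCS onto the
list of the CPU it becomes active on, and vmx_suspend_cpu() drains that
list with __vmx_clear_vmcs() before the CPU goes down.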

diff -r a06fadc24250 xen/arch/x86/acpi/power.c
--- a/xen/arch/x86/acpi/power.c Tue Jul 10 14:23:46 2007 -0400
+++ b/xen/arch/x86/acpi/power.c Tue Jul 10 21:20:12 2007 -0400
@@ -80,10 +80,27 @@ static void device_power_up(void)
     console_resume();
 }
 
+static void freeze_domains(void)
+{
+    struct domain *d;
+
+    for_each_domain(d)
+        if (d->domain_id != 0)
+            domain_pause(d);
+}
+
+static void thaw_domains(void)
+{
+    struct domain *d;
+
+    for_each_domain(d)
+        if (d->domain_id != 0)
+            domain_unpause(d);
+}
+
 /* Main interface to do xen specific suspend/resume */
 int enter_state(u32 state)
 {
-    struct domain *d;
     unsigned long flags;
     int error;
 
@@ -97,9 +114,9 @@ int enter_state(u32 state)
     if (!spin_trylock(&pm_lock))
         return -EBUSY;
     
-    for_each_domain(d)
-        if (d->domain_id != 0)
-            domain_pause(d);
+    freeze_domains();
+
+    hvm_disable();
 
     pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n",
         acpi_states[state]);
@@ -133,13 +150,12 @@ int enter_state(u32 state)
  Done:
     local_irq_restore(flags);
 
-    for_each_domain(d)
-       if (d->domain_id!=0)
-           domain_unpause(d);
-
+    if (hvm_enabled)
+        hvm_resume_cpu();
+
+    thaw_domains();
     spin_unlock(&pm_lock);
     return error;
-
 }
 
 /*
diff -r a06fadc24250 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Jul 10 14:23:46 2007 -0400
+++ b/xen/arch/x86/hvm/hvm.c    Tue Jul 10 20:12:08 2007 -0400
@@ -79,7 +79,10 @@ void hvm_disable(void)
 void hvm_disable(void)
 {
     if ( hvm_enabled )
+    {
+        hvm_funcs.suspend_cpu();
         hvm_funcs.disable();
+    }
 }
 
 void hvm_stts(struct vcpu *v)
diff -r a06fadc24250 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Tue Jul 10 14:23:46 2007 -0400
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Tue Jul 10 21:19:54 2007 -0400
@@ -45,7 +45,7 @@ u32 vmx_vmentry_control __read_mostly;
 u32 vmx_vmentry_control __read_mostly;
 bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;
 
-static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
+DEFINE_PER_CPU(struct active_vmcs_info, cpu_active_vmcs);
 
 static u32 vmcs_revision_id __read_mostly;
 
@@ -161,10 +161,17 @@ void vmx_init_vmcs_config(void)
     BUG_ON(((vmx_msr_high >> 18) & 15) != 6);
 }
 
+void vmx_init_percpu_vmcs_info(void)
+{
+    current_vmcs = NULL;
+    current_host_vmcs = NULL;
+    INIT_LIST_HEAD(&this_cpu(cpu_active_vmcs).node);
+    spin_lock_init(&this_cpu(cpu_active_vmcs).lock);
+}
+
 static struct vmcs_struct *vmx_alloc_vmcs(void)
 {
     struct vmcs_struct *vmcs;
-
     if ( (vmcs = alloc_xenheap_page()) == NULL )
     {
         gdprintk(XENLOG_WARNING, "Failed to allocate VMCS.\n");
@@ -184,15 +191,26 @@ static void vmx_free_vmcs(struct vmcs_st
 
 static void __vmx_clear_vmcs(void *info)
 {
-    struct vcpu *v = info;
-
-    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
-
-    v->arch.hvm_vmx.active_cpu = -1;
-    v->arch.hvm_vmx.launched   = 0;
-
-    if ( v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs) )
-        this_cpu(current_vmcs) = NULL;
+    struct arch_vmx_struct *arch_vmx = info;
+    unsigned long flags;
+
+    /* Disable IRQs since this may be invoked in a nested (IPI) context. */
+    spin_lock_irqsave(&this_cpu(cpu_active_vmcs).lock, flags);
+    if (list_empty(&arch_vmx->list))
+    {
+        spin_unlock_irqrestore(&this_cpu(cpu_active_vmcs).lock, flags);
+        return;
+    }
+    list_del(&arch_vmx->list);
+    spin_unlock_irqrestore(&this_cpu(cpu_active_vmcs).lock, flags);
+
+    __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+
+    arch_vmx->active_cpu = -1;
+    arch_vmx->launched   = 0;
+
+    if ( arch_vmx->vmcs == current_vmcs )
+        current_vmcs = NULL;
 }
 
 static void vmx_clear_vmcs(struct vcpu *v)
@@ -203,16 +221,43 @@ static void vmx_clear_vmcs(struct vcpu *
         return;
 
     if ( cpu == smp_processor_id() )
-        return __vmx_clear_vmcs(v);
-
-    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
+        return __vmx_clear_vmcs(&v->arch.hvm_vmx);
+
+    on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs,
+                     &v->arch.hvm_vmx, 1, 1);
 }
 
 static void vmx_load_vmcs(struct vcpu *v)
 {
+    unsigned long flags;
+
     __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
     v->arch.hvm_vmx.active_cpu = smp_processor_id();
-    this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;
+    current_vmcs = v->arch.hvm_vmx.vmcs;
+
+    spin_lock_irqsave(&this_cpu(cpu_active_vmcs).lock, flags);
+    list_add(&v->arch.hvm_vmx.list, &this_cpu(cpu_active_vmcs).node);
+    spin_unlock_irqrestore(&this_cpu(cpu_active_vmcs).lock, flags);
+}
+
+/* Clear all VMCSs active on this CPU before it goes down. */
+int vmx_suspend_cpu(void)
+{
+    struct arch_vmx_struct *arch_vmx;
+    struct list_head *head = &this_cpu(cpu_active_vmcs).node;
+    int cpu = smp_processor_id();
+
+    while (!list_empty(head))
+    {
+        arch_vmx = list_entry(head->next, struct arch_vmx_struct, list);
+
+        spin_lock(&arch_vmx->vmcs_lock);
+        if (arch_vmx->active_cpu == cpu)
+            __vmx_clear_vmcs(arch_vmx);
+        spin_unlock(&arch_vmx->vmcs_lock);
+    }
+
+    return 0;
 }
 
 void vmx_vmcs_enter(struct vcpu *v)
@@ -456,7 +501,8 @@ int vmx_create_vmcs(struct vcpu *v)
         if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
             return -ENOMEM;
 
-        __vmx_clear_vmcs(v);
+        INIT_LIST_HEAD(&v->arch.hvm_vmx.list);
+        __vmx_clear_vmcs(&v->arch.hvm_vmx);
     }
 
     construct_vmcs(v);
@@ -497,7 +543,7 @@ void vmx_do_resume(struct vcpu *v)
 
     if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
     {
-        if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
+        if ( v->arch.hvm_vmx.vmcs != current_vmcs )
             vmx_load_vmcs(v);
     }
     else
diff -r a06fadc24250 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Jul 10 14:23:46 2007 -0400
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Jul 10 20:10:27 2007 -0400
@@ -55,6 +55,8 @@ enum handler_return { HNDL_done, HNDL_un
 
 char *vmx_msr_bitmap;
 
+static int vmx_resume_cpu(void);
+
 static void vmx_ctxt_switch_from(struct vcpu *v);
 static void vmx_ctxt_switch_to(struct vcpu *v);
 
@@ -1271,7 +1273,9 @@ static struct hvm_function_table vmx_fun
     .inject_exception     = vmx_inject_exception,
     .init_ap_context      = vmx_init_ap_context,
     .init_hypercall_page  = vmx_init_hypercall_page,
-    .event_injection_faulted = vmx_event_injection_faulted
+    .event_injection_faulted = vmx_event_injection_faulted,
+    .suspend_cpu          = vmx_suspend_cpu,
+    .resume_cpu           = vmx_resume_cpu,
 };
 
 int start_vmx(void)
@@ -1308,6 +1312,8 @@ int start_vmx(void)
 
     vmx_init_vmcs_config();
 
+    vmx_init_percpu_vmcs_info();
+
     if ( smp_processor_id() == 0 )
         setup_vmcs_dump();
 
@@ -1325,6 +1331,8 @@ int start_vmx(void)
         vmx_free_host_vmcs(vmcs);
         return 0;
     }
+
+    current_host_vmcs = vmcs;
 
     vmx_save_host_msrs();
 
@@ -3160,6 +3168,25 @@ asmlinkage void vmx_trace_vmentry(void)
     HVMTRACE_0D(VMENTRY, v);
 }
 
+/* Re-enable the VMX feature on this CPU after resume. */
+static int vmx_resume_cpu(void)
+{
+    /* mmu_cr4_features may be changed by hvm_disable() */
+    if (!(read_cr4() & X86_CR4_VMXE))
+        set_in_cr4(X86_CR4_VMXE);
+
+    if ( __vmxon(virt_to_maddr(current_host_vmcs)) )
+    {
+        clear_in_cr4(X86_CR4_VMXE);
+        printk("VMXON failed\n");
+        vmx_free_host_vmcs(current_host_vmcs);
+        return 0;
+    }
+
+    printk("VMXON is done\n");
+    return 1;
+}
+
 /*
  * Local variables:
  * mode: C
diff -r a06fadc24250 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Tue Jul 10 14:23:46 2007 -0400
+++ b/xen/include/asm-x86/hvm/hvm.h     Tue Jul 10 19:43:41 2007 -0400
@@ -160,6 +160,10 @@ struct hvm_function_table {
     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
 
     int  (*event_injection_faulted)(struct vcpu *v);
+
+    int  (*suspend_cpu)(void);
+
+    int  (*resume_cpu)(void);
 };
 
 extern struct hvm_function_table hvm_funcs;
@@ -316,4 +320,26 @@ static inline int hvm_event_injection_fa
 /* These exceptions must always be intercepted. */
 #define HVM_TRAP_MASK (1U << TRAP_machine_check)
 
+static inline int
+hvm_suspend_cpu(void)
+{
+    int ret = 1;
+
+    if (hvm_funcs.suspend_cpu)
+        ret = hvm_funcs.suspend_cpu();
+
+    return ret;
+}
+
+static inline int
+hvm_resume_cpu(void)
+{
+    int ret = 1;
+
+    if (hvm_funcs.resume_cpu)
+        ret = hvm_funcs.resume_cpu();
+    
+    return ret;
+}
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
diff -r a06fadc24250 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Jul 10 14:23:46 2007 -0400
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Tue Jul 10 20:09:33 2007 -0400
@@ -28,11 +28,24 @@ extern void vmcs_dump_vcpu(void);
 extern void vmcs_dump_vcpu(void);
 extern void vmx_init_vmcs_config(void);
 extern void setup_vmcs_dump(void);
+extern int vmx_suspend_cpu(void);
+extern void vmx_init_percpu_vmcs_info(void);
 
 struct vmcs_struct {
     u32 vmcs_revision_id;
     unsigned char data [0]; /* vmcs size is read from MSR */
 };
+
+/* Record per-cpu vmcs information */
+struct active_vmcs_info {
+    struct vmcs_struct *host;
+    struct vmcs_struct *curr;
+    struct list_head   node;  /* Active VMCS on this cpu */
+    spinlock_t         lock;
+};
+DECLARE_PER_CPU(struct active_vmcs_info, cpu_active_vmcs);
+#define current_vmcs           this_cpu(cpu_active_vmcs).curr
+#define current_host_vmcs      this_cpu(cpu_active_vmcs).host
 
 enum {
     VMX_INDEX_MSR_LSTAR = 0,
@@ -80,6 +93,7 @@ struct arch_vmx_struct {
 #endif
     unsigned long        efer;
 
+    struct list_head     list; /* Link to active cpu */
     /* Following fields are all specific to vmxassist. */
     unsigned long        vmxassist_enabled:1;
     unsigned long        irqbase_mode:1;

Attachment: hvm_context.patch
