To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [HVM] Save/restore cleanups 02: VCPU
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 20 Jan 2007 09:10:14 -0800
Delivery-date: Sat, 20 Jan 2007 09:10:20 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1169291859 0
# Node ID dccdc3ee0efca7894870e1699233eca1d9bfc463
# Parent  56228886421d877623cdd27e68d6e5b6d1592946
[HVM] Save/restore cleanups 02: VCPU
Save/restore vcpu state through the streaming interface
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            |    4 ++
 xen/arch/x86/hvm/vmx/vmx.c        |   66 +++++++++++++++++++++-----------------
 xen/include/asm-x86/hvm/hvm.h     |   34 ++-----------------
 xen/include/public/arch-x86/xen.h |   64 ------------------------------------
 xen/include/public/hvm/save.h     |    3 +
 5 files changed, 48 insertions(+), 123 deletions(-)
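
For readers of the changelog, a minimal sketch (not part of the patch) of how a per-vcpu record plugs into the streaming interface this series introduces. The hvm_hw_example record and the example_* names are hypothetical; hvm_register_savevm, hvm_put_struct and hvm_get_struct are the helpers actually used in the hunks below, with the signatures visible there. Assumes the usual Xen headers (the patch itself adds <public/hvm/save.h> where needed).

/* Sketch only: a hypothetical per-vcpu record saved through the new
 * streaming interface.  Names marked "example" are illustrative. */

struct hvm_hw_example {
    uint64_t some_register;           /* whatever state needs saving */
};

static void example_save(hvm_domain_context_t *h, void *opaque)
{
    struct vcpu *v = opaque;
    struct hvm_hw_example ctxt;

    ctxt.some_register = v->vcpu_id;  /* placeholder: read real state from v */
    hvm_put_struct(h, &ctxt);         /* append the record to the stream */
}

static int example_load(hvm_domain_context_t *h, void *opaque, int version)
{
    struct vcpu *v = opaque;
    struct hvm_hw_example ctxt;

    if ( version != 1 )               /* reject records we don't understand */
        return -EINVAL;

    hvm_get_struct(h, &ctxt);         /* pull the record back off the stream */
    (void)v; (void)ctxt;              /* placeholder: write ctxt back into v */
    return 0;
}

/* Registered once per vcpu, e.g. from hvm_vcpu_initialise(): */
static void example_register(struct vcpu *v)
{
    hvm_register_savevm(v->domain, "xen_hvm_example", v->vcpu_id, 1,
                        example_save, example_load, (void *)v);
}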

diff -r 56228886421d -r dccdc3ee0efc xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Sat Jan 20 11:17:38 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Sat Jan 20 11:17:39 2007 +0000
@@ -173,6 +173,10 @@ int hvm_vcpu_initialise(struct vcpu *v)
 int hvm_vcpu_initialise(struct vcpu *v)
 {
     int rc;
+
+    hvm_register_savevm(v->domain, "xen_hvm_cpu", v->vcpu_id, 1,
+                        hvm_funcs.save_cpu_ctxt, hvm_funcs.load_cpu_ctxt, 
+                        (void *)v);
 
     if ( (rc = vlapic_init(v)) != 0 )
         return rc;
diff -r 56228886421d -r dccdc3ee0efc xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Sat Jan 20 11:17:38 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Sat Jan 20 11:17:39 2007 +0000
@@ -47,6 +47,7 @@
 #include <asm/hvm/vlapic.h>
 #include <asm/x86_emulate.h>
 #include <asm/hvm/vpt.h>
+#include <public/hvm/save.h>
 
 static void vmx_ctxt_switch_from(struct vcpu *v);
 static void vmx_ctxt_switch_to(struct vcpu *v);
@@ -364,7 +365,7 @@ static inline void __restore_debug_regis
 }
 
 static int __get_instruction_length(void);
-int vmx_vmcs_save(struct vcpu *v, struct vmcs_data *c)
+int vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     unsigned long inst_len;
 
@@ -443,7 +444,7 @@ int vmx_vmcs_save(struct vcpu *v, struct
     return 1;
 }
 
-int vmx_vmcs_restore(struct vcpu *v, struct vmcs_data *c)
+int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     unsigned long mfn, old_base_mfn;
 
@@ -590,9 +591,8 @@ static void dump_msr_state(struct vmx_ms
 }
 #endif
         
-void vmx_save_cpu_state(struct vcpu *v, struct hvmcpu_context *ctxt)
-{
-    struct vmcs_data *data = &ctxt->data;
+void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
+{
     struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
     unsigned long guest_flags = guest_state->flags;
     int i = 0;
@@ -603,14 +603,15 @@ void vmx_save_cpu_state(struct vcpu *v, 
     data->flags = guest_flags;
     for (i = 0; i < VMX_MSR_COUNT; i++)
         data->msr_items[i] = guest_state->msrs[i];
-
+    
+    data->tsc = hvm_get_guest_time(v);
+    
     dump_msr_state(guest_state);
 }
 
-void vmx_load_cpu_state(struct vcpu *v, struct hvmcpu_context *ctxt)
+void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
     int i = 0;
-    struct vmcs_data *data = &ctxt->data;
     struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
 
     /* restore msrs */
@@ -625,35 +626,42 @@ void vmx_load_cpu_state(struct vcpu *v, 
 
     v->arch.hvm_vmx.vmxassist_enabled = data->vmxassist_enabled;
 
+    hvm_set_guest_time(v, data->tsc);
+
     dump_msr_state(guest_state);
 }
 
-void vmx_save_vmcs_ctxt(struct vcpu *v, struct hvmcpu_context *ctxt)
-{
-    struct vmcs_data *data = &ctxt->data;
-
-    vmx_save_cpu_state(v, ctxt);
-
+
+void vmx_save_vmcs_ctxt(hvm_domain_context_t *h, void *opaque)
+{
+    struct vcpu *v = opaque;
+    struct hvm_hw_cpu ctxt;
+
+    vmx_save_cpu_state(v, &ctxt);
     vmx_vmcs_enter(v);
-
-    vmx_vmcs_save(v, data);
-
+    vmx_vmcs_save(v, &ctxt);
     vmx_vmcs_exit(v);
 
-}
-
-void vmx_load_vmcs_ctxt(struct vcpu *v, struct hvmcpu_context *ctxt)
-{
-    vmx_load_cpu_state(v, ctxt);
-
-    if (vmx_vmcs_restore(v, &ctxt->data)) {
+    hvm_put_struct(h, &ctxt);
+}
+
+int vmx_load_vmcs_ctxt(hvm_domain_context_t *h, void *opaque, int version)
+{
+    struct vcpu *v = opaque;
+    struct hvm_hw_cpu ctxt;
+
+    if (version != 1)
+        return -EINVAL;
+
+    hvm_get_struct(h, &ctxt);
+    vmx_load_cpu_state(v, &ctxt);
+    if (vmx_vmcs_restore(v, &ctxt)) {
         printk("vmx_vmcs restore failed!\n");
         domain_crash(v->domain);
-    }
-
-    /* only load vmcs once */
-    ctxt->valid = 0;
-
+        return -EINVAL;
+    }
+
+    return 0;
 }
 
 /*
diff -r 56228886421d -r dccdc3ee0efc xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Sat Jan 20 11:17:38 2007 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Sat Jan 20 11:17:39 2007 +0000
@@ -22,6 +22,8 @@
 #define __ASM_X86_HVM_HVM_H__
 
 #include <asm/x86_emulate.h>
+#include <public/domctl.h>
+#include <public/hvm/save.h>
 
 /* 
  * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
@@ -81,10 +83,8 @@ struct hvm_function_table {
         struct vcpu *v, struct cpu_user_regs *r);
 
     /* save and load hvm guest cpu context for save/restore */
-    void (*save_cpu_ctxt)(
-        struct vcpu *v, struct hvmcpu_context *ctxt);
-    void (*load_cpu_ctxt)(
-        struct vcpu *v, struct hvmcpu_context *ctxt);
+    void (*save_cpu_ctxt)(hvm_domain_context_t *h, void *opaque);
+    int (*load_cpu_ctxt)(hvm_domain_context_t *h, void *opaque, int version);
 
     /*
      * Examine specifics of the guest state:
@@ -167,32 +167,6 @@ void hvm_set_guest_time(struct vcpu *v, 
 void hvm_set_guest_time(struct vcpu *v, u64 gtime);
 u64 hvm_get_guest_time(struct vcpu *v);
 
-static inline void
-hvm_save_cpu_context(
-        struct vcpu *v, struct hvmcpu_context *ctxt)
-{
-    hvm_funcs.save_cpu_ctxt(v, ctxt);
-
-    /* save guest time */
-    ctxt->gtime = hvm_get_guest_time(v);
-
-    /* set valid flag to recover whole vmcs when restore */
-    ctxt->valid = 0x55885588;
-}
-
-static inline void
-hvm_load_cpu_context(
-        struct vcpu *v, struct hvmcpu_context *ctxt)
-{
-    if ( ctxt->valid != 0x55885588)
-        return;
-
-    hvm_funcs.load_cpu_ctxt(v, ctxt);
-
-    /* restore guest time*/
-    hvm_set_guest_time(v, ctxt->gtime);
-}
-
 static inline int
 hvm_paging_enabled(struct vcpu *v)
 {
diff -r 56228886421d -r dccdc3ee0efc xen/include/public/arch-x86/xen.h
--- a/xen/include/public/arch-x86/xen.h Sat Jan 20 11:17:38 2007 +0000
+++ b/xen/include/public/arch-x86/xen.h Sat Jan 20 11:17:39 2007 +0000
@@ -107,70 +107,6 @@ DEFINE_XEN_GUEST_HANDLE(trap_info_t);
 DEFINE_XEN_GUEST_HANDLE(trap_info_t);
 
 typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
-
-/*
- * World vmcs state
- */
-struct vmcs_data {
-    uint64_t  eip;        /* execution pointer */
-    uint64_t  esp;        /* stack pointer */
-    uint64_t  eflags;     /* flags register */
-    uint64_t  cr0;
-    uint64_t  cr3;        /* page table directory */
-    uint64_t  cr4;
-    uint32_t  idtr_limit; /* idt */
-    uint64_t  idtr_base;
-    uint32_t  gdtr_limit; /* gdt */
-    uint64_t  gdtr_base;
-    uint32_t  cs_sel;     /* cs selector */
-    uint32_t  cs_limit;
-    uint64_t  cs_base;
-    uint32_t  cs_arbytes;
-    uint32_t  ds_sel;     /* ds selector */
-    uint32_t  ds_limit;
-    uint64_t  ds_base;
-    uint32_t  ds_arbytes;
-    uint32_t  es_sel;     /* es selector */
-    uint32_t  es_limit;
-    uint64_t  es_base;
-    uint32_t  es_arbytes;
-    uint32_t  ss_sel;     /* ss selector */
-    uint32_t  ss_limit;
-    uint64_t  ss_base;
-    uint32_t  ss_arbytes;
-    uint32_t  fs_sel;     /* fs selector */
-    uint32_t  fs_limit;
-    uint64_t  fs_base;
-    uint32_t  fs_arbytes;
-    uint32_t  gs_sel;     /* gs selector */
-    uint32_t  gs_limit;
-    uint64_t  gs_base;
-    uint32_t  gs_arbytes;
-    uint32_t  tr_sel;     /* task selector */
-    uint32_t  tr_limit;
-    uint64_t  tr_base;
-    uint32_t  tr_arbytes;
-    uint32_t  ldtr_sel;   /* ldtr selector */
-    uint32_t  ldtr_limit;
-    uint64_t  ldtr_base;
-    uint32_t  ldtr_arbytes;
-    uint32_t  sysenter_cs;
-    uint64_t  sysenter_esp;
-    uint64_t  sysenter_eip;
-    /* msr for em64t */
-    uint64_t shadow_gs;
-    uint64_t flags;
-    /* same size as VMX_MSR_COUNT */
-    uint64_t msr_items[6];
-    uint64_t vmxassist_enabled;
-};
-typedef struct vmcs_data vmcs_data_t;
-
-struct hvmcpu_context {
-    uint32_t valid;
-    struct vmcs_data data;
-    uint64_t gtime;
-};
 
 /*
  * The following is all CPU context. Note that the fpu_ctxt block is filled 
diff -r 56228886421d -r dccdc3ee0efc xen/include/public/hvm/save.h
--- a/xen/include/public/hvm/save.h     Sat Jan 20 11:17:38 2007 +0000
+++ b/xen/include/public/hvm/save.h     Sat Jan 20 11:17:39 2007 +0000
@@ -107,6 +107,9 @@ struct hvm_hw_cpu {
     /* same size as VMX_MSR_COUNT */
     uint64_t msr_items[6];
     uint64_t vmxassist_enabled;
+
+    /* guest's idea of what rdtsc() would return */
+    uint64_t tsc;
 };
 
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
