|   xen-devel
[Xen-devel]  RFC: Nested VMX patch series 05: vmptrld 
| To: | "Dong, Eddie" <eddie.dong@xxxxxxxxx>, Tim Deegan <Tim.Deegan@xxxxxxxxxx>, 	Keir Fraser <keir@xxxxxxx> |  
| Subject: | [Xen-devel]  RFC: Nested VMX patch series 05: vmptrld |  
| From: | "Dong, Eddie" <eddie.dong@xxxxxxxxx> |  
| Date: | Wed, 1 Jun 2011 11:57:01 +0800 |  
| Accept-language: | en-US |  
| Acceptlanguage: | en-US |  
| Cc: | "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>, "Dong,	Eddie" <eddie.dong@xxxxxxxxx>, "He, Qing" <qing.he@xxxxxxxxx> |  
| Delivery-date: | Tue, 31 May 2011 20:59:26 -0700 |  
| Envelope-to: | www-data@xxxxxxxxxxxxxxxxxxx |  
| List-help: | <mailto:xen-devel-request@lists.xensource.com?subject=help> |  
| List-id: | Xen developer discussion <xen-devel.lists.xensource.com> |  
| List-post: | <mailto:xen-devel@lists.xensource.com> |  
| List-subscribe: | <http://lists.xensource.com/mailman/listinfo/xen-devel>,	<mailto:xen-devel-request@lists.xensource.com?subject=subscribe> |  
| List-unsubscribe: | <http://lists.xensource.com/mailman/listinfo/xen-devel>,	<mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe> |  
| References: | <osstest-7468-mainreport@xxxxxxx> |  
| Sender: | xen-devel-bounces@xxxxxxxxxxxxxxxxxxx |  
| Thread-index: | AcwgAhjDwUdZ/2BOTBqtK+IA8ti/WgAC9edgAAAdViAAAA3MIAAAF9kwAAAdbWAAABYxcA== |  
| Thread-topic: | [Xen-devel]  RFC: Nested VMX patch series 05: vmptrld |  
| 
Thanks, Eddie
        Signed-off-by: Qing He <qing.he@xxxxxxxxx>
        Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
diff -r c7820e886afc xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 01 08:48:22 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 01 09:18:29 2011 +0800
@@ -2445,11 +2445,15 @@
             update_guest_eip();
         break;
 
+    case EXIT_REASON_VMPTRLD:
+        if ( nvmx_handle_vmptrld(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
     case EXIT_REASON_VMCLEAR:
     case EXIT_REASON_VMLAUNCH:
-    case EXIT_REASON_VMPTRLD:
     case EXIT_REASON_VMPTRST:
     case EXIT_REASON_VMREAD:
     case EXIT_REASON_VMRESUME:
diff -r c7820e886afc xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Wed Jun 01 08:48:22 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Wed Jun 01 09:18:29 2011 +0800
@@ -410,6 +410,42 @@
     regs->eflags = eflags;
 }
 
+static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    unsigned long gpa;
+    unsigned long mfn;
+    p2m_type_t p2mt;
+
+    if ( vmcs_reg == IO_BITMAP_A )
+    {
+        if (nvmx->iobitmap[0]) {
+            unmap_domain_page_global(nvmx->iobitmap[0]);
+        }
+        gpa = __get_vvmcs(nvcpu->nv_vvmcx, IO_BITMAP_A);
+        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
+                              gpa >> PAGE_SHIFT, &p2mt));
+        nvmx->iobitmap[0] = map_domain_page_global(mfn);
+    }
+    else if ( vmcs_reg == IO_BITMAP_B )
+    {
+        if (nvmx->iobitmap[1]) {
+            unmap_domain_page_global(nvmx->iobitmap[1]);
+        }
+        gpa = __get_vvmcs(nvcpu->nv_vvmcx, IO_BITMAP_B);
+        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
+                               gpa >> PAGE_SHIFT, &p2mt));
+        nvmx->iobitmap[1] = map_domain_page_global(mfn);
+    }
+}
+
+static inline void map_io_bitmap_all(struct vcpu *v)
+{
+   __map_io_bitmap (v, IO_BITMAP_A);
+   __map_io_bitmap (v, IO_BITMAP_B);
+}
+
 /*
  * VMX instructions handling
  */
@@ -418,6 +454,7 @@
 {
     struct vcpu *v=current;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     struct vmx_inst_decoded decode;
     unsigned long gpa = 0;
     int rc;
@@ -426,11 +463,25 @@
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    if ( nvmx->vmxon_region_pa )
+        gdprintk(XENLOG_WARNING, 
+                 "vmxon again: orig %lx new %lx\n",
+                 nvmx->vmxon_region_pa, gpa);
+
     nvmx->vmxon_region_pa = gpa;
+
+    /*
+     * `fork' the host vmcs to shadow_vmcs
+     * vmcs_lock is not needed since we are on current
+     */
+    nvcpu->nv_n1vmcx = v->arch.hvm_vmx.vmcs;
+    __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+    memcpy(nvcpu->nv_n2vmcx, v->arch.hvm_vmx.vmcs, PAGE_SIZE);
+    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+    v->arch.hvm_vmx.launched = 0;
     vmreturn(regs, VMSUCCEED);
 
     return X86EMUL_OKAY;
-
 }
 
 int nvmx_handle_vmxoff(struct cpu_user_regs *regs)
@@ -449,3 +500,39 @@
     return X86EMUL_OKAY;
 }
 
+int nvmx_handle_vmptrld(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    struct vmx_inst_decoded decode;
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    unsigned long gpa = 0;
+    unsigned long mfn;
+    p2m_type_t p2mt;
+    int rc;
+
+    rc = decode_vmx_inst(regs, &decode, &gpa, 0);
+    if ( rc != X86EMUL_OKAY )
+        return rc;
+
+    if ( gpa == nvmx->vmxon_region_pa || gpa & 0xfff )
+    {
+        vmreturn(regs, VMFAIL_INVALID);
+        goto out;
+    }
+
+    if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
+    {
+        mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(v->domain),
+                               gpa >> PAGE_SHIFT, &p2mt));
+        nvcpu->nv_vvmcx = map_domain_page_global(mfn);
+        nvcpu->nv_vvmcxaddr = gpa;
+        map_io_bitmap_all (v);
+    }
+
+    vmreturn(regs, VMSUCCEED);
+
+out:
+    return X86EMUL_OKAY;
+}
+
diff -r c7820e886afc xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h        Wed Jun 01 08:48:22 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h        Wed Jun 01 09:18:29 2011 +0800
@@ -107,5 +107,8 @@
 u64 __get_vvmcs(void *vvmcs, u32 vmcs_encoding);
 void __set_vvmcs(void *vvmcs, u32 vmcs_encoding, u64 val);
 
+void nvmx_destroy_vmcs(struct vcpu *v);
+int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
+
 #endif /* __ASM_X86_HVM_VVMX_H__ */
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
 | 
 
| <Prev in Thread] | Current Thread | [Next in Thread> |  | 
[Xen-devel] [xen-4.1-testing test] 7468: tolerable FAIL - PUSHED, xen . org
[Xen-devel] pre-cleanup1  for nested VMX, Dong, Eddie
[Xen-devel] pre-cleanup2  for nested VMX, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 01: data structure, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 02: wrap APIs, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 03: vmxon_off, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 05: vmptrld,
Dong, Eddie <=
[Xen-devel]  RFC: Nested VMX patch series 04: virtual VMCS	structure and APIs, Dong, Eddie
RE: [Xen-devel]  RFC: Nested VMX patch series 05: vmptrst, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 07: vmclear, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 08: vmwrite, Dong, Eddie
RE: [Xen-devel]  RFC: Nested VMX patch series 09: vmread, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 10: vmcs switching API, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 11: vmresume, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 12: shadow vmcs control, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 12: real VMCS switch, Dong, Eddie
[Xen-devel]  RFC: Nested VMX patch series 14: exceptions, Dong, Eddie
 |  |  |