WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 09 of 20] Emulation of guest VMCLEAR

To: Tim.Deegan@xxxxxxxxxx
Subject: [Xen-devel] [PATCH 09 of 20] Emulation of guest VMCLEAR
From: Eddie Dong <eddie.dong@xxxxxxxxx>
Date: Thu, 09 Jun 2011 16:25:14 +0800
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Thu, 09 Jun 2011 01:40:49 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1307607905@xxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1307607905@xxxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Eddie Dong <eddie.dong@xxxxxxxxx>
# Date 1307607849 -28800
# Node ID 642c9ab2a829e8f1e5997bd8ea740a0306cb5521
# Parent  002f9d4c15841dc0b98b0427a480202b7117fdcd
Emulation of guest VMCLEAR

Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>

diff -r 002f9d4c1584 -r 642c9ab2a829 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 09 16:24:09 2011 +0800
@@ -2444,6 +2444,11 @@ asmlinkage void vmx_vmexit_handler(struc
             update_guest_eip();
         break;
 
+    case EXIT_REASON_VMCLEAR:
+        if ( nvmx_handle_vmclear(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
     case EXIT_REASON_VMPTRLD:
         if ( nvmx_handle_vmptrld(regs) == X86EMUL_OKAY )
             update_guest_eip();
@@ -2456,7 +2461,6 @@ asmlinkage void vmx_vmexit_handler(struc
 
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
-    case EXIT_REASON_VMCLEAR:
     case EXIT_REASON_VMLAUNCH:
     case EXIT_REASON_VMREAD:
     case EXIT_REASON_VMRESUME:
diff -r 002f9d4c1584 -r 642c9ab2a829 xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Thu Jun 09 16:24:09 2011 +0800
@@ -26,6 +26,8 @@
 #include <asm/hvm/vmx/vmx.h>
 #include <asm/hvm/vmx/vvmx.h>
 
+static void nvmx_purge_vvmcs(struct vcpu *v);
+
 int nvmx_vcpu_initialise(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -53,6 +55,7 @@ void nvmx_vcpu_destroy(struct vcpu *v)
 {
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
 
+    nvmx_purge_vvmcs(v);
     if ( nvcpu->nv_n2vmcx ) {
         __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
         free_xenheap_page(nvcpu->nv_n2vmcx);
@@ -352,6 +355,14 @@ static void vmreturn(struct cpu_user_reg
     regs->eflags = eflags;
 }
 
+static void __clear_current_vvmcs(struct vcpu *v)
+{
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    if ( nvcpu->nv_n2vmcx )
+        __vmpclear(virt_to_maddr(nvcpu->nv_n2vmcx));
+}
+
 static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
@@ -371,6 +382,25 @@ static inline void map_io_bitmap_all(str
    __map_io_bitmap (v, IO_BITMAP_B);
 }
 
+static void nvmx_purge_vvmcs(struct vcpu *v)
+{
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    int i;
+
+    __clear_current_vvmcs(v);
+    if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
+        hvm_unmap_guest_frame(nvcpu->nv_vvmcx);
+    nvcpu->nv_vvmcx = NULL; /* was '==': a no-op comparison left a dangling map */
+    nvcpu->nv_vvmcxaddr = VMCX_EADDR;
+    for ( i = 0; i < 2; i++ ) {
+        if ( nvmx->iobitmap[i] ) {
+            hvm_unmap_guest_frame(nvmx->iobitmap[i]);
+            nvmx->iobitmap[i] = NULL;
+        }
+    }
+}
+
 /*
  * VMX instructions handling
  */
@@ -419,6 +449,7 @@ int nvmx_handle_vmxoff(struct cpu_user_r
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    nvmx_purge_vvmcs(v);
     nvmx->vmxon_region_pa = 0;
 
     vmreturn(regs, VMSUCCEED);
@@ -443,6 +474,9 @@ int nvmx_handle_vmptrld(struct cpu_user_
         goto out;
     }
 
+    if ( nvcpu->nv_vvmcxaddr != gpa )
+        nvmx_purge_vvmcs(v);
+
     if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
     {
         nvcpu->nv_vvmcx = hvm_map_guest_frame_rw (gpa >> PAGE_SHIFT);
@@ -478,3 +512,39 @@ int nvmx_handle_vmptrst(struct cpu_user_
     return X86EMUL_OKAY;
 }
 
+int nvmx_handle_vmclear(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    struct vmx_inst_decoded decode;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    unsigned long gpa = 0;
+    int rc;
+
+    rc = decode_vmx_inst(regs, &decode, &gpa, 0);
+    if ( rc != X86EMUL_OKAY )
+        return rc;
+
+    /* A VMCS address must be 4KB-aligned. */
+    if ( gpa & 0xfff )
+    {
+        vmreturn(regs, VMFAIL_INVALID);
+        goto out;
+    }
+
+    if ( gpa != nvcpu->nv_vvmcxaddr && nvcpu->nv_vvmcxaddr != VMCX_EADDR )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "vmclear gpa %lx not the same as current vmcs %lx\n",
+                 gpa, nvcpu->nv_vvmcxaddr);
+        vmreturn(regs, VMSUCCEED);
+        goto out;
+    }
+    if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
+        __set_vvmcs(nvcpu->nv_vvmcx, NVMX_LAUNCH_STATE, 0);
+    nvmx_purge_vvmcs(v);
+
+    vmreturn(regs, VMSUCCEED);
+
+out:
+    return X86EMUL_OKAY;
+}
+
diff -r 002f9d4c1584 -r 642c9ab2a829 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Jun 09 16:24:09 2011 +0800
@@ -381,6 +381,8 @@ enum vmcs_field {
     HOST_SYSENTER_EIP               = 0x00006c12,
     HOST_RSP                        = 0x00006c14,
     HOST_RIP                        = 0x00006c16,
+    /* A virtual VMCS field used for nestedvmx only */
+    NVMX_LAUNCH_STATE               = 0x00006c20,
 };
 
 #define VMCS_VPID_WIDTH 16
diff -r 002f9d4c1584 -r 642c9ab2a829 xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h        Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h        Thu Jun 09 16:24:09 2011 +0800
@@ -155,6 +155,7 @@ void __set_vvmcs(void *vvmcs, u32 vmcs_e
 void nvmx_destroy_vmcs(struct vcpu *v);
 int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
 int nvmx_handle_vmptrst(struct cpu_user_regs *regs);
+int nvmx_handle_vmclear(struct cpu_user_regs *regs);
 
 #endif /* __ASM_X86_HVM_VVMX_H__ */
 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

[Prev in Thread] Current Thread [Next in Thread]