RE: [Xen-devel] RFC: Nested VMX patch series 09: vmread

To: "Dong, Eddie" <eddie.dong@xxxxxxxxx>, Tim Deegan <Tim.Deegan@xxxxxxxxxx>, Keir Fraser <keir@xxxxxxx>
Subject: RE: [Xen-devel] RFC: Nested VMX patch series 09: vmread
From: "Dong, Eddie" <eddie.dong@xxxxxxxxx>
Date: Wed, 1 Jun 2011 12:00:16 +0800
Accept-language: en-US
Cc: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>, "Dong, Eddie" <eddie.dong@xxxxxxxxx>, "He, Qing" <qing.he@xxxxxxxxx>

Thx, Eddie

        Signed-off-by: Qing He <qing.he@xxxxxxxxx>
        Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>


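This patch emulates VMREAD for the L1 guest: EXIT_REASON_VMREAD is taken out of the block of still-unhandled VMX exit reasons and routed to a new handler, nvmx_handle_vmread(), which decodes the instruction, fetches the requested field from the virtual VMCS with __get_vvmcs(), and writes the result to the destination register or memory operand. A CASE_SET_REG macro and reg_write() helper are added alongside the existing CASE_GET_REG/reg_read() to cover the register-destination case.
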
diff -r 4838d3cb1e85 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 01 09:40:14 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 01 09:40:43 2011 +0800
@@ -2460,6 +2460,11 @@
             update_guest_eip();
         break;
 
+    case EXIT_REASON_VMREAD:
+        if ( nvmx_handle_vmread(regs) == X86EMUL_OKAY )
+            update_guest_eip();
+        break;
+
     case EXIT_REASON_VMWRITE:
         if ( nvmx_handle_vmwrite(regs) == X86EMUL_OKAY )
             update_guest_eip();
@@ -2468,7 +2473,6 @@
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
     case EXIT_REASON_VMLAUNCH:
-    case EXIT_REASON_VMREAD:
     case EXIT_REASON_VMRESUME:
     case EXIT_REASON_GETSEC:
     case EXIT_REASON_INVEPT:
diff -r 4838d3cb1e85 xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c       Wed Jun 01 09:40:14 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vvmx.c       Wed Jun 01 09:40:43 2011 +0800
@@ -173,6 +173,8 @@
     VMFAIL_INVALID,
 };
 
+#define CASE_SET_REG(REG, reg)      \
+    case VMX_REG_ ## REG: regs->reg = value; break
 #define CASE_GET_REG(REG, reg)      \
     case VMX_REG_ ## REG: value = regs->reg; break
 
@@ -285,6 +287,32 @@
     return value;
 }
 
+static void reg_write(struct cpu_user_regs *regs,
+                      enum vmx_regs_enc index,
+                      unsigned long value)
+{
+    switch ( index ) {
+    CASE_SET_REG(RAX, eax);
+    CASE_SET_REG(RCX, ecx);
+    CASE_SET_REG(RDX, edx);
+    CASE_SET_REG(RBX, ebx);
+    CASE_SET_REG(RBP, ebp);
+    CASE_SET_REG(RSI, esi);
+    CASE_SET_REG(RDI, edi);
+    CASE_SET_REG(RSP, esp);
+    CASE_SET_REG(R8, r8);
+    CASE_SET_REG(R9, r9);
+    CASE_SET_REG(R10, r10);
+    CASE_SET_REG(R11, r11);
+    CASE_SET_REG(R12, r12);
+    CASE_SET_REG(R13, r13);
+    CASE_SET_REG(R14, r14);
+    CASE_SET_REG(R15, r15);
+    default:
+        break;
+    }
+}
+
 static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
 {
     struct vcpu *v = current;
@@ -622,6 +650,35 @@
     return X86EMUL_OKAY;
 }
 
+int nvmx_handle_vmread(struct cpu_user_regs *regs)
+{
+    struct vcpu *v = current;
+    struct vmx_inst_decoded decode;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    u64 value = 0;
+    int rc;
+
+    rc = decode_vmx_inst(regs, &decode, NULL, 0);
+    if ( rc != X86EMUL_OKAY )
+        return rc;
+
+    value = __get_vvmcs(nvcpu->nv_vvmcx, reg_read(regs, decode.reg2));
+
+    switch ( decode.type ) {
+    case VMX_INST_MEMREG_TYPE_MEMORY:
+        rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0);
+        if ( rc != HVMCOPY_okay )
+            return X86EMUL_EXCEPTION;
+        break;
+    case VMX_INST_MEMREG_TYPE_REG:
+        reg_write(regs, decode.reg1, value);
+        break;
+    }
+
+    vmreturn(regs, VMSUCCEED);
+    return X86EMUL_OKAY;
+}
+
 int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
diff -r 4838d3cb1e85 xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h        Wed Jun 01 09:40:14 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h        Wed Jun 01 09:40:43 2011 +0800
@@ -111,6 +111,7 @@
 int nvmx_handle_vmptrld(struct cpu_user_regs *regs);
 int nvmx_handle_vmptrst(struct cpu_user_regs *regs);
 int nvmx_handle_vmclear(struct cpu_user_regs *regs);
+int nvmx_handle_vmread(struct cpu_user_regs *regs);
 int nvmx_handle_vmwrite(struct cpu_user_regs *regs);
 
 #endif /* __ASM_X86_HVM_VVMX_H__ */
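
A note for readers new to this pattern: CASE_GET_REG/CASE_SET_REG build the switch arms by token pasting. Below is a minimal standalone sketch of that pattern; the enum, struct, and main() are simplified stand-ins invented for illustration, and only the macro shape matches the patch:

#include <stdio.h>

/* Simplified stand-ins for Xen's vmx_regs_enc and cpu_user_regs. */
enum vmx_regs_enc { VMX_REG_RAX, VMX_REG_RCX };
struct cpu_user_regs { unsigned long eax, ecx; };

/* Same shape as the patch: paste REG onto VMX_REG_ to form the case label. */
#define CASE_SET_REG(REG, reg) \
    case VMX_REG_ ## REG: regs->reg = value; break

static void reg_write(struct cpu_user_regs *regs,
                      enum vmx_regs_enc index, unsigned long value)
{
    switch ( index ) {
    /* CASE_SET_REG(RAX, eax) expands to:
     *   case VMX_REG_RAX: regs->eax = value; break;   */
    CASE_SET_REG(RAX, eax);
    CASE_SET_REG(RCX, ecx);
    default:
        break;
    }
}

int main(void)
{
    struct cpu_user_regs regs = { 0, 0 };
    reg_write(&regs, VMX_REG_RAX, 0x1234);
    printf("eax = %#lx\n", regs.eax);   /* eax = 0x1234 */
    return 0;
}

Generating the cases this way keeps reg_read() and reg_write() trivially symmetric and makes it hard for a register name and its VMX_REG_* encoding to drift apart.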

