[Xen-changelog] [xen-unstable] [HVM] Fix MSR access code.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [HVM] Fix MSR access code.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 28 Nov 2006 13:40:35 +0000
Delivery-date: Tue, 28 Nov 2006 05:42:38 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 62b0b520ea53a7e3bd4e34dc226716a688be698a
# Parent  7a5246955bef179fb199329b4646b3eb74489735
[HVM] Fix MSR access code.

- rdmsr/wrmsr always use ECX (not RCX) as the register index.
- SVM still spelled out the function names explicitly in its HVM_DBG_LOG() output.
- The guest should, at the very minimum, see a #GP fault for MSRs whose
  accesses fault even in Xen itself (a minimal sketch of the resulting
  read path follows below).
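
For readers skimming the diff, the core of the new read path is the pattern
below. This is a minimal user-space sketch with hypothetical stubs standing in
for Xen's rdmsr_hypervisor_regs(), rdmsr_safe() (in Xen a macro taking lvalues
rather than pointers) and the SVM/VMX #GP-injection helpers; it only
illustrates the (u32) truncation of the saved RCX and the fall-back order, not
the hypervisor code itself.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real Xen helpers. */
static int rdmsr_hypervisor_regs(uint32_t idx, uint32_t *eax, uint32_t *edx)
{
    return 0;                        /* not a Xen-defined synthetic MSR */
}

static int rdmsr_safe(uint32_t idx, uint32_t *eax, uint32_t *edx)
{
    if ( idx == 0x10 )               /* pretend only the TSC MSR exists */
    {
        *eax = 0x1234;
        *edx = 0;
        return 0;                    /* 0 == read succeeded */
    }
    return -1;                       /* access would fault in Xen */
}

static void inject_gp_fault(void)
{
    puts("guest sees #GP");
}

static void do_msr_read(uint64_t rcx, uint64_t *rax, uint64_t *rdx)
{
    uint32_t ecx = (uint32_t)rcx;    /* rdmsr uses ECX only, never full RCX */
    uint32_t eax, edx;

    if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
         rdmsr_safe(ecx, &eax, &edx) == 0 )
    {
        *rax = eax;
        *rdx = edx;
        return;
    }
    inject_gp_fault();               /* MSR faults even in Xen: reflect #GP */
}

int main(void)
{
    uint64_t rax = 0, rdx = 0;

    do_msr_read(0xdeadbeef00000010ULL, &rax, &rdx);  /* high RCX bits ignored */
    do_msr_read(0xffffffffULL, &rax, &rdx);          /* unknown MSR -> #GP    */
    printf("eax=%#llx edx=%#llx\n",
           (unsigned long long)rax, (unsigned long long)rdx);
    return 0;
}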

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c |   55 ++++++++++++++----------------
 xen/arch/x86/hvm/vmx/vmx.c |   80 ++++++++++++++++++++++-----------------------
 2 files changed, 65 insertions(+), 70 deletions(-)

diff -r 7a5246955bef -r 62b0b520ea53 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Nov 28 11:35:10 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Nov 28 11:43:39 2006 +0000
@@ -277,7 +277,7 @@ static inline int long_mode_do_msr_read(
     struct vcpu *vc = current;
     struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
 
-    switch (regs->ecx)
+    switch ((u32)regs->ecx)
     {
     case MSR_EFER:
         msr_content = vmcb->efer;
@@ -315,7 +315,7 @@ static inline int long_mode_do_msr_read(
         return 0;
     }
 
-    HVM_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %"PRIx64"\n", 
+    HVM_DBG_LOG(DBG_LEVEL_2, "msr_content: %"PRIx64"\n",
                 msr_content);
 
     regs->eax = (u32)(msr_content >>  0);
@@ -329,11 +329,10 @@ static inline int long_mode_do_msr_write
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx "
-                "msr_content %"PRIx64"\n", 
-                (unsigned long)regs->ecx, msr_content);
-
-    switch ( regs->ecx )
+    HVM_DBG_LOG(DBG_LEVEL_1, "msr %x msr_content %"PRIx64"\n",
+                (u32)regs->ecx, msr_content);
+
+    switch ( (u32)regs->ecx )
     {
     case MSR_EFER:
 #ifdef __x86_64__
@@ -1855,22 +1854,18 @@ static inline void svm_do_msr_access(
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int  inst_len;
     u64 msr_content=0;
-    u32 eax, edx;
+    u32 ecx = regs->ecx, eax, edx;
 
     ASSERT(vmcb);
 
-    HVM_DBG_LOG(DBG_LEVEL_1, "svm_do_msr_access: ecx=%lx, eax=%lx, edx=%lx, "
-                "exitinfo = %lx", (unsigned long)regs->ecx, 
-                (unsigned long)regs->eax, (unsigned long)regs->edx, 
+    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x, exitinfo = %lx",
+                ecx, (u32)regs->eax, (u32)regs->edx,
                 (unsigned long)vmcb->exitinfo1);
 
     /* is it a read? */
     if (vmcb->exitinfo1 == 0)
     {
-        inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL);
-
-        regs->edx = 0;
-        switch (regs->ecx) {
+        switch (ecx) {
         case MSR_IA32_TIME_STAMP_COUNTER:
             msr_content = hvm_get_guest_time(v);
             break;
@@ -1890,25 +1885,30 @@ static inline void svm_do_msr_access(
             if (long_mode_do_msr_read(regs))
                 goto done;
 
-            if ( rdmsr_hypervisor_regs(regs->ecx, &eax, &edx) )
+            if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
+                 rdmsr_safe(ecx, eax, edx) == 0 )
             {
                 regs->eax = eax;
                 regs->edx = edx;
                 goto done;
             }
-
-            rdmsr_safe(regs->ecx, regs->eax, regs->edx);
-            break;
+            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+            return;
         }
         regs->eax = msr_content & 0xFFFFFFFF;
         regs->edx = msr_content >> 32;
+
+ done:
+        HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
+                    ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
+
+        inst_len = __get_instruction_length(vmcb, INSTR_RDMSR, NULL);
     }
     else
     {
-        inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL);
         msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
 
-        switch (regs->ecx)
+        switch (ecx)
         {
         case MSR_IA32_TIME_STAMP_COUNTER:
             hvm_set_guest_time(v, msr_content);
@@ -1927,17 +1927,12 @@ static inline void svm_do_msr_access(
             break;
         default:
             if ( !long_mode_do_msr_write(regs) )
-                wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx);
+                wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
             break;
         }
-    }
-
- done:
-
-    HVM_DBG_LOG(DBG_LEVEL_1, "svm_do_msr_access returns: "
-                "ecx=%lx, eax=%lx, edx=%lx",
-                (unsigned long)regs->ecx, (unsigned long)regs->eax,
-                (unsigned long)regs->edx);
+
+        inst_len = __get_instruction_length(vmcb, INSTR_WRMSR, NULL);
+    }
 
     __update_guest_eip(vmcb, inst_len);
 }
diff -r 7a5246955bef -r 62b0b520ea53 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Nov 28 11:35:10 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Nov 28 11:43:39 2006 +0000
@@ -116,7 +116,7 @@ static inline int long_mode_do_msr_read(
     struct vcpu *v = current;
     struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
 
-    switch ( regs->ecx ) {
+    switch ( (u32)regs->ecx ) {
     case MSR_EFER:
         HVM_DBG_LOG(DBG_LEVEL_2, "EFER msr_content 0x%"PRIx64, msr_content);
         msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_EFER];
@@ -169,10 +169,10 @@ static inline int long_mode_do_msr_write
     struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
     struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
 
-    HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%lx msr_content 0x%"PRIx64"\n",
-                (unsigned long)regs->ecx, msr_content);
-
-    switch ( regs->ecx ) {
+    HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%x msr_content 0x%"PRIx64"\n",
+                (u32)regs->ecx, msr_content);
+
+    switch ( (u32)regs->ecx ) {
     case MSR_EFER:
         /* offending reserved bit will cause #GP */
         if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
@@ -1790,16 +1790,16 @@ static int vmx_cr_access(unsigned long e
     return 1;
 }
 
-static inline void vmx_do_msr_read(struct cpu_user_regs *regs)
+static inline int vmx_do_msr_read(struct cpu_user_regs *regs)
 {
     u64 msr_content = 0;
-    u32 eax, edx;
+    u32 ecx = regs->ecx, eax, edx;
     struct vcpu *v = current;
 
-    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%lx, eax=%lx, edx=%lx",
-                (unsigned long)regs->ecx, (unsigned long)regs->eax,
-                (unsigned long)regs->edx);
-    switch (regs->ecx) {
+    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x",
+                ecx, (u32)regs->eax, (u32)regs->edx);
+
+    switch (ecx) {
     case MSR_IA32_TIME_STAMP_COUNTER:
         msr_content = hvm_get_guest_time(v);
         break;
@@ -1817,39 +1817,41 @@ static inline void vmx_do_msr_read(struc
         break;
     default:
         if ( long_mode_do_msr_read(regs) )
-            return;
-
-        if ( rdmsr_hypervisor_regs(regs->ecx, &eax, &edx) )
+            goto done;
+
+        if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
+             rdmsr_safe(ecx, eax, edx) == 0 )
         {
             regs->eax = eax;
             regs->edx = edx;
-            return;
-        }
-
-        rdmsr_safe(regs->ecx, regs->eax, regs->edx);
-        return;
+            goto done;
+        }
+        vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+        return 0;
     }
 
     regs->eax = msr_content & 0xFFFFFFFF;
     regs->edx = msr_content >> 32;
 
-    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%lx, eax=%lx, edx=%lx",
-                (unsigned long)regs->ecx, (unsigned long)regs->eax,
+done:
+    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
+                ecx, (unsigned long)regs->eax,
                 (unsigned long)regs->edx);
-}
-
-static inline void vmx_do_msr_write(struct cpu_user_regs *regs)
-{
+    return 1;
+}
+
+static inline int vmx_do_msr_write(struct cpu_user_regs *regs)
+{
+    u32 ecx = regs->ecx;
     u64 msr_content;
     struct vcpu *v = current;
 
-    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%lx, eax=%lx, edx=%lx",
-                (unsigned long)regs->ecx, (unsigned long)regs->eax,
-                (unsigned long)regs->edx);
+    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x",
+                ecx, (u32)regs->eax, (u32)regs->edx);
 
     msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
 
-    switch (regs->ecx) {
+    switch (ecx) {
     case MSR_IA32_TIME_STAMP_COUNTER:
         {
             struct periodic_time *pt =
@@ -1874,13 +1876,11 @@ static inline void vmx_do_msr_write(stru
         break;
     default:
         if ( !long_mode_do_msr_write(regs) )
-            wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx);
-        break;
-    }
-
-    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%lx, eax=%lx, edx=%lx",
-                (unsigned long)regs->ecx, (unsigned long)regs->eax,
-                (unsigned long)regs->edx);
+            wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
+        break;
+    }
+
+    return 1;
 }
 
 static void vmx_do_hlt(void)
@@ -2244,16 +2244,16 @@ asmlinkage void vmx_vmexit_handler(struc
         break;
     case EXIT_REASON_MSR_READ:
         inst_len = __get_instruction_length(); /* Safe: RDMSR */
-        __update_guest_eip(inst_len);
-        vmx_do_msr_read(regs);
+        if ( vmx_do_msr_read(regs) )
+            __update_guest_eip(inst_len);
         TRACE_VMEXIT(1, regs->ecx);
         TRACE_VMEXIT(2, regs->eax);
         TRACE_VMEXIT(3, regs->edx);
         break;
     case EXIT_REASON_MSR_WRITE:
         inst_len = __get_instruction_length(); /* Safe: WRMSR */
-        __update_guest_eip(inst_len);
-        vmx_do_msr_write(regs);
+        if ( vmx_do_msr_write(regs) )
+            __update_guest_eip(inst_len);
         TRACE_VMEXIT(1, regs->ecx);
         TRACE_VMEXIT(2, regs->eax);
         TRACE_VMEXIT(3, regs->edx);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
