[Xen-changelog] [xen-unstable] x86 vpmu: msr-handling cleanup

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86 vpmu: msr-handling cleanup
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 10 Jun 2010 02:25:11 -0700
Delivery-date: Thu, 10 Jun 2010 02:25:27 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1276090571 -3600
# Node ID 706e6ba6074c4c12a2cde50451556c61fadefdd0
# Parent  5145d5840e611ca1cca91d97fa7b3f90ffdce056
x86 vpmu: msr-handling cleanup

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c        |   10 ++----
 xen/arch/x86/hvm/svm/vpmu.c       |   55 +++++++++++++++-----------------------
 xen/arch/x86/hvm/vmx/vmx.c        |    6 ++--
 xen/arch/x86/hvm/vmx/vpmu_core2.c |   42 +++++++++++++----------------
 xen/arch/x86/hvm/vpmu.c           |    8 ++---
 xen/include/asm-x86/hvm/vpmu.h    |    8 ++---
 6 files changed, 57 insertions(+), 72 deletions(-)
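
In short: before this change the vpmu MSR hooks took the guest's whole
cpu_user_regs, so every vendor implementation decoded ECX and split or
re-assembled EAX:EDX on its own; afterwards the callers pass the MSR index
and a 64-bit content value directly (by value for writes, by pointer for
reads). Below is a minimal sketch of the resulting dispatch, using
simplified stand-ins rather than the real Xen types (in particular, the
real vpmu_do_wrmsr/vpmu_do_rdmsr derive the vpmu from `current' instead of
taking it as a parameter):

#include <stdint.h>

/* Simplified stand-in for struct arch_vpmu_ops after this patch. */
struct arch_vpmu_ops {
    int (*do_wrmsr)(unsigned int msr, uint64_t msr_content);
    int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
};

struct vpmu_struct {
    const struct arch_vpmu_ops *arch_vpmu_ops;  /* vendor hooks, or NULL */
};

/* Generic entry points: forward to the vendor hooks when installed,
 * otherwise return 0 ("not handled") so the intercept can fall through. */
int vpmu_do_wrmsr(struct vpmu_struct *vpmu, unsigned int msr, uint64_t val)
{
    return vpmu->arch_vpmu_ops ? vpmu->arch_vpmu_ops->do_wrmsr(msr, val) : 0;
}

int vpmu_do_rdmsr(struct vpmu_struct *vpmu, unsigned int msr, uint64_t *val)
{
    return vpmu->arch_vpmu_ops ? vpmu->arch_vpmu_ops->do_rdmsr(msr, val) : 0;
}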

diff -r 5145d5840e61 -r 706e6ba6074c xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Jun 09 13:29:22 2010 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Jun 09 14:36:11 2010 +0100
@@ -1105,8 +1105,8 @@ static int svm_msr_read_intercept(struct
     case MSR_K7_EVNTSEL1:
     case MSR_K7_EVNTSEL2:
     case MSR_K7_EVNTSEL3:
-        vpmu_do_rdmsr(regs);
-        goto done;
+        vpmu_do_rdmsr(ecx, &msr_content);
+        break;
 
     default:
 
@@ -1126,7 +1126,6 @@ static int svm_msr_read_intercept(struct
     regs->eax = (uint32_t)msr_content;
     regs->edx = (uint32_t)(msr_content >> 32);
 
-done:
     HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
     HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                 ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
@@ -1199,8 +1198,8 @@ static int svm_msr_write_intercept(struc
     case MSR_K7_EVNTSEL1:
     case MSR_K7_EVNTSEL2:
     case MSR_K7_EVNTSEL3:
-        vpmu_do_wrmsr(regs);
-        goto done;
+        vpmu_do_wrmsr(ecx, msr_content);
+        break;
 
     default:
         if ( wrmsr_viridian_regs(ecx, msr_content) )
@@ -1218,7 +1217,6 @@ static int svm_msr_write_intercept(struc
         }
         break;
     }
-done:
     return X86EMUL_OKAY;
 
  gpf:
diff -r 5145d5840e61 -r 706e6ba6074c xen/arch/x86/hvm/svm/vpmu.c
--- a/xen/arch/x86/hvm/svm/vpmu.c       Wed Jun 09 13:29:22 2010 +0100
+++ b/xen/arch/x86/hvm/svm/vpmu.c       Wed Jun 09 14:36:11 2010 +0100
@@ -167,42 +167,38 @@ static void amd_vpmu_save(struct vcpu *v
     apic_write(APIC_LVTPC,  ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
 }
 
-static void context_update(struct cpu_user_regs *regs, u64 msr_content)
+static void context_update(unsigned int msr, u64 msr_content)
 {
     int i;
-    u32 addr = regs->ecx;
-    struct vcpu *v = current;
-    struct vpmu_struct *vpmu = vcpu_vpmu(v);
-    struct amd_vpmu_context *ctxt = vpmu->context;
-
-    for ( i = 0; i < NUM_COUNTERS; i++ )
-        if ( addr == AMD_F10H_COUNTERS[i] )
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct amd_vpmu_context *ctxt = vpmu->context;
+
+    for ( i = 0; i < NUM_COUNTERS; i++ )
+        if ( msr == AMD_F10H_COUNTERS[i] )
             ctxt->counters[i] = msr_content;
 
     for ( i = 0; i < NUM_COUNTERS; i++ )
-        if ( addr == AMD_F10H_CTRLS[i] )
+        if ( msr == AMD_F10H_CTRLS[i] )
             ctxt->ctrls[i] = msr_content;
 
     ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
 }
 
-static int amd_vpmu_do_wrmsr(struct cpu_user_regs *regs)
-{
-    u64 msr_content = 0;
-    struct vcpu *v = current;
-    struct vpmu_struct *vpmu = vcpu_vpmu(v);
-
-    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+{
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
     /* For all counters, enable guest only mode for HVM guest */
-    if ( (get_pmu_reg_type(regs->ecx) == MSR_TYPE_CTRL) &&
+    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
         !(is_guest_mode(msr_content)) )
     {
         set_guest_mode(msr_content);
     }
 
     /* check if the first counter is enabled */
-    if ( (get_pmu_reg_type(regs->ecx) == MSR_TYPE_CTRL) &&
+    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
         is_pmu_enabled(msr_content) && !(vpmu->flags & VPMU_RUNNING) )
     {
         if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
@@ -212,7 +208,7 @@ static int amd_vpmu_do_wrmsr(struct cpu_
     }
 
     /* stop saving & restore if guest stops first counter */
-    if ( (get_pmu_reg_type(regs->ecx) == MSR_TYPE_CTRL) && 
+    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) && 
         (is_pmu_enabled(msr_content) == 0) && (vpmu->flags & VPMU_RUNNING) )
     {
         apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
@@ -221,21 +217,16 @@ static int amd_vpmu_do_wrmsr(struct cpu_
     }
 
     /* Update vpmu context immediately */
-    context_update(regs, msr_content);
+    context_update(msr, msr_content);
 
     /* Write to hw counters */
-    wrmsrl(regs->ecx, msr_content);
-    return 1;
-}
-
-static int amd_vpmu_do_rdmsr(struct cpu_user_regs *regs)
-{
-    u64 msr_content = 0;
-
-    rdmsrl(regs->ecx, msr_content);
-    regs->eax = msr_content & 0xFFFFFFFF;
-    regs->edx = msr_content >> 32;
-
+    wrmsrl(msr, msr_content);
+    return 1;
+}
+
+static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+{
+    rdmsrl(msr, *msr_content);
     return 1;
 }
 
diff -r 5145d5840e61 -r 706e6ba6074c xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 09 13:29:22 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Jun 09 14:36:11 2010 +0100
@@ -1846,8 +1846,8 @@ static int vmx_msr_read_intercept(struct
                        MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
         break;
     default:
-        if ( vpmu_do_rdmsr(regs) )
-            goto done;
+        if ( vpmu_do_rdmsr(ecx, &msr_content) )
+            break;
         if ( passive_domain_do_rdmsr(regs) )
             goto done;
         switch ( long_mode_do_msr_read(regs) )
@@ -2015,7 +2015,7 @@ static int vmx_msr_write_intercept(struc
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
         goto gp_fault;
     default:
-        if ( vpmu_do_wrmsr(regs) )
+        if ( vpmu_do_wrmsr(ecx, msr_content) )
             return X86EMUL_OKAY;
         if ( passive_domain_do_wrmsr(regs) )
             return X86EMUL_OKAY;
diff -r 5145d5840e61 -r 706e6ba6074c xen/arch/x86/hvm/vmx/vpmu_core2.c
--- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed Jun 09 13:29:22 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed Jun 09 14:36:11 2010 +0100
@@ -328,10 +328,9 @@ static int core2_vpmu_msr_common_check(u
     return 1;
 }
 
-static int core2_vpmu_do_wrmsr(struct cpu_user_regs *regs)
-{
-    u32 ecx = regs->ecx;
-    u64 msr_content, global_ctrl, non_global_ctrl;
+static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+{
+    u64 global_ctrl, non_global_ctrl;
     char pmu_enable = 0;
     int i, tmp;
     int type = -1, index = -1;
@@ -339,12 +338,11 @@ static int core2_vpmu_do_wrmsr(struct cp
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct core2_vpmu_context *core2_vpmu_cxt = NULL;
 
-    if ( !core2_vpmu_msr_common_check(ecx, &type, &index) )
-        return 0;
-
-    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+    if ( !core2_vpmu_msr_common_check(msr, &type, &index) )
+        return 0;
+
     core2_vpmu_cxt = vpmu->context;
-    switch ( ecx )
+    switch ( msr )
     {
     case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
         core2_vpmu_cxt->global_ovf_status &= ~msr_content;
@@ -395,7 +393,7 @@ static int core2_vpmu_do_wrmsr(struct cp
         }
         break;
     default:
-        tmp = ecx - MSR_P6_EVNTSEL0;
+        tmp = msr - MSR_P6_EVNTSEL0;
         vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
         if ( tmp >= 0 && tmp < core2_get_pmc_count() )
             core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
@@ -445,7 +443,7 @@ static int core2_vpmu_do_wrmsr(struct cp
         if (inject_gp)
             vmx_inject_hw_exception(TRAP_gp_fault, 0);
         else
-            wrmsrl(ecx, msr_content);
+            wrmsrl(msr, msr_content);
     }
     else
         vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
@@ -453,34 +451,32 @@ static int core2_vpmu_do_wrmsr(struct cp
     return 1;
 }
 
-static int core2_vpmu_do_rdmsr(struct cpu_user_regs *regs)
-{
-    u64 msr_content = 0;
+static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+{
     int type = -1, index = -1;
     struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct core2_vpmu_context *core2_vpmu_cxt = NULL;
 
-    if ( !core2_vpmu_msr_common_check(regs->ecx, &type, &index) )
+    if ( !core2_vpmu_msr_common_check(msr, &type, &index) )
         return 0;
 
     core2_vpmu_cxt = vpmu->context;
-    switch ( regs->ecx )
+    switch ( msr )
     {
     case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+        *msr_content = 0;
         break;
     case MSR_CORE_PERF_GLOBAL_STATUS:
-        msr_content = core2_vpmu_cxt->global_ovf_status;
+        *msr_content = core2_vpmu_cxt->global_ovf_status;
         break;
     case MSR_CORE_PERF_GLOBAL_CTRL:
-        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
+        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
         break;
     default:
-        rdmsrl(regs->ecx, msr_content);
-    }
-
-    regs->eax = msr_content & 0xFFFFFFFF;
-    regs->edx = msr_content >> 32;
+        rdmsrl(msr, *msr_content);
+    }
+
     return 1;
 }
 
diff -r 5145d5840e61 -r 706e6ba6074c xen/arch/x86/hvm/vpmu.c
--- a/xen/arch/x86/hvm/vpmu.c   Wed Jun 09 13:29:22 2010 +0100
+++ b/xen/arch/x86/hvm/vpmu.c   Wed Jun 09 14:36:11 2010 +0100
@@ -36,21 +36,21 @@ static int __read_mostly opt_vpmu_enable
 static int __read_mostly opt_vpmu_enabled;
 boolean_param("vpmu", opt_vpmu_enabled);
 
-int vpmu_do_wrmsr(struct cpu_user_regs *regs)
+int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(current);
 
     if ( vpmu->arch_vpmu_ops )
-        return vpmu->arch_vpmu_ops->do_wrmsr(regs);
+        return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content);
     return 0;
 }
 
-int vpmu_do_rdmsr(struct cpu_user_regs *regs)
+int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(current);
 
     if ( vpmu->arch_vpmu_ops )
-        return vpmu->arch_vpmu_ops->do_rdmsr(regs);
+        return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
     return 0;
 }
 
diff -r 5145d5840e61 -r 706e6ba6074c xen/include/asm-x86/hvm/vpmu.h
--- a/xen/include/asm-x86/hvm/vpmu.h    Wed Jun 09 13:29:22 2010 +0100
+++ b/xen/include/asm-x86/hvm/vpmu.h    Wed Jun 09 14:36:11 2010 +0100
@@ -47,8 +47,8 @@ struct msr_load_store_entry {
 
 /* Arch specific operations shared by all vpmus */
 struct arch_vpmu_ops {
-    int (*do_wrmsr)(struct cpu_user_regs *regs);
-    int (*do_rdmsr)(struct cpu_user_regs *regs);
+    int (*do_wrmsr)(unsigned int msr, uint64_t msr_content);
+    int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
     int (*do_interrupt)(struct cpu_user_regs *regs);
     void (*arch_vpmu_initialise)(struct vcpu *v);
     void (*arch_vpmu_destroy)(struct vcpu *v);
@@ -66,8 +66,8 @@ struct vpmu_struct {
 #define VPMU_CONTEXT_LOADED                 0x2
 #define VPMU_RUNNING                        0x4
 #define PASSIVE_DOMAIN_ALLOCATED           0x8
-int vpmu_do_wrmsr(struct cpu_user_regs *regs);
-int vpmu_do_rdmsr(struct cpu_user_regs *regs);
+int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
+int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
 int vpmu_do_interrupt(struct cpu_user_regs *regs);
 void vpmu_initialise(struct vcpu *v);
 void vpmu_destroy(struct vcpu *v);
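
Seen from the intercept side, the EAX:EDX (de)composition that each vendor
handler used to duplicate now happens exactly once in the svm/vmx MSR
intercepts. Because the rdmsr path returns its value through a pointer
instead of poking regs->eax/edx itself, the common write-back tail can
serve every case, which is why the `done:' labels disappear above. A
runnable toy illustration of the new calling convention (stand-in types
and a fake MSR store, not Xen code; the MSR number in main() is arbitrary
for the demo):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct toy_regs { uint32_t eax, edx, ecx; };  /* stand-in for cpu_user_regs */

static uint64_t toy_msr;                      /* fake backing store for one MSR */

/* New-style hooks: they see only the MSR index and 64-bit content. */
static int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
{
    (void)msr;                 /* single fake MSR, so the index goes unused */
    toy_msr = msr_content;     /* models wrmsrl(msr, msr_content) */
    return 1;
}

static int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
    (void)msr;
    *msr_content = toy_msr;    /* models rdmsrl(msr, *msr_content) */
    return 1;
}

/* The intercepts decode/encode EAX:EDX once, in common code. */
static void msr_write_intercept(struct toy_regs *regs)
{
    uint64_t msr_content = regs->eax | ((uint64_t)regs->edx << 32);

    vpmu_do_wrmsr(regs->ecx, msr_content);
}

static void msr_read_intercept(struct toy_regs *regs)
{
    uint64_t msr_content;

    vpmu_do_rdmsr(regs->ecx, &msr_content);
    regs->eax = (uint32_t)msr_content;
    regs->edx = (uint32_t)(msr_content >> 32);
}

int main(void)
{
    struct toy_regs regs = { .eax = 0xdeadbeef, .edx = 0x1, .ecx = 0xc0010004 };

    msr_write_intercept(&regs);
    regs.eax = regs.edx = 0;
    msr_read_intercept(&regs);
    printf("edx:eax = %08" PRIx32 ":%08" PRIx32 "\n", regs.edx, regs.eax);
    return 0;
}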

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
