diff -r 1a1d593c16f3 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu Feb 10 16:55:27 2011 -0600
+++ b/xen/arch/x86/hvm/svm/svm.c Fri Feb 11 10:16:50 2011 -0600
@@ -623,6 +623,42 @@
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
+static void svm_lwp_save(struct vcpu *v)
+{
+    if ( cpu_has_lwp )
+    {
+        rdmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+        wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
+    }
+}
+
+static void svm_lwp_load(struct vcpu *v)
+{
+    if ( cpu_has_lwp )
+    {
+        wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+    }
+}
+
+static void svm_lwp_cpuid(struct vcpu *v, unsigned int *eax,
+                          unsigned int *ebx, unsigned int *ecx,
+                          unsigned int *edx)
+{
+    uint64_t lwp_cfg = v->arch.hvm_svm.guest_lwp_cfg;
+
+    if ( !cpu_has_lwp )
+        return;
+
+    if ( !(v->arch.xcr0 & XSTATE_LWP) )
+    {
+        *eax = 0x0;
+        return;
+    }
+
+    /* Turn on avail bit and other features specified in lwp_cfg. */
+    *eax = (*edx & lwp_cfg) | 0x00000001;
+}
+
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
     int cpu = smp_processor_id();
@@ -631,6 +667,7 @@
 
     svm_save_dr(v);
     vpmu_save(v);
+    svm_lwp_save(v);
 
     svm_sync_vmcb(v);
     svm_vmload(per_cpu(root_vmcb, cpu));
@@ -674,6 +711,7 @@
     svm_vmload(vmcb);
     vmcb->cleanbits.bytes = 0;
     vpmu_load(v);
+    svm_lwp_load(v);
 
     if ( cpu_has_rdtscp )
         wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
@@ -1003,11 +1041,16 @@
 
     hvm_cpuid(input, eax, ebx, ecx, edx);
 
-    if ( input == 0x80000001 )
+    switch ( input )
     {
+    case 0x80000001:
         /* Fix up VLAPIC details. */
         if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
             __clear_bit(X86_FEATURE_APIC & 31, edx);
+        break;
+    case 0x8000001c:
+        svm_lwp_cpuid(v, eax, ebx, ecx, edx);
+        break;
     }
 
     HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
 }
@@ -1099,6 +1142,10 @@
         *msr_content = vmcb_get_lastinttoip(vmcb);
         break;
 
+    case MSR_AMD64_LWP_CFG:
+        *msr_content = v->arch.hvm_svm.guest_lwp_cfg;
+        break;
+
     case MSR_K7_PERFCTR0:
     case MSR_K7_PERFCTR1:
     case MSR_K7_PERFCTR2:
@@ -1194,6 +1241,24 @@
         vmcb_set_lastinttoip(vmcb, msr_content);
         break;
 
+    case MSR_AMD64_LWP_CFG:
+        if ( cpu_has_lwp )
+        {
+            unsigned int eax, ebx, ecx, edx;
+            uint32_t msr_low;
+
+            hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
+            msr_low = (uint32_t)msr_content;
+
+            /* Generate #GP if guest tries to turn on unsupported features. */
+            if ( msr_low & ~edx )
+                goto gpf;
+
+            wrmsrl(msr, msr_content);
+            v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+        }
+        break;
+
     case MSR_K7_PERFCTR0:
     case MSR_K7_PERFCTR1:
     case MSR_K7_PERFCTR2:
diff -r 1a1d593c16f3 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c Thu Feb 10 16:55:27 2011 -0600
+++ b/xen/arch/x86/hvm/svm/vmcb.c Fri Feb 11 10:16:50 2011 -0600
@@ -153,6 +153,8 @@
     svm_disable_intercept_for_msr(v, MSR_LSTAR);
     svm_disable_intercept_for_msr(v, MSR_STAR);
     svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+    if ( cpu_has_lwp )
+        svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR);
 
     vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
     vmcb->_iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap);
diff -r 1a1d593c16f3 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c Thu Feb 10 16:55:27 2011 -0600
+++ b/xen/arch/x86/i387.c Fri Feb 11 10:16:50 2011 -0600
@@ -75,7 +75,7 @@
     if ( cpu_has_lwp )
     {
         /* Has LWP been used? */
-        rdmsrl(MSR_AMD_LWP_CBADDR, lwpcb);
+        rdmsrl(MSR_AMD64_LWP_CBADDR, lwpcb);
         if ( !lwpcb )
         {
             /* Guest might have turned off LWP. So clean the bit here.
              */
             xsave_area->xsave_hdr.xstate_bv &= ~XSTATE_LWP;
@@ -90,7 +90,7 @@
         stts();
 
         /* disable LWP for next VCPU */
-        wrmsrl(MSR_AMD_LWP_CBADDR, 0);
+        wrmsrl(MSR_AMD64_LWP_CBADDR, 0);
     }
 }
diff -r 1a1d593c16f3 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h Thu Feb 10 16:55:27 2011 -0600
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h Fri Feb 11 10:16:50 2011 -0600
@@ -504,6 +504,9 @@
     uint64_t guest_sysenter_cs;
     uint64_t guest_sysenter_esp;
     uint64_t guest_sysenter_eip;
+
+    /* AMD lightweight profiling MSR */
+    uint64_t guest_lwp_cfg;
 };
 
 struct vmcb_struct *alloc_vmcb(void);
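
The guest-visible semantics added above reduce to two rules: CPUID leaf 0x8000001c reports the LWP-available bit plus whatever the guest has enabled in its virtual LWP_CFG, clipped to the hardware feature mask in EDX, and a write to MSR_AMD64_LWP_CFG that sets any bit outside that mask takes the #GP path. Below is a minimal standalone sketch of those two rules; the helper names and the hard-coded feature mask are illustrative assumptions, not part of the patch.

/*
 * Standalone sketch (not Xen code) of the LWP_CFG/CPUID masking logic
 * in the patch above.  The helper names and the hard-coded host
 * feature mask are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LWP_AVAILABLE 0x00000001u          /* CPUID 0x8000001c EAX[0] */

/* Mirrors svm_lwp_cpuid(): EAX advertises availability plus the
 * features enabled in the guest's virtual LWP_CFG, limited to what the
 * hardware supports (EDX); nothing is advertised unless XCR0.LWP is set. */
static uint32_t lwp_cpuid_eax(uint32_t host_edx, uint64_t guest_lwp_cfg,
                              bool xcr0_lwp)
{
    if ( !xcr0_lwp )
        return 0;
    return ((uint32_t)guest_lwp_cfg & host_edx) | LWP_AVAILABLE;
}

/* Mirrors the MSR_AMD64_LWP_CFG write path: setting a feature bit
 * absent from the supported mask must fault with #GP. */
static bool lwp_cfg_write_ok(uint32_t host_edx, uint64_t msr_content)
{
    uint32_t msr_low = (uint32_t)msr_content;
    return !(msr_low & ~host_edx);
}

int main(void)
{
    uint32_t host_edx = 0x00fffff0;        /* hypothetical supported features */

    printf("write 0x10: %s\n", lwp_cfg_write_ok(host_edx, 0x10) ? "ok" : "#GP");
    printf("write 0x08: %s\n", lwp_cfg_write_ok(host_edx, 0x08) ? "ok" : "#GP");
    printf("cpuid eax : %#x\n", (unsigned)lwp_cpuid_eax(host_edx, 0x30, true));
    return 0;
}

Note also that svm_lwp_save() zeroes MSR_AMD64_LWP_CFG after caching it, so a descheduled vCPU's LWP configuration cannot stay active for whatever runs next on that core; svm_lwp_load() restores the cached value on the way back in.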