diff -r 8d22ee47ec5d xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.c	Fri Aug 14 12:06:58 2009 +0200
@@ -670,34 +670,33 @@ void mce_init_msr(struct domain *d)
     spin_lock_init(&d->arch.vmca_msrs.lock);
 }
 
-int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
+int mce_rdmsr(uint32_t msr, uint64_t *msr_content)
 {
     struct domain *d = current->domain;
     int ret = 1;
     unsigned int bank;
     struct bank_entry *entry = NULL;
 
-    *lo = *hi = 0x0;
+    *msr_content = 0x0;
     spin_lock(&d->arch.vmca_msrs.lock);
 
     switch ( msr )
     {
     case MSR_IA32_MCG_STATUS:
-        *lo = (u32)d->arch.vmca_msrs.mcg_status;
-        *hi = (u32)(d->arch.vmca_msrs.mcg_status >> 32);
-        if (*lo || *hi)
+        *msr_content = d->arch.vmca_msrs.mcg_status;
+        if (*msr_content)
             gdprintk(XENLOG_DEBUG,
-                "MCE: rdmsr MCG_STATUS lo %x hi %x\n", *lo, *hi);
+                "MCE: rdmsr MCG_STATUS 0x%"PRIx64"\n", *msr_content);
         break;
     case MSR_IA32_MCG_CAP:
-        *lo = (u32)d->arch.vmca_msrs.mcg_cap;
-        *hi = (u32)(d->arch.vmca_msrs.mcg_cap >> 32);
-        gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CAP lo %x hi %x\n", *lo, *hi);
+        *msr_content = d->arch.vmca_msrs.mcg_cap;
+        gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CAP 0x%"PRIx64"\n",
+                 *msr_content);
         break;
     case MSR_IA32_MCG_CTL:
-        *lo = (u32)d->arch.vmca_msrs.mcg_ctl;
-        *hi = (u32)(d->arch.vmca_msrs.mcg_ctl >> 32);
-        gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CTL lo %x hi %x\n", *lo, *hi);
+        *msr_content = d->arch.vmca_msrs.mcg_ctl;
+        gdprintk(XENLOG_DEBUG, "MCE: rdmsr MCG_CTL 0x%"PRIx64"\n",
+                 *msr_content);
         break;
     case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * MAX_NR_BANKS - 1:
         bank = (msr - MSR_IA32_MC0_CTL) / 4;
@@ -710,10 +709,9 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
         switch (msr & (MSR_IA32_MC0_CTL | 3))
         {
         case MSR_IA32_MC0_CTL:
-            *lo = (u32)d->arch.vmca_msrs.mci_ctl[bank];
-            *hi = (u32)(d->arch.vmca_msrs.mci_ctl[bank] >> 32);
-            gdprintk(XENLOG_DEBUG, "MCE: rdmsr MC%u_CTL lo %x hi %x\n",
-                     bank, *lo, *hi);
+            *msr_content = d->arch.vmca_msrs.mci_ctl[bank];
+            gdprintk(XENLOG_DEBUG, "MCE: rdmsr MC%u_CTL 0x%"PRIx64"\n",
+                     bank, *msr_content);
             break;
         case MSR_IA32_MC0_STATUS:
             /* Only error bank is read. Non-error banks simply return. */
@@ -722,11 +720,10 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
                 entry = list_entry(d->arch.vmca_msrs.impact_header.next,
                                    struct bank_entry, list);
                 if (entry->bank == bank) {
-                    *lo = entry->mci_status;
-                    *hi = entry->mci_status >> 32;
+                    *msr_content = entry->mci_status;
                     gdprintk(XENLOG_DEBUG,
                              "MCE: rd MC%u_STATUS in vMCE# context "
-                             "lo %x hi %x\n", bank, *lo, *hi);
+                             "value 0x%"PRIx64"\n", bank, *msr_content);
                 }
                 else
                     entry = NULL;
@@ -739,11 +736,10 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
                                    struct bank_entry, list);
                 if ( entry->bank == bank )
                 {
-                    *lo = entry->mci_addr;
-                    *hi = entry->mci_addr >> 32;
+                    *msr_content = entry->mci_addr;
                     gdprintk(XENLOG_DEBUG,
-                             "MCE: rd MC%u_ADDR in vMCE# context lo %x hi %x\n",
-                             bank, *lo, *hi);
+                             "MCE: rdmsr MC%u_ADDR in vMCE# context 0x%"PRIx64"\n",
+                             bank, *msr_content);
                 }
             }
             break;
@@ -754,11 +750,10 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
                                    struct bank_entry, list);
                 if ( entry->bank == bank )
                 {
-                    *lo = entry->mci_misc;
-                    *hi = entry->mci_misc >> 32;
+                    *msr_content = entry->mci_misc;
                     gdprintk(XENLOG_DEBUG,
-                             "MCE: rd MC%u_MISC in vMCE# context lo %x hi %x\n",
-                             bank, *lo, *hi);
+                             "MCE: rd MC%u_MISC in vMCE# context 0x%"PRIx64"\n",
+                             bank, *msr_content);
                 }
             }
             break;
@@ -768,7 +763,7 @@ int mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
         switch ( boot_cpu_data.x86_vendor )
        {
         case X86_VENDOR_INTEL:
-            ret = intel_mce_rdmsr(msr, lo, hi);
+            ret = intel_mce_rdmsr(msr, msr_content);
             break;
         default:
             ret = 0;
diff -r 8d22ee47ec5d xen/arch/x86/cpu/mcheck/mce.h
--- a/xen/arch/x86/cpu/mcheck/mce.h	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.h	Fri Aug 14 12:06:58 2009 +0200
@@ -25,8 +25,8 @@ void amd_nonfatal_mcheck_init(struct cpu
 
 u64 mce_cap_init(void);
 
-int intel_mce_rdmsr(u32 msr, u32 *lo, u32 *hi);
-int intel_mce_wrmsr(u32 msr, u64 value);
+int intel_mce_rdmsr(uint32_t msr, uint64_t *msr_content);
+int intel_mce_wrmsr(uint32_t msr, uint64_t value);
 
 int mce_available(struct cpuinfo_x86 *c);
 int mce_firstbank(struct cpuinfo_x86 *c);
diff -r 8d22ee47ec5d xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c	Fri Aug 14 12:06:58 2009 +0200
@@ -1080,7 +1080,7 @@ int intel_mcheck_init(struct cpuinfo_x86
     return 1;
 }
 
-int intel_mce_wrmsr(u32 msr, u64 value)
+int intel_mce_wrmsr(uint32_t msr, uint64_t value)
 {
     int ret = 1;
 
@@ -1098,7 +1098,7 @@ int intel_mce_wrmsr(u32 msr, u64 value)
     return ret;
 }
 
-int intel_mce_rdmsr(u32 msr, u32 *lo, u32 *hi)
+int intel_mce_rdmsr(uint32_t msr, uint64_t *msr_content)
 {
     int ret = 1;
 
diff -r 8d22ee47ec5d xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Fri Aug 14 12:06:58 2009 +0200
@@ -1782,7 +1782,6 @@ int hvm_msr_read_intercept(struct cpu_us
     uint64_t *var_range_base, *fixed_range_base;
     int index, mtrr;
     uint32_t cpuid[4];
-    uint32_t lo, hi;
     int ret;
 
     var_range_base = (uint64_t *)v->arch.hvm_vcpu.mtrr.var_ranges;
@@ -1852,14 +1851,11 @@ int hvm_msr_read_intercept(struct cpu_us
         break;
 
     default:
-        ret = mce_rdmsr(ecx, &lo, &hi);
+        ret = mce_rdmsr(ecx, &msr_content);
         if ( ret < 0 )
             goto gp_fault;
         else if ( ret )
-        {
-            msr_content = ((u64)hi << 32) | lo;
             break;
-        }
         /* ret == 0, This is not an MCE MSR, see other MSRs */
         else if (!ret)
             return hvm_funcs.msr_read_intercept(regs);
diff -r 8d22ee47ec5d xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Fri Aug 14 12:06:58 2009 +0200
@@ -1085,9 +1085,12 @@ static int svm_msr_read_intercept(struct
         break;
 
     default:
-        if ( rdmsr_viridian_regs(ecx, &eax, &edx) ||
-             rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
-             rdmsr_safe(ecx, eax, edx) == 0 )
+
+        if ( rdmsr_viridian_regs(ecx, &msr_content) ||
+             rdmsr_hypervisor_regs(ecx, &msr_content) )
+            break;
+
+        if ( rdmsr_safe(ecx, eax, edx) == 0 )
         {
             regs->eax = eax;
             regs->edx = edx;
@@ -1164,13 +1167,13 @@ static int svm_msr_write_intercept(struc
         break;
 
     default:
-        if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
+        if ( wrmsr_viridian_regs(ecx, msr_content) )
             break;
 
         switch ( long_mode_do_msr_write(regs) )
         {
         case HNDL_unhandled:
-            wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
+            wrmsr_hypervisor_regs(ecx, msr_content);
             break;
         case HNDL_exception_raised:
             return X86EMUL_EXCEPTION;
diff -r 8d22ee47ec5d xen/arch/x86/hvm/viridian.c
--- a/xen/arch/x86/hvm/viridian.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/hvm/viridian.c	Fri Aug 14 12:06:58 2009 +0200
@@ -129,10 +129,13 @@ static void enable_hypercall_page(void)
     put_page_and_type(mfn_to_page(mfn));
 }
 
-int wrmsr_viridian_regs(uint32_t idx, uint32_t eax, uint32_t edx)
+int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
 {
     struct domain *d = current->domain;
-    uint64_t val = ((uint64_t)edx << 32) | eax;
+    uint32_t eax, edx;
+
+    eax = (uint32_t)val;
+    edx = (uint32_t)(val >> 32);
 
     if ( !is_viridian_domain(d) )
         return 0;
@@ -224,7 +227,7 @@ int wrmsr_viridian_regs(uint32_t idx, ui
     return 1;
 }
 
-int rdmsr_viridian_regs(uint32_t idx, uint32_t *eax, uint32_t *edx)
+int rdmsr_viridian_regs(uint32_t idx, uint64_t *msr_content)
 {
     uint64_t val;
     struct vcpu *v = current;
@@ -264,8 +267,7 @@ int rdmsr_viridian_regs(uint32_t idx, ui
         return 0;
     }
 
-    *eax = val;
-    *edx = val >> 32;
+    *msr_content = val;
 
     return 1;
 }
diff -r 8d22ee47ec5d xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Aug 14 12:06:58 2009 +0200
@@ -1849,9 +1849,11 @@ static int vmx_msr_read_intercept(struct
             break;
         }
 
-        if ( rdmsr_viridian_regs(ecx, &eax, &edx) ||
-             rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
-             rdmsr_safe(ecx, eax, edx) == 0 )
+        if ( rdmsr_viridian_regs(ecx, &msr_content) ||
+             rdmsr_hypervisor_regs(ecx, &msr_content) )
+            break;
+
+        if ( rdmsr_safe(ecx, eax, edx) == 0 )
         {
             regs->eax = eax;
             regs->edx = edx;
@@ -2029,7 +2031,7 @@ static int vmx_msr_write_intercept(struc
         if ( passive_domain_do_wrmsr(regs) )
             return X86EMUL_OKAY;
 
-        if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
+        if ( wrmsr_viridian_regs(ecx, msr_content) )
             break;
 
         switch ( long_mode_do_msr_write(regs) )
@@ -2037,7 +2039,7 @@ static int vmx_msr_write_intercept(struc
         case HNDL_unhandled:
             if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
                  !is_last_branch_msr(ecx) )
-                wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
+                wrmsr_hypervisor_regs(ecx, msr_content);
             break;
         case HNDL_exception_raised:
             return X86EMUL_EXCEPTION;
diff -r 8d22ee47ec5d xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/arch/x86/traps.c	Fri Aug 14 12:06:58 2009 +0200
@@ -603,8 +603,7 @@ DO_ERROR_NOCODE(TRAP_copro_error,     co
 DO_ERROR(       TRAP_alignment_check,    alignment_check)
 DO_ERROR_NOCODE(TRAP_simd_error,         simd_coprocessor_error)
 
-int rdmsr_hypervisor_regs(
-    uint32_t idx, uint32_t *eax, uint32_t *edx)
+int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *msr_content)
 {
     struct domain *d = current->domain;
     /* Optionally shift out of the way of Viridian architectural MSRs. */
@@ -618,7 +617,7 @@ int rdmsr_hypervisor_regs(
     {
     case 0:
     {
-        *eax = *edx = 0;
+        *msr_content = 0;
         break;
     }
     default:
@@ -628,17 +627,20 @@ int rdmsr_hypervisor_regs(
     return 1;
 }
 
-int wrmsr_hypervisor_regs(
-    uint32_t idx, uint32_t eax, uint32_t edx)
+int wrmsr_hypervisor_regs(uint32_t idx, uint64_t msr_content)
 {
     struct domain *d = current->domain;
     /* Optionally shift out of the way of Viridian architectural MSRs. */
     uint32_t base = is_viridian_domain(d) ? 0x40000200 : 0x40000000;
+    uint32_t eax, edx;
 
     idx -= base;
     if ( idx > 0 )
         return 0;
 
+    eax = (uint32_t)msr_content;
+    edx = (uint32_t)(msr_content >> 32);
+
     switch ( idx )
     {
     case 0:
@@ -1696,7 +1698,8 @@ static int emulate_privileged_op(struct
     unsigned long code_base, code_limit;
     char io_emul_stub[32];
     void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
-    u32 l, h;
+    uint32_t l, h;
+    uint64_t val;
 
     if ( !read_descriptor(regs->cs, v, regs,
                           &code_base, &code_limit, &ar,
@@ -2246,7 +2249,7 @@ static int emulate_privileged_op(struct
                 goto fail;
             break;
         default:
-            if ( wrmsr_hypervisor_regs(regs->ecx, eax, edx) )
+            if ( wrmsr_hypervisor_regs(regs->ecx, val) )
                 break;
 
            rc = mce_wrmsr(regs->ecx, val);
@@ -2328,15 +2331,15 @@ static int emulate_privileged_op(struct
         case MSR_EFER:
         case MSR_AMD_PATCHLEVEL:
         default:
-            if ( rdmsr_hypervisor_regs(regs->ecx, &l, &h) )
+            if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
             {
 rdmsr_writeback:
-                regs->eax = l;
-                regs->edx = h;
+                regs->eax = (uint32_t)val;
+                regs->edx = (uint32_t)(val >> 32);
                 break;
             }
 
-            rc = mce_rdmsr(regs->ecx, &l, &h);
+            rc = mce_rdmsr(regs->ecx, &val);
             if ( rc < 0 )
                 goto fail;
             if ( rc )
diff -r 8d22ee47ec5d xen/include/asm-x86/hvm/viridian.h
--- a/xen/include/asm-x86/hvm/viridian.h	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/include/asm-x86/hvm/viridian.h	Fri Aug 14 12:06:58 2009 +0200
@@ -50,14 +50,12 @@ cpuid_viridian_leaves(
 int
 wrmsr_viridian_regs(
     uint32_t idx,
-    uint32_t eax,
-    uint32_t edx);
+    uint64_t val);
 
 int
 rdmsr_viridian_regs(
     uint32_t idx,
-    uint32_t *eax,
-    uint32_t *edx);
+    uint64_t *msr_content);
 
 int
 viridian_hypercall(struct cpu_user_regs *regs);
diff -r 8d22ee47ec5d xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/include/asm-x86/processor.h	Fri Aug 14 12:06:58 2009 +0200
@@ -551,10 +551,8 @@ void cpu_mcheck_disable(void);
 
 int cpuid_hypervisor_leaves( uint32_t idx,
           uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
-int rdmsr_hypervisor_regs(
-    uint32_t idx, uint32_t *eax, uint32_t *edx);
-int wrmsr_hypervisor_regs(
-    uint32_t idx, uint32_t eax, uint32_t edx);
+int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *msr_content);
+int wrmsr_hypervisor_regs(uint32_t idx, uint64_t msr_content);
 
 int microcode_update(XEN_GUEST_HANDLE(const_void), unsigned long len);
 int microcode_resume_cpu(int cpu);
diff -r 8d22ee47ec5d xen/include/asm-x86/traps.h
--- a/xen/include/asm-x86/traps.h	Fri Aug 14 10:59:13 2009 +0100
+++ b/xen/include/asm-x86/traps.h	Fri Aug 14 12:06:58 2009 +0200
@@ -49,7 +49,7 @@ extern int send_guest_trap(struct domain
 
 /* Guest vMCE MSRs virtualization */
 extern void mce_init_msr(struct domain *d);
-extern int mce_wrmsr(u32 msr, u64 value);
-extern int mce_rdmsr(u32 msr, u32 *lo, u32 *hi);
+extern int mce_wrmsr(uint32_t msr, uint64_t value);
+extern int mce_rdmsr(uint32_t msr, uint64_t *msr_content);
 
 #endif /* ASM_TRAP_H */
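
For reference, a minimal caller sketch (not part of the patch) of the new single-value MSR interface. The wrapper name rdmsr_to_guest_regs and its standalone form are hypothetical; the mce_rdmsr() signature, its return-value convention, and the EAX/EDX split at the guest-register boundary follow the traps.c and hvm.c hunks above.

/* Hypothetical caller sketch -- not part of the patch.  It mirrors the
 * emulate_privileged_op() rdmsr path changed above: the MSR value travels
 * as a single uint64_t and is only split into EAX/EDX at the guest
 * register boundary. */
#include <stdint.h>

/* Signature as introduced by this patch (see xen/include/asm-x86/traps.h). */
extern int mce_rdmsr(uint32_t msr, uint64_t *msr_content);

static int rdmsr_to_guest_regs(uint32_t msr, uint32_t *eax, uint32_t *edx)
{
    uint64_t val = 0;
    int rc = mce_rdmsr(msr, &val);  /* >0: handled, 0: not an MCE MSR, <0: fault */

    if ( rc <= 0 )
        return rc;

    *eax = (uint32_t)val;           /* low 32 bits -> EAX */
    *edx = (uint32_t)(val >> 32);   /* high 32 bits -> EDX */
    return 1;
}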