diff -r b54f9c9f9144 xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c       Wed Jun 23 23:24:42 2010 +0100
+++ b/xen/arch/x86/apic.c       Thu Jun 24 12:54:47 2010 +0200
@@ -332,23 +332,19 @@ void disconnect_bsp_APIC(int virt_wire_s
 void disable_local_APIC(void)
 {
-    unsigned long value;
-
     clear_local_APIC();
 
     /*
      * Disable APIC (implies clearing of registers
      * for 82489DX!).
      */
-    value = apic_read(APIC_SPIV);
-    value &= ~APIC_SPIV_APIC_ENABLED;
-    apic_write_around(APIC_SPIV, value);
+    apic_write_around(APIC_SPIV,
+                      apic_read(APIC_SPIV) & ~APIC_SPIV_APIC_ENABLED);
 
     if (enabled_via_apicbase) {
-        unsigned int l, h;
-        rdmsr(MSR_IA32_APICBASE, l, h);
-        l &= ~MSR_IA32_APICBASE_ENABLE;
-        wrmsr(MSR_IA32_APICBASE, l, h);
+        uint64_t msr_content;
+        rdmsrl(MSR_IA32_APICBASE, msr_content);
+        wrmsrl(MSR_IA32_APICBASE, msr_content & ~MSR_IA32_APICBASE_ENABLE);
     }
 }
 
@@ -708,7 +704,7 @@ int lapic_suspend(void)
 
 int lapic_resume(void)
 {
-    unsigned int l, h;
+    uint64_t msr_content;
     unsigned long flags;
     int maxlvt;
 
@@ -725,10 +721,10 @@ int lapic_resume(void)
      */
     if ( !x2apic_enabled )
     {
-        rdmsr(MSR_IA32_APICBASE, l, h);
-        l &= ~MSR_IA32_APICBASE_BASE;
-        l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-        wrmsr(MSR_IA32_APICBASE, l, h);
+        rdmsrl(MSR_IA32_APICBASE, msr_content);
+        msr_content &= ~MSR_IA32_APICBASE_BASE;
+        wrmsrl(MSR_IA32_APICBASE,
+               msr_content | MSR_IA32_APICBASE_ENABLE | mp_lapic_addr);
     }
     else
         enable_x2apic();
@@ -817,7 +813,8 @@ custom_param("apic_verbosity", apic_set_
 
 static int __init detect_init_APIC (void)
 {
-    u32 h, l, features;
+    uint64_t msr_content;
+    u32 features;
 
     /* Disabled by kernel option? */
     if (enable_local_apic < 0)
@@ -854,12 +851,14 @@ static int __init detect_init_APIC (void
          * software for Intel P6 or later and AMD K7
          * (Model > 1) or later.
          */
-        rdmsr(MSR_IA32_APICBASE, l, h);
-        if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+        rdmsrl(MSR_IA32_APICBASE, msr_content);
+        if (!(msr_content & MSR_IA32_APICBASE_ENABLE)) {
             printk("Local APIC disabled by BIOS -- reenabling.\n");
-            l &= ~MSR_IA32_APICBASE_BASE;
-            l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
-            wrmsr(MSR_IA32_APICBASE, l, h);
+            msr_content &= ~MSR_IA32_APICBASE_BASE;
+            msr_content |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
+            wrmsrl(MSR_IA32_APICBASE,
+                   msr_content | MSR_IA32_APICBASE_ENABLE
+                   | APIC_DEFAULT_PHYS_BASE);
             enabled_via_apicbase = 1;
         }
     }
@@ -877,9 +876,9 @@ static int __init detect_init_APIC (void
     mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
     /* The BIOS may have set up the APIC at some other address */
-    rdmsr(MSR_IA32_APICBASE, l, h);
-    if (l & MSR_IA32_APICBASE_ENABLE)
-        mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+    rdmsrl(MSR_IA32_APICBASE, msr_content);
+    if (msr_content & MSR_IA32_APICBASE_ENABLE)
+        mp_lapic_addr = msr_content & MSR_IA32_APICBASE_BASE;
 
     if (nmi_watchdog != NMI_NONE)
         nmi_watchdog = NMI_LOCAL_APIC;
@@ -897,7 +896,7 @@ no_apic:
 
 void enable_x2apic(void)
 {
-    u32 lo, hi;
+    uint64_t msr_content;
 
     if ( smp_processor_id() == 0 )
     {
@@ -925,11 +924,12 @@ void enable_x2apic(void)
         BUG_ON(!x2apic_enabled); /* APs only enable x2apic when BSP did so. */
     }
 
-    rdmsr(MSR_IA32_APICBASE, lo, hi);
-    if ( !(lo & MSR_IA32_APICBASE_EXTD) )
+    rdmsrl(MSR_IA32_APICBASE, msr_content);
+    if ( !(msr_content & MSR_IA32_APICBASE_EXTD) )
     {
-        lo |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD;
-        wrmsr(MSR_IA32_APICBASE, lo, 0);
+        msr_content |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD;
+        msr_content = (uint32_t)msr_content;
+        wrmsrl(MSR_IA32_APICBASE, msr_content);
         printk("x2APIC mode enabled.\n");
     }
     else
diff -r b54f9c9f9144 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Jun 23 23:24:42 2010 +0100
+++ b/xen/arch/x86/domain.c     Thu Jun 24 12:54:47 2010 +0200
@@ -1088,21 +1088,15 @@ static void load_segments(struct vcpu *n
     {
         /* This can only be non-zero if selector is NULL. */
         if ( nctxt->fs_base )
-            wrmsr(MSR_FS_BASE,
-                  nctxt->fs_base,
-                  nctxt->fs_base>>32);
+            wrmsrl(MSR_FS_BASE, nctxt->fs_base);
 
         /* Most kernels have non-zero GS base, so don't bother testing. */
         /* (This is also a serialising instruction, avoiding AMD erratum #88.) */
-        wrmsr(MSR_SHADOW_GS_BASE,
-              nctxt->gs_base_kernel,
-              nctxt->gs_base_kernel>>32);
+        wrmsrl(MSR_SHADOW_GS_BASE, nctxt->gs_base_kernel);
 
         /* This can only be non-zero if selector is NULL. */
         if ( nctxt->gs_base_user )
-            wrmsr(MSR_GS_BASE,
-                  nctxt->gs_base_user,
-                  nctxt->gs_base_user>>32);
+            wrmsrl(MSR_GS_BASE, nctxt->gs_base_user);
 
         /* If in kernel mode then switch the GS bases around. */
         if ( (n->arch.flags & TF_kernel_mode) )
diff -r b54f9c9f9144 xen/arch/x86/microcode_amd.c
--- a/xen/arch/x86/microcode_amd.c      Wed Jun 23 23:24:42 2010 +0100
+++ b/xen/arch/x86/microcode_amd.c      Thu Jun 24 12:54:47 2010 +0200
@@ -47,7 +47,6 @@ struct equiv_cpu_entry *equiv_cpu_table;
 static int collect_cpu_info(int cpu, struct cpu_signature *csig)
 {
     struct cpuinfo_x86 *c = &cpu_data[cpu];
-    uint32_t dummy;
 
     memset(csig, 0, sizeof(*csig));
 
@@ -58,7 +57,7 @@ static int collect_cpu_info(int cpu, str
         return -EINVAL;
     }
 
-    rdmsr(MSR_AMD_PATCHLEVEL, csig->rev, dummy);
+    rdmsrl(MSR_AMD_PATCHLEVEL, csig->rev);
 
     printk(KERN_INFO "microcode: collect_cpu_info: patch_id=0x%x\n",
            csig->rev);
@@ -126,7 +125,7 @@ static int apply_microcode(int cpu)
 {
     unsigned long flags;
     struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
-    uint32_t rev, dummy;
+    uint32_t rev;
     struct microcode_amd *mc_amd = uci->mc.mc_amd;
 
     /* We should bind the task to the CPU */
@@ -140,7 +139,7 @@ static int apply_microcode(int cpu)
     wrmsrl(MSR_AMD_PATCHLOADER, (unsigned long)&mc_amd->hdr.data_code);
 
     /* get patch id after patching */
-    rdmsr(MSR_AMD_PATCHLEVEL, rev, dummy);
+    rdmsrl(MSR_AMD_PATCHLEVEL, rev);
 
     spin_unlock_irqrestore(&microcode_update_lock, flags);
diff -r b54f9c9f9144 xen/arch/x86/microcode_intel.c
--- a/xen/arch/x86/microcode_intel.c    Wed Jun 23 23:24:42 2010 +0100
+++ b/xen/arch/x86/microcode_intel.c    Thu Jun 24 12:54:47 2010 +0200
@@ -62,7 +62,7 @@ static DEFINE_SPINLOCK(microcode_update_
 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 {
     struct cpuinfo_x86 *c = &cpu_data[cpu_num];
-    unsigned int val[2];
+    uint64_t msr_content;
 
     BUG_ON(cpu_num != smp_processor_id());
 
@@ -81,15 +81,16 @@ static int collect_cpu_info(int cpu_num,
     if ( (c->x86_model >= 5) || (c->x86 > 6) )
     {
         /* get processor flags from MSR 0x17 */
-        rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
-        csig->pf = 1 << ((val[1] >> 18) & 7);
+        rdmsrl(MSR_IA32_PLATFORM_ID, msr_content);
+        csig->pf = 1 << ((msr_content >> 50) & 7);
     }
 
-    wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+    wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
     /* see notes above for revision 1.07.  Apparent chip bug */
     sync_core();
     /* get the current revision from MSR 0x8B */
-    rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
+    rdmsrl(MSR_IA32_UCODE_REV, msr_content);
+    csig->rev = (uint32_t)(msr_content >> 32);
 
     pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
              csig->sig, csig->pf, csig->rev);
@@ -249,6 +250,7 @@ static int get_matching_microcode(void *
 static int apply_microcode(int cpu)
 {
     unsigned long flags;
+    uint64_t msr_content;
     unsigned int val[2];
     int cpu_num = raw_smp_processor_id();
     struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu_num);
@@ -263,16 +265,15 @@ static int apply_microcode(int cpu)
     spin_lock_irqsave(&microcode_update_lock, flags);
 
     /* write microcode via MSR 0x79 */
-    wrmsr(MSR_IA32_UCODE_WRITE,
-          (unsigned long) uci->mc.mc_intel->bits,
-          (unsigned long) uci->mc.mc_intel->bits >> 16 >> 16);
-    wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+    wrmsrl(MSR_IA32_UCODE_WRITE, (uint64_t)uci->mc.mc_intel->bits);
+    wrmsrl(MSR_IA32_UCODE_REV, 0x0ULL);
     /* see notes above for revision 1.07.  Apparent chip bug */
     sync_core();
     /* get the current revision from MSR 0x8B */
-    rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+    rdmsrl(MSR_IA32_UCODE_REV, msr_content);
+    val[1] = (uint32_t)(msr_content >> 32);
 
     spin_unlock_irqrestore(&microcode_update_lock, flags);
 
     if ( val[1] != uci->mc.mc_intel->hdr.rev )
diff -r b54f9c9f9144 xen/arch/x86/nmi.c
--- a/xen/arch/x86/nmi.c        Wed Jun 23 23:24:42 2010 +0100
+++ b/xen/arch/x86/nmi.c        Thu Jun 24 12:54:47 2010 +0200
@@ -276,9 +276,9 @@ static void __pminit setup_p6_watchdog(u
 
 static int __pminit setup_p4_watchdog(void)
 {
-    unsigned int misc_enable, dummy;
+    uint64_t misc_enable;
 
-    rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
+    rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
     if (!(misc_enable & MSR_IA32_MISC_ENABLE_PERF_AVAIL))
         return 0;
 
@@ -304,11 +304,11 @@ static int __pminit setup_p4_watchdog(vo
     clear_msr_range(MSR_P4_BPU_CCCR0, 18);
     clear_msr_range(MSR_P4_BPU_PERFCTR0, 18);
 
-    wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
-    wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
+    wrmsrl(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0);
+    wrmsrl(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE);
     write_watchdog_counter("P4_IQ_COUNTER0");
     apic_write(APIC_LVTPC, APIC_DM_NMI);
-    wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
+    wrmsrl(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val);
     return 1;
 }
 
@@ -442,7 +442,7 @@ void nmi_watchdog_tick(struct cpu_user_r
          * - LVTPC is masked on interrupt and must be
          * unmasked by the LVTPC handler.
          */
-        wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
+        wrmsrl(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val);
         apic_write(APIC_LVTPC, APIC_DM_NMI);
     }
     else if ( nmi_perfctr_msr == MSR_P6_PERFCTR0 )
diff -r b54f9c9f9144 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c   Wed Jun 23 23:24:42 2010 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c   Thu Jun 24 12:54:47 2010 +0200
@@ -104,15 +104,11 @@ static void nmi_cpu_save_registers(struc
     unsigned int i;
 
     for (i = 0; i < nr_ctrs; ++i) {
-        rdmsr(counters[i].addr,
-              counters[i].saved.low,
-              counters[i].saved.high);
+        rdmsrl(counters[i].addr, counters[i].value);
     }
 
     for (i = 0; i < nr_ctrls; ++i) {
-        rdmsr(controls[i].addr,
-              controls[i].saved.low,
-              controls[i].saved.high);
+        rdmsrl(controls[i].addr, controls[i].value);
     }
 }
 
@@ -222,15 +218,11 @@ static void nmi_restore_registers(struct
     unsigned int i;
 
     for (i = 0; i < nr_ctrls; ++i) {
-        wrmsr(controls[i].addr,
-              controls[i].saved.low,
-              controls[i].saved.high);
+        wrmsrl(controls[i].addr, controls[i].value);
     }
 
     for (i = 0; i < nr_ctrs; ++i) {
-        wrmsr(counters[i].addr,
-              counters[i].saved.low,
-              counters[i].saved.high);
+        wrmsrl(counters[i].addr, counters[i].value);
     }
 }
diff -r b54f9c9f9144 xen/arch/x86/oprofile/op_x86_model.h
--- a/xen/arch/x86/oprofile/op_x86_model.h      Wed Jun 23 23:24:42 2010 +0100
+++ b/xen/arch/x86/oprofile/op_x86_model.h      Thu Jun 24 12:54:47 2010 +0200
@@ -11,14 +11,9 @@
 #ifndef OP_X86_MODEL_H
 #define OP_X86_MODEL_H
 
-struct op_saved_msr {
-    unsigned int high;
-    unsigned int low;
-};
-
 struct op_msr {
     unsigned long addr;
-    struct op_saved_msr saved;
+    uint64_t value;
 };
 
 struct op_msrs {
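For readers unfamiliar with the two accessor styles being converted: rdmsr/wrmsr take the low and high 32-bit halves of the MSR value as separate arguments (EAX and EDX), while rdmsrl/wrmsrl work on a single 64-bit value. The sketch below is only an illustration of that pattern, not the actual Xen macros from include/asm-x86/msr.h; the sketch_* names are made up for this example.

/* Illustration only: hand-rolled 64-bit MSR accessors.
 * RDMSR/WRMSR take the MSR index in ECX and the value in EDX:EAX. */
static inline uint64_t sketch_rdmsrl(unsigned int msr)
{
    uint32_t lo, hi;

    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return ((uint64_t)hi << 32) | lo;
}

static inline void sketch_wrmsrl(unsigned int msr, uint64_t val)
{
    asm volatile ( "wrmsr" : : "c" (msr),
                   "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
}

With the combined 64-bit form, fields that previously lived in the "high" output move to bits 32-63 of the single value, which is why the platform-ID extraction above changes from (val[1] >> 18) to (msr_content >> 50) and the microcode revision reads become (msr_content >> 32).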