# HG changeset patch # User cegger # Date 1276269584 -7200 NetBSD development diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/acpi/cpufreq/cpufreq.c --- a/xen/arch/x86/acpi/cpufreq/cpufreq.c +++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c @@ -137,13 +137,12 @@ struct drv_cmd { static void do_drv_read(void *drvcmd) { struct drv_cmd *cmd; - u32 h; cmd = (struct drv_cmd *)drvcmd; switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: - rdmsr(cmd->addr.msr.reg, cmd->val, h); + cmd->val = rdmsr(cmd->addr.msr.reg); break; case SYSTEM_IO_CAPABLE: acpi_os_read_port((acpi_io_address)cmd->addr.io.port, @@ -157,15 +156,16 @@ static void do_drv_read(void *drvcmd) static void do_drv_write(void *drvcmd) { struct drv_cmd *cmd; - u32 lo, hi; + uint64_t msr_content; cmd = (struct drv_cmd *)drvcmd; switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: - rdmsr(cmd->addr.msr.reg, lo, hi); - lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); - wrmsr(cmd->addr.msr.reg, lo, hi); + msr_content = rdmsr(cmd->addr.msr.reg); + msr_content = (msr_content & ~INTEL_MSR_RANGE) + | (cmd->val & INTEL_MSR_RANGE); + wrmsr(cmd->addr.msr.reg, msr_content); break; case SYSTEM_IO_CAPABLE: acpi_os_write_port((acpi_io_address)cmd->addr.io.port, @@ -252,8 +252,8 @@ static void read_measured_perf_ctrs(void { struct perf_pair *readin = _readin; - rdmsr(MSR_IA32_APERF, readin->aperf.split.lo, readin->aperf.split.hi); - rdmsr(MSR_IA32_MPERF, readin->mperf.split.lo, readin->mperf.split.hi); + readin->aperf.whole = rdmsr(MSR_IA32_APERF); + readin->mperf.whole = rdmsr(MSR_IA32_MPERF); } /* diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/acpi/cpufreq/powernow.c --- a/xen/arch/x86/acpi/cpufreq/powernow.c +++ b/xen/arch/x86/acpi/cpufreq/powernow.c @@ -77,15 +77,15 @@ static void transition_pstate(void *drvc cmd = (struct drv_cmd *) drvcmd; if (cmd->turbo != CPUFREQ_TURBO_UNSUPPORTED) { - u32 lo, hi; - rdmsr(MSR_K8_HWCR, lo, hi); + uint64_t msr_content; + msr_content = rdmsr(MSR_K8_HWCR); if (cmd->turbo == CPUFREQ_TURBO_ENABLED) - lo &= ~MSR_HWCR_CPBDIS_MASK; + msr_content &= ~MSR_HWCR_CPBDIS_MASK; else - lo |= MSR_HWCR_CPBDIS_MASK; - wrmsr(MSR_K8_HWCR, lo, hi); + msr_content |= MSR_HWCR_CPBDIS_MASK; + wrmsr(MSR_K8_HWCR, msr_content); } - wrmsr(MSR_PSTATE_CTRL, cmd->val, 0); + wrmsr(MSR_PSTATE_CTRL, cmd->val); } static int powernow_cpufreq_target(struct cpufreq_policy *policy, @@ -194,7 +194,8 @@ static int powernow_cpufreq_cpu_init(str struct powernow_cpufreq_data *data; unsigned int result = 0; struct processor_performance *perf; - u32 max_hw_pstate, hi = 0, lo = 0; + u32 max_hw_pstate, hi; + uint64_t msr_content; struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; data = xmalloc(struct powernow_cpufreq_data); @@ -226,7 +227,8 @@ static int powernow_cpufreq_cpu_init(str result = -ENODEV; goto err_unreg; } - rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); + msr_content = rdmsr(MSR_PSTATE_CUR_LIMIT); + hi = (uint32_t)(msr_content >> 32); max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; if (perf->control_register.space_id != perf->status_register.space_id) { diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/acpi/suspend.c --- a/xen/arch/x86/acpi/suspend.c +++ b/xen/arch/x86/acpi/suspend.c @@ -25,12 +25,12 @@ void save_rest_processor_state(void) unlazy_fpu(current); #if defined(CONFIG_X86_64) - rdmsrl(MSR_CSTAR, saved_cstar); - rdmsrl(MSR_LSTAR, saved_lstar); + saved_cstar = rdmsr(MSR_CSTAR); + saved_lstar = rdmsr(MSR_LSTAR); if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) { - rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp); - 
rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip); + saved_sysenter_esp = rdmsr(MSR_IA32_SYSENTER_ESP); + saved_sysenter_eip = rdmsr(MSR_IA32_SYSENTER_EIP); } #endif } @@ -43,24 +43,23 @@ void restore_rest_processor_state(void) #if defined(CONFIG_X86_64) /* Recover syscall MSRs */ - wrmsrl(MSR_LSTAR, saved_lstar); - wrmsrl(MSR_CSTAR, saved_cstar); - wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS); + wrmsr(MSR_LSTAR, saved_lstar); + wrmsr(MSR_CSTAR, saved_cstar); + wrmsr(MSR_STAR, ( (uint64_t)((FLAT_RING3_CS32<<16) | __HYPERVISOR_CS) << 32) ); wrmsr(MSR_SYSCALL_MASK, X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT| - X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF, - 0U); + X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF); if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) { /* Recover sysenter MSRs */ - wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp); - wrmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip); - wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0); + wrmsr(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp); + wrmsr(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip); + wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS); } #else /* !defined(CONFIG_X86_64) */ if ( supervisor_mode_kernel && cpu_has_sep ) - wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0); + wrmsr(MSR_IA32_SYSENTER_ESP, this_cpu(init_tss).esp1); #endif /* Maybe load the debug registers. */ @@ -79,7 +78,7 @@ void restore_rest_processor_state(void) stts(); if (cpu_has_pat) - wrmsrl(MSR_IA32_CR_PAT, host_pat); + wrmsr(MSR_IA32_CR_PAT, host_pat); mtrr_bp_restore(); } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/apic.c --- a/xen/arch/x86/apic.c +++ b/xen/arch/x86/apic.c @@ -345,10 +345,10 @@ void disable_local_APIC(void) apic_write_around(APIC_SPIV, value); if (enabled_via_apicbase) { - unsigned int l, h; - rdmsr(MSR_IA32_APICBASE, l, h); - l &= ~MSR_IA32_APICBASE_ENABLE; - wrmsr(MSR_IA32_APICBASE, l, h); + uint64_t msr_content; + msr_content = rdmsr(MSR_IA32_APICBASE); + msr_content &= ~MSR_IA32_APICBASE_ENABLE; + wrmsr(MSR_IA32_APICBASE, msr_content); } } @@ -708,7 +708,7 @@ int lapic_suspend(void) int lapic_resume(void) { - unsigned int l, h; + uint64_t msr_content; unsigned long flags; int maxlvt; @@ -725,10 +725,10 @@ int lapic_resume(void) */ if ( !x2apic_enabled ) { - rdmsr(MSR_IA32_APICBASE, l, h); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; - wrmsr(MSR_IA32_APICBASE, l, h); + msr_content = rdmsr(MSR_IA32_APICBASE); + msr_content &= ~MSR_IA32_APICBASE_BASE; + msr_content |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; + wrmsr(MSR_IA32_APICBASE, msr_content); } else enable_x2apic(); @@ -817,7 +817,8 @@ custom_param("apic_verbosity", apic_set_ static int __init detect_init_APIC (void) { - u32 h, l, features; + uint64_t msr_content; + u32 features; /* Disabled by kernel option? */ if (enable_local_apic < 0) @@ -854,12 +855,12 @@ static int __init detect_init_APIC (void * software for Intel P6 or later and AMD K7 * (Model > 1) or later. 
*/ - rdmsr(MSR_IA32_APICBASE, l, h); - if (!(l & MSR_IA32_APICBASE_ENABLE)) { + msr_content = rdmsr(MSR_IA32_APICBASE); + if (!(msr_content & MSR_IA32_APICBASE_ENABLE)) { printk("Local APIC disabled by BIOS -- reenabling.\n"); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; - wrmsr(MSR_IA32_APICBASE, l, h); + msr_content &= ~MSR_IA32_APICBASE_BASE; + msr_content |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; + wrmsr(MSR_IA32_APICBASE, msr_content); enabled_via_apicbase = 1; } } @@ -877,9 +878,9 @@ static int __init detect_init_APIC (void mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; /* The BIOS may have set up the APIC at some other address */ - rdmsr(MSR_IA32_APICBASE, l, h); - if (l & MSR_IA32_APICBASE_ENABLE) - mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; + msr_content = rdmsr(MSR_IA32_APICBASE); + if (msr_content & MSR_IA32_APICBASE_ENABLE) + mp_lapic_addr = msr_content & MSR_IA32_APICBASE_BASE; if (nmi_watchdog != NMI_NONE) nmi_watchdog = NMI_LOCAL_APIC; @@ -897,7 +898,7 @@ no_apic: void enable_x2apic(void) { - u32 lo, hi; + uint64_t msr_content; if ( smp_processor_id() == 0 ) { @@ -925,11 +926,12 @@ void enable_x2apic(void) BUG_ON(!x2apic_enabled); /* APs only enable x2apic when BSP did so. */ } - rdmsr(MSR_IA32_APICBASE, lo, hi); - if ( !(lo & MSR_IA32_APICBASE_EXTD) ) + msr_content = rdmsr(MSR_IA32_APICBASE); + if ( !(msr_content & MSR_IA32_APICBASE_EXTD) ) { - lo |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD; - wrmsr(MSR_IA32_APICBASE, lo, 0); + msr_content |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD; + msr_content &= (uint32_t)msr_content; + wrmsr(MSR_IA32_APICBASE, msr_content); printk("x2APIC mode enabled.\n"); } else diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/amd.c --- a/xen/arch/x86/cpu/amd.c +++ b/xen/arch/x86/cpu/amd.c @@ -61,6 +61,7 @@ static inline void wrmsr_amd(unsigned in */ static void __devinit set_cpuidmask(struct cpuinfo_x86 *c) { + uint64_t msr_content; static unsigned int feat_ecx, feat_edx; static unsigned int extfeat_ecx, extfeat_edx; static enum { not_parsed, no_mask, set_mask } status; @@ -146,8 +147,10 @@ static void __devinit set_cpuidmask(stru /* FIXME check if processor supports CPUID masking */ /* AMD processors prior to family 10h required a 32-bit password */ if (c->x86 >= 0x10) { - wrmsr(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx); - wrmsr(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx); + msr_content = (uint64_t)(feat_ecx) << 32 | feat_edx; + wrmsr(MSR_K8_FEATURE_MASK, msr_content); + msr_content = (uint64_t)(extfeat_ecx) << 32 | extfeat_edx; + wrmsr(MSR_K8_EXT_FEATURE_MASK, msr_content); } else if (c->x86 == 0x0f) { wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx); wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx); @@ -258,13 +261,12 @@ static void check_disable_c1e(unsigned i static void __devinit init_amd(struct cpuinfo_x86 *c) { - u32 l, h; + uint32_t l, h; + uint64_t msr_content; int mbytes = num_physpages >> (20-PAGE_SHIFT); int r; #ifdef CONFIG_SMP - unsigned long long value; - /* Disable TLB flush filter by setting HWCR.FFDIS on K8 * bit 6 of msr C001_0015 * @@ -272,9 +274,9 @@ static void __devinit init_amd(struct cp * Errata 122 for all steppings (F+ have it disabled by default) */ if (c->x86 == 15) { - rdmsrl(MSR_K7_HWCR, value); - value |= 1 << 6; - wrmsrl(MSR_K7_HWCR, value); + msr_content = rdmsr(MSR_K7_HWCR); + msr_content |= 1 << 6; + wrmsr(MSR_K7_HWCR, msr_content); } #endif @@ -353,13 +355,15 @@ static void __devinit init_amd(struct cp if(mbytes>508) 
mbytes=508; - rdmsr(MSR_K6_WHCR, l, h); - if ((l&0x0000FFFF)==0) { + msr_content = rdmsr(MSR_K6_WHCR); + if ((msr_content & 0x0000FFFF) == 0) { unsigned long flags; - l=(1<<0)|((mbytes/4)<<1); + l = (1 <<0) | ((mbytes/4) << 1); + h = (uint32_t)(msr_content >> 32); local_irq_save(flags); wbinvd(); - wrmsr(MSR_K6_WHCR, l, h); + msr_content = (uint64_t)(h) << 32 | l; + wrmsr(MSR_K6_WHCR, msr_content); local_irq_restore(flags); printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n", mbytes); @@ -374,13 +378,15 @@ static void __devinit init_amd(struct cp if(mbytes>4092) mbytes=4092; - rdmsr(MSR_K6_WHCR, l, h); - if ((l&0xFFFF0000)==0) { + msr_content = rdmsr(MSR_K6_WHCR); + if ((msr_content & 0xFFFF0000) == 0) { unsigned long flags; l=((mbytes>>2)<<22)|(1<<16); + h = (uint32_t)(msr_content >> 32); local_irq_save(flags); wbinvd(); - wrmsr(MSR_K6_WHCR, l, h); + msr_content = (uint64_t)(h) << 32 | l; + wrmsr(MSR_K6_WHCR, msr_content); local_irq_restore(flags); printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n", mbytes); @@ -408,9 +414,9 @@ static void __devinit init_amd(struct cp if (c->x86_model >= 6 && c->x86_model <= 10) { if (!cpu_has(c, X86_FEATURE_XMM)) { printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); - rdmsr(MSR_K7_HWCR, l, h); - l &= ~0x00008000; - wrmsr(MSR_K7_HWCR, l, h); + msr_content = rdmsr(MSR_K7_HWCR); + msr_content &= ~0x00008000; + wrmsr(MSR_K7_HWCR, msr_content); set_bit(X86_FEATURE_XMM, c->x86_capability); } } @@ -420,11 +426,14 @@ static void __devinit init_amd(struct cp * As per AMD technical note 27212 0.2 */ if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) { - rdmsr(MSR_K7_CLK_CTL, l, h); - if ((l & 0xfff00000) != 0x20000000) { - printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, - ((l & 0x000fffff)|0x20000000)); - wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); + msr_content = rdmsr(MSR_K7_CLK_CTL); + if ((msr_content & 0xfff00000) != 0x20000000) { + printk ("CPU: CLK_CTL MSR was %"PRIx64 + ". Reprogramming to %"PRIx64 + "\n", msr_content, + (msr_content & 0x000fffff)|0x20000000); + msr_content = (msr_content & 0x000fffff) | 0x20000000; + wrmsr(MSR_K7_CLK_CTL, msr_content); } } break; @@ -445,17 +454,18 @@ static void __devinit init_amd(struct cp } if (c->x86 == 15) { - rdmsr(MSR_K7_HWCR, l, h); + msr_content = rdmsr(MSR_K7_HWCR); printk(KERN_INFO "CPU%d: AMD Flush Filter %sabled", - smp_processor_id(), (l & (1<<6)) ? "dis" : "en"); - if ((flush_filter_force > 0) && (l & (1<<6))) { - l &= ~(1<<6); + smp_processor_id(), + (msr_content & (1<<6)) ? 
"dis" : "en"); + if ((flush_filter_force > 0) && (msr_content & (1<<6))) { + msr_content &= ~(1<<6); printk(" -> Forcibly enabled"); - } else if ((flush_filter_force < 0) && !(l & (1<<6))) { - l |= 1<<6; + } else if ((flush_filter_force < 0) && !(msr_content & (1<<6))) { + msr_content |= 1<<6; printk(" -> Forcibly disabled"); } - wrmsr(MSR_K7_HWCR, l, h); + wrmsr(MSR_K7_HWCR, msr_content); printk("\n"); } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/centaur.c --- a/xen/arch/x86/cpu/centaur.c +++ b/xen/arch/x86/cpu/centaur.c @@ -17,7 +17,7 @@ static void __init init_c3(struct cpuinfo_x86 *c) { - u32 lo, hi; + uint32_t msr_content; /* Test for Centaur Extended Feature Flags presence */ if (cpuid_eax(0xC0000000) >= 0xC0000001) { @@ -25,17 +25,17 @@ static void __init init_c3(struct cpuinf /* enable ACE unit, if present and disabled */ if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) { - rdmsr (MSR_VIA_FCR, lo, hi); - lo |= ACE_FCR; /* enable ACE unit */ - wrmsr (MSR_VIA_FCR, lo, hi); + msr_content = rdmsr(MSR_VIA_FCR); + msr_content |= ACE_FCR; /* enable ACE unit */ + wrmsr(MSR_VIA_FCR, msr_content); printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n"); } /* enable RNG unit, if present and disabled */ if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) { - rdmsr (MSR_VIA_RNG, lo, hi); - lo |= RNG_ENABLE; /* enable RNG unit */ - wrmsr (MSR_VIA_RNG, lo, hi); + msr_content = rdmsr (MSR_VIA_RNG); + msr_content |= RNG_ENABLE; /* enable RNG unit */ + wrmsr(MSR_VIA_RNG, msr_content); printk(KERN_INFO "CPU: Enabled h/w RNG\n"); } @@ -47,9 +47,9 @@ static void __init init_c3(struct cpuinf /* Cyrix III family needs CX8 & PGE explicity enabled. */ if (c->x86_model >=6 && c->x86_model <= 9) { - rdmsr (MSR_VIA_FCR, lo, hi); - lo |= (1<<1 | 1<<7); - wrmsr (MSR_VIA_FCR, lo, hi); + msr_content = rdmsr (MSR_VIA_FCR); + msr_content |= (1<<1 | 1<<7); + wrmsr(MSR_VIA_FCR, msr_content); set_bit(X86_FEATURE_CX8, c->x86_capability); } @@ -71,7 +71,8 @@ static void __init init_centaur(struct c init_c3(c); } -static unsigned int centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size) +static unsigned int +centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size) { /* VIA C3 CPUs (670-68F) need further shifting. */ if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8))) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/common.c --- a/xen/arch/x86/cpu/common.c +++ b/xen/arch/x86/cpu/common.c @@ -315,10 +315,10 @@ static void __cpuinit squash_the_stupid_ { if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { /* Disable processor serial number */ - unsigned long lo,hi; - rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi); - lo |= 0x200000; - wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi); + uint64_t msr_content; + msr_content = rdmsr(MSR_IA32_BBL_CR_CTL); + msr_content |= 0x200000; + wrmsr(MSR_IA32_BBL_CR_CTL,msr_content); printk(KERN_NOTICE "CPU serial number disabled.\n"); clear_bit(X86_FEATURE_PN, c->x86_capability); @@ -595,7 +595,7 @@ void __cpuinit cpu_init(void) printk("Initializing CPU#%d\n", cpu); if (cpu_has_pat) - wrmsrl(MSR_IA32_CR_PAT, host_pat); + wrmsr(MSR_IA32_CR_PAT, host_pat); /* Install correct page table. */ write_ptbase(current); @@ -614,7 +614,7 @@ void __cpuinit cpu_init(void) t->ss0 = __HYPERVISOR_DS; t->esp0 = get_stack_bottom(); if ( supervisor_mode_kernel && cpu_has_sep ) - wrmsr(MSR_IA32_SYSENTER_ESP, &t->esp1, 0); + wrmsr(MSR_IA32_SYSENTER_ESP, t->esp1); #elif defined(CONFIG_X86_64) /* Bottom-of-stack must be 16-byte aligned! 
*/ BUG_ON((get_stack_bottom() & 15) != 0); diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/intel.c --- a/xen/arch/x86/cpu/intel.c +++ b/xen/arch/x86/cpu/intel.c @@ -46,6 +46,7 @@ struct movsl_mask movsl_mask __read_most static void __devinit set_cpuidmask(struct cpuinfo_x86 *c) { unsigned int model = c->x86_model; + uint64_t msr_content; if (!(opt_cpuid_mask_ecx | opt_cpuid_mask_edx | opt_cpuid_mask_ext_ecx | opt_cpuid_mask_ext_edx)) @@ -54,10 +55,10 @@ static void __devinit set_cpuidmask(stru if (c->x86 != 0x6) /* Only family 6 supports this feature */ return; + msr_content = (opt_cpuid_mask_ecx ? : ~0u) + | (uint64_t)(opt_cpuid_mask_edx ? : ~0u) << 32; if ((model == 0x1d) || ((model == 0x17) && (c->x86_mask >= 4))) { - wrmsr(MSR_IA32_CPUID_FEATURE_MASK1, - opt_cpuid_mask_ecx ? : ~0u, - opt_cpuid_mask_edx ? : ~0u); + wrmsr(MSR_IA32_CPUID_FEATURE_MASK1, msr_content); } /* * CPU supports this feature if the processor signature meets the following: @@ -71,12 +72,8 @@ static void __devinit set_cpuidmask(stru || model == 0x2c || model == 0x2e || model == 0x2f) { - wrmsr(MSR_IA32_CPUID1_FEATURE_MASK, - opt_cpuid_mask_ecx ? : ~0u, - opt_cpuid_mask_edx ? : ~0u); - wrmsr(MSR_IA32_CPUID80000001_FEATURE_MASK, - opt_cpuid_mask_ext_ecx ? : ~0u, - opt_cpuid_mask_ext_edx ? : ~0u); + wrmsr(MSR_IA32_CPUID1_FEATURE_MASK, msr_content); + wrmsr(MSR_IA32_CPUID80000001_FEATURE_MASK, msr_content); } else { printk(XENLOG_ERR "Cannot set CPU feature mask on CPU#%d\n", @@ -100,15 +97,15 @@ void __devinit early_intel_workaround(st */ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) { - unsigned long lo, hi; + uint64_t msr_content; if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { - rdmsr (MSR_IA32_MISC_ENABLE, lo, hi); - if ((lo & (1<<9)) == 0) { + msr_content = rdmsr(MSR_IA32_MISC_ENABLE); + if ((msr_content & (1<<9)) == 0) { printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); - lo |= (1<<9); /* Disable hw prefetching */ - wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); + msr_content |= (1<<9); /* Disable hw prefetching */ + wrmsr (MSR_IA32_MISC_ENABLE, msr_content); } } } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mcheck/amd_k8.c --- a/xen/arch/x86/cpu/mcheck/amd_k8.c +++ b/xen/arch/x86/cpu/mcheck/amd_k8.c @@ -90,8 +90,8 @@ enum mcheck_type amd_k8_mcheck_init(stru mcequirk_amd_apply(quirkflag); } else { /* Enable error reporting of all errors */ - wrmsrl(MSR_IA32_MCx_CTL(i), 0xffffffffffffffffULL); - wrmsrl(MSR_IA32_MCx_STATUS(i), 0x0ULL); + wrmsr(MSR_IA32_MCx_CTL(i), 0xffffffffffffffffULL); + wrmsr(MSR_IA32_MCx_STATUS(i), 0x0ULL); break; } } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mcheck/amd_nonfatal.c --- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c +++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c @@ -212,7 +212,7 @@ void amd_nonfatal_mcheck_init(struct cpu /* hw threshold registers present */ hw_threshold = 1; - rdmsrl(MSR_IA32_MC4_MISC, value); + value = rdmsr(MSR_IA32_MC4_MISC); if (value & (1ULL << 61)) { /* Locked bit */ /* Locked by BIOS. 
Not available for use */ @@ -233,7 +233,7 @@ void amd_nonfatal_mcheck_init(struct cpu value &= ~(0x60FFF00000000ULL); /* Counter enable */ value |= (1ULL << 51); - wrmsrl(MSR_IA32_MC4_MISC, value); + wrmsr(MSR_IA32_MC4_MISC, value); /* serialize */ wmb(); printk(XENLOG_INFO "MCA: Use hw thresholding to adjust polling frequency\n"); diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mcheck/k7.c --- a/xen/arch/x86/cpu/mcheck/k7.c +++ b/xen/arch/x86/cpu/mcheck/k7.c @@ -20,37 +20,37 @@ static fastcall void k7_machine_check(struct cpu_user_regs * regs, long error_code) { int recover=1; - u32 alow, ahigh, high, low; - u32 mcgstl, mcgsth; + uint64_t msr_content; + uint64_t mcgst; int i; - rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth); - if (mcgstl & (1<<0)) /* Recoverable ? */ + mcgst = mca_rdmsr(MSR_IA32_MCG_STATUS); + if (mcgst & (1ULL<<0)) /* Recoverable ? */ recover=0; - printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", - smp_processor_id(), mcgsth, mcgstl); + printk(KERN_EMERG "CPU %d: Machine Check Exception: 0x%16"PRIx64"\n", + smp_processor_id(), mcgst); for (i=1; imc_nmsrvals = __MC_NMSRS; xcp->mc_msrvalues[0].reg = MSR_IA32_MCG_CAP; - rdmsrl(MSR_IA32_MCG_CAP, xcp->mc_msrvalues[0].value); + xcp->mc_msrvalues[0].value = rdmsr(MSR_IA32_MCG_CAP); if (c->cpuid_level >= 1) { cpuid(1, &junk, &ebx, &junk, &junk); @@ -1148,11 +1146,11 @@ static uint64_t x86_mc_hwcr_wren(void) { uint64_t old; - rdmsrl(MSR_K8_HWCR, old); + old = rdmsr(MSR_K8_HWCR); if (!(old & K8_HWCR_MCi_STATUS_WREN)) { uint64_t new = old | K8_HWCR_MCi_STATUS_WREN; - wrmsrl(MSR_K8_HWCR, new); + wrmsr(MSR_K8_HWCR, new); } return old; @@ -1161,7 +1159,7 @@ static uint64_t x86_mc_hwcr_wren(void) static void x86_mc_hwcr_wren_restore(uint64_t hwcr) { if (!(hwcr & K8_HWCR_MCi_STATUS_WREN)) - wrmsrl(MSR_K8_HWCR, hwcr); + wrmsr(MSR_K8_HWCR, hwcr); } static void x86_mc_msrinject(void *data) @@ -1192,7 +1190,7 @@ static void x86_mc_msrinject(void *data) if (intpose) intpose_add(mci->mcinj_cpunr, msr->reg, msr->value); else - wrmsrl(msr->reg, msr->value); + wrmsr(msr->reg, msr->value); } if (mci->mcinj_flags & _MC_MSRINJ_F_REQ_HWCR_WREN) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mcheck/mce.h --- a/xen/arch/x86/cpu/mcheck/mce.h +++ b/xen/arch/x86/cpu/mcheck/mce.h @@ -87,14 +87,14 @@ static inline uint64_t mca_rdmsr(unsigne { uint64_t val; if (intpose_lookup(smp_processor_id(), msr, &val) == NULL) - rdmsrl(msr, val); + return rdmsr(msr); return val; } /* Write an MSR, invalidating any interposed value */ #define mca_wrmsr(msr, val) do { \ intpose_inval(smp_processor_id(), msr); \ - wrmsrl(msr, val); \ + wrmsr(msr, val); \ } while (0) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mcheck/mce_amd_quirks.c --- a/xen/arch/x86/cpu/mcheck/mce_amd_quirks.c +++ b/xen/arch/x86/cpu/mcheck/mce_amd_quirks.c @@ -17,8 +17,9 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include -#include +#include +#include +#include #include "mce_quirks.h" @@ -64,8 +65,8 @@ int mcequirk_amd_apply(enum mcequirk_amd * TBL walk error reporting, which trips off incorrectly * with AGP GART & 3ware & Cerberus. 
*/ - wrmsrl(MSR_IA32_MC4_CTL, ~(1ULL << 10)); - wrmsrl(MSR_IA32_MC4_STATUS, 0ULL); + wrmsr(MSR_IA32_MC4_CTL, ~(1ULL << 10)); + wrmsr(MSR_IA32_MC4_STATUS, 0ULL); break; } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mcheck/mce_intel.c --- a/xen/arch/x86/cpu/mcheck/mce_intel.c +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c @@ -38,7 +38,7 @@ static void unexpected_thermal_interrupt /* P4/Xeon Thermal transition interrupt handler */ static void intel_thermal_interrupt(struct cpu_user_regs *regs) { - u32 l, h; + uint64_t msr_content; unsigned int cpu = smp_processor_id(); static s_time_t next[NR_CPUS]; @@ -47,8 +47,8 @@ static void intel_thermal_interrupt(stru return; next[cpu] = NOW() + MILLISECS(5000); - rdmsr(MSR_IA32_THERM_STATUS, l, h); - if (l & 0x1) { + msr_content = rdmsr(MSR_IA32_THERM_STATUS); + if (msr_content & 0x1) { printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu); printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n", cpu); @@ -74,7 +74,8 @@ fastcall void smp_thermal_interrupt(stru /* P4/Xeon Thermal regulation detect and init */ static void intel_init_thermal(struct cpuinfo_x86 *c) { - u32 l, h; + uint64_t msr_content; + uint32_t val; int tm2 = 0; unsigned int cpu = smp_processor_id(); @@ -90,39 +91,39 @@ static void intel_init_thermal(struct cp * be some SMM goo which handles it, so we can't even put a handler * since it might be delivered via SMI already -zwanem. */ - rdmsr (MSR_IA32_MISC_ENABLE, l, h); - h = apic_read(APIC_LVTTHMR); - if ((l & (1<<3)) && (h & APIC_DM_SMI)) { + msr_content = rdmsr (MSR_IA32_MISC_ENABLE); + val = apic_read(APIC_LVTTHMR); + if ((msr_content & (1ULL<<3)) && (val & APIC_DM_SMI)) { printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",cpu); return; /* -EBUSY */ } - if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) + if (cpu_has(c, X86_FEATURE_TM2) && (msr_content & (1ULL << 13))) tm2 = 1; /* check whether a vector already exists, temporarily masked? */ - if (h & APIC_VECTOR_MASK) { + if (val & APIC_VECTOR_MASK) { printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already installed\n", - cpu, (h & APIC_VECTOR_MASK)); + cpu, (val & APIC_VECTOR_MASK)); return; /* -EBUSY */ } /* The temperature transition interrupt handler setup */ - h = THERMAL_APIC_VECTOR; /* our delivery vector */ - h |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */ - apic_write_around(APIC_LVTTHMR, h); + val = THERMAL_APIC_VECTOR; /* our delivery vector */ + val |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */ + apic_write_around(APIC_LVTTHMR, val); - rdmsr (MSR_IA32_THERM_INTERRUPT, l, h); - wrmsr (MSR_IA32_THERM_INTERRUPT, l | 0x03 , h); + msr_content = rdmsr(MSR_IA32_THERM_INTERRUPT); + wrmsr(MSR_IA32_THERM_INTERRUPT, msr_content | 0x03); /* ok we're good to go... */ vendor_thermal_interrupt = intel_thermal_interrupt; - rdmsr (MSR_IA32_MISC_ENABLE, l, h); - wrmsr (MSR_IA32_MISC_ENABLE, l | (1<<3), h); + msr_content = rdmsr (MSR_IA32_MISC_ENABLE); + wrmsr (MSR_IA32_MISC_ENABLE, msr_content | (1ULL<<3)); - l = apic_read (APIC_LVTTHMR); - apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED); + val = apic_read (APIC_LVTTHMR); + apic_write_around (APIC_LVTTHMR, val & ~APIC_LVT_MASKED); if (opt_cpu_info) printk(KERN_INFO "CPU%u: Thermal monitoring enabled (%s)\n", cpu, tm2 ? 
"TM2" : "TM1"); @@ -483,7 +484,7 @@ static inline void intel_get_extended_ms if ( ext->mc_msrs < ARRAY_SIZE(ext->mc_msr) && msr < MSR_IA32_MCG_EAX + nr_intel_ext_msrs ) { ext->mc_msr[ext->mc_msrs].reg = msr; - rdmsrl(msr, ext->mc_msr[ext->mc_msrs].value); + ext->mc_msr[ext->mc_msrs].value = rdmsr(msr); ++ext->mc_msrs; } } @@ -924,7 +925,7 @@ static int do_cmci_discover(int i) unsigned msr = MSR_IA32_MC0_CTL2 + i; u64 val; - rdmsrl(msr, val); + val = rdmsr(msr); /* Some other CPU already owns this bank. */ if (val & CMCI_EN) { mcabanks_clear(i, __get_cpu_var(mce_banks_owned)); @@ -932,8 +933,8 @@ static int do_cmci_discover(int i) } val &= ~CMCI_THRESHOLD_MASK; - wrmsrl(msr, val | CMCI_EN | CMCI_THRESHOLD); - rdmsrl(msr, val); + wrmsr(msr, val | CMCI_EN | CMCI_THRESHOLD); + val = rdmsr(msr); if (!(val & CMCI_EN)) { /* This bank does not support CMCI. Polling timer has to handle it. */ @@ -1034,9 +1035,9 @@ static void clear_cmci(void) u64 val; if (!mcabanks_test(i, __get_cpu_var(mce_banks_owned))) continue; - rdmsrl(msr, val); + val = rdmsr(msr); if (val & (CMCI_EN|CMCI_THRESHOLD_MASK)) - wrmsrl(msr, val & ~(CMCI_EN|CMCI_THRESHOLD_MASK)); + wrmsr(msr, val & ~(CMCI_EN|CMCI_THRESHOLD_MASK)); mcabanks_clear(i, __get_cpu_var(mce_banks_owned)); } } @@ -1127,21 +1128,21 @@ static int mce_is_broadcast(struct cpuin static void intel_init_mca(struct cpuinfo_x86 *c) { int broadcast, cmci=0, ser=0, ext_num = 0, first; - u32 l, h; + uint64_t msr_content; broadcast = mce_is_broadcast(c); - rdmsr(MSR_IA32_MCG_CAP, l, h); + msr_content = rdmsr(MSR_IA32_MCG_CAP); - if ((l & MCG_CMCI_P) && cpu_has_apic) + if ((msr_content & MCG_CMCI_P) && cpu_has_apic) cmci = 1; /* Support Software Error Recovery */ - if (l & MCG_SER_P) + if (msr_content & MCG_SER_P) ser = 1; - if (l & MCG_EXT_P) - ext_num = (l >> MCG_EXT_CNT) & 0xff; + if (msr_content & MCG_EXT_P) + ext_num = (msr_content >> MCG_EXT_CNT) & 0xff; first = mce_firstbank(c); @@ -1185,7 +1186,7 @@ static void intel_mce_post_reset(void) static void intel_init_mce(void) { - u32 l, h; + uint64_t msr_content; int i; intel_mce_post_reset(); @@ -1195,17 +1196,17 @@ static void intel_init_mce(void) { /* Some banks are shared across cores, use MCi_CTRL to judge whether * this bank has been initialized by other cores already. 
*/ - rdmsr(MSR_IA32_MCx_CTL(i), l, h); - if (!(l | h)) + msr_content = rdmsr(MSR_IA32_MCx_CTL(i)); + if (!msr_content) { /* if ctl is 0, this bank is never initialized */ mce_printk(MCE_VERBOSE, "mce_init: init bank%d\n", i); - wrmsr (MSR_IA32_MCx_CTL(i), 0xffffffff, 0xffffffff); - wrmsr (MSR_IA32_MCx_STATUS(i), 0x0, 0x0); + wrmsr(MSR_IA32_MCx_CTL(i), 0xffffffffffffffffULL); + wrmsr(MSR_IA32_MCx_STATUS(i), 0x0); } } if (firstbank) /* if cmci enabled, firstbank = 0 */ - wrmsr (MSR_IA32_MC0_STATUS, 0x0, 0x0); + wrmsr(MSR_IA32_MC0_STATUS, 0x0); x86_mce_vector_register(intel_machine_check); mce_recoverable_register(intel_recoverable_scan); diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mcheck/vmce.c --- a/xen/arch/x86/cpu/mcheck/vmce.c +++ b/xen/arch/x86/cpu/mcheck/vmce.c @@ -439,7 +439,6 @@ int vmce_domain_inject( int vmce_init(struct cpuinfo_x86 *c) { - u32 l, h; u64 value; int i; @@ -454,14 +453,13 @@ int vmce_init(struct cpuinfo_x86 *c) /* Don't care banks before firstbank */ memset(h_mci_ctrl, 0xff, sizeof(h_mci_ctrl)); for (i = firstbank; i < nr_mce_banks; i++) - rdmsrl(MSR_IA32_MCx_CTL(i), h_mci_ctrl[i]); + h_mci_ctrl[i] = rdmsr(MSR_IA32_MCx_CTL(i)); } if (g_mcg_cap & MCG_CTL_P) - rdmsrl(MSR_IA32_MCG_CTL, h_mcg_ctl); + h_mcg_ctl = rdmsr(MSR_IA32_MCG_CTL); - rdmsr(MSR_IA32_MCG_CAP, l, h); - value = ((u64)h << 32) | l; + value = rdmsr(MSR_IA32_MCG_CAP); /* For Guest vMCE usage */ g_mcg_cap = value & ~MCG_CMCI_P; diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mtrr/amd.c --- a/xen/arch/x86/cpu/mtrr/amd.c +++ b/xen/arch/x86/cpu/mtrr/amd.c @@ -9,20 +9,20 @@ static void amd_get_mtrr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type * type) { - unsigned long low, high; + uint64_t msr_content; - rdmsr(MSR_K6_UWCCR, low, high); + msr_content = rdmsr(MSR_K6_UWCCR); /* Upper dword is region 1, lower is region 0 */ if (reg == 1) - low = high; + msr_content = (uint32_t)(msr_content >> 32); /* The base masks off on the right alignment */ - *base = (low & 0xFFFE0000) >> PAGE_SHIFT; + *base = (msr_content & 0xFFFE0000) >> PAGE_SHIFT; *type = 0; - if (low & 1) + if (msr_content & 1) *type = MTRR_TYPE_UNCACHABLE; - if (low & 2) + if (msr_content & 2) *type = MTRR_TYPE_WRCOMB; - if (!(low & 3)) { + if (!(msr_content & 3)) { *size = 0; return; } @@ -41,8 +41,8 @@ amd_get_mtrr(unsigned int reg, unsigned * +1 000 0000 0000 0100 * *128K ... 
*/ - low = (~low) & 0x1FFFC; - *size = (low + 4) << (15 - PAGE_SHIFT); + msr_content = (~msr_content) & 0x1FFFC; + *size = (msr_content + 4) << (15 - PAGE_SHIFT); return; } @@ -59,11 +59,14 @@ static void amd_set_mtrr(unsigned int re */ { u32 regs[2]; + uint64_t msr_content; /* * Low is MTRR0 , High MTRR 1 */ - rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); + msr_content = rdmsr(MSR_K6_UWCCR); + regs[0] = (uint32_t)msr_content; + regs[1] = (uint32_t)(msr_content >> 32); /* * Blank to disable */ @@ -85,7 +88,7 @@ static void amd_set_mtrr(unsigned int re * disable local interrupts, write back the cache, set the mtrr */ wbinvd(); - wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); + wrmsr(MSR_K6_UWCCR, regs[0] | (uint64_t)(regs[1]) << 32); } static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mtrr/generic.c --- a/xen/arch/x86/cpu/mtrr/generic.c +++ b/xen/arch/x86/cpu/mtrr/generic.c @@ -30,22 +30,31 @@ struct mtrr_state mtrr_state = {}; static void get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) { - rdmsrl(MTRRphysBase_MSR(index), vr->base); - rdmsrl(MTRRphysMask_MSR(index), vr->mask); + vr->base = rdmsr(MTRRphysBase_MSR(index)); + vr->mask = rdmsr(MTRRphysMask_MSR(index)); } static void get_fixed_ranges(mtrr_type * frs) { + uint64_t msr_content; unsigned int *p = (unsigned int *) frs; int i; - rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]); + msr_content = rdmsr(MTRRfix64K_00000_MSR); + p[0] = (uint32_t)msr_content; + p[1] = (uint32_t)(msr_content >> 32); - for (i = 0; i < 2; i++) - rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]); - for (i = 0; i < 8; i++) - rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]); + for (i = 0; i < 2; i++) { + msr_content = rdmsr(MTRRfix16K_80000_MSR + i); + p[2 + i * 2] = (uint32_t)msr_content; + p[3 + i * 2] = (uint32_t)(msr_content >> 32); + } + for (i = 0; i < 8; i++) { + msr_content = rdmsr(MTRRfix4K_C0000_MSR + i); + p[6 + i * 2] = (uint32_t)msr_content; + p[7 + i * 2] = (uint32_t)(msr_content >> 32); + } } void mtrr_save_fixed_ranges(void *info) @@ -59,7 +68,7 @@ void __init get_mtrr_state(void) { unsigned int i; struct mtrr_var_range *vrs; - unsigned lo, dummy; + uint64_t msr_content; if (!mtrr_state.var_ranges) { mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range, @@ -69,20 +78,20 @@ void __init get_mtrr_state(void) } vrs = mtrr_state.var_ranges; - rdmsr(MTRRcap_MSR, lo, dummy); - mtrr_state.have_fixed = (lo >> 8) & 1; + msr_content = rdmsr(MTRRcap_MSR); + mtrr_state.have_fixed = (msr_content >> 8) & 1; for (i = 0; i < num_var_ranges; i++) get_mtrr_var_range(i, &vrs[i]); if (mtrr_state.have_fixed) get_fixed_ranges(mtrr_state.fixed_ranges); - rdmsr(MTRRdefType_MSR, lo, dummy); - mtrr_state.def_type = (lo & 0xff); - mtrr_state.enabled = (lo & 0xc00) >> 10; + msr_content = rdmsr(MTRRdefType_MSR); + mtrr_state.def_type = (msr_content & 0xff); + mtrr_state.enabled = (msr_content & 0xc00) >> 10; /* Store mtrr_cap for HVM MTRR virtualisation. */ - rdmsrl(MTRRcap_MSR, mtrr_state.mtrr_cap); + mtrr_state.mtrr_cap = rdmsr(MTRRcap_MSR); } /* Some BIOS's are fucked and don't set all MTRRs the same! 
*/ @@ -123,8 +132,8 @@ static inline void k8_enable_fixed_iorrs { uint64_t msr_content; - rdmsrl(MSR_K8_SYSCFG, msr_content); - mtrr_wrmsr(MSR_K8_SYSCFG, msr_content + msr_content = rdmsr(MSR_K8_SYSCFG); + mtrr_wrmsr(MSR_K8_SYSCFG, msr_content | K8_MTRRFIXRANGE_DRAM_ENABLE | K8_MTRRFIXRANGE_DRAM_MODIFY); } @@ -141,7 +150,7 @@ static void set_fixed_range(int msr, int { uint64_t msr_content, val; - rdmsrl(msr, msr_content); + msr_content = rdmsr(msr); val = ((uint64_t)msrwords[1] << 32) | msrwords[0]; if (msr_content != val) { @@ -179,10 +188,11 @@ int generic_get_free_region(unsigned lon static void generic_get_mtrr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type *type) { - unsigned int mask_lo, mask_hi, base_lo, base_hi; + uint64_t _mask, _base; + uint32_t mask_lo, mask_hi, base_lo, base_hi; - rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); - if ((mask_lo & 0x800) == 0) { + _mask = rdmsr(MTRRphysMask_MSR(reg)); + if ((_mask & 0x800) == 0) { /* Invalid (i.e. free) range */ *base = 0; *size = 0; @@ -190,12 +200,16 @@ static void generic_get_mtrr(unsigned in return; } - rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); + _base = rdmsr(MTRRphysBase_MSR(reg)); + mask_lo = (uint32_t)_mask; + mask_hi = (uint32_t)(_mask >> 32); /* Work out the shifted address mask. */ mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; + base_hi = (uint32_t)_base; + base_lo = (uint32_t)(_base >> 32); /* This works correctly if size is a power of two, i.e. a contiguous range. */ *size = -mask_lo; @@ -226,9 +240,12 @@ static int set_fixed_ranges(mtrr_type * static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) { uint32_t lo, hi, base_lo, base_hi, mask_lo, mask_hi; + uint64_t msr_content; int changed = FALSE; - rdmsr(MTRRphysBase_MSR(index), lo, hi); + msr_content = rdmsr(MTRRphysBase_MSR(index)); + lo = (uint32_t)msr_content; + hi = (uint32_t)(msr_content >> 32); base_lo = (uint32_t)vr->base; base_hi = (uint32_t)(vr->base >> 32); @@ -242,7 +259,9 @@ static int set_mtrr_var_ranges(unsigned changed = TRUE; } - rdmsr(MTRRphysMask_MSR(index), lo, hi); + msr_content = rdmsr(MTRRphysMask_MSR(index)); + lo = (uint32_t)msr_content; + hi = (uint32_t)(msr_content >> 32); mask_lo = (uint32_t)vr->mask; mask_hi = (uint32_t)(vr->mask >> 32); @@ -325,7 +344,7 @@ static void prepare_set(void) flush_tlb_local(); /* Save MTRR state */ - rdmsrl(MTRRdefType_MSR, deftype); + deftype = rdmsr(MTRRdefType_MSR); /* Disable MTRRs, and set the default type to uncached */ mtrr_wrmsr(MTRRdefType_MSR, deftype & ~0xcff); @@ -451,9 +470,7 @@ int generic_validate_add_page(unsigned l static int generic_have_wrcomb(void) { - unsigned long config, dummy; - rdmsr(MTRRcap_MSR, config, dummy); - return (config & (1 << 10)); + return (int)(rdmsr(MTRRcap_MSR) & (1ULL << 10)); } int positive_have_wrcomb(void) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mtrr/main.c --- a/xen/arch/x86/cpu/mtrr/main.c +++ b/xen/arch/x86/cpu/mtrr/main.c @@ -101,10 +101,10 @@ static int have_wrcomb(void) /* This function returns the number of variable MTRRs */ static void __init set_num_var_ranges(void) { - unsigned long config = 0, dummy; + unsigned long config = 0; if (use_intel()) { - rdmsr(MTRRcap_MSR, config, dummy); + config = rdmsr(MTRRcap_MSR); } else if (is_cpu(AMD)) config = 2; else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/mtrr/state.c --- a/xen/arch/x86/cpu/mtrr/state.c +++ b/xen/arch/x86/cpu/mtrr/state.c @@ -29,10 +29,10 @@ void 
set_mtrr_prepare_save(struct set_mt write_cr0(cr0); wbinvd(); - if (use_intel()) { + if (use_intel()) /* Save MTRR state */ - rdmsrl(MTRRdefType_MSR, ctxt->deftype); - } else + ctxt->deftype = rdmsr(MTRRdefType_MSR); + else /* Cyrix ARRs - everything else were excluded at the top */ ctxt->ccr3 = getCx86(CX86_CCR3); } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/cpu/transmeta.c --- a/xen/arch/x86/cpu/transmeta.c +++ b/xen/arch/x86/cpu/transmeta.c @@ -7,7 +7,8 @@ static void __init init_transmeta(struct cpuinfo_x86 *c) { - unsigned int cap_mask, uk, max, dummy; + uint64_t cap_mask; + unsigned int max, dummy; unsigned int cms_rev1, cms_rev2; unsigned int cpu_rev, cpu_freq, cpu_flags, new_cpu_rev; char cpu_info[65]; @@ -68,10 +69,10 @@ static void __init init_transmeta(struct } /* Unhide possibly hidden capability flags */ - rdmsr(0x80860004, cap_mask, uk); - wrmsr(0x80860004, ~0, uk); + cap_mask = rdmsr(0x80860004); + wrmsr(0x80860004, ~0); c->x86_capability[0] = cpuid_edx(0x00000001); - wrmsr(0x80860004, cap_mask, uk); + wrmsr(0x80860004, cap_mask); /* If we can run i686 user-space code, call us an i686 */ #define USER686 (X86_FEATURE_TSC|X86_FEATURE_CX8|X86_FEATURE_CMOV) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -1088,21 +1088,15 @@ static void load_segments(struct vcpu *n { /* This can only be non-zero if selector is NULL. */ if ( nctxt->fs_base ) - wrmsr(MSR_FS_BASE, - nctxt->fs_base, - nctxt->fs_base>>32); + wrmsr(MSR_FS_BASE, nctxt->fs_base); /* Most kernels have non-zero GS base, so don't bother testing. */ /* (This is also a serialising instruction, avoiding AMD erratum #88.) */ - wrmsr(MSR_SHADOW_GS_BASE, - nctxt->gs_base_kernel, - nctxt->gs_base_kernel>>32); + wrmsr(MSR_SHADOW_GS_BASE, nctxt->gs_base_kernel); /* This can only be non-zero if selector is NULL. */ if ( nctxt->gs_base_user ) - wrmsr(MSR_GS_BASE, - nctxt->gs_base_user, - nctxt->gs_base_user>>32); + wrmsr(MSR_GS_BASE, nctxt->gs_base_user); /* If in kernel mode then switch the GS bases around. 
*/ if ( (n->arch.flags & TF_kernel_mode) ) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/e820.c --- a/xen/arch/x86/e820.c +++ b/xen/arch/x86/e820.c @@ -457,8 +457,8 @@ static uint64_t mtrr_top_of_ram(void) } addr_mask = ((1ull << phys_bits) - 1) & ~((1ull << 12) - 1); - rdmsrl(MSR_MTRRcap, mtrr_cap); - rdmsrl(MSR_MTRRdefType, mtrr_def); + mtrr_cap = rdmsr(MSR_MTRRcap); + mtrr_def = rdmsr(MSR_MTRRdefType); if ( e820_verbose ) printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def); @@ -474,8 +474,8 @@ static uint64_t mtrr_top_of_ram(void) top = 0; for ( i = 0; i < (uint8_t)mtrr_cap; i++ ) { - rdmsrl(MSR_MTRRphysBase(i), base); - rdmsrl(MSR_MTRRphysMask(i), mask); + base = rdmsr(MSR_MTRRphysBase(i)); + mask = rdmsr(MSR_MTRRphysMask(i) ); if ( e820_verbose ) printk(" MTRR[%d]: base %"PRIx64" mask %"PRIx64"\n", diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/hvm/hvm.c --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -2127,7 +2127,7 @@ int hvm_msr_write_intercept(unsigned int v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content; if ( cpu_has_rdtscp && (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) ) - wrmsrl(MSR_TSC_AUX, (uint32_t)msr_content); + wrmsr(MSR_TSC_AUX, (uint32_t)msr_content); break; case MSR_IA32_APICBASE: diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/hvm/svm/svm.c --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -684,7 +684,7 @@ static void svm_ctxt_switch_to(struct vc vpmu_load(v); if ( cpu_has_rdtscp ) - wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); + wrmsr(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); } static void svm_do_resume(struct vcpu *v) @@ -861,13 +861,12 @@ static void svm_init_erratum_383(struct static int svm_cpu_up(void) { - u32 phys_hsa_lo, phys_hsa_hi; uint64_t phys_hsa, msr_content; int rc, cpu = smp_processor_id(); struct cpuinfo_x86 *c = &cpu_data[cpu]; /* Check whether SVM feature is disabled in BIOS */ - rdmsrl(MSR_K8_VM_CR, msr_content); + msr_content = rdmsr(MSR_K8_VM_CR); if ( msr_content & K8_VMCR_SVME_DISABLE ) { printk("CPU%d: AMD SVM Extension is disabled in BIOS.\n", cpu); @@ -881,9 +880,7 @@ static int svm_cpu_up(void) /* Initialize the HSA for this core. 
*/ phys_hsa = (u64)virt_to_maddr(hsa[cpu]); - phys_hsa_lo = (u32)phys_hsa; - phys_hsa_hi = (u32)(phys_hsa >> 32); - wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi); + wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa); /* check for erratum 383 */ svm_init_erratum_383(c); @@ -898,12 +895,12 @@ static int svm_cpu_up(void) */ msr_content = read_efer(); if ( wrmsr_safe(MSR_EFER, msr_content | EFER_LMSLE) == 0 ) - rdmsrl(MSR_EFER, msr_content); + msr_content = rdmsr(MSR_EFER); if ( msr_content & EFER_LMSLE ) { if ( c == &boot_cpu_data ) cpu_has_lmsl = 1; - wrmsrl(MSR_EFER, msr_content ^ EFER_LMSLE); + wrmsr(MSR_EFER, msr_content ^ EFER_LMSLE); } else { @@ -1300,7 +1297,7 @@ static int svm_is_erratum_383(struct cpu if ( !amd_erratum383_found ) return 0; - rdmsrl(MSR_IA32_MC0_STATUS, msr_content); + msr_content = rdmsr(MSR_IA32_MC0_STATUS); /* Bit 62 may or may not be set for this mce */ msr_content &= ~(1ULL << 62); @@ -1309,10 +1306,10 @@ static int svm_is_erratum_383(struct cpu /* Clear MCi_STATUS registers */ for (i = 0; i < nr_mce_banks; i++) - wrmsrl(MSR_IA32_MCx_STATUS(i), 0ULL); + wrmsr(MSR_IA32_MCx_STATUS(i), 0ULL); - rdmsrl(MSR_IA32_MCG_STATUS, msr_content); - wrmsrl(MSR_IA32_MCG_STATUS, msr_content & ~(1ULL << 2)); + msr_content = rdmsr(MSR_IA32_MCG_STATUS); + wrmsr(MSR_IA32_MCG_STATUS, msr_content & ~(1ULL << 2)); /* flush TLB */ flush_tlb_mask(&v->domain->domain_dirty_cpumask); diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/hvm/svm/vpmu.c --- a/xen/arch/x86/hvm/svm/vpmu.c +++ b/xen/arch/x86/hvm/svm/vpmu.c @@ -110,11 +110,11 @@ static inline void context_restore(struc struct amd_vpmu_context *ctxt = vpmu->context; for ( i = 0; i < NUM_COUNTERS; i++ ) - wrmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]); + wrmsr(AMD_F10H_CTRLS[i], ctxt->ctrls[i]); for ( i = 0; i < NUM_COUNTERS; i++ ) { - wrmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]); + wrmsr(AMD_F10H_COUNTERS[i], ctxt->counters[i]); /* Force an interrupt to allow guest reset the counter, if the value is positive */ @@ -147,10 +147,10 @@ static inline void context_save(struct v struct amd_vpmu_context *ctxt = vpmu->context; for ( i = 0; i < NUM_COUNTERS; i++ ) - rdmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]); + ctxt->counters[i] = rdmsr(AMD_F10H_COUNTERS[i]); for ( i = 0; i < NUM_COUNTERS; i++ ) - rdmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]); + ctxt->ctrls[i] = rdmsr(AMD_F10H_CTRLS[i]); } static void amd_vpmu_save(struct vcpu *v) @@ -220,13 +220,13 @@ static int amd_vpmu_do_wrmsr(unsigned in context_update(msr, msr_content); /* Write to hw counters */ - wrmsrl(msr, msr_content); + wrmsr(msr, msr_content); return 1; } static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content) { - rdmsrl(msr, *msr_content); + *msr_content = rdmsr(msr); return 1; } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/hvm/vmx/vmcs.c --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -102,8 +102,11 @@ static u32 adjust_vmx_controls( const char *name, u32 ctl_min, u32 ctl_opt, u32 msr, bool_t *mismatch) { u32 vmx_msr_low, vmx_msr_high, ctl = ctl_min | ctl_opt; + uint64_t vmx_msr; - rdmsr(msr, vmx_msr_low, vmx_msr_high); + vmx_msr = rdmsr(msr); + vmx_msr_low = (uint32_t)vmx_msr; + vmx_msr_high = (uint32_t)(vmx_msr >> 32); ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ @@ -129,6 +132,7 @@ static bool_t cap_check(const char *name static int vmx_init_vmcs_config(void) { u32 vmx_basic_msr_low, vmx_basic_msr_high, min, opt; + uint64_t vmx_basic_msr; u32 
_vmx_pin_based_exec_control; u32 _vmx_cpu_based_exec_control; u32 _vmx_secondary_exec_control = 0; @@ -137,7 +141,9 @@ static int vmx_init_vmcs_config(void) u32 _vmx_vmentry_control; bool_t mismatch = 0; - rdmsr(MSR_IA32_VMX_BASIC, vmx_basic_msr_low, vmx_basic_msr_high); + vmx_basic_msr = rdmsr(MSR_IA32_VMX_BASIC); + vmx_basic_msr_low = (uint32_t)vmx_basic_msr; + vmx_basic_msr_high = (uint32_t)(vmx_basic_msr >> 32); min = (PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING); @@ -196,7 +202,7 @@ static int vmx_init_vmcs_config(void) if ( _vmx_secondary_exec_control & (SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_ENABLE_VPID) ) { - rdmsrl(MSR_IA32_VMX_EPT_VPID_CAP, _vmx_ept_vpid_cap); + _vmx_ept_vpid_cap = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); /* * Additional sanity checking before using EPT: @@ -231,9 +237,12 @@ static int vmx_init_vmcs_config(void) * We check VMX_BASIC_MSR[55] to correctly handle default1 controls. */ uint32_t must_be_one, must_be_zero, msr = MSR_IA32_VMX_PROCBASED_CTLS; + uint64_t msr_must_be; if ( vmx_basic_msr_high & (1u << 23) ) msr = MSR_IA32_VMX_TRUE_PROCBASED_CTLS; - rdmsr(msr, must_be_one, must_be_zero); + msr_must_be = rdmsr(msr); + must_be_one = (uint32_t)msr_must_be; + must_be_zero = (uint32_t)(msr_must_be >> 32); if ( must_be_one & (CPU_BASED_INVLPG_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING) ) @@ -446,7 +455,7 @@ void vmx_cpu_dead(unsigned int cpu) int vmx_cpu_up(void) { - u32 eax, edx; + uint64_t msr_content; int rc, bios_locked, cpu = smp_processor_id(); u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1; @@ -459,8 +468,8 @@ int vmx_cpu_up(void) * the requred CRO fixed bits in VMX operation. */ cr0 = read_cr0(); - rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0); - rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1); + vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0); + vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1); if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) ) { printk("CPU%d: some settings of host CR0 are " @@ -468,12 +477,12 @@ int vmx_cpu_up(void) return -EINVAL; } - rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx); + msr_content = rdmsr(IA32_FEATURE_CONTROL_MSR); - bios_locked = !!(eax & IA32_FEATURE_CONTROL_MSR_LOCK); + bios_locked = !!(msr_content & IA32_FEATURE_CONTROL_MSR_LOCK); if ( bios_locked ) { - if ( !(eax & (tboot_in_measured_env() + if ( !(msr_content & (tboot_in_measured_env() ? 
IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX : IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX)) ) { @@ -483,11 +492,11 @@ int vmx_cpu_up(void) } else { - eax = IA32_FEATURE_CONTROL_MSR_LOCK; - eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX; + msr_content = IA32_FEATURE_CONTROL_MSR_LOCK; + msr_content |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX; if ( test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) ) - eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX; - wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0); + msr_content |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX; + wrmsr(IA32_FEATURE_CONTROL_MSR, msr_content); } if ( (rc = vmx_init_vmcs_config()) != 0 ) @@ -503,8 +512,8 @@ int vmx_cpu_up(void) case -2: /* #UD or #GP */ if ( bios_locked && test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) && - (!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) || - !(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) ) + (!(msr_content & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) || + !(msr_content & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) ) { printk("CPU%d: VMXON failed: perhaps because of TXT settings " "in your BIOS configuration?\n", cpu); @@ -767,9 +776,9 @@ static int construct_vmcs(struct vcpu *v __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler); /* Host SYSENTER CS:RIP. */ - rdmsrl(MSR_IA32_SYSENTER_CS, sysenter_cs); + sysenter_cs = rdmsr(MSR_IA32_SYSENTER_CS); __vmwrite(HOST_SYSENTER_CS, sysenter_cs); - rdmsrl(MSR_IA32_SYSENTER_EIP, sysenter_eip); + sysenter_eip = rdmsr(MSR_IA32_SYSENTER_EIP); __vmwrite(HOST_SYSENTER_EIP, sysenter_eip); /* MSR intercepts. */ @@ -873,7 +882,7 @@ static int construct_vmcs(struct vcpu *v { u64 host_pat, guest_pat; - rdmsrl(MSR_IA32_CR_PAT, host_pat); + host_pat = rdmsr(MSR_IA32_CR_PAT); guest_pat = MSR_IA32_CR_PAT_RESET; __vmwrite(HOST_PAT, host_pat); @@ -984,7 +993,7 @@ int vmx_add_host_load_msr(u32 msr) msr_area[msr_count].index = msr; msr_area[msr_count].mbz = 0; - rdmsrl(msr, msr_area[msr_count].data); + msr_area[msr_count].data = rdmsr(msr); curr->arch.hvm_vmx.host_msr_count = ++msr_count; __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count); diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/hvm/vmx/vmx.c --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -153,13 +153,13 @@ void vmx_save_host_msrs(void) int i; for ( i = 0; i < MSR_INDEX_SIZE; i++ ) - rdmsrl(msr_index[i], host_msr_state->msrs[i]); + host_msr_state->msrs[i] = rdmsr(msr_index[i]); } #define WRITE_MSR(address) \ guest_msr_state->msrs[VMX_INDEX_MSR_ ## address] = msr_content; \ set_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags); \ - wrmsrl(MSR_ ## address, msr_content); \ + wrmsr(MSR_ ## address, msr_content); \ set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags); \ break @@ -184,7 +184,7 @@ long_mode_do_msr_read(unsigned int msr, break; case MSR_SHADOW_GS_BASE: - rdmsrl(MSR_SHADOW_GS_BASE, *msr_content); + *msr_content = rdmsr(MSR_SHADOW_GS_BASE); break; case MSR_STAR: @@ -239,7 +239,7 @@ long_mode_do_msr_write(unsigned int msr, else if ( msr == MSR_GS_BASE ) __vmwrite(GUEST_GS_BASE, msr_content); else - wrmsrl(MSR_SHADOW_GS_BASE, msr_content); + wrmsr(MSR_SHADOW_GS_BASE, msr_content); break; @@ -287,7 +287,7 @@ static void vmx_restore_host_msrs(void) while ( host_msr_state->flags ) { i = find_first_set_bit(host_msr_state->flags); - wrmsrl(msr_index[i], host_msr_state->msrs[i]); + wrmsr(msr_index[i], host_msr_state->msrs[i]); clear_bit(i, &host_msr_state->flags); } } @@ -298,7 +298,7 @@ 
static void vmx_save_guest_msrs(struct v * We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can * be updated at any time via SWAPGS, which we cannot trap. */ - rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs); + v->arch.hvm_vmx.shadow_gs = rdmsr(MSR_SHADOW_GS_BASE); } static void vmx_restore_guest_msrs(struct vcpu *v) @@ -310,7 +310,7 @@ static void vmx_restore_guest_msrs(struc guest_msr_state = &v->arch.hvm_vmx.msr_state; host_msr_state = &this_cpu(host_msr_state); - wrmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs); + wrmsr(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs); guest_flags = guest_msr_state->flags; @@ -322,7 +322,7 @@ static void vmx_restore_guest_msrs(struc "restore guest's index %d msr %x with value %lx", i, msr_index[i], guest_msr_state->msrs[i]); set_bit(i, &host_msr_state->flags); - wrmsrl(msr_index[i], guest_msr_state->msrs[i]); + wrmsr(msr_index[i], guest_msr_state->msrs[i]); clear_bit(i, &guest_flags); } @@ -336,7 +336,7 @@ static void vmx_restore_guest_msrs(struc } if ( cpu_has_rdtscp ) - wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); + wrmsr(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); } #else /* __i386__ */ @@ -1831,7 +1831,7 @@ static int vmx_msr_read_intercept(unsign case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2: goto gp_fault; case MSR_IA32_MISC_ENABLE: - rdmsrl(MSR_IA32_MISC_ENABLE, *msr_content); + *msr_content = rdmsr(MSR_IA32_MISC_ENABLE); /* Debug Trace Store is not supported. */ *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL; diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/hvm/vmx/vpmu_core2.c --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c @@ -189,9 +189,9 @@ static inline void __core2_vpmu_save(str struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; for ( i = 0; i < core2_counters.num; i++ ) - rdmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); + core2_vpmu_cxt->counters[i] = rdmsr(core2_counters.msr[i]); for ( i = 0; i < core2_get_pmc_count(); i++ ) - rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); + core2_vpmu_cxt->arch_msr_pair[i].counter = rdmsr(MSR_IA32_PERFCTR0+i); core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC); apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED); } @@ -220,14 +220,14 @@ static inline void __core2_vpmu_load(str struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; for ( i = 0; i < core2_counters.num; i++ ) - wrmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); + wrmsr(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); for ( i = 0; i < core2_get_pmc_count(); i++ ) - wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); + wrmsr(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); for ( i = 0; i < core2_ctrls.num; i++ ) - wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]); + wrmsr(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]); for ( i = 0; i < core2_get_pmc_count(); i++ ) - wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control); + wrmsr(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control); apic_write_around(APIC_LVTPC, core2_vpmu_cxt->hw_lapic_lvtpc); } @@ -253,7 +253,7 @@ static int core2_vpmu_alloc_resource(str if ( !acquire_pmu_ownership(PMU_OWNER_HVM) ) return 0; - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) ) return 0; @@ -364,13 +364,13 @@ static int core2_vpmu_do_wrmsr(unsigned global_ctrl = msr_content; for ( i = 0; i < 
core2_get_pmc_count(); i++ ) { - rdmsrl(MSR_P6_EVNTSEL0+i, non_global_ctrl); + non_global_ctrl = rdmsr(MSR_P6_EVNTSEL0+i); core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] = global_ctrl & (non_global_ctrl >> 22) & 1; global_ctrl >>= 1; } - rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl); + non_global_ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL); global_ctrl = msr_content >> 32; for ( i = 0; i < 3; i++ ) { @@ -443,7 +443,7 @@ static int core2_vpmu_do_wrmsr(unsigned if (inject_gp) vmx_inject_hw_exception(TRAP_gp_fault, 0); else - wrmsrl(msr, msr_content); + wrmsr(msr, msr_content); } else vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content); @@ -474,7 +474,7 @@ static int core2_vpmu_do_rdmsr(unsigned vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content); break; default: - rdmsrl(msr, *msr_content); + *msr_content = rdmsr(msr); } return 1; @@ -490,12 +490,12 @@ static int core2_vpmu_do_interrupt(struc struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context; struct vlapic *vlapic = vcpu_vlapic(v); - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, msr_content); + msr_content = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS); if ( !msr_content ) return 0; core2_vpmu_cxt->global_ovf_status |= msr_content; msr_content = 0xC000000700000000 | ((1 << core2_get_pmc_count()) - 1); - wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content); + wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content); apic_write_around(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/microcode_amd.c --- a/xen/arch/x86/microcode_amd.c +++ b/xen/arch/x86/microcode_amd.c @@ -47,7 +47,6 @@ struct equiv_cpu_entry *equiv_cpu_table; static int collect_cpu_info(int cpu, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data[cpu]; - uint32_t dummy; memset(csig, 0, sizeof(*csig)); @@ -58,7 +57,7 @@ static int collect_cpu_info(int cpu, str return -EINVAL; } - rdmsr(MSR_AMD_PATCHLEVEL, csig->rev, dummy); + csig->rev = rdmsr(MSR_AMD_PATCHLEVEL); printk(KERN_INFO "microcode: collect_cpu_info: patch_id=0x%x\n", csig->rev); @@ -126,7 +125,7 @@ static int apply_microcode(int cpu) { unsigned long flags; struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu); - uint32_t rev, dummy; + uint32_t rev; struct microcode_amd *mc_amd = uci->mc.mc_amd; /* We should bind the task to the CPU */ @@ -137,10 +136,10 @@ static int apply_microcode(int cpu) spin_lock_irqsave(µcode_update_lock, flags); - wrmsrl(MSR_AMD_PATCHLOADER, (unsigned long)&mc_amd->hdr.data_code); + wrmsr(MSR_AMD_PATCHLOADER, (unsigned long)&mc_amd->hdr.data_code); /* get patch id after patching */ - rdmsr(MSR_AMD_PATCHLEVEL, rev, dummy); + rev = rdmsr(MSR_AMD_PATCHLEVEL); spin_unlock_irqrestore(µcode_update_lock, flags); diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/microcode_intel.c --- a/xen/arch/x86/microcode_intel.c +++ b/xen/arch/x86/microcode_intel.c @@ -62,6 +62,7 @@ static DEFINE_SPINLOCK(microcode_update_ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data[cpu_num]; + uint64_t msr_content; unsigned int val[2]; BUG_ON(cpu_num != smp_processor_id()); @@ -81,15 +82,17 @@ static int collect_cpu_info(int cpu_num, if ( (c->x86_model >= 5) || (c->x86 > 6) ) { /* get processor flags from MSR 0x17 */ - rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); + msr_content = rdmsr(MSR_IA32_PLATFORM_ID); + val[1] = msr_content >> 32; csig->pf = 1 << ((val[1] >> 18) & 7); } - wrmsr(MSR_IA32_UCODE_REV, 0, 0); + wrmsr(MSR_IA32_UCODE_REV, 0); /* see notes above for revision 1.07. 
Apparent chip bug */ sync_core(); /* get the current revision from MSR 0x8B */ - rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev); + msr_content = rdmsr(MSR_IA32_UCODE_REV); + csig->rev = (uint32_t)msr_content; pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n", csig->sig, csig->pf, csig->rev); @@ -250,6 +253,7 @@ static int apply_microcode(int cpu) { unsigned long flags; unsigned int val[2]; + uint64_t msr_content; int cpu_num = raw_smp_processor_id(); struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu_num); @@ -263,16 +267,15 @@ static int apply_microcode(int cpu) spin_lock_irqsave(µcode_update_lock, flags); /* write microcode via MSR 0x79 */ - wrmsr(MSR_IA32_UCODE_WRITE, - (unsigned long) uci->mc.mc_intel->bits, - (unsigned long) uci->mc.mc_intel->bits >> 16 >> 16); - wrmsr(MSR_IA32_UCODE_REV, 0, 0); + wrmsr(MSR_IA32_UCODE_WRITE, (unsigned long) uci->mc.mc_intel->bits); + wrmsr(MSR_IA32_UCODE_REV, 0); /* see notes above for revision 1.07. Apparent chip bug */ sync_core(); /* get the current revision from MSR 0x8B */ - rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); + msr_content = rdmsr(MSR_IA32_UCODE_REV); + val[1] = msr_content >> 32; spin_unlock_irqrestore(µcode_update_lock, flags); if ( val[1] != uci->mc.mc_intel->hdr.rev ) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/nmi.c --- a/xen/arch/x86/nmi.c +++ b/xen/arch/x86/nmi.c @@ -147,7 +147,7 @@ static void disable_lapic_nmi_watchdog(v return; switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: - wrmsr(MSR_K7_EVNTSEL0, 0, 0); + wrmsr(MSR_K7_EVNTSEL0, 0); break; case X86_VENDOR_INTEL: switch (boot_cpu_data.x86) { @@ -155,14 +155,14 @@ static void disable_lapic_nmi_watchdog(v if (boot_cpu_data.x86_model > 0xd) break; - wrmsr(MSR_P6_EVNTSEL0, 0, 0); + wrmsr(MSR_P6_EVNTSEL0, 0); break; case 15: if (boot_cpu_data.x86_model > 0x4) break; - wrmsr(MSR_P4_IQ_CCCR0, 0, 0); - wrmsr(MSR_P4_CRU_ESCR0, 0, 0); + wrmsr(MSR_P4_IQ_CCCR0, 0); + wrmsr(MSR_P4_CRU_ESCR0, 0); break; } break; @@ -219,7 +219,7 @@ static void __pminit clear_msr_range(uns unsigned int i; for (i = 0; i < n; i++) - wrmsr(base+i, 0, 0); + wrmsr(base+i, 0); } static inline void write_watchdog_counter(const char *descr) @@ -229,7 +229,7 @@ static inline void write_watchdog_counte do_div(count, nmi_hz); if(descr) Dprintk("setting %s to -0x%"PRIx64"\n", descr, count); - wrmsrl(nmi_perfctr_msr, 0 - count); + wrmsr(nmi_perfctr_msr, 0 - count); } static void __pminit setup_k7_watchdog(void) @@ -246,11 +246,11 @@ static void __pminit setup_k7_watchdog(v | K7_EVNTSEL_USR | K7_NMI_EVENT; - wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); + wrmsr(MSR_K7_EVNTSEL0, evntsel); write_watchdog_counter("K7_PERFCTR0"); apic_write(APIC_LVTPC, APIC_DM_NMI); evntsel |= K7_EVNTSEL_ENABLE; - wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); + wrmsr(MSR_K7_EVNTSEL0, evntsel); } static void __pminit setup_p6_watchdog(unsigned counter) @@ -267,18 +267,18 @@ static void __pminit setup_p6_watchdog(u | P6_EVNTSEL_USR | counter; - wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); + wrmsr(MSR_P6_EVNTSEL0, evntsel); write_watchdog_counter("P6_PERFCTR0"); apic_write(APIC_LVTPC, APIC_DM_NMI); evntsel |= P6_EVNTSEL0_ENABLE; - wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); + wrmsr(MSR_P6_EVNTSEL0, evntsel); } static int __pminit setup_p4_watchdog(void) { - unsigned int misc_enable, dummy; + uint64_t misc_enable; - rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy); + misc_enable = rdmsr(MSR_IA32_MISC_ENABLE); if (!(misc_enable & MSR_IA32_MISC_ENABLE_PERF_AVAIL)) return 0; @@ -304,11 +304,11 @@ static int __pminit setup_p4_watchdog(vo 
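The microcode and watchdog hunks above all follow the same conversion: a former rdmsr(msr, lo, hi) pair becomes a single 64-bit read, and any flag test is then done on the whole value. A minimal sketch of that pattern, assuming the new msr.h helper and the MSR_IA32_MISC_ENABLE* constants used in setup_p4_watchdog() are in scope; the wrapper function itself is hypothetical:

    /* Sketch only: mirrors the PERF_AVAIL check in setup_p4_watchdog()
     * using the single-value rdmsr() interface introduced by this series. */
    static int perfmon_available(void)
    {
        uint64_t misc_enable = rdmsr(MSR_IA32_MISC_ENABLE);

        /* The availability bit sits in the low 32 bits, so widening the
         * variable to 64 bits does not change the test. */
        return (misc_enable & MSR_IA32_MISC_ENABLE_PERF_AVAIL) != 0;
    }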
clear_msr_range(MSR_P4_BPU_CCCR0, 18); clear_msr_range(MSR_P4_BPU_PERFCTR0, 18); - wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); - wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); + wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0); + wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE); write_watchdog_counter("P4_IQ_COUNTER0"); apic_write(APIC_LVTPC, APIC_DM_NMI); - wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); + wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val); return 1; } @@ -442,7 +442,7 @@ void nmi_watchdog_tick(struct cpu_user_r * - LVTPC is masked on interrupt and must be * unmasked by the LVTPC handler. */ - wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); + wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val); apic_write(APIC_LVTPC, APIC_DM_NMI); } else if ( nmi_perfctr_msr == MSR_P6_PERFCTR0 ) diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/oprofile/nmi_int.c --- a/xen/arch/x86/oprofile/nmi_int.c +++ b/xen/arch/x86/oprofile/nmi_int.c @@ -104,15 +104,11 @@ static void nmi_cpu_save_registers(struc unsigned int i; for (i = 0; i < nr_ctrs; ++i) { - rdmsr(counters[i].addr, - counters[i].saved.low, - counters[i].saved.high); + counters[i].value = rdmsr(counters[i].addr); } for (i = 0; i < nr_ctrls; ++i) { - rdmsr(controls[i].addr, - controls[i].saved.low, - controls[i].saved.high); + controls[i].value = rdmsr(controls[i].addr); } } @@ -222,15 +218,11 @@ static void nmi_restore_registers(struct unsigned int i; for (i = 0; i < nr_ctrls; ++i) { - wrmsr(controls[i].addr, - controls[i].saved.low, - controls[i].saved.high); + wrmsr(controls[i].addr, controls[i].value); } for (i = 0; i < nr_ctrs; ++i) { - wrmsr(counters[i].addr, - counters[i].saved.low, - counters[i].saved.high); + wrmsr(counters[i].addr, counters[i].value); } } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/oprofile/op_model_athlon.c --- a/xen/arch/x86/oprofile/op_model_athlon.c +++ b/xen/arch/x86/oprofile/op_model_athlon.c @@ -26,15 +26,15 @@ #define NUM_COUNTERS 4 #define NUM_CONTROLS 4 -#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) -#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0) +#define CTR_READ(msr_content,msrs,c) (msr_content) = rdmsr(msrs->counters[(c)].addr) +#define CTR_WRITE(msr_content,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(msr_content));} while (0) #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) -#define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0) -#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0) +#define CTRL_READ(msr_content,msrs,c) (msr_content) = rdmsr(msrs->controls[(c)].addr) +#define CTRL_WRITE(msr_content,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (msr_content));} while (0) #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) -#define CTRL_CLEAR(lo, hi) (lo &= (1<<21), hi = 0) +#define CTRL_CLEAR(val) (val &= (1<<21)) #define CTRL_SET_ENABLE(val) (val |= 1<<20) #define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) @@ -64,14 +64,15 @@ static void athlon_fill_in_addresses(str static void athlon_setup_ctrs(struct op_msrs const * const msrs) { - unsigned int low, high; + uint64_t msr_content; + uint32_t high; int i; /* clear all counters */ for (i = 0 ; i < NUM_CONTROLS; ++i) { - CTRL_READ(low, high, msrs, i); - CTRL_CLEAR(low, high); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_WRITE(msr_content, msrs, i); 
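The nmi_int.c hunks above replace the saved.low/saved.high pair with a single 64-bit field, so saving and restoring a counter becomes one MSR access each way. A minimal sketch of that save/restore shape, with a hypothetical descriptor standing in for the real op_msr structure (only the addr and value fields used by the loops above are assumed):

    /* Hypothetical descriptor with just the fields the loops rely on. */
    struct msr_slot {
        unsigned int addr;   /* MSR index */
        uint64_t value;      /* full 64-bit contents */
    };

    static void save_slots(struct msr_slot *s, unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++)
            s[i].value = rdmsr(s[i].addr);   /* one read instead of a lo/hi pair */
    }

    static void restore_slots(const struct msr_slot *s, unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++)
            wrmsr(s[i].addr, s[i].value);    /* one write instead of a lo/hi pair */
    }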
} /* avoid a false detection of ctr overflows in NMI handler */ @@ -86,17 +87,19 @@ static void athlon_setup_ctrs(struct op_ CTR_WRITE(counter_config[i].count, msrs, i); - CTRL_READ(low, high, msrs, i); - CTRL_CLEAR(low, high); - CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[i].user); - CTRL_SET_KERN(low, counter_config[i].kernel); - CTRL_SET_UM(low, counter_config[i].unit_mask); - CTRL_SET_EVENT_LOW(low, counter_config[i].event); + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_SET_ENABLE(msr_content); + CTRL_SET_USR(msr_content, counter_config[i].user); + CTRL_SET_KERN(msr_content, counter_config[i].kernel); + CTRL_SET_UM(msr_content, counter_config[i].unit_mask); + CTRL_SET_EVENT_LOW(msr_content, counter_config[i].event); + high = (uint32_t)(msr_content >> 32); CTRL_SET_EVENT_HIGH(high, counter_config[i].event); CTRL_SET_HOST_ONLY(high, 0); CTRL_SET_GUEST_ONLY(high, 0); - CTRL_WRITE(low, high, msrs, i); + msr_content = (uint32_t)msr_content | ((uint64_t)(high) << 32); + CTRL_WRITE(msr_content, msrs, i); } else { reset_value[i] = 0; } @@ -108,7 +111,7 @@ static int athlon_check_ctrs(unsigned in struct cpu_user_regs * const regs) { - unsigned int low, high; + uint64_t msr_content; int i; int ovf = 0; unsigned long eip = regs->eip; @@ -128,8 +131,8 @@ static int athlon_check_ctrs(unsigned in } for (i = 0 ; i < NUM_COUNTERS; ++i) { - CTR_READ(low, high, msrs, i); - if (CTR_OVERFLOWED(low)) { + CTR_READ(msr_content, msrs, i); + if (CTR_OVERFLOWED(msr_content)) { xenoprof_log_event(current, regs, eip, mode, i); CTR_WRITE(reset_value[i], msrs, i); ovf = 1; @@ -143,13 +146,13 @@ static int athlon_check_ctrs(unsigned in static void athlon_start(struct op_msrs const * const msrs) { - unsigned int low, high; + uint64_t msr_content; int i; for (i = 0 ; i < NUM_COUNTERS ; ++i) { if (reset_value[i]) { - CTRL_READ(low, high, msrs, i); - CTRL_SET_ACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_SET_ACTIVE(msr_content); + CTRL_WRITE(msr_content, msrs, i); } } } @@ -157,15 +160,15 @@ static void athlon_start(struct op_msrs static void athlon_stop(struct op_msrs const * const msrs) { - unsigned int low,high; + uint64_t msr_content; int i; /* Subtle: stop on all counters to avoid race with * setting our pm callback */ for (i = 0 ; i < NUM_COUNTERS ; ++i) { - CTRL_READ(low, high, msrs, i); - CTRL_SET_INACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_SET_INACTIVE(msr_content); + CTRL_WRITE(msr_content, msrs, i); } } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/oprofile/op_model_p4.c --- a/xen/arch/x86/oprofile/op_model_p4.c +++ b/xen/arch/x86/oprofile/op_model_p4.c @@ -357,8 +357,8 @@ static const struct p4_event_binding p4_ #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) -#define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) -#define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) +#define ESCR_READ(msr_content,ev,i) (msr_content) = rdmsr(ev->bindings[(i)].escr_address) +#define ESCR_WRITE(msr_content,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, msr_content);} while (0) #define CCCR_RESERVED_BITS 0x38030FFF #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) @@ -368,13 +368,13 @@ static const struct p4_event_binding p4_ 
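athlon_setup_ctrs() above still has to peel the high word off, because CTRL_SET_EVENT_HIGH(), CTRL_SET_HOST_ONLY() and CTRL_SET_GUEST_ONLY() operate on a 32-bit word, and then splice it back before the single 64-bit write. A minimal sketch of that split-and-recombine idiom, with a placeholder bit standing in for the real event-select macros:

    /* Sketch of the idiom: low-half edits happen on the 64-bit value,
     * high-half edits on a temporary 32-bit word, then the halves are
     * merged back into one wrmsr()-ready value. */
    static uint64_t set_high_half_bit(uint64_t msr_content)
    {
        uint32_t high = (uint32_t)(msr_content >> 32);

        high |= 1u << 0;   /* placeholder for CTRL_SET_EVENT_HIGH() and friends */

        return (uint32_t)msr_content | ((uint64_t)high << 32);
    }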
#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) -#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) -#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) +#define CCCR_READ(msr_content, i) (msr_content) = rdmsr(p4_counters[(i)].cccr_address) +#define CCCR_WRITE(msr_content, i) do {wrmsr(p4_counters[(i)].cccr_address, (msr_content));} while (0) #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) -#define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0) -#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0) +#define CTR_READ(msr_content,i) (msr_content) = rdmsr(p4_counters[(i)].counter_address) +#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u64)(l));} while (0) #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) @@ -481,9 +481,8 @@ static void pmc_setup_one_p4_counter(uns { int i; int const maxbind = 2; - unsigned int cccr = 0; - unsigned int escr = 0; - unsigned int high = 0; + uint64_t cccr = 0; + uint64_t escr = 0; unsigned int counter_bit; const struct p4_event_binding *ev = NULL; unsigned int stag; @@ -507,7 +506,7 @@ static void pmc_setup_one_p4_counter(uns if (ev->bindings[i].virt_counter & counter_bit) { /* modify ESCR */ - ESCR_READ(escr, high, ev, i); + ESCR_READ(escr, ev, i); ESCR_CLEAR(escr); if (stag == 0) { ESCR_SET_USR_0(escr, counter_config[ctr].user); @@ -518,10 +517,10 @@ static void pmc_setup_one_p4_counter(uns } ESCR_SET_EVENT_SELECT(escr, ev->event_select); ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); - ESCR_WRITE(escr, high, ev, i); + ESCR_WRITE(escr, ev, i); /* modify CCCR */ - CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); + CCCR_READ(cccr, VIRT_CTR(stag, ctr)); CCCR_CLEAR(cccr); CCCR_SET_REQUIRED_BITS(cccr); CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); @@ -530,7 +529,7 @@ static void pmc_setup_one_p4_counter(uns } else { CCCR_SET_PMI_OVF_1(cccr); } - CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); + CCCR_WRITE(cccr, VIRT_CTR(stag, ctr)); return; } } @@ -544,68 +543,68 @@ static void pmc_setup_one_p4_counter(uns static void p4_setup_ctrs(struct op_msrs const * const msrs) { unsigned int i; - unsigned int low, high; + uint64_t msr_content; unsigned int addr; unsigned int stag; stag = get_stagger(); - rdmsr(MSR_IA32_MISC_ENABLE, low, high); - if (! MISC_PMC_ENABLED_P(low)) { + msr_content = rdmsr(MSR_IA32_MISC_ENABLE); + if (! 
MISC_PMC_ENABLED_P(msr_content)) { printk(KERN_ERR "oprofile: P4 PMC not available\n"); return; } /* clear the cccrs we will use */ for (i = 0 ; i < num_counters ; i++) { - rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); - CCCR_CLEAR(low); - CCCR_SET_REQUIRED_BITS(low); - wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); + msr_content = rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address); + CCCR_CLEAR(msr_content); + CCCR_SET_REQUIRED_BITS(msr_content); + wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, msr_content); } /* clear cccrs outside our concern */ for (i = stag ; i < NUM_UNUSED_CCCRS ; i += addr_increment()) { - rdmsr(p4_unused_cccr[i], low, high); - CCCR_CLEAR(low); - CCCR_SET_REQUIRED_BITS(low); - wrmsr(p4_unused_cccr[i], low, high); + msr_content = rdmsr(p4_unused_cccr[i]); + CCCR_CLEAR(msr_content); + CCCR_SET_REQUIRED_BITS(msr_content); + wrmsr(p4_unused_cccr[i], msr_content); } /* clear all escrs (including those outside our concern) */ for (addr = MSR_P4_BSU_ESCR0 + stag; addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) { - wrmsr(addr, 0, 0); + wrmsr(addr, 0); } /* On older models clear also MSR_P4_IQ_ESCR0/1 */ if (boot_cpu_data.x86_model < 0x3) { - wrmsr(MSR_P4_IQ_ESCR0, 0, 0); - wrmsr(MSR_P4_IQ_ESCR1, 0, 0); + wrmsr(MSR_P4_IQ_ESCR0, 0); + wrmsr(MSR_P4_IQ_ESCR1, 0); } for (addr = MSR_P4_RAT_ESCR0 + stag; addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) { - wrmsr(addr, 0, 0); + wrmsr(addr, 0); } for (addr = MSR_P4_MS_ESCR0 + stag; addr <= MSR_P4_TC_ESCR1; addr += addr_increment()){ - wrmsr(addr, 0, 0); + wrmsr(addr, 0); } for (addr = MSR_P4_IX_ESCR0 + stag; addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()){ - wrmsr(addr, 0, 0); + wrmsr(addr, 0); } if (num_counters == NUM_COUNTERS_NON_HT) { - wrmsr(MSR_P4_CRU_ESCR4, 0, 0); - wrmsr(MSR_P4_CRU_ESCR5, 0, 0); + wrmsr(MSR_P4_CRU_ESCR4, 0); + wrmsr(MSR_P4_CRU_ESCR5, 0); } else if (stag == 0) { - wrmsr(MSR_P4_CRU_ESCR4, 0, 0); + wrmsr(MSR_P4_CRU_ESCR4, 0); } else { - wrmsr(MSR_P4_CRU_ESCR5, 0, 0); + wrmsr(MSR_P4_CRU_ESCR5, 0); } /* setup all counters */ @@ -624,7 +623,8 @@ static int p4_check_ctrs(unsigned int co struct op_msrs const * const msrs, struct cpu_user_regs * const regs) { - unsigned long ctr, low, high, stag, real; + unsigned long ctr, stag, real; + uint64_t msr_content; int i; int ovf = 0; unsigned long eip = regs->eip; @@ -656,13 +656,13 @@ static int p4_check_ctrs(unsigned int co real = VIRT_CTR(stag, i); - CCCR_READ(low, high, real); - CTR_READ(ctr, high, real); - if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { + CCCR_READ(msr_content, real); + CTR_READ(ctr, real); + if (CCCR_OVF_P(msr_content) || CTR_OVERFLOW_P(ctr)) { xenoprof_log_event(current, regs, eip, mode, i); CTR_WRITE(reset_value[i], real); - CCCR_CLEAR_OVF(low); - CCCR_WRITE(low, high, real); + CCCR_CLEAR_OVF(msr_content); + CCCR_WRITE(msr_content, real); CTR_WRITE(reset_value[i], real); ovf = 1; } @@ -677,7 +677,8 @@ static int p4_check_ctrs(unsigned int co static void p4_start(struct op_msrs const * const msrs) { - unsigned int low, high, stag; + unsigned int stag; + uint64_t msr_content; int i; stag = get_stagger(); @@ -685,24 +686,25 @@ static void p4_start(struct op_msrs cons for (i = 0; i < num_counters; ++i) { if (!reset_value[i]) continue; - CCCR_READ(low, high, VIRT_CTR(stag, i)); - CCCR_SET_ENABLE(low); - CCCR_WRITE(low, high, VIRT_CTR(stag, i)); + CCCR_READ(msr_content, VIRT_CTR(stag, i)); + CCCR_SET_ENABLE(msr_content); + CCCR_WRITE(msr_content, VIRT_CTR(stag, i)); } } static void p4_stop(struct op_msrs 
const * const msrs) { - unsigned int low, high, stag; + unsigned int stag; + uint64_t msr_content; int i; stag = get_stagger(); for (i = 0; i < num_counters; ++i) { - CCCR_READ(low, high, VIRT_CTR(stag, i)); - CCCR_SET_DISABLE(low); - CCCR_WRITE(low, high, VIRT_CTR(stag, i)); + CCCR_READ(msr_content, VIRT_CTR(stag, i)); + CCCR_SET_DISABLE(msr_content); + CCCR_WRITE(msr_content, VIRT_CTR(stag, i)); } } diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/oprofile/op_model_ppro.c --- a/xen/arch/x86/oprofile/op_model_ppro.c +++ b/xen/arch/x86/oprofile/op_model_ppro.c @@ -43,8 +43,8 @@ static int counter_width = 32; #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) -#define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0) -#define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0) +#define CTRL_READ(msr_content,msrs,c) (msr_content) = rdmsr((msrs->controls[(c)].addr)) +#define CTRL_WRITE(msr_content,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (msr_content));} while (0) #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) #define CTRL_CLEAR(x) (x &= (1<<21)) @@ -71,7 +71,7 @@ static void ppro_fill_in_addresses(struc static void ppro_setup_ctrs(struct op_msrs const * const msrs) { - unsigned int low, high; + uint64_t msr_content; int i; if (cpu_has_arch_perfmon) { @@ -93,30 +93,30 @@ static void ppro_setup_ctrs(struct op_ms /* clear all counters */ for (i = 0 ; i < num_counters; ++i) { - CTRL_READ(low, high, msrs, i); - CTRL_CLEAR(low); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_WRITE(msr_content, msrs, i); } /* avoid a false detection of ctr overflows in NMI handler */ for (i = 0; i < num_counters; ++i) - wrmsrl(msrs->counters[i].addr, -1LL); + wrmsr(msrs->counters[i].addr, -1LL); /* enable active counters */ for (i = 0; i < num_counters; ++i) { if (counter_config[i].enabled) { reset_value[i] = counter_config[i].count; - wrmsrl(msrs->counters[i].addr, -reset_value[i]); + wrmsr(msrs->counters[i].addr, -reset_value[i]); - CTRL_READ(low, high, msrs, i); - CTRL_CLEAR(low); - CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[i].user); - CTRL_SET_KERN(low, counter_config[i].kernel); - CTRL_SET_UM(low, counter_config[i].unit_mask); - CTRL_SET_EVENT(low, counter_config[i].event); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_SET_ENABLE(msr_content); + CTRL_SET_USR(msr_content, counter_config[i].user); + CTRL_SET_KERN(msr_content, counter_config[i].kernel); + CTRL_SET_UM(msr_content, counter_config[i].unit_mask); + CTRL_SET_EVENT(msr_content, counter_config[i].event); + CTRL_WRITE(msr_content, msrs, i); } else { reset_value[i] = 0; } @@ -137,10 +137,10 @@ static int ppro_check_ctrs(unsigned int for (i = 0 ; i < num_counters; ++i) { if (!reset_value[i]) continue; - rdmsrl(msrs->counters[i].addr, val); + val = rdmsr(msrs->counters[i].addr); if (CTR_OVERFLOWED(val)) { xenoprof_log_event(current, regs, eip, mode, i); - wrmsrl(msrs->counters[i].addr, -reset_value[i]); + wrmsr(msrs->counters[i].addr, -reset_value[i]); if ( is_passive(current->domain) && (mode != 2) && (vcpu_vpmu(current)->flags & PASSIVE_DOMAIN_ALLOCATED) ) { @@ -166,38 +166,38 @@ static int ppro_check_ctrs(unsigned int static void ppro_start(struct op_msrs const * const msrs) { - unsigned int low,high; + uint64_t msr_content; int i; for (i = 0; i < num_counters; ++i) { if (reset_value[i]) { - CTRL_READ(low, 
high, msrs, i); - CTRL_SET_ACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_SET_ACTIVE(msr_content); + CTRL_WRITE(msr_content, msrs, i); } } /* Global Control MSR is enabled by default when system power on. * However, this may not hold true when xenoprof starts to run. */ if ( ppro_has_global_ctrl ) - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, (1< %08x\n", from, to); + uint64_t msr_content_from, msr_content_to; + msr_content_from = rdmsr(this_cpu(ler_msr)); + msr_content_to = rdmsr(this_cpu(ler_msr) + 1); + printk("ler: %16"PRIx64" -> %16"PRIx64"\n", + msr_content_from, msr_content_to); } } @@ -403,8 +404,8 @@ static void do_update_sysenter(void *inf { xen_callback_t *address = info; - wrmsr(MSR_IA32_SYSENTER_CS, address->cs, 0); - wrmsr(MSR_IA32_SYSENTER_EIP, address->eip, 0); + wrmsr(MSR_IA32_SYSENTER_CS, address->cs); + wrmsr(MSR_IA32_SYSENTER_EIP, address->eip); } #endif diff -r ab385be9e2ef -r 70357840696c xen/arch/x86/x86_64/traps.c --- a/xen/arch/x86/x86_64/traps.c +++ b/xen/arch/x86/x86_64/traps.c @@ -138,8 +138,8 @@ void show_registers(struct cpu_user_regs if ( this_cpu(ler_msr) && !guest_mode(regs) ) { u64 from, to; - rdmsrl(this_cpu(ler_msr), from); - rdmsrl(this_cpu(ler_msr) + 1, to); + from = rdmsr(this_cpu(ler_msr)); + to = rdmsr(this_cpu(ler_msr) + 1); printk("ler: %016lx -> %016lx\n", from, to); } } @@ -395,28 +395,27 @@ void __devinit subarch_percpu_traps_init /* Trampoline for SYSCALL entry from long mode. */ stack = &stack[IST_MAX * PAGE_SIZE]; /* Skip the IST stacks. */ - wrmsrl(MSR_LSTAR, (unsigned long)stack); + wrmsr(MSR_LSTAR, (unsigned long)stack); stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64); if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) { /* SYSENTER entry. */ - wrmsrl(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom); - wrmsrl(MSR_IA32_SYSENTER_EIP, (unsigned long)sysenter_entry); - wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0); + wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom); + wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)sysenter_entry); + wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS); } /* Trampoline for SYSCALL entry from compatibility mode. */ stack = (char *)L1_CACHE_ALIGN((unsigned long)stack); - wrmsrl(MSR_CSTAR, (unsigned long)stack); + wrmsr(MSR_CSTAR, (unsigned long)stack); stack += write_stack_trampoline(stack, stack_bottom, FLAT_USER_CS32); /* Common SYSCALL parameters. 
*/ - wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS); + wrmsr(MSR_STAR, ((uint64_t)(FLAT_RING3_CS32<<16) | __HYPERVISOR_CS) << 32); wrmsr(MSR_SYSCALL_MASK, X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT| - X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF, - 0U); + X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF); } void init_int80_direct_trap(struct vcpu *v) diff -r ab385be9e2ef -r 70357840696c xen/include/asm-x86/apic.h --- a/xen/include/asm-x86/apic.h +++ b/xen/include/asm-x86/apic.h @@ -82,30 +82,25 @@ static __inline void apic_wrmsr(unsigned reg == APIC_LVR) return; - wrmsrl(APIC_MSR_BASE + (reg >> 4), msr_content); + wrmsr(APIC_MSR_BASE + (reg >> 4), msr_content); } static __inline uint64_t apic_rdmsr(unsigned long reg) { - uint64_t msr_content; - if (reg == APIC_DFR) return -1u; - - rdmsrl(APIC_MSR_BASE + (reg >> 4), msr_content); - return msr_content; + return rdmsr(APIC_MSR_BASE + (reg >> 4)); } -static __inline void apic_write(unsigned long reg, u32 v) +static __inline void apic_write(unsigned long reg, uint32_t v) { - if ( x2apic_enabled ) apic_wrmsr(reg, v); else apic_mem_write(reg, v); } -static __inline void apic_write_atomic(unsigned long reg, u32 v) +static __inline void apic_write_atomic(unsigned long reg, uint32_t v) { if ( x2apic_enabled ) apic_wrmsr(reg, v); @@ -113,7 +108,7 @@ static __inline void apic_write_atomic(u apic_mem_write_atomic(reg, v); } -static __inline u32 apic_read(unsigned long reg) +static __inline uint32_t apic_read(unsigned long reg) { if ( x2apic_enabled ) return apic_rdmsr(reg); diff -r ab385be9e2ef -r 70357840696c xen/include/asm-x86/msr.h --- a/xen/include/asm-x86/msr.h +++ b/xen/include/asm-x86/msr.h @@ -9,29 +9,24 @@ #include #include -#define rdmsr(msr,val1,val2) \ - __asm__ __volatile__("rdmsr" \ - : "=a" (val1), "=d" (val2) \ - : "c" (msr)) +static inline uint64_t rdmsr(unsigned int msr) +{ + unsigned long a__, b__; -#define rdmsrl(msr,val) do { unsigned long a__,b__; \ - __asm__ __volatile__("rdmsr" \ - : "=a" (a__), "=d" (b__) \ - : "c" (msr)); \ - val = a__ | ((u64)b__<<32); \ -} while(0); + __asm__ __volatile__("rdmsr" + : "=a" (a__), "=d" (b__) + : "c" (msr)); + return a__ | ((uint64_t)b__<<32); +} -#define wrmsr(msr,val1,val2) \ - __asm__ __volatile__("wrmsr" \ +static inline void wrmsr(unsigned int msr, uint64_t val) +{ + uint32_t lo, hi; + lo = (uint32_t)val; + hi = (uint32_t)(val >> 32); + __asm__ __volatile__("wrmsr" \ : /* no outputs */ \ - : "c" (msr), "a" (val1), "d" (val2)) - -static inline void wrmsrl(unsigned int msr, __u64 val) -{ - __u32 lo, hi; - lo = (__u32)val; - hi = (__u32)(val >> 32); - wrmsr(msr, lo, hi); + : "c" (msr), "a" (lo), "d" (hi)); } /* rdmsr with exception handling */ @@ -91,9 +86,9 @@ static inline int wrmsr_safe(unsigned in } while(0) #endif -#define write_tsc(val) wrmsrl(MSR_IA32_TSC, val) +#define write_tsc(val) wrmsr(MSR_IA32_TSC, val) -#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0) +#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val)) #define rdpmc(counter,low,high) \ __asm__ __volatile__("rdpmc" \
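The msr.h hunk above is the core of the series: rdmsr() now returns the whole 64-bit register and wrmsr() takes one, with the old rdmsrl()/wrmsrl() variants folded into them and the EAX/EDX plumbing hidden inside the inline helpers. A minimal usage sketch of the resulting read-modify-write style, reusing a constant already converted in the hunks above; the helper function itself is illustrative only:

    /* Sketch only: the lo/hi split is no longer visible at call sites. */
    static void misc_enable_set(uint64_t bits)
    {
        uint64_t msr_content = rdmsr(MSR_IA32_MISC_ENABLE);

        msr_content |= bits;
        wrmsr(MSR_IA32_MISC_ENABLE, msr_content);
    }

    /* Equivalent calls under the old two-register interface, for comparison:
     *     rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
     *     lo |= bits;                              (low half only)
     *     wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
     */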