diff -r 7b00193bd033 xen/arch/x86/cpu/amd.c --- a/xen/arch/x86/cpu/amd.c Mon Jun 28 17:40:16 2010 +0100 +++ b/xen/arch/x86/cpu/amd.c Tue Jun 29 10:34:16 2010 +0200 @@ -139,8 +139,10 @@ static void __devinit set_cpuidmask(cons /* FIXME check if processor supports CPUID masking */ /* AMD processors prior to family 10h required a 32-bit password */ if (c->x86 >= 0x10) { - wrmsr(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx); - wrmsr(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx); + wrmsrl(MSR_K8_FEATURE_MASK, + ((uint64_t)feat_ecx << 32) | feat_edx); + wrmsrl(MSR_K8_EXT_FEATURE_MASK, + ((uint64_t)extfeat_ecx << 32) | extfeat_edx); } else if (c->x86 == 0x0f) { wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx); wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx); @@ -252,11 +254,12 @@ static void check_disable_c1e(unsigned i static void __devinit init_amd(struct cpuinfo_x86 *c) { u32 l, h; + uint64_t msr_content; int mbytes = num_physpages >> (20-PAGE_SHIFT); int r; #ifdef CONFIG_SMP - unsigned long long value; + uint64_t value; /* Disable TLB flush filter by setting HWCR.FFDIS on K8 * bit 6 of msr C001_0015 @@ -266,8 +269,7 @@ static void __devinit init_amd(struct cp */ if (c->x86 == 15) { rdmsrl(MSR_K7_HWCR, value); - value |= 1 << 6; - wrmsrl(MSR_K7_HWCR, value); + wrmsrl(MSR_K7_HWCR, value | (1ULL << 6)); } #endif @@ -346,13 +348,15 @@ static void __devinit init_amd(struct cp if(mbytes>508) mbytes=508; - rdmsr(MSR_K6_WHCR, l, h); - if ((l&0x0000FFFF)==0) { + rdmsrl(MSR_K6_WHCR, msr_content); + if (((uint32_t)msr_content & 0x0000ffff) == 0) { unsigned long flags; - l=(1<<0)|((mbytes/4)<<1); + l = (1 << 0) | ((mbytes/4) << 1); + h = (uint32_t)(msr_content >> 32); local_irq_save(flags); wbinvd(); - wrmsr(MSR_K6_WHCR, l, h); + msr_content = ((uint64_t)h << 32) | l; + wrmsrl(MSR_K6_WHCR, msr_content); local_irq_restore(flags); printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n", mbytes); @@ -367,13 +371,15 @@ static void __devinit 
init_amd(struct cp if(mbytes>4092) mbytes=4092; - rdmsr(MSR_K6_WHCR, l, h); - if ((l&0xFFFF0000)==0) { + rdmsrl(MSR_K6_WHCR, msr_content); + if ((msr_content & 0xffff0000) == 0) { unsigned long flags; - l=((mbytes>>2)<<22)|(1<<16); + l = ((mbytes/4) << 22) | (1 << 16); + h = (uint32_t)(msr_content >> 32); local_irq_save(flags); wbinvd(); - wrmsr(MSR_K6_WHCR, l, h); + msr_content = ((uint64_t)h << 32) | l; + wrmsrl(MSR_K6_WHCR, msr_content); local_irq_restore(flags); printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n", mbytes); @@ -401,9 +407,9 @@ static void __devinit init_amd(struct cp if (c->x86_model >= 6 && c->x86_model <= 10) { if (!cpu_has(c, X86_FEATURE_XMM)) { printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); - rdmsr(MSR_K7_HWCR, l, h); - l &= ~0x00008000; - wrmsr(MSR_K7_HWCR, l, h); + rdmsrl(MSR_K7_HWCR, msr_content); + wrmsrl(MSR_K7_HWCR, + msr_content & ~0x8000ULL); set_bit(X86_FEATURE_XMM, c->x86_capability); } } @@ -413,11 +419,17 @@ static void __devinit init_amd(struct cp * As per AMD technical note 27212 0.2 */ if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) { - rdmsr(MSR_K7_CLK_CTL, l, h); - if ((l & 0xfff00000) != 0x20000000) { - printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, - ((l & 0x000fffff)|0x20000000)); - wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); + uint64_t old_value; + rdmsrl(MSR_K7_CLK_CTL, msr_content); + if ((msr_content & 0xfff00000ULL) != 0x20000000ULL) { + old_value = msr_content; + msr_content &= 0xfffffULL; + msr_content |= 0x20000000ULL; + printk("CPU: CLK_CTL MSR was 0x%"PRIx64 + ". Reprogramming to 0x%"PRIx64 + "\n", + old_value, msr_content); + wrmsrl(MSR_K7_CLK_CTL, msr_content); } } break; @@ -438,17 +450,18 @@ static void __devinit init_amd(struct cp } if (c->x86 == 15) { - rdmsr(MSR_K7_HWCR, l, h); + rdmsrl(MSR_K7_HWCR, msr_content); printk(KERN_INFO "CPU%d: AMD Flush Filter %sabled", - smp_processor_id(), (l & (1<<6)) ? 
"dis" : "en"); - if ((flush_filter_force > 0) && (l & (1<<6))) { - l &= ~(1<<6); + smp_processor_id(), + (msr_content & (1ULL<<6)) ? "dis" : "en"); + if ((flush_filter_force > 0) && (msr_content & (1ULL<<6))) { + msr_content &= ~(1ULL<<6); printk(" -> Forcibly enabled"); - } else if ((flush_filter_force < 0) && !(l & (1<<6))) { - l |= 1<<6; + } else if ((flush_filter_force < 0) && !(msr_content & (1ULL<<6))) { + msr_content |= 1ULL<<6; printk(" -> Forcibly disabled"); } - wrmsr(MSR_K7_HWCR, l, h); + wrmsrl(MSR_K7_HWCR, msr_content); printk("\n"); } diff -r 7b00193bd033 xen/arch/x86/cpu/intel.c --- a/xen/arch/x86/cpu/intel.c Mon Jun 28 17:40:16 2010 +0100 +++ b/xen/arch/x86/cpu/intel.c Tue Jun 29 10:34:16 2010 +0200 @@ -39,11 +39,14 @@ struct movsl_mask movsl_mask __read_most static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c) { const char *extra = ""; + uint64_t msr_content; if (!~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx & opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) return; + msr_content = (opt_cpuid_mask_ecx ? : ~0U) + | ((uint64_t)(opt_cpuid_mask_edx ? 
: ~0u) << 32); /* Only family 6 supports this feature */ switch ((c->x86 == 6) * c->x86_model) { case 0x17: @@ -51,9 +54,7 @@ static void __devinit set_cpuidmask(cons break; /* fall through */ case 0x1d: - wrmsr(MSR_INTEL_CPUID_FEATURE_MASK, - opt_cpuid_mask_ecx, - opt_cpuid_mask_edx); + wrmsrl(MSR_INTEL_CPUID_FEATURE_MASK, msr_content); if (!~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) return; extra = "extended "; @@ -70,12 +71,8 @@ static void __devinit set_cpuidmask(cons /* fall through */ case 0x1e: case 0x1f: case 0x25: case 0x2c: case 0x2e: case 0x2f: - wrmsr(MSR_INTEL_CPUID1_FEATURE_MASK, - opt_cpuid_mask_ecx, - opt_cpuid_mask_edx); - wrmsr(MSR_INTEL_CPUID80000001_FEATURE_MASK, - opt_cpuid_mask_ext_ecx, - opt_cpuid_mask_ext_edx); + wrmsrl(MSR_INTEL_CPUID1_FEATURE_MASK, msr_content); + wrmsrl(MSR_INTEL_CPUID80000001_FEATURE_MASK, (opt_cpuid_mask_ext_ecx ? : ~0u) | ((uint64_t)(opt_cpuid_mask_ext_edx ? : ~0u) << 32)); return; } @@ -98,15 +95,15 @@ void __devinit early_intel_workaround(st */ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) { - unsigned long lo, hi; + uint64_t msr_content; if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { - rdmsr (MSR_IA32_MISC_ENABLE, lo, hi); - if ((lo & (1<<9)) == 0) { - printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); - printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); - lo |= (1<<9); /* Disable hw prefetching */ - wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); + rdmsrl(MSR_IA32_MISC_ENABLE, msr_content); + if ((msr_content & (1ULL << 9)) == 0) { + printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); + printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); + /* Disable hw prefetching */ + wrmsrl(MSR_IA32_MISC_ENABLE, msr_content | (1ULL << 9)); } } } diff -r 7b00193bd033 xen/arch/x86/cpu/transmeta.c --- a/xen/arch/x86/cpu/transmeta.c Mon Jun 28 17:40:16 2010 +0100 +++ b/xen/arch/x86/cpu/transmeta.c Tue Jun 29 10:34:16 2010 +0200 @@ -7,7 +7,8 @@ static void __init init_transmeta(struct cpuinfo_x86 *c) { - 
unsigned int cap_mask, uk, max, dummy; + uint64_t cap_mask; + unsigned int max, dummy; unsigned int cms_rev1, cms_rev2; unsigned int cpu_rev, cpu_freq, cpu_flags, new_cpu_rev; char cpu_info[65]; @@ -68,10 +69,10 @@ static void __init init_transmeta(struct } /* Unhide possibly hidden capability flags */ - rdmsr(0x80860004, cap_mask, uk); - wrmsr(0x80860004, ~0, uk); + rdmsrl(0x80860004, cap_mask); + wrmsrl(0x80860004, ~0x0ULL); c->x86_capability[0] = cpuid_edx(0x00000001); - wrmsr(0x80860004, cap_mask, uk); + wrmsrl(0x80860004, cap_mask); /* If we can run i686 user-space code, call us an i686 */ #define USER686 (X86_FEATURE_TSC|X86_FEATURE_CX8|X86_FEATURE_CMOV)