[Xen-changelog] [xen-unstable] x86: fix pv cpuid masking

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: fix pv cpuid masking
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 16 Jun 2010 00:15:21 -0700
Delivery-date: Wed, 16 Jun 2010 00:17:04 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1276604335 -3600
# Node ID 2501732e291b001711a0dc1c474bb89ce77f3110
# Parent  a2cc1db1af9c8f9b148c80f8b2c3f64bde7542f9
x86: fix pv cpuid masking

Invert the initial values of the variables that the command line
options are parsed into, so that completely clearing out one or more
of the four bit fields becomes possible.
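
As a quick illustration (a standalone sketch, not part of the patch; the
option semantics match the code below but the program itself is
hypothetical), the old scheme defaulted the options to 0 and applied
them as "opt ? : ~0U", so a user-supplied 0 silently turned back into
~0U; starting from ~0u instead lets 0 express a fully cleared field:

  #include <stdio.h>

  int main(void)
  {
          /* old scheme: variable defaults to 0, which also means "unset" */
          unsigned int old_opt = 0;
          /* new scheme: variable defaults to ~0u, i.e. "mask nothing" */
          unsigned int new_opt = ~0u;

          /* user passes cpuid_mask_ecx=0, wanting all ECX features hidden */
          old_opt = 0;
          new_opt = 0;

          /* old defaulting collapses the 0 back to ~0U; new keeps it as 0 */
          printf("old effective mask: %#010x\n", old_opt ? old_opt : ~0U);
          printf("new effective mask: %#010x\n", new_opt);
          return 0;
  }

This is also why the guard in set_cpuidmask() changes from OR-ing the
values (non-zero meaning "something was set") to checking whether any
bit is cleared in their AND, i.e. ~(a & b & c & d) != 0.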

Further, consolidate the command line parameter specifications into
a single place.

Finally, as per "Intel Virtualization Technology FlexMigration
Application Note" (http://www.intel.com/Assets/PDF/manual/323850.pdf),
also handle family 6 model 0x1f.
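
For orientation, a hypothetical helper (not in the patch; it merely
distills the family/model/stepping checks from the intel.c hunk below)
showing which CPUs are expected to expose the CPUID1/80000001 feature
mask MSRs once model 0x1f is included:

  /* Hypothetical summary of the checks in set_cpuidmask(); family 6 only. */
  static int has_cpuid1_feature_mask_msrs(unsigned int family,
                                          unsigned int model,
                                          unsigned int stepping)
  {
          if (family != 6)
                  return 0;
          switch (model) {
          case 0x1a:
                  return (stepping & 0x0f) > 2;
          case 0x1e: case 0x1f:
          case 0x25: case 0x2c: case 0x2e: case 0x2f:
                  return 1;
          default:
                  return 0;
          }
  }

Models 0x17 (stepping >= 4) and 0x1d keep using the older
MSR_INTEL_CPUID_FEATURE_MASK interface (MSR 0x478) instead.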

It remains an open question whether pv_cpuid() should also consume
these masks.
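
For context, the comment retained in intel.c below gives the CPUID leaf 1
values of an E8400 (ecx = 0x0008E3FD, edx = 0xBFEBFBFF); an illustrative
Xen command line using them to "rev down" a newer family 6 part would be:

  cpuid_mask_ecx=0x0008E3FD cpuid_mask_edx=0xBFEBFBFF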

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/cpu/amd.c          |   22 ++-------
 xen/arch/x86/cpu/common.c       |    9 +++
 xen/arch/x86/cpu/cpu.h          |    3 +
 xen/arch/x86/cpu/intel.c        |   94 +++++++++++++++++++---------------------
 xen/include/asm-x86/msr-index.h |    8 +--
 5 files changed, 69 insertions(+), 67 deletions(-)

diff -r a2cc1db1af9c -r 2501732e291b xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c    Tue Jun 15 13:18:09 2010 +0100
+++ b/xen/arch/x86/cpu/amd.c    Tue Jun 15 13:18:55 2010 +0100
@@ -32,14 +32,6 @@ static char opt_famrev[14];
 static char opt_famrev[14];
 string_param("cpuid_mask_cpu", opt_famrev);
 
-/* Finer-grained CPUID feature control. */
-static unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
-integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
-integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
-static unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
-integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
-integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
-
 static inline void wrmsr_amd(unsigned int index, unsigned int lo, 
                unsigned int hi)
 {
@@ -60,7 +52,7 @@ static inline void wrmsr_amd(unsigned in
  *
  * The processor revision string parameter has precedence.
  */
-static void __devinit set_cpuidmask(struct cpuinfo_x86 *c)
+static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c)
 {
        static unsigned int feat_ecx, feat_edx;
        static unsigned int extfeat_ecx, extfeat_edx;
@@ -75,12 +67,12 @@ static void __devinit set_cpuidmask(stru
        ASSERT((status == not_parsed) && (smp_processor_id() == 0));
        status = no_mask;
 
-       if (opt_cpuid_mask_ecx | opt_cpuid_mask_edx |
-           opt_cpuid_mask_ext_ecx | opt_cpuid_mask_ext_edx) {
-               feat_ecx = opt_cpuid_mask_ecx ? : ~0U;
-               feat_edx = opt_cpuid_mask_edx ? : ~0U;
-               extfeat_ecx = opt_cpuid_mask_ext_ecx ? : ~0U;
-               extfeat_edx = opt_cpuid_mask_ext_edx ? : ~0U;
+       if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
+             opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
+               feat_ecx = opt_cpuid_mask_ecx;
+               feat_edx = opt_cpuid_mask_edx;
+               extfeat_ecx = opt_cpuid_mask_ext_ecx;
+               extfeat_edx = opt_cpuid_mask_ext_edx;
        } else if (*opt_famrev == '\0') {
                return;
        } else if (!strcmp(opt_famrev, "fam_0f_rev_c")) {
diff -r a2cc1db1af9c -r 2501732e291b xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Tue Jun 15 13:18:09 2010 +0100
+++ b/xen/arch/x86/cpu/common.c Tue Jun 15 13:18:55 2010 +0100
@@ -21,6 +21,15 @@ static int cachesize_override __cpuinitd
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_fxsr __cpuinitdata;
 static int disable_x86_serial_nr __cpuinitdata;
+
+unsigned int __devinitdata opt_cpuid_mask_ecx = ~0u;
+integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
+unsigned int __devinitdata opt_cpuid_mask_edx = ~0u;
+integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
+unsigned int __devinitdata opt_cpuid_mask_ext_ecx = ~0u;
+integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
+unsigned int __devinitdata opt_cpuid_mask_ext_edx = ~0u;
+integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
 
 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 
diff -r a2cc1db1af9c -r 2501732e291b xen/arch/x86/cpu/cpu.h
--- a/xen/arch/x86/cpu/cpu.h    Tue Jun 15 13:18:09 2010 +0100
+++ b/xen/arch/x86/cpu/cpu.h    Tue Jun 15 13:18:55 2010 +0100
@@ -21,6 +21,9 @@ struct cpu_dev {
 
 extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
 
+extern unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
+extern unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
+
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
diff -r a2cc1db1af9c -r 2501732e291b xen/arch/x86/cpu/intel.c
--- a/xen/arch/x86/cpu/intel.c  Tue Jun 15 13:18:09 2010 +0100
+++ b/xen/arch/x86/cpu/intel.c  Tue Jun 15 13:18:55 2010 +0100
@@ -20,69 +20,67 @@
 
 extern int trap_init_f00f_bug(void);
 
+static int use_xsave = 1;
+boolean_param("xsave", use_xsave);
+
+#ifdef CONFIG_X86_INTEL_USERCOPY
+/*
+ * Alignment at which movsl is preferred for bulk memory copies.
+ */
+struct movsl_mask movsl_mask __read_mostly;
+#endif
+
 /*
  * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
- * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD, 
+ * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
  * edx = 0xBFEBFBFF when executing CPUID.EAX = 1 normally. If you want to
  * 'rev down' to E8400, you can set these values in these Xen boot parameters.
  */
-static unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
-integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
-integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
-static unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
-integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
-integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
-
-static int use_xsave = 1;
-boolean_param("xsave", use_xsave);
-
-#ifdef CONFIG_X86_INTEL_USERCOPY
-/*
- * Alignment at which movsl is preferred for bulk memory copies.
- */
-struct movsl_mask movsl_mask __read_mostly;
-#endif
-
-static void __devinit set_cpuidmask(struct cpuinfo_x86 *c)
-{
-       unsigned int model = c->x86_model;
-
-       if (!(opt_cpuid_mask_ecx | opt_cpuid_mask_edx | 
-             opt_cpuid_mask_ext_ecx | opt_cpuid_mask_ext_edx))
+static void __devinit set_cpuidmask(const struct cpuinfo_x86 *c)
+{
+       const char *extra = "";
+
+       if (!~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
+              opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx))
                return;
 
-       if (c->x86 != 0x6)      /* Only family 6 supports this feature  */
-               return;
-
-       if ((model == 0x1d) || ((model == 0x17) && (c->x86_mask >= 4))) {
-               wrmsr(MSR_IA32_CPUID_FEATURE_MASK1,
-                     opt_cpuid_mask_ecx ? : ~0u,
-                     opt_cpuid_mask_edx ? : ~0u);
-       }
+       /* Only family 6 supports this feature  */
+       switch ((c->x86 == 6) * c->x86_model) {
+       case 0x17:
+               if ((c->x86_mask & 0x0f) < 4)
+                       break;
+               /* fall through */
+       case 0x1d:
+               wrmsr(MSR_INTEL_CPUID_FEATURE_MASK,
+                     opt_cpuid_mask_ecx,
+                     opt_cpuid_mask_edx);
+               if (!~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx))
+                       return;
+               extra = "extended ";
+               break;
 /* 
  * CPU supports this feature if the processor signature meets the following:
  * (CPUID.(EAX=01h):EAX) > 000106A2h, or
  * (CPUID.(EAX=01h):EAX) == 000106Exh, 0002065xh, 000206Cxh, 000206Exh, or 000206Fxh
  *
  */
-       else if (((model == 0x1a) && (c->x86_mask > 2))
-                || model == 0x1e
-                || model == 0x25 
-                || model == 0x2c 
-                || model == 0x2e 
-                || model == 0x2f) {
-               wrmsr(MSR_IA32_CPUID1_FEATURE_MASK,
-                     opt_cpuid_mask_ecx ? : ~0u,
-                     opt_cpuid_mask_edx ? : ~0u);
-               wrmsr(MSR_IA32_CPUID80000001_FEATURE_MASK,
-                     opt_cpuid_mask_ext_ecx ? : ~0u,
-                     opt_cpuid_mask_ext_edx ? : ~0u);
-       }
-       else {
-               printk(XENLOG_ERR "Cannot set CPU feature mask on CPU#%d\n",
-                      smp_processor_id());
+       case 0x1a:
+               if ((c->x86_mask & 0x0f) <= 2)
+                       break;
+               /* fall through */
+       case 0x1e: case 0x1f:
+       case 0x25: case 0x2c: case 0x2e: case 0x2f:
+               wrmsr(MSR_INTEL_CPUID1_FEATURE_MASK,
+                     opt_cpuid_mask_ecx,
+                     opt_cpuid_mask_edx);
+               wrmsr(MSR_INTEL_CPUID80000001_FEATURE_MASK,
+                     opt_cpuid_mask_ext_ecx,
+                     opt_cpuid_mask_ext_edx);
                return;
        }
+
+       printk(XENLOG_ERR "Cannot set CPU feature mask on CPU#%d\n",
+              smp_processor_id());
 }
 
 void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
diff -r a2cc1db1af9c -r 2501732e291b xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h   Tue Jun 15 13:18:09 2010 +0100
+++ b/xen/include/asm-x86/msr-index.h   Tue Jun 15 13:18:55 2010 +0100
@@ -156,10 +156,10 @@
 #define MSR_P6_EVNTSEL0                        0x00000186
 #define MSR_P6_EVNTSEL1                        0x00000187
 
-/* MSR for cpuid feature mask */
-#define MSR_IA32_CPUID_FEATURE_MASK1   0x00000478
-#define MSR_IA32_CPUID1_FEATURE_MASK    0x00000130
-#define MSR_IA32_CPUID80000001_FEATURE_MASK 0x00000131
+/* MSRs for Intel cpuid feature mask */
+#define MSR_INTEL_CPUID_FEATURE_MASK   0x00000478
+#define MSR_INTEL_CPUID1_FEATURE_MASK  0x00000130
+#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
 
 /* MSRs & bits used for VMX enabling */
 #define MSR_IA32_VMX_BASIC                      0x480

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
