(Applies cleanly only on top of the previously sent SVM/LBR patch.)
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
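There is no CPUID feature flag that announces EFER.LMSLE, so the patch probes for it at boot: start_svm() tries to set the bit with wrmsr_safe(), reads EFER back, and clears cpu_has_lmsl if the bit did not stick (restoring the original EFER value if it did). A commented standalone sketch of that probe, using Xen's read_efer()/wrmsr_safe()/rdmsr()/wrmsr() helpers exactly as in the hunk below (the probe_lmsle() wrapper itself is purely illustrative):

    #ifdef __x86_64__
    /* Set by the boot-time probe; guests may only set EFER.LMSLE if this is 1. */
    static unsigned char cpu_has_lmsl = 1;

    static void probe_lmsle(void)
    {
        unsigned int eax = read_efer();
        unsigned int edx = read_efer() >> 32;

        /* Attempt to set LMSLE; wrmsr_safe() returns 0 unless the write #GPs. */
        if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
            rdmsr(MSR_EFER, eax, edx);

        /*
         * If the write faulted, eax still holds the original EFER value (with
         * LMSLE clear), so both failure paths reach this test with the bit unset.
         */
        if ( !(eax & EFER_LMSLE) )
            cpu_has_lmsl = 0;
        else
            wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);   /* restore original EFER */
    }
    #endif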
Index: 2007-08-08/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-08-08.orig/xen/arch/x86/hvm/svm/svm.c 2007-08-08 11:40:11.000000000 +0200
+++ 2007-08-08/xen/arch/x86/hvm/svm/svm.c 2007-08-08 11:43:53.000000000 +0200
@@ -68,6 +68,11 @@ static void *hsa[NR_CPUS] __read_mostly;
/* vmcb used for extended host state */
static void *root_vmcb[NR_CPUS] __read_mostly;
+#ifdef __x86_64__
+/* indicate whether guest may use EFER.LMSLE */
+static unsigned char cpu_has_lmsl = 1;
+#endif
+
/* SVM feature flags */
u32 svm_feature_flags;
@@ -190,7 +195,10 @@ static enum handler_return long_mode_do_
case MSR_EFER:
/* Offending reserved bit will cause #GP. */
#ifdef __x86_64__
- if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
+ if ( (msr_content & ~(EFER_FFXSE | EFER_LMSLE | EFER_LME | EFER_LMA |
+ EFER_NX | EFER_SCE)) ||
+ (!cpu_has_lmsl && (msr_content & EFER_LMSLE)) ||
+ (!cpu_has_ffxsr && (msr_content & EFER_FFXSE)) ||
#else
if ( (msr_content & ~(EFER_NX | EFER_SCE)) ||
#endif
@@ -1044,6 +1052,21 @@ int start_svm(struct cpuinfo_x86 *c)
/* Initialize core's ASID handling. */
svm_asid_init(c);
+#ifdef __x86_64__
+ /*
+ * Check whether EFER.LMSLE can be written.
+ * Unfortunately there's no feature bit defined for this.
+ */
+ eax = read_efer();
+ edx = read_efer() >> 32;
+ if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
+ rdmsr(MSR_EFER, eax, edx);
+ if ( !(eax & EFER_LMSLE) )
+ cpu_has_lmsl = 0;
+ else
+ wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
+#endif
+
if ( cpu != 0 )
return 1;
@@ -1131,8 +1154,6 @@ static void svm_vmexit_do_cpuid(struct v
/* So far, we do not support 3DNow for the guest. */
clear_bit(X86_FEATURE_3DNOW & 31, &edx);
clear_bit(X86_FEATURE_3DNOWEXT & 31, &edx);
- /* no FFXSR instructions feature. */
- clear_bit(X86_FEATURE_FFXSR & 31, &edx);
}
else if ( input == 0x80000007 || input == 0x8000000A )
{
Index: 2007-08-08/xen/include/asm-x86/cpufeature.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/cpufeature.h 2007-08-08 11:37:08.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/cpufeature.h 2007-08-08 11:45:13.000000000 +0200
@@ -49,6 +49,7 @@
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FFXSR (1*32+25) /* FFXSR instruction optimizations */
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
@@ -93,7 +94,6 @@
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVME (6*32+ 2) /* Secure Virtual Machine */
-#define X86_FEATURE_FFXSR (6*32+25) /* FFXSR instruction optimizations */
#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
@@ -121,6 +121,7 @@
#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR)
#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_ffxsr 0
#else /* __x86_64__ */
#define cpu_has_vme 0
#define cpu_has_de 1
@@ -144,6 +145,7 @@
#define cpu_has_cyrix_arr 0
#define cpu_has_centaur_mcr 0
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_ffxsr boot_cpu_has(X86_FEATURE_FFXSR)
#endif
#endif /* __ASM_I386_CPUFEATURE_H */
Index: 2007-08-08/xen/include/asm-x86/msr.h
===================================================================
--- 2007-08-08.orig/xen/include/asm-x86/msr.h 2007-08-07 15:00:27.000000000 +0200
+++ 2007-08-08/xen/include/asm-x86/msr.h 2007-08-08 11:43:53.000000000 +0200
@@ -140,12 +140,16 @@ static inline void wrmsrl(unsigned int m
#define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11 /* No execute enable */
#define _EFER_SVME 12
+#define _EFER_LMSLE 13
+#define _EFER_FFXSE 14
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
#define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX)
#define EFER_SVME (1<<_EFER_SVME)
+#define EFER_LMSLE (1<<_EFER_LMSLE)
+#define EFER_FFXSE (1<<_EFER_FFXSE)
#ifndef __ASSEMBLY__
@@ -329,8 +333,6 @@ static inline void write_efer(__u64 val)
#define MSR_P4_U2L_ESCR0 0x3b0
#define MSR_P4_U2L_ESCR1 0x3b1
-#define MSR_K6_EFER 0xC0000080
-#define MSR_K6_STAR 0xC0000081
#define MSR_K6_WHCR 0xC0000082
#define MSR_K6_UWCCR 0xC0000085
#define MSR_K6_EPMR 0xC0000086
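Once the FFXSR CPUID bit is no longer filtered from the guest's view of leaf 0x80000001 and the MSR-write handler accepts EFER.FFXSE, a 64-bit guest kernel can enable the fast-FXSAVE/FXRSTOR optimization on its own. A hypothetical guest-side sketch (the cpuid()/rdmsrl()/wrmsrl() helpers and the guest_enable_ffxsr() name are assumptions, not part of this patch; the bit positions follow the AMD manuals: CPUID.80000001H:EDX[25] and EFER bit 14):

    #define MSR_EFER    0xc0000080
    #define EFER_FFXSE  (1ULL << 14)    /* Fast FXSAVE/FXRSTOR enable */

    static void guest_enable_ffxsr(void)
    {
        unsigned int eax, ebx, ecx, edx;
        uint64_t efer;

        /* FFXSR support is advertised in CPUID leaf 0x80000001, EDX bit 25. */
        cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
        if ( !(edx & (1u << 25)) )
            return;

        /* With EFER.FFXSE set, FXSAVE/FXRSTOR at CPL 0 in 64-bit mode skip the XMM registers. */
        rdmsrl(MSR_EFER, efer);
        wrmsrl(MSR_EFER, efer | EFER_FFXSE);
    }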