This patch adds support for AMD's Extended Migration, i.e. masking of the
CPUID features and extended features so that a common feature set can be
presented across a pool of differing hosts.
Signed-off-by: Travis Betak <travis.betak@xxxxxxx>
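---
Example usage (the mask values below are illustrative only): the masks can
be set from the Xen boot line either with a pre-canned revision string

    cpuid_mask_cpu=fam_0f_rev_f

or with explicit masks

    cpuid_mask_ecx=0x00002001 cpuid_mask_edx=0x178bfbff

The revision string takes precedence when both are given.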
diff -r 7c10be016e43 xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c Tue Aug 19 10:51:41 2008 +0100
+++ b/xen/arch/x86/cpu/amd.c Tue Aug 26 13:35:01 2008 -0500
@@ -10,8 +10,137 @@
#include <asm/hvm/support.h>
#include "cpu.h"
+#include "amd.h"
int start_svm(struct cpuinfo_x86 *c);
+
+/*
+ * Pre-canned values for overriding the CPUID features
+ * and extended features masks.
+ *
+ * Currently supported processors:
+ *
+ * "fam_0f_rev_c"
+ * "fam_0f_rev_d"
+ * "fam_0f_rev_e"
+ * "fam_0f_rev_f"
+ * "fam_0f_rev_g"
+ * "fam_10_rev_b"
+ * "fam_10_rev_c"
+ * "fam_11_rev_b"
+ */
+static char opt_famrev[14];
+string_param("cpuid_mask_cpu", opt_famrev);
+
+/* Finer-grained CPUID feature control. */
+static unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
+integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
+integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
+static unsigned int opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx;
+integer_param("cpuid_mask_ext_ecx", opt_cpuid_mask_ext_ecx);
+integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
+
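+/*
+ * On family 0Fh processors the feature-mask MSRs are passworded: the
+ * write only takes effect if EDI holds the 32-bit password 0x9c5a203a,
+ * which a plain wrmsr() does not guarantee.
+ */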
+static inline void wrmsr_amd(unsigned int index, unsigned int lo,
+ unsigned int hi)
+{
+ __asm__ __volatile__ (
+ "wrmsr"
+ : /* No outputs */
+ : "c" (index), "a" (lo),
+ "d" (hi), "D" (0x9c5a203a)
+ );
+}
+
+/*
+ * Mask the features and extended features returned by CPUID.  Each mask
+ * is ANDed with the value the processor would otherwise report for
+ * CPUID leaf 1 or 0x80000001 (ECX/EDX).  Parameters are set from the
+ * boot line via two methods:
+ *
+ * 1) Specific processor revision string
+ * 2) User-defined masks
+ *
+ * The processor revision string parameter takes precedence.
+ */
+static void __devinit set_cpuidmask(struct cpuinfo_x86 *c)
+{
+ unsigned int feat_ecx, feat_edx;
+ unsigned int extfeat_ecx, extfeat_edx;
+
+ /* FIXME check if processor supports CPUID masking */
+
+ /* Check if we were given specific processor to emulate. */
+ if (opt_famrev[0] != '\0')
+ {
+ if (!strcmp(opt_famrev, "fam_0f_rev_c")) {
+ feat_ecx = AMD_FEATURES_K8_REV_C_ECX;
+ feat_edx = AMD_FEATURES_K8_REV_C_EDX;
+ extfeat_ecx = AMD_EXTFEATURES_K8_REV_C_ECX;
+ extfeat_edx = AMD_EXTFEATURES_K8_REV_C_EDX;
+ } else if (!strcmp(opt_famrev, "fam_0f_rev_d")) {
+ feat_ecx = AMD_FEATURES_K8_REV_D_ECX;
+ feat_edx = AMD_FEATURES_K8_REV_D_EDX;
+ extfeat_ecx = AMD_EXTFEATURES_K8_REV_D_ECX;
+ extfeat_edx = AMD_EXTFEATURES_K8_REV_D_EDX;
+ } else if (!strcmp(opt_famrev, "fam_0f_rev_e")) {
+ feat_ecx = AMD_FEATURES_K8_REV_E_ECX;
+ feat_edx = AMD_FEATURES_K8_REV_E_EDX;
+ extfeat_ecx = AMD_EXTFEATURES_K8_REV_E_ECX;
+ extfeat_edx = AMD_EXTFEATURES_K8_REV_E_EDX;
+ } else if (!strcmp(opt_famrev, "fam_0f_rev_f")) {
+ feat_ecx = AMD_FEATURES_K8_REV_F_ECX;
+ feat_edx = AMD_FEATURES_K8_REV_F_EDX;
+ extfeat_ecx = AMD_EXTFEATURES_K8_REV_F_ECX;
+ extfeat_edx = AMD_EXTFEATURES_K8_REV_F_EDX;
+ } else if (!strcmp(opt_famrev, "fam_0f_rev_g")) {
+ feat_ecx = AMD_FEATURES_K8_REV_G_ECX;
+ feat_edx = AMD_FEATURES_K8_REV_G_EDX;
+ extfeat_ecx = AMD_EXTFEATURES_K8_REV_G_ECX;
+ extfeat_edx = AMD_EXTFEATURES_K8_REV_G_EDX;
+ } else if (!strcmp(opt_famrev, "fam_10_rev_b")) {
+ feat_ecx = AMD_FEATURES_FAM10h_REV_B_ECX;
+ feat_edx = AMD_FEATURES_FAM10h_REV_B_EDX;
+ extfeat_ecx = AMD_EXTFEATURES_FAM10h_REV_B_ECX;
+ extfeat_edx = AMD_EXTFEATURES_FAM10h_REV_B_EDX;
+ } else if (!strcmp(opt_famrev, "fam_10_rev_c")) {
+ feat_ecx = AMD_FEATURES_FAM10h_REV_C_ECX;
+ feat_edx = AMD_FEATURES_FAM10h_REV_C_EDX;
+ extfeat_ecx = AMD_EXTFEATURES_FAM10h_REV_C_ECX;
+ extfeat_edx = AMD_EXTFEATURES_FAM10h_REV_C_EDX;
+ } else if (!strcmp(opt_famrev, "fam_11_rev_b")) {
+ feat_ecx = AMD_FEATURES_FAM11h_REV_B_ECX;
+ feat_edx = AMD_FEATURES_FAM11h_REV_B_EDX;
+ extfeat_ecx = AMD_EXTFEATURES_FAM11h_REV_B_ECX;
+ extfeat_edx = AMD_EXTFEATURES_FAM11h_REV_B_EDX;
+ } else {
+ printk("Invalid processor string: %s\n", opt_famrev);
+ printk("CPUID will not be masked\n");
+ return;
+ }
+ /* Otherwise, check if we were given specific masking values. */
+ } else if (opt_cpuid_mask_ecx | opt_cpuid_mask_edx |
+ opt_cpuid_mask_ext_ecx | opt_cpuid_mask_ext_edx) {
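+ /* A mask left at zero (unset) defaults to ~0U, i.e. that register is not masked. */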
+ feat_ecx = opt_cpuid_mask_ecx ? : ~0U;
+ feat_edx = opt_cpuid_mask_edx ? : ~0U;
+ extfeat_ecx = opt_cpuid_mask_ext_ecx ? : ~0U;
+ extfeat_edx = opt_cpuid_mask_ext_edx ? : ~0U;
+ } else {
+ /* Silently do nothing. */
+ return;
+ }
+
+ printk("Writing CPUID feature mask ECX:EDX -> %08Xh:%08Xh\n",
+ feat_ecx, feat_edx);
+ printk("Writing CPUID extended feature mask ECX:EDX -> %08Xh:%08Xh\n",
+ extfeat_ecx, extfeat_edx);
+
+ /* AMD processors prior to family 10h require a 32-bit password for these MSRs */
+ if (c->x86 >= 0x10) {
+ wrmsr(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
+ wrmsr(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
+ } else if (c->x86 == 0x0f) {
+ wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx);
+ wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx);
+ }
+}
/*
* amd_flush_filter={on,off}. Forcibly Enable or disable the TLB flush
@@ -368,6 +497,8 @@
if ((smp_processor_id() == 1) && c1_ramping_may_cause_clock_drift(c))
disable_c1_ramping();
+ set_cpuidmask(c);
+
start_svm(c);
}
diff -r 7c10be016e43 xen/arch/x86/cpu/amd.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/cpu/amd.h Tue Aug 26 13:35:01 2008 -0500
@@ -0,0 +1,103 @@
+/*
+ * amd.h - AMD processor specific definitions
+ */
+
+#ifndef __AMD_H__
+#define __AMD_H__
+
+#include <asm/cpufeature.h>
+
+/* CPUID masks for use by AMD-V Extended Migration */
+
+#define X86_FEATURE_BITPOS(_feature_) ((_feature_) % 32)
+#define __bit(_x_) (1U << X86_FEATURE_BITPOS(_x_))
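+/*
+ * Example: X86_FEATURE_SYSCALL is defined in cpufeature.h as (1*32 + 11),
+ * so __bit(X86_FEATURE_SYSCALL) == (1U << 11), i.e. bit 11 of the
+ * extended-feature EDX word.
+ */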
+
+/* Family 0Fh, Revision C */
+#define AMD_FEATURES_K8_REV_C_ECX 0
+#define AMD_FEATURES_K8_REV_C_EDX ( \
+ __bit(X86_FEATURE_FPU) | __bit(X86_FEATURE_VME) | \
+ __bit(X86_FEATURE_DE) | __bit(X86_FEATURE_PSE) | \
+ __bit(X86_FEATURE_TSC) | __bit(X86_FEATURE_MSR) | \
+ __bit(X86_FEATURE_PAE) | __bit(X86_FEATURE_MCE) | \
+ __bit(X86_FEATURE_CX8) | __bit(X86_FEATURE_APIC) | \
+ __bit(X86_FEATURE_SEP) | __bit(X86_FEATURE_MTRR) | \
+ __bit(X86_FEATURE_PGE) | __bit(X86_FEATURE_MCA) | \
+ __bit(X86_FEATURE_CMOV) | __bit(X86_FEATURE_PAT) | \
+ __bit(X86_FEATURE_PSE36) | __bit(X86_FEATURE_CLFLSH)| \
+ __bit(X86_FEATURE_MMX) | __bit(X86_FEATURE_FXSR) | \
+ __bit(X86_FEATURE_XMM) | __bit(X86_FEATURE_XMM2))
+#define AMD_EXTFEATURES_K8_REV_C_ECX 0
+#define AMD_EXTFEATURES_K8_REV_C_EDX ( \
+ __bit(X86_FEATURE_FPU) | __bit(X86_FEATURE_VME) | \
+ __bit(X86_FEATURE_DE) | __bit(X86_FEATURE_PSE) | \
+ __bit(X86_FEATURE_TSC) | __bit(X86_FEATURE_MSR) | \
+ __bit(X86_FEATURE_PAE) | __bit(X86_FEATURE_MCE) | \
+ __bit(X86_FEATURE_CX8) | __bit(X86_FEATURE_APIC) | \
+ __bit(X86_FEATURE_SYSCALL) | __bit(X86_FEATURE_MTRR) | \
+ __bit(X86_FEATURE_PGE) | __bit(X86_FEATURE_MCA) | \
+ __bit(X86_FEATURE_CMOV) | __bit(X86_FEATURE_PAT) | \
+ __bit(X86_FEATURE_PSE36) | __bit(X86_FEATURE_NX) | \
+ __bit(X86_FEATURE_MMXEXT) | __bit(X86_FEATURE_MMX) | \
+ __bit(X86_FEATURE_FXSR) | __bit(X86_FEATURE_LM) | \
+ __bit(X86_FEATURE_3DNOWEXT) | __bit(X86_FEATURE_3DNOW))
+
+/* Family 0Fh, Revision D */
+#define AMD_FEATURES_K8_REV_D_ECX AMD_FEATURES_K8_REV_C_ECX
+#define AMD_FEATURES_K8_REV_D_EDX AMD_FEATURES_K8_REV_C_EDX
+#define AMD_EXTFEATURES_K8_REV_D_ECX (AMD_EXTFEATURES_K8_REV_C_ECX |\
+ __bit(X86_FEATURE_LAHF_LM))
+#define AMD_EXTFEATURES_K8_REV_D_EDX (AMD_EXTFEATURES_K8_REV_C_EDX |\
+ __bit(X86_FEATURE_FFXSR))
+
+/* Family 0Fh, Revision E */
+#define AMD_FEATURES_K8_REV_E_ECX (AMD_FEATURES_K8_REV_D_ECX | \
+ __bit(X86_FEATURE_XMM3))
+#define AMD_FEATURES_K8_REV_E_EDX (AMD_FEATURES_K8_REV_D_EDX | \
+ __bit(X86_FEATURE_HT))
+#define AMD_EXTFEATURES_K8_REV_E_ECX (AMD_EXTFEATURES_K8_REV_D_ECX |\
+ __bit(X86_FEATURE_CMP_LEGACY))
+#define AMD_EXTFEATURES_K8_REV_E_EDX AMD_EXTFEATURES_K8_REV_D_EDX
+
+/* Family 0Fh, Revision F */
+#define AMD_FEATURES_K8_REV_F_ECX (AMD_FEATURES_K8_REV_E_ECX | \
+ __bit(X86_FEATURE_CX16))
+#define AMD_FEATURES_K8_REV_F_EDX AMD_FEATURES_K8_REV_E_EDX
+#define AMD_EXTFEATURES_K8_REV_F_ECX (AMD_EXTFEATURES_K8_REV_E_ECX |\
+ __bit(X86_FEATURE_SVME) | __bit(X86_FEATURE_EXTAPICSPACE) | \
+ __bit(X86_FEATURE_ALTMOVCR))
+#define AMD_EXTFEATURES_K8_REV_F_EDX (AMD_EXTFEATURES_K8_REV_E_EDX |\
+ __bit(X86_FEATURE_RDTSCP))
+
+/* Family 0Fh, Revision G */
+#define AMD_FEATURES_K8_REV_G_ECX AMD_FEATURES_K8_REV_F_ECX
+#define AMD_FEATURES_K8_REV_G_EDX AMD_FEATURES_K8_REV_F_EDX
+#define AMD_EXTFEATURES_K8_REV_G_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\
+ __bit(X86_FEATURE_3DNOWPF))
+#define AMD_EXTFEATURES_K8_REV_G_EDX AMD_EXTFEATURES_K8_REV_F_EDX
+
+/* Family 10h, Revision B */
+#define AMD_FEATURES_FAM10h_REV_B_ECX (AMD_FEATURES_K8_REV_F_ECX | \
+ __bit(X86_FEATURE_POPCNT) | __bit(X86_FEATURE_MWAIT))
+#define AMD_FEATURES_FAM10h_REV_B_EDX AMD_FEATURES_K8_REV_F_EDX
+#define AMD_EXTFEATURES_FAM10h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_F_ECX |\
+ __bit(X86_FEATURE_ABM) | __bit(X86_FEATURE_SSE4A) | \
+ __bit(X86_FEATURE_MISALIGNSSE) | __bit(X86_FEATURE_OSVW) | \
+ __bit(X86_FEATURE_IBS))
+#define AMD_EXTFEATURES_FAM10h_REV_B_EDX (AMD_EXTFEATURES_K8_REV_F_EDX |\
+ __bit(X86_FEATURE_PAGE1GB))
+
+/* Family 10h, Revision C */
+#define AMD_FEATURES_FAM10h_REV_C_ECX AMD_FEATURES_FAM10h_REV_B_ECX
+#define AMD_FEATURES_FAM10h_REV_C_EDX AMD_FEATURES_FAM10h_REV_B_EDX
+#define AMD_EXTFEATURES_FAM10h_REV_C_ECX (AMD_EXTFEATURES_FAM10h_REV_B_ECX |\
+ __bit(X86_FEATURE_SKINIT) | __bit(X86_FEATURE_WDT))
+#define AMD_EXTFEATURES_FAM10h_REV_C_EDX AMD_EXTFEATURES_FAM10h_REV_B_EDX
+
+/* Family 11h, Revision B */
+#define AMD_FEATURES_FAM11h_REV_B_ECX AMD_FEATURES_K8_REV_G_ECX
+#define AMD_FEATURES_FAM11h_REV_B_EDX AMD_FEATURES_K8_REV_G_EDX
+#define AMD_EXTFEATURES_FAM11h_REV_B_ECX (AMD_EXTFEATURES_K8_REV_G_ECX |\
+ __bit(X86_FEATURE_SKINIT))
+#define AMD_EXTFEATURES_FAM11h_REV_B_EDX AMD_EXTFEATURES_K8_REV_G_EDX
+
+#endif /* __AMD_H__ */
diff -r 7c10be016e43 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h Tue Aug 19 10:51:41 2008 +0100
+++ b/xen/include/asm-x86/msr-index.h Tue Aug 26 13:35:01 2008 -0500
@@ -186,6 +186,9 @@
#define MSR_K8_ENABLE_C1E 0xc0010055
#define MSR_K8_VM_CR 0xc0010114
#define MSR_K8_VM_HSAVE_PA 0xc0010117
+
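+/* CPUID feature-mask MSRs used for AMD-V Extended Migration */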
+#define MSR_K8_FEATURE_MASK 0xc0011004
+#define MSR_K8_EXT_FEATURE_MASK 0xc0011005
/* MSR_K8_VM_CR bits: */
#define _K8_VMCR_SVME_DISABLE 4