# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1254220073 -3600
# Node ID ad35f39e5fdccab3b158b38e139ca3498c347cb5
# Parent 24117d67661c0c62f03901a25960cca82a732e9a
vmx: add support for XSAVE/XRSTOR to VMX guests
XSAVE/XRSTOR manage the existing and future processor extended states
on the x86 architecture.
The XSAVE/XRSTOR infrastructure is defined in Intel SDMs:
http://www.intel.com/products/processor/manuals/
The patch uses the classical CR0.TS-based algorithm to manage the
states on context switch. At present, we know of 3 bits in
XFEATURE_ENABLED_MASK: FPU, SSE and YMM. YMM is defined in the Intel AVX
Programming Reference: http://software.intel.com/sites/avx/
Signed-off-by: Dexuan Cui <dexuan.cui@xxxxxxxxx>
---
tools/libxc/xc_cpuid_x86.c | 2
xen/arch/x86/cpu/intel.c | 10 ++++
xen/arch/x86/hvm/hvm.c | 57 +++++++++++++++++++++++
xen/arch/x86/hvm/vmx/vmcs.c | 2
xen/arch/x86/hvm/vmx/vmx.c | 47 +++++++++++++++++--
xen/arch/x86/i387.c | 76 ++++++++++++++++++++++++++++++
xen/include/asm-x86/cpufeature.h | 4 +
xen/include/asm-x86/domain.h | 2
xen/include/asm-x86/hvm/hvm.h | 3 -
xen/include/asm-x86/hvm/vcpu.h | 9 +++
xen/include/asm-x86/hvm/vmx/vmx.h | 1
xen/include/asm-x86/i387.h | 94 ++++++++++++++++++++++++++++++++++++--
xen/include/asm-x86/processor.h | 1
13 files changed, 296 insertions(+), 12 deletions(-)
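For reference, the CR0.TS-based handling works lazily: when a vcpu that has
dirtied FPU/SSE/YMM state is scheduled out, its extended state is saved with
XSAVE and CR0.TS is set; the next FP/SSE/YMM instruction in the guest then
raises #NM, and the handler clears TS and restores the state with XRSTOR.
A minimal sketch of that flow (the helper names and structure here are
illustrative only, not the exact code added by this patch):

    /* Illustrative sketch of lazy extended-state switching via CR0.TS. */
    void sketch_ctxt_switch_from(struct vcpu *v)
    {
        if ( v->fpu_dirtied )
            xsave(v);   /* save FPU/SSE/YMM into v's 64-byte-aligned area */
        stts();         /* set CR0.TS: next FP/SSE/YMM insn raises #NM */
    }

    void sketch_do_device_not_available(struct vcpu *v)  /* #NM handler */
    {
        clts();         /* clear CR0.TS so FP/SSE/YMM insns run again */
        if ( v->fpu_initialised )
            xrstor(v);  /* restore the state saved on the last switch-out */
        else
            init_fpu(); /* first use: start from a clean state */
        v->fpu_dirtied = 1;
    }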
diff -r 24117d67661c -r ad35f39e5fdc tools/libxc/xc_cpuid_x86.c
--- a/tools/libxc/xc_cpuid_x86.c Tue Sep 29 11:22:17 2009 +0100
+++ b/tools/libxc/xc_cpuid_x86.c Tue Sep 29 11:27:53 2009 +0100
@@ -28,7 +28,7 @@
#define clear_bit(idx, dst) ((dst) &= ~(1u << ((idx) & 31)))
#define set_bit(idx, dst) ((dst) |= (1u << ((idx) & 31)))
-#define DEF_MAX_BASE 0x00000004u
+#define DEF_MAX_BASE 0x0000000du
#define DEF_MAX_EXT 0x80000008u
static int hypervisor_is_64bit(int xc)
diff -r 24117d67661c -r ad35f39e5fdc xen/arch/x86/cpu/intel.c
--- a/xen/arch/x86/cpu/intel.c Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/arch/x86/cpu/intel.c Tue Sep 29 11:27:53 2009 +0100
@@ -9,6 +9,7 @@
#include <asm/uaccess.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
+#include <asm/i387.h>
#include <mach_apic.h>
#include <asm/hvm/support.h>
@@ -27,6 +28,9 @@ static unsigned int opt_cpuid_mask_ecx,
static unsigned int opt_cpuid_mask_ecx, opt_cpuid_mask_edx;
integer_param("cpuid_mask_ecx", opt_cpuid_mask_ecx);
integer_param("cpuid_mask_edx", opt_cpuid_mask_edx);
+
+static int use_xsave = 1;
+boolean_param("xsave", use_xsave);
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
@@ -233,6 +237,12 @@ static void __devinit init_intel(struct
set_bit(X86_FEATURE_ARAT, c->x86_capability);
start_vmx();
+
+ if ( !use_xsave )
+ clear_bit(X86_FEATURE_XSAVE, boot_cpu_data.x86_capability);
+
+ if ( cpu_has_xsave )
+ xsave_init();
}
diff -r 24117d67661c -r ad35f39e5fdc xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/arch/x86/hvm/hvm.c Tue Sep 29 11:27:53 2009 +0100
@@ -43,6 +43,7 @@
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
+#include <asm/i387.h>
#include <asm/traps.h>
#include <asm/mc146818rtc.h>
#include <asm/spinlock.h>
@@ -753,6 +754,18 @@ int hvm_vcpu_initialise(struct vcpu *v)
{
int rc;
+ if ( cpu_has_xsave )
+ {
+ /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
+ void *xsave_area = _xmalloc(xsave_cntxt_size, 64);
+ if ( xsave_area == NULL )
+ return -ENOMEM;
+
+ xsave_init_save_area(xsave_area);
+ v->arch.hvm_vcpu.xsave_area = xsave_area;
+ v->arch.hvm_vcpu.xfeature_mask = XSTATE_FP_SSE;
+ }
+
if ( (rc = vlapic_init(v)) != 0 )
goto fail1;
@@ -815,6 +828,7 @@ void hvm_vcpu_destroy(struct vcpu *v)
hvm_vcpu_cacheattr_destroy(v);
vlapic_destroy(v);
hvm_funcs.vcpu_destroy(v);
+ xfree(v->arch.hvm_vcpu.xsave_area);
/* Event channel is already freed by evtchn_destroy(). */
/*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
@@ -1771,6 +1785,7 @@ void hvm_cpuid(unsigned int input, unsig
unsigned int *ecx, unsigned int *edx)
{
struct vcpu *v = current;
+ unsigned int count = *ecx;
if ( cpuid_viridian_leaves(input, eax, ebx, ecx, edx) )
return;
@@ -1788,10 +1803,52 @@ void hvm_cpuid(unsigned int input, unsig
*ebx |= (v->vcpu_id * 2) << 24;
if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
__clear_bit(X86_FEATURE_APIC & 31, edx);
+
+ /* Fix up XSAVE and OSXSAVE. */
+ *ecx &= ~(bitmaskof(X86_FEATURE_XSAVE) |
+ bitmaskof(X86_FEATURE_OSXSAVE));
+ if ( cpu_has_xsave )
+ {
+ *ecx |= bitmaskof(X86_FEATURE_XSAVE);
+ *ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
+ bitmaskof(X86_FEATURE_OSXSAVE) : 0;
+ }
break;
case 0xb:
/* Fix the x2APIC identifier. */
*edx = v->vcpu_id * 2;
+ break;
+ case 0xd:
+ if ( cpu_has_xsave )
+ {
+ /*
+ * Fix up "Processor Extended State Enumeration". We only present
+ * FPU(bit0) and SSE(bit1) to HVM guest for now.
+ */
+ *eax = *ebx = *ecx = *edx = 0;
+ switch ( count )
+ {
+ case 0:
+ /* No hardware defines any bit in EDX yet. */
+ *edx = 0;
+ /* We only enable the features we know. */
+ *eax = xfeature_low;
+ /* FP/SSE + XSAVE.HEADER + YMM. */
+ *ecx = 512 + 64 + ((*eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
+ /* Let ebx equal ecx at present. */
+ *ebx = *ecx;
+ break;
+ case 2:
+ if ( !(xfeature_low & XSTATE_YMM) )
+ break;
+ *eax = XSTATE_YMM_SIZE;
+ *ebx = XSTATE_YMM_OFFSET;
+ break;
+ case 1:
+ default:
+ break;
+ }
+ }
break;
}
}
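With the leaf 0xd handling above, a guest can size its XSAVE area in the
usual way. An illustrative guest-side query (cpuid_count() here stands for
whatever CPUID wrapper the guest OS provides; it is not defined by this
patch):

    /* Illustrative probe of CPUID.(EAX=0xd, ECX=0) to size the XSAVE area. */
    uint32_t xsave_area_bytes(void)
    {
        uint32_t eax, ebx, ecx, edx;

        cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
        /* EAX: supported state bits (FP, SSE and possibly YMM here);
         * EBX: bytes needed for the currently enabled XCR0 bits;
         * ECX: maximum bytes needed if all supported bits were enabled. */
        return ebx;
    }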
diff -r 24117d67661c -r ad35f39e5fdc xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Tue Sep 29 11:27:53 2009 +0100
@@ -629,7 +629,7 @@ static int construct_vmcs(struct vcpu *v
/* Host control registers. */
v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
__vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
- __vmwrite(HOST_CR4, mmu_cr4_features);
+ __vmwrite(HOST_CR4, mmu_cr4_features | (cpu_has_xsave ? X86_CR4_OSXSAVE : 0));
/* Host CS:RIP. */
__vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
diff -r 24117d67661c -r ad35f39e5fdc xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Sep 29 11:27:53 2009 +0100
@@ -669,10 +669,14 @@ static void vmx_ctxt_switch_to(struct vc
static void vmx_ctxt_switch_to(struct vcpu *v)
{
struct domain *d = v->domain;
-
- /* HOST_CR4 in VMCS is always mmu_cr4_features. Sync CR4 now. */
- if ( unlikely(read_cr4() != mmu_cr4_features) )
- write_cr4(mmu_cr4_features);
+ unsigned long old_cr4 = read_cr4(), new_cr4 = mmu_cr4_features;
+
+ /* HOST_CR4 in VMCS is always mmu_cr4_features plus
+ * X86_CR4_OSXSAVE (if supported). Sync CR4 now. */
+ if ( cpu_has_xsave )
+ new_cr4 |= X86_CR4_OSXSAVE;
+ if ( old_cr4 != new_cr4 )
+ write_cr4(new_cr4);
if ( d->arch.hvm_domain.hap_enabled )
{
@@ -2317,6 +2321,30 @@ static int vmx_handle_eoi_write(void)
return 0;
}
+static int vmx_handle_xsetbv(u64 new_bv)
+{
+ struct vcpu *v = current;
+ u64 xfeature = (((u64)xfeature_high) << 32) | xfeature_low;
+ struct segment_register sreg;
+
+ hvm_get_segment_register(v, x86_seg_ss, &sreg);
+ if ( sreg.attr.fields.dpl != 0 )
+ goto err;
+
+ if ( ((new_bv ^ xfeature) & ~xfeature) || !(new_bv & 1) )
+ goto err;
+
+ if ( (xfeature & XSTATE_YMM & new_bv) && !(new_bv & XSTATE_SSE) )
+ goto err;
+
+ v->arch.hvm_vcpu.xfeature_mask = new_bv;
+ set_xcr0(new_bv);
+ return 0;
+err:
+ vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ return -1;
+}
+
asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
unsigned int exit_reason, idtv_info;
@@ -2668,6 +2696,17 @@ asmlinkage void vmx_vmexit_handler(struc
do_sched_op_compat(SCHEDOP_yield, 0);
break;
+ case EXIT_REASON_XSETBV:
+ {
+ u64 new_bv = (((u64)regs->edx) << 32) | regs->eax;
+ if ( vmx_handle_xsetbv(new_bv) == 0 )
+ {
+ inst_len = __get_instruction_length();
+ __update_guest_eip(inst_len);
+ }
+ break;
+ }
+
default:
exit_and_crash:
gdprintk(XENLOG_ERR, "Bad vmexit (reason %x)\n", exit_reason);
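The checks in vmx_handle_xsetbv() mirror what XSETBV itself requires: the
guest must be at ring 0, only supported bits may be set, bit 0 (FP) must be
set, and YMM cannot be enabled without SSE. An illustrative guest-side enable
sequence follows (the read_cr4/write_cr4/xsetbv helpers are assumed guest OS
primitives, not code from this patch); with this patch, the XSETBV below
causes an EXIT_REASON_XSETBV vmexit that is validated as above:

    /* Illustrative guest-side XSAVE enabling. */
    void guest_enable_xsave(void)
    {
        uint32_t eax, ebx, ecx, edx;
        uint64_t xcr0;

        cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
        xcr0 = eax & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM); /* known bits only */

        write_cr4(read_cr4() | X86_CR4_OSXSAVE); /* CR4.OSXSAVE before XSETBV */
        xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0); /* program XCR0 */
    }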
diff -r 24117d67661c -r ad35f39e5fdc xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/arch/x86/i387.c Tue Sep 29 11:27:53 2009 +0100
@@ -33,7 +33,11 @@ void save_init_fpu(struct vcpu *v)
if ( cr0 & X86_CR0_TS )
clts();
- if ( cpu_has_fxsr )
+ if ( cpu_has_xsave && is_hvm_vcpu(v) )
+ {
+ xsave(v);
+ }
+ else if ( cpu_has_fxsr )
{
#ifdef __i386__
asm volatile (
@@ -129,6 +133,76 @@ void restore_fpu(struct vcpu *v)
}
/*
+ * Maximum size (in bytes) of the XSAVE/XRSTOR save area required by all
+ * the supported and enabled features on the processor, including the
+ * XSAVE.HEADER. We only enable the features in XCNTXT_MASK that we know.
+ */
+u32 xsave_cntxt_size;
+
+/* A 64-bit bitmask of the XSAVE/XRSTOR features supported by the processor. */
+u32 xfeature_low, xfeature_high;
+
+void xsave_init(void)
+{
+ u32 eax, ebx, ecx, edx;
+ int cpu = smp_processor_id();
+ u32 min_size;
+
+ cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
+
+ printk("%s: cpu%d: cntxt_max_size: 0x%x and states: %08x:%08x\n",
+ __func__, cpu, ecx, edx, eax);
+
+ if ( ((eax & XSTATE_FP_SSE) != XSTATE_FP_SSE) ||
+ ((eax & XSTATE_YMM) && !(eax & XSTATE_SSE)) )
+ {
+ BUG();
+ }
+
+ /* FP/SSE, XSAVE.HEADER, YMM */
+ min_size = 512 + 64 + ((eax & XSTATE_YMM) ? XSTATE_YMM_SIZE : 0);
+ BUG_ON(ecx < min_size);
+
+ /*
+ * We will only enable the features we know about for HVM guests. Here we
+ * set/clear CR4.OSXSAVE and re-run CPUID to get xsave_cntxt_size.
+ */
+ set_in_cr4(X86_CR4_OSXSAVE);
+ set_xcr0(eax & XCNTXT_MASK);
+ cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
+ clear_in_cr4(X86_CR4_OSXSAVE);
+
+ if ( cpu == 0 )
+ {
+ /*
+ * xsave_cntxt_size is the max size required by the enabled features.
+ * Of EAX we know FP/SSE and YMM; nothing is defined in EDX at present.
+ */
+ xsave_cntxt_size = ebx;
+ xfeature_low = eax & XCNTXT_MASK;
+ xfeature_high = 0;
+ printk("%s: using cntxt_size: 0x%x and states: %08x:%08x\n",
+ __func__, xsave_cntxt_size, xfeature_high, xfeature_low);
+ }
+ else
+ {
+ BUG_ON(xsave_cntxt_size != ebx);
+ BUG_ON(xfeature_low != (eax & XCNTXT_MASK));
+ }
+}
+
+void xsave_init_save_area(void *save_area)
+{
+ memset(save_area, 0, xsave_cntxt_size);
+
+ ((u16 *)save_area)[0] = 0x37f; /* FCW */
+ ((u16 *)save_area)[2] = 0xffff; /* FTW */
+ ((u32 *)save_area)[6] = 0x1f80; /* MXCSR */
+
+ ((struct xsave_struct *)save_area)->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
+}
+
+/*
* Local variables:
* mode: C
* c-set-style: "BSD"
diff -r 24117d67661c -r ad35f39e5fdc xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/include/asm-x86/cpufeature.h Tue Sep 29 11:27:53 2009 +0100
@@ -98,6 +98,7 @@
#define X86_FEATURE_X2APIC (4*32+21) /* Extended xAPIC */
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE (4*32+27) /* OSXSAVE */
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running under some hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
@@ -189,6 +190,9 @@
&& boot_cpu_has(X86_FEATURE_FFXSR))
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
+
+#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
+
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#endif /* __ASM_I386_CPUFEATURE_H */
diff -r 24117d67661c -r ad35f39e5fdc xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/include/asm-x86/domain.h Tue Sep 29 11:27:53 2009 +0100
@@ -440,7 +440,7 @@ unsigned long pv_guest_cr4_fixup(unsigne
| ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)) \
& ~X86_CR4_DE)
#define real_cr4_to_pv_guest_cr4(c) \
- ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD))
+ ((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD | X86_CR4_OSXSAVE))
void domain_cpuid(struct domain *d,
unsigned int input,
diff -r 24117d67661c -r ad35f39e5fdc xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Tue Sep 29 11:27:53 2009 +0100
@@ -268,7 +268,8 @@ static inline int hvm_do_pmu_interrupt(s
(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | \
X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE | \
X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \
- X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT)))
+ X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \
+ (cpu_has_xsave ? X86_CR4_OSXSAVE : 0))))
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
diff -r 24117d67661c -r ad35f39e5fdc xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h Tue Sep 29 11:27:53 2009 +0100
@@ -46,6 +46,15 @@ struct hvm_vcpu {
* CR3: Always used and kept up to date by paging subsystem.
*/
unsigned long hw_cr[5];
+
+ /*
+ * The save area for Processor Extended States and the bitmask of the
+ * XSAVE/XRSTOR features. They are used as follows: 1) when a vcpu (which
+ * has dirtied FPU/SSE) is scheduled out, we XSAVE the states here; 2) in
+ * the #NM handler, we XRSTOR the states we saved in 1).
+ */
+ void *xsave_area;
+ uint64_t xfeature_mask;
struct vlapic vlapic;
s64 cache_tsc_offset;
diff -r 24117d67661c -r ad35f39e5fdc xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Tue Sep 29 11:27:53 2009 +0100
@@ -111,6 +111,7 @@ void vmx_update_debug_state(struct vcpu
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_WBINVD 54
+#define EXIT_REASON_XSETBV 55
/*
* Interruption-information format
diff -r 24117d67661c -r ad35f39e5fdc xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/include/asm-x86/i387.h Tue Sep 29 11:27:53 2009 +0100
@@ -13,6 +13,82 @@
#include <xen/sched.h>
#include <asm/processor.h>
+
+extern unsigned int xsave_cntxt_size;
+extern u32 xfeature_low, xfeature_high;
+
+extern void xsave_init(void);
+extern void xsave_init_save_area(void *save_area);
+
+#define XSTATE_FP (1 << 0)
+#define XSTATE_SSE (1 << 1)
+#define XSTATE_YMM (1 << 2)
+#define XSTATE_FP_SSE (XSTATE_FP | XSTATE_SSE)
+#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+#define XSTATE_YMM_OFFSET (512 + 64)
+#define XSTATE_YMM_SIZE 256
+
+struct xsave_struct
+{
+ struct { char x[512]; } fpu_sse; /* FPU/MMX, SSE */
+
+ struct {
+ u64 xstate_bv;
+ u64 reserved[7];
+ } xsave_hdr; /* The 64-byte header */
+
+ struct { char x[XSTATE_YMM_SIZE]; } ymm; /* YMM */
+ char data[]; /* Future new states */
+} __attribute__ ((packed, aligned (64)));
+
+#define XCR_XFEATURE_ENABLED_MASK 0
+
+#ifdef CONFIG_X86_64
+#define REX_PREFIX "0x48, "
+#else
+#define REX_PREFIX
+#endif
+
+static inline void xsetbv(u32 index, u64 xfeature_mask)
+{
+ u32 hi = xfeature_mask >> 32;
+ u32 lo = (u32)xfeature_mask;
+
+ asm volatile (".byte 0x0f,0x01,0xd1" :: "c" (index),
+ "a" (lo), "d" (hi));
+}
+
+static inline void set_xcr0(u64 xfeature_mask)
+{
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeature_mask);
+}
+
+static inline void xsave(struct vcpu *v)
+{
+ u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
+ u32 lo = mask, hi = mask >> 32;
+ struct xsave_struct *ptr;
+
+ ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
+
+ asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
+ :
+ : "a" (lo), "d" (hi), "D"(ptr)
+ : "memory");
+}
+
+static inline void xrstor(struct vcpu *v)
+{
+ u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
+ u32 lo = mask, hi = mask >> 32;
+ struct xsave_struct *ptr;
+
+ ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
+
+ asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
+ :
+ : "m" (*ptr), "a" (lo), "d" (hi), "D"(ptr));
+}
extern void init_fpu(void);
extern void save_init_fpu(struct vcpu *v);
@@ -36,10 +112,22 @@ static inline void setup_fpu(struct vcpu
if ( !v->fpu_dirtied )
{
v->fpu_dirtied = 1;
- if ( v->fpu_initialised )
- restore_fpu(v);
+ if ( cpu_has_xsave && is_hvm_vcpu(v) )
+ {
+ if ( !v->fpu_initialised )
+ v->fpu_initialised = 1;
+
+ set_xcr0(v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE);
+ xrstor(v);
+ set_xcr0(v->arch.hvm_vcpu.xfeature_mask);
+ }
else
- init_fpu();
+ {
+ if ( v->fpu_initialised )
+ restore_fpu(v);
+ else
+ init_fpu();
+ }
}
}
diff -r 24117d67661c -r ad35f39e5fdc xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h Tue Sep 29 11:22:17 2009 +0100
+++ b/xen/include/asm-x86/processor.h Tue Sep 29 11:27:53 2009 +0100
@@ -83,6 +83,7 @@
#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
#define X86_CR4_VMXE 0x2000 /* enable VMX */
#define X86_CR4_SMXE 0x4000 /* enable SMX */
+#define X86_CR4_OSXSAVE 0x40000 /* enable XSAVE/XRSTOR */
/*
* Trap/fault mnemonics.