Keir Fraser wrote on 2011-02-19:
> On 19/02/2011 01:21, "Kay, Allen M" <allen.m.kay@xxxxxxxxx> wrote:
>> Maybe the proper thing to do is to have a new function called
>> xsave_enabled(); this function then checks whether memory has been
>> allocated properly, in addition to checking cpu_has_xsave.
>>
>> What do you think, or do you have a better suggestion?
>
> Yeah, a new function xsave_enabled(), which encapsulates the check of
> cpu_has_xsave plus your new assertions, would be acceptable, I think.
Good suggestion. Attached is the reworked patch.
Jimmy
x86: add strict sanity checks for XSAVE/XRSTOR
Replace most checks of cpu_has_xsave with calls to a new function,
xsave_enabled(), which performs additional sanity checks.
Signed-off-by: Wei Gang <gang.wei@xxxxxxxxx>
diff -r 137ad3347504 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/domain.c Tue Feb 22 19:57:01 2011 +0800
@@ -628,7 +628,7 @@ unsigned long pv_guest_cr4_fixup(const s
hv_cr4_mask &= ~X86_CR4_DE;
if ( cpu_has_fsgsbase && !is_pv_32bit_domain(v->domain) )
hv_cr4_mask &= ~X86_CR4_FSGSBASE;
- if ( cpu_has_xsave )
+ if ( xsave_enabled(v) )
hv_cr4_mask &= ~X86_CR4_OSXSAVE;
if ( (guest_cr4 & hv_cr4_mask) != (hv_cr4 & hv_cr4_mask) )
@@ -1402,7 +1402,7 @@ static void __context_switch(void)
memcpy(stack_regs,
&n->arch.guest_context.user_regs,
CTXT_SWITCH_STACK_BYTES);
- if ( cpu_has_xsave && n->arch.xcr0 != get_xcr0() )
+ if ( xsave_enabled(n) && n->arch.xcr0 != get_xcr0() )
set_xcr0(n->arch.xcr0);
n->arch.ctxt_switch_to(n);
}
diff -r 137ad3347504 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/domctl.c Tue Feb 22 19:30:01 2011 +0800
@@ -1603,7 +1603,7 @@ void arch_get_info_guest(struct vcpu *v,
#endif
/* Fill legacy context from xsave area first */
- if ( cpu_has_xsave )
+ if ( xsave_enabled(v) )
memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
sizeof(v->arch.guest_context.fpu_ctxt));
diff -r 137ad3347504 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c Tue Feb 22 19:58:20 2011 +0800
@@ -773,7 +773,7 @@ static int hvm_load_cpu_ctxt(struct doma
memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
/* In case xsave-absent save file is restored on a xsave-capable host */
- if ( cpu_has_xsave )
+ if ( xsave_enabled(v) )
{
struct xsave_struct *xsave_area = v->arch.xsave_area;
@@ -831,7 +831,7 @@ static int hvm_save_cpu_xsave_states(str
struct vcpu *v;
struct hvm_hw_cpu_xsave *ctxt;
- if ( !cpu_has_xsave )
+ if ( !xsave_enabled(NULL) )
return 0; /* do nothing */
for_each_vcpu ( d, v )
@@ -846,8 +846,12 @@ static int hvm_save_cpu_xsave_states(str
ctxt->xcr0 = v->arch.xcr0;
ctxt->xcr0_accum = v->arch.xcr0_accum;
if ( v->fpu_initialised )
+ {
+ ASSERT(v->arch.xsave_area);
+
memcpy(&ctxt->save_area,
v->arch.xsave_area, xsave_cntxt_size);
+ }
}
return 0;
@@ -861,11 +865,6 @@ static int hvm_load_cpu_xsave_states(str
struct hvm_save_descriptor *desc;
uint64_t _xfeature_mask;
- /* fails since we can't restore an img saved on xsave-capable host */
-//XXX:
- if ( !cpu_has_xsave )
- return -EINVAL;
-
/* Which vcpu is this? */
vcpuid = hvm_load_instance(h);
if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
@@ -873,6 +872,11 @@ static int hvm_load_cpu_xsave_states(str
gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
return -EINVAL;
}
+
+ /* fails since we can't restore an img saved on xsave-capable host */
+//XXX:
+ if ( !xsave_enabled(v) )
+ return -EINVAL;
/* Customized checking for entry since our entry is of variable length */
desc = (struct hvm_save_descriptor *)&h->data[h->cur];
@@ -2208,7 +2212,7 @@ void hvm_cpuid(unsigned int input, unsig
__clear_bit(X86_FEATURE_APIC & 31, edx);
/* Fix up OSXSAVE. */
- if ( cpu_has_xsave )
+ if ( xsave_enabled(v) )
*ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
bitmaskof(X86_FEATURE_OSXSAVE) : 0;
break;
diff -r 137ad3347504 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Tue Feb 22 20:55:28 2011 +0800
@@ -760,7 +760,8 @@ static int construct_vmcs(struct vcpu *v
/* Host control registers. */
v->arch.hvm_vmx.host_cr0 = read_cr0() | X86_CR0_TS;
__vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
- __vmwrite(HOST_CR4, mmu_cr4_features | (cpu_has_xsave ? X86_CR4_OSXSAVE : 0));
+ __vmwrite(HOST_CR4,
+ mmu_cr4_features | (xsave_enabled(v) ? X86_CR4_OSXSAVE : 0));
/* Host CS:RIP. */
__vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
diff -r 137ad3347504 xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/i387.c Tue Feb 22 19:29:14 2011 +0800
@@ -69,7 +69,7 @@ void setup_fpu(struct vcpu *v)
if ( v->fpu_dirtied )
return;
- if ( cpu_has_xsave )
+ if ( xsave_enabled(v) )
{
/*
* XCR0 normally represents what guest OS set. In case of Xen itself,
@@ -116,7 +116,7 @@ void save_init_fpu(struct vcpu *v)
if ( cr0 & X86_CR0_TS )
clts();
- if ( cpu_has_xsave )
+ if ( xsave_enabled(v) )
{
/* XCR0 normally represents what guest OS set. In case of Xen itself,
* we set all accumulated feature mask before doing save/restore.
diff -r 137ad3347504 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/arch/x86/traps.c Tue Feb 22 20:09:16 2011 +0800
@@ -771,7 +771,7 @@ static void pv_cpuid(struct cpu_user_reg
__clear_bit(X86_FEATURE_XTPR % 32, &c);
__clear_bit(X86_FEATURE_PDCM % 32, &c);
__clear_bit(X86_FEATURE_DCA % 32, &c);
- if ( !cpu_has_xsave )
+ if ( !xsave_enabled(current) )
{
__clear_bit(X86_FEATURE_XSAVE % 32, &c);
__clear_bit(X86_FEATURE_AVX % 32, &c);
diff -r 137ad3347504 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/include/asm-x86/domain.h Tue Feb 22 20:10:06 2011 +0800
@@ -464,7 +464,7 @@ unsigned long pv_guest_cr4_fixup(const s
(((v)->arch.guest_context.ctrlreg[4] \
| (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE)) \
| ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0) \
- | ((cpu_has_xsave)? X86_CR4_OSXSAVE : 0)) \
+ | ((xsave_enabled(v))? X86_CR4_OSXSAVE : 0)) \
& ~X86_CR4_DE)
#define real_cr4_to_pv_guest_cr4(c) \
((c) & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_TSD | X86_CR4_OSXSAVE))
diff -r 137ad3347504 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h Tue Feb 22 20:13:23 2011 +0800
@@ -291,7 +291,7 @@ static inline int hvm_do_pmu_interrupt(s
X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE | \
X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \
X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \
- (cpu_has_xsave ? X86_CR4_OSXSAVE : 0))))
+ (xsave_enabled(NULL) ? X86_CR4_OSXSAVE : 0))))
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
diff -r 137ad3347504 xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h Mon Feb 14 17:02:55 2011 +0000
+++ b/xen/include/asm-x86/i387.h Tue Feb 22 20:22:07 2011 +0800
@@ -31,6 +31,19 @@ void xsave_free_save_area(struct vcpu *v
#define XSTATE_YMM_OFFSET XSAVE_AREA_MIN_SIZE
#define XSTATE_YMM_SIZE 256
#define XSAVEOPT (1 << 0)
+
+static inline int xsave_enabled(const struct vcpu *v)
+{
+ if ( cpu_has_xsave )
+ {
+ ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
+
+ if ( v )
+ ASSERT(v->arch.xsave_area);
+ }
+
+ return cpu_has_xsave;
+}
struct xsave_struct
{
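For readers skimming the patch: xsave_enabled() has two calling conventions.
Pass the vcpu whose XSAVE state will actually be touched, or pass NULL when
only the host capability matters (e.g. the HVM_CR4 mask and
hvm_save_cpu_xsave_states(), which test capability before any per-vcpu state
is examined). The standalone sketch below models that contract with
simplified stand-in types and values (assert() in place of Xen's ASSERT(),
dummy cpu_has_xsave/xsave_cntxt_size); it is illustrative only, not part of
the patch:

#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins for the Xen definitions (illustrative only). */
struct vcpu_arch { void *xsave_area; };
struct vcpu { struct vcpu_arch arch; };

static int cpu_has_xsave = 1;                /* boot-time CPUID probe */
static unsigned int xsave_cntxt_size = 576;  /* probed save-area size */
#define XSAVE_AREA_MIN_SIZE (512 + 64)       /* FXSAVE block + header */

static inline int xsave_enabled(const struct vcpu *v)
{
    if ( cpu_has_xsave )
    {
        /* The save-area size must have been probed sanely at boot... */
        assert(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);

        /* ...and a non-NULL vcpu must have its xsave area allocated. */
        if ( v )
            assert(v->arch.xsave_area != NULL);
    }

    return cpu_has_xsave;
}

int main(void)
{
    char buf[576];
    struct vcpu v = { .arch = { .xsave_area = buf } };

    assert(xsave_enabled(&v));    /* vcpu form: also checks allocation */
    assert(xsave_enabled(NULL));  /* capability-only form */
    return 0;
}

Either way the return value is just cpu_has_xsave; the assertions only fire
on a misconfigured or partially initialised vcpu.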
Attachment: xsave_sanity_check_2.patch