On 18/02/2011 02:45, "Wei, Gang" <gang.wei@xxxxxxxxx> wrote:
> This patch aims to make any future issues introduced around XSAVE/XRSTOR
> easier to expose.
The fact that xsave_alloc_save_area() is called unconditionally on the vcpu
allocation path suffices, I think. It's pretty easy to eyeball that no
successfully initialised non-idle vcpu can have an xsave area smaller than
min_size.
I like assertions a lot, but not carpet bombed all over the code.
-- Keir
> Jimmy
>
> x86: add strict sanity checks for XSAVE/XRSTOR
>
> Signed-off-by: Wei Gang <gang.wei@xxxxxxxxx>
>
> diff -r 137ad3347504 xen/arch/x86/domctl.c
> --- a/xen/arch/x86/domctl.c Mon Feb 14 17:02:55 2011 +0000
> +++ b/xen/arch/x86/domctl.c Fri Feb 18 16:00:41 2011 +0800
> @@ -1604,8 +1604,13 @@ void arch_get_info_guest(struct vcpu *v,
>
> /* Fill legacy context from xsave area first */
> if ( cpu_has_xsave )
> + {
> + ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> + ASSERT(v->arch.xsave_area);
> +
> memcpy(v->arch.xsave_area, &v->arch.guest_context.fpu_ctxt,
> sizeof(v->arch.guest_context.fpu_ctxt));
> + }
>
> if ( !is_pv_32on64_domain(v->domain) )
> memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat)); diff -r
> 137ad3347504 xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c Mon Feb 14 17:02:55 2011 +0000
> +++ b/xen/arch/x86/hvm/hvm.c Fri Feb 18 16:03:23 2011 +0800
> @@ -777,6 +777,9 @@ static int hvm_load_cpu_ctxt(struct doma
> {
> struct xsave_struct *xsave_area = v->arch.xsave_area;
>
> + ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> + ASSERT(v->arch.xsave_area);
> +
> memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
> xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
> v->arch.xcr0_accum = XSTATE_FP_SSE; @@ -834,6 +837,7 @@ static int
> hvm_save_cpu_xsave_states(str
> if ( !cpu_has_xsave )
> return 0; /* do nothing */
>
> + ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> for_each_vcpu ( d, v )
> {
> if ( _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id,
> HVM_CPU_XSAVE_SIZE) ) @@ -846,8 +850,12 @@ static int
> hvm_save_cpu_xsave_states(str
> ctxt->xcr0 = v->arch.xcr0;
> ctxt->xcr0_accum = v->arch.xcr0_accum;
> if ( v->fpu_initialised )
> + {
> + ASSERT(v->arch.xsave_area);
> +
> memcpy(&ctxt->save_area,
> v->arch.xsave_area, xsave_cntxt_size);
> + }
> }
>
> return 0;
> @@ -873,6 +881,9 @@ static int hvm_load_cpu_xsave_states(str
> gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
> return -EINVAL;
> }
> +
> + ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> + ASSERT(v->arch.xsave_area);
>
> /* Customized checking for entry since our entry is of variable length */
> desc = (struct hvm_save_descriptor *)&h->data[h->cur]; diff -r
> 137ad3347504 xen/arch/x86/i387.c
> --- a/xen/arch/x86/i387.c Mon Feb 14 17:02:55 2011 +0000
> +++ b/xen/arch/x86/i387.c Fri Feb 18 16:00:41 2011 +0800
> @@ -71,6 +71,9 @@ void setup_fpu(struct vcpu *v)
>
> if ( cpu_has_xsave )
> {
> + ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> + ASSERT(v->arch.xsave_area);
> +
> /*
> * XCR0 normally represents what guest OS set. In case of Xen itself,
> * we set all supported feature mask before doing save/restore.
> @@ -118,6 +121,9 @@ void save_init_fpu(struct vcpu *v)
>
> if ( cpu_has_xsave )
> {
> + ASSERT(xsave_cntxt_size >= XSAVE_AREA_MIN_SIZE);
> + ASSERT(v->arch.xsave_area);
> +
> /* XCR0 normally represents what guest OS set. In case of Xen itself,
> * we set all accumulated feature mask before doing save/restore.
> */
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
|