# HG changeset patch # User Wei Huang # Date 1304448326 18000 # Node ID 63208cfe3c558cebc5149fc569702785f6d8e73b # Parent 0d71902c5b8fb8e6deadccf7ef7c39e05105267a FPU: clean up FPU context restore function This patch cleans up context restore function. It renames the function name to vcpu_restore_fpu(). It also extracts FPU restore code (frstor, fxrstor, xrstor) out into separate functions. vcpu_restore_fpu() will dispatch to these functions depending on CPU's capability. Signed-off-by: Wei Huang diff -r 0d71902c5b8f -r 63208cfe3c55 xen/arch/x86/hvm/svm/svm.c --- a/xen/arch/x86/hvm/svm/svm.c Tue May 03 13:43:13 2011 -0500 +++ b/xen/arch/x86/hvm/svm/svm.c Tue May 03 13:45:26 2011 -0500 @@ -348,7 +348,7 @@ { struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; - setup_fpu(v); + vcpu_restore_fpu(v); vmcb_set_exception_intercepts( vmcb, vmcb_get_exception_intercepts(vmcb) & ~(1U << TRAP_no_device)); } diff -r 0d71902c5b8f -r 63208cfe3c55 xen/arch/x86/hvm/vmx/vmx.c --- a/xen/arch/x86/hvm/vmx/vmx.c Tue May 03 13:43:13 2011 -0500 +++ b/xen/arch/x86/hvm/vmx/vmx.c Tue May 03 13:45:26 2011 -0500 @@ -612,7 +612,7 @@ static void vmx_fpu_enter(struct vcpu *v) { - setup_fpu(v); + vcpu_restore_fpu(v); v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device); vmx_update_exception_bitmap(v); v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS; diff -r 0d71902c5b8f -r 63208cfe3c55 xen/arch/x86/i387.c --- a/xen/arch/x86/i387.c Tue May 03 13:43:13 2011 -0500 +++ b/xen/arch/x86/i387.c Tue May 03 13:45:26 2011 -0500 @@ -17,56 +17,37 @@ #include #include -static void load_mxcsr(unsigned long val) +#define MXCSR_DEFAULT 0x1f80 +static void fpu_init(void) { - val &= 0xffbf; - asm volatile ( "ldmxcsr %0" : : "m" (val) ); + unsigned long val; + + asm volatile ( "fninit" ); + if ( cpu_has_xmm ) + { + /* load default value into MXCSR control/status register */ + val = MXCSR_DEFAULT; + asm volatile ( "ldmxcsr %0" : : "m" (val) ); + } } -static void init_fpu(void); -static void restore_fpu(struct vcpu *v); - 
-void setup_fpu(struct vcpu *v) +/*******************************/ +/* FPU Restore Functions */ +/*******************************/ +/* Restore x87 extended state */ +static inline void fpu_xrstor(struct vcpu *v) { - ASSERT(!is_idle_vcpu(v)); - - /* Avoid recursion. */ - clts(); - - if ( v->fpu_dirtied ) - return; - - if ( xsave_enabled(v) ) - { - /* - * XCR0 normally represents what guest OS set. In case of Xen itself, - * we set all supported feature mask before doing save/restore. - */ - set_xcr0(v->arch.xcr0_accum); - xrstor(v); - set_xcr0(v->arch.xcr0); - } - else if ( v->fpu_initialised ) - { - restore_fpu(v); - } - else - { - init_fpu(); - } - - v->fpu_initialised = 1; - v->fpu_dirtied = 1; + /* + * XCR0 normally represents what guest OS set. In case of Xen itself, + * we set all supported feature mask before doing save/restore. + */ + set_xcr0(v->arch.xcr0_accum); + xrstor(v); + set_xcr0(v->arch.xcr0); } -static void init_fpu(void) -{ - asm volatile ( "fninit" ); - if ( cpu_has_xmm ) - load_mxcsr(0x1f80); -} - -static void restore_fpu(struct vcpu *v) +/* Restor x87 FPU, MMX, SSE and SSE2 state */ +static inline void fpu_fxrstor(struct vcpu *v) { const char *fpu_ctxt = v->arch.fpu_ctxt; @@ -75,41 +56,42 @@ * possibility, which may occur if the block was passed to us by control * tools, by silently clearing the block. */ - if ( cpu_has_fxsr ) - { - asm volatile ( + asm volatile ( #ifdef __i386__ - "1: fxrstor %0 \n" + "1: fxrstor %0 \n" #else /* __x86_64__ */ - /* See above for why the operands/constraints are this way. */ - "1: " REX64_PREFIX "fxrstor (%2)\n" + /* See above for why the operands/constraints are this way. 
*/ + "1: " REX64_PREFIX "fxrstor (%2)\n" #endif - ".section .fixup,\"ax\" \n" - "2: push %%"__OP"ax \n" - " push %%"__OP"cx \n" - " push %%"__OP"di \n" - " lea %0,%%"__OP"di \n" - " mov %1,%%ecx \n" - " xor %%eax,%%eax \n" - " rep ; stosl \n" - " pop %%"__OP"di \n" - " pop %%"__OP"cx \n" - " pop %%"__OP"ax \n" - " jmp 1b \n" - ".previous \n" - _ASM_EXTABLE(1b, 2b) - : - : "m" (*fpu_ctxt), - "i" (sizeof(v->arch.xsave_area->fpu_sse)/4) + ".section .fixup,\"ax\" \n" + "2: push %%"__OP"ax \n" + " push %%"__OP"cx \n" + " push %%"__OP"di \n" + " lea %0,%%"__OP"di \n" + " mov %1,%%ecx \n" + " xor %%eax,%%eax \n" + " rep ; stosl \n" + " pop %%"__OP"di \n" + " pop %%"__OP"cx \n" + " pop %%"__OP"ax \n" + " jmp 1b \n" + ".previous \n" + _ASM_EXTABLE(1b, 2b) + : + : "m" (*fpu_ctxt), + "i" (sizeof(v->arch.xsave_area->fpu_sse)/4) #ifdef __x86_64__ - ,"cdaSDb" (fpu_ctxt) + ,"cdaSDb" (fpu_ctxt) #endif - ); - } - else - { - asm volatile ( "frstor %0" : : "m" (*fpu_ctxt) ); - } + ); +} + +/* Restore x87 extended state */ +static inline void fpu_frstor(struct vcpu *v) +{ + const char *fpu_ctxt = v->arch.fpu_ctxt; + + asm volatile ( "frstor %0" : : "m" (*fpu_ctxt) ); } /*******************************/ @@ -178,6 +160,35 @@ /*******************************/ /* VCPU FPU Functions */ /*******************************/ +/* + * Restore FPU state when #NM is triggered. + */ +void vcpu_restore_fpu(struct vcpu *v) +{ + ASSERT(!is_idle_vcpu(v)); + + /* Avoid recursion. */ + clts(); + + if ( v->fpu_dirtied ) + return; + + if ( xsave_enabled(v) ) + fpu_xrstor(v); + else if ( v->fpu_initialised ) + { + if ( cpu_has_fxsr ) + fpu_fxrstor(v); + else + fpu_frstor(v); + } + else + fpu_init(); + + v->fpu_initialised = 1; + v->fpu_dirtied = 1; +} + /* * On each context switch, save the necessary FPU info of VCPU being switch * out. It dispatches saving operation based on CPU's capability. 
diff -r 0d71902c5b8f -r 63208cfe3c55 xen/arch/x86/traps.c --- a/xen/arch/x86/traps.c Tue May 03 13:43:13 2011 -0500 +++ b/xen/arch/x86/traps.c Tue May 03 13:45:26 2011 -0500 @@ -3198,7 +3198,7 @@ BUG_ON(!guest_mode(regs)); - setup_fpu(curr); + vcpu_restore_fpu(curr); if ( curr->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS ) { diff -r 0d71902c5b8f -r 63208cfe3c55 xen/include/asm-x86/i387.h --- a/xen/include/asm-x86/i387.h Tue May 03 13:43:13 2011 -0500 +++ b/xen/include/asm-x86/i387.h Tue May 03 13:45:26 2011 -0500 @@ -14,7 +14,7 @@ #include #include -void setup_fpu(struct vcpu *v); +void vcpu_restore_fpu(struct vcpu *v); void vcpu_save_fpu(struct vcpu *v); int vcpu_init_fpu(struct vcpu *v);