diff -r 02003bee3e80 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Thu Jun 25 18:31:10 2009 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Fri Jun 26 17:19:31 2009 +0200
@@ -111,10 +111,11 @@ static void svm_cpu_down(void)
     write_efer(read_efer() & ~EFER_SVME);
 }
 
-static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
+static enum handler_return long_mode_do_msr_write(struct vcpu *v,
+                                                  struct cpu_user_regs *regs)
 {
-    u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
-    u32 ecx = regs->ecx;
+    uint64_t msr_content = (uint32_t)regs->eax | ((uint64_t)regs->edx << 32);
+    uint32_t ecx = regs->ecx;
 
     HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64, ecx, msr_content);
 
@@ -124,6 +125,11 @@ static enum handler_return long_mode_do_
     case MSR_EFER:
         if ( hvm_set_efer(msr_content) )
             return HNDL_exception_raised;
+        if ( !(msr_content & EFER_LME) )
+            break;
+        svm_enable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
+        svm_enable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
+        svm_enable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
         break;
 
     case MSR_IA32_MC4_MISC: /* Threshold register */
@@ -1139,7 +1145,7 @@ static int svm_msr_write_intercept(struc
         if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
             break;
 
-        switch ( long_mode_do_msr_write(regs) )
+        switch ( long_mode_do_msr_write(v, regs) )
         {
         case HNDL_unhandled:
             wrmsr_hypervisor_regs(ecx, regs->eax, regs->edx);
diff -r 02003bee3e80 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Jun 25 18:31:10 2009 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c	Fri Jun 26 17:19:31 2009 +0200
@@ -78,29 +78,36 @@ struct host_save_area *alloc_host_save_a
     return hsa;
 }
 
-void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
+void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable)
 {
     unsigned long *msr_bitmap = v->arch.hvm_svm.msrpm;
+    unsigned long *msr_bit = NULL;
 
     /*
      * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
      */
     if ( msr <= 0x1fff )
     {
-        __clear_bit(msr*2, msr_bitmap + 0x000/BYTES_PER_LONG);
-        __clear_bit(msr*2+1, msr_bitmap + 0x000/BYTES_PER_LONG);
+        msr_bit = msr_bitmap + 0x0000 / BYTES_PER_LONG;
     }
     else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
     {
         msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x800/BYTES_PER_LONG);
-        __clear_bit(msr*2+1, msr_bitmap + 0x800/BYTES_PER_LONG);
+        msr_bit = msr_bitmap + 0x0800 / BYTES_PER_LONG;
     }
     else if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
     {
         msr &= 0x1fff;
-        __clear_bit(msr*2, msr_bitmap + 0x1000/BYTES_PER_LONG);
-        __clear_bit(msr*2+1, msr_bitmap + 0x1000/BYTES_PER_LONG);
+        msr_bit = msr_bitmap + 0x1000 / BYTES_PER_LONG;
+    }
+
+    BUG_ON(msr_bit == NULL);
+    if (enable) {
+        __set_bit(msr * 2, msr_bit);
+        __set_bit(msr * 2 + 1, msr_bit);
+    } else {
+        __clear_bit(msr * 2, msr_bit);
+        __clear_bit(msr * 2 + 1, msr_bit);
     }
 }
 
@@ -151,6 +158,14 @@ static int construct_vmcb(struct vcpu *v
     svm_disable_intercept_for_msr(v, MSR_STAR);
     svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
 
+    /* Assume a 32-bit legacy guest. We intercept MSR_EFER, and when the
+     * guest enables long mode, we enable the intercepts for the SYSENTER
+     * MSRs below (needed for sysenter/sysexit emulation).
+     */
+    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
+    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
+    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
+
     vmcb->msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
     vmcb->iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap);
 
diff -r 02003bee3e80 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Jun 25 18:31:10 2009 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Fri Jun 26 17:19:31 2009 +0200
@@ -481,7 +481,9 @@ void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
 
-void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr);
+#define svm_disable_intercept_for_msr(v, msr) svm_intercept_msr((v), (msr), 0)
+#define svm_enable_intercept_for_msr(v, msr)  svm_intercept_msr((v), (msr), 1)
+void svm_intercept_msr(struct vcpu *v, uint32_t msr, int enable);
 
 #endif /* ASM_X86_HVM_SVM_VMCS_H__ */
 
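
For reference (not part of the patch, and ignored by patch tools since it follows the last hunk): a minimal standalone sketch of the MSR-permission-bitmap indexing that svm_intercept_msr() relies on, per AMD64 APM Vol 2, Section 15.10. The helper name msrpm_bit_offset and the example MSR numbers printed in main() are illustrative only, not Xen identifiers.

/* Standalone illustration of the SVM MSR permission bitmap layout
 * (AMD64 APM Vol 2, Section 15.10). Each MSR owns two consecutive
 * bits (read intercept, write intercept); the three architected MSR
 * ranges start at byte offsets 0x0, 0x800 and 0x1000 in the bitmap,
 * which is why svm_intercept_msr() picks msr_bit from one of those
 * offsets and then sets/clears bits msr*2 and msr*2+1. */
#include <stdint.h>
#include <stdio.h>

/* Illustrative helper (not in Xen): returns the first of the two bit
 * positions for 'msr', or -1 if the MSR has no bitmap slot (accesses
 * to such MSRs are always intercepted). */
static long msrpm_bit_offset(uint32_t msr)
{
    if ( msr <= 0x1fff )
        return 0x0000 * 8 + msr * 2;
    if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
        return 0x0800 * 8 + (msr & 0x1fff) * 2;
    if ( (msr >= 0xc0010000) && (msr <= 0xc0011fff) )
        return 0x1000 * 8 + (msr & 0x1fff) * 2;
    return -1;
}

int main(void)
{
    /* MSR_IA32_SYSENTER_CS is 0x174; MSR_STAR is 0xc0000081. */
    printf("MSR_IA32_SYSENTER_CS (0x174): bit %ld\n",
           msrpm_bit_offset(0x174));
    printf("MSR_STAR (0xc0000081):        bit %ld\n",
           msrpm_bit_offset(0xc0000081));
    return 0;
}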