# HG changeset patch
# User cegger
# Date 1299670577 -3600
Implement SVM specific interrupt handling
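
The decision flow that svm_intr_assist() implements for the nested case
can be summarised by the following standalone sketch. It is an
illustration only, compilable with any C compiler: every name in it
(model_vcpu, intr_assist, the outcome values) is a hypothetical
stand-in, not part of Xen.

    /* Standalone model of the nested interrupt dispatch below.
     * All names here are made up for illustration. */
    #include <stdbool.h>
    #include <stdio.h>

    enum outcome {
        INJECT_INTO_L2, /* NSVM_INTR_NOTINTERCEPTED: deliver directly */
        VMEXIT_TO_L1,   /* NSVM_INTR_FORCEVMEXIT: emulate a VMEXIT */
        DEFER           /* GIF clear or NSVM_INTR_MASKED: retry later */
    };

    struct model_vcpu {
        bool gif;           /* virtual global interrupt flag (STGI/CLGI) */
        bool in_l2;         /* vcpu is currently running the l2 guest */
        bool l1_masked;     /* l1 blocks interrupts, e.g. RFLAGS.IF clear */
        bool l1_intercepts; /* l1 intercepts this interrupt source */
    };

    static enum outcome intr_assist(const struct model_vcpu *v)
    {
        if ( !v->gif )              /* hvm_intblk_svm_gif */
            return DEFER;
        if ( v->in_l2 ) {
            if ( v->l1_masked )     /* NSVM_INTR_MASKED */
                return DEFER;
            if ( v->l1_intercepts ) /* guest intercepts the exitcode */
                return VMEXIT_TO_L1;
        }
        return INJECT_INTO_L2;
    }

    int main(void)
    {
        struct model_vcpu v = { .gif = true, .in_l2 = true,
                                .l1_masked = false, .l1_intercepts = true };
        /* An intercepted interrupt while l2 runs forces a VMEXIT to l1. */
        printf("outcome: %d\n", intr_assist(&v));
        return 0;
    }

In the real code the VMEXIT_TO_L1 case is handled by
nestedsvm_vmexit_defer(), which also clears the virtual GIF so that no
further interrupts are taken until the emulated VMEXIT is injected.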

Signed-off-by: Christoph Egger

diff -r 3df1f127bd4f -r d8a80d4b19aa xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
 #include
 #include
 #include
@@ -74,15 +75,30 @@ static void svm_inject_extint(struct vcp
     ASSERT(vmcb->eventinj.fields.v == 0);
     vmcb->eventinj = event;
 }
- 
+
 static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
     vintr_t intr;
 
     ASSERT(intack.source != hvm_intsrc_none);
 
+    if ( nestedhvm_enabled(v->domain) ) {
+        struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+        if ( nv->nv_vmentry_pending ) {
+            struct vmcb_struct *gvmcb = nv->nv_vvmcx;
+
+            /* Check if the l1 guest injects an interrupt into the l2 guest
+             * via vintr; return here, or the l2 guest loses interrupts.
+             */
+            ASSERT(gvmcb != NULL);
+            intr = vmcb_get_vintr(gvmcb);
+            if ( intr.fields.irq )
+                return;
+        }
+    }
+
     HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                 vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1);
@@ -121,6 +137,7 @@ asmlinkage void svm_intr_assist(void)
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     struct hvm_intack intack;
+    enum hvm_intblk intblk;
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
@@ -130,6 +147,39 @@ asmlinkage void svm_intr_assist(void)
         if ( likely(intack.source == hvm_intsrc_none) )
             return;
 
+        intblk = hvm_interrupt_blocked(v, intack);
+        if ( intblk == hvm_intblk_svm_gif ) {
+            ASSERT(nestedhvm_enabled(v->domain));
+            return;
+        }
+
+        /* Interrupts for the nested guest are already
+         * set up in the vmcb.
+         */
+        if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
+        {
+            int rc;
+
+            /* The l2 guest was running when an interrupt for
+             * the l1 guest occurred.
+             */
+            rc = nestedsvm_vcpu_interrupt(v, intack);
+            switch (rc) {
+            case NSVM_INTR_NOTINTERCEPTED:
+                /* Inject the interrupt into the 2nd level guest directly. */
+                break;
+            case NSVM_INTR_NOTHANDLED:
+            case NSVM_INTR_FORCEVMEXIT:
+                return;
+            case NSVM_INTR_MASKED:
+                /* Guest already enabled an interrupt window. */
+                return;
+            default:
+                panic("%s: nestedsvm_vcpu_interrupt can't handle value 0x%x\n",
+                    __func__, rc);
+            }
+        }
+
         /*
          * Pending IRQs must be delayed if:
          * 1. An event is already pending. This is despite the fact that SVM
@@ -144,8 +194,7 @@ asmlinkage void svm_intr_assist(void)
          * have cleared the interrupt out of the IRR.
          * 2. The IRQ is masked.
          */
-        if ( unlikely(vmcb->eventinj.fields.v) ||
-             hvm_interrupt_blocked(v, intack) )
+        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
         {
             enable_intr_window(v, intack);
             return;
diff -r 3df1f127bd4f -r d8a80d4b19aa xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -25,6 +25,23 @@
 #include
 #include
 #include <asm/paging.h> /* paging_mode_hap */
+#include <xen/event.h> /* for local_event_delivery_(en|dis)able */
+
+static void
+nestedsvm_vcpu_clgi(struct vcpu *v)
+{
+    /* Clear the virtual GIF. */
+    vcpu_nestedsvm(v).ns_gif = 0;
+    local_event_delivery_disable(); /* mask events for PV drivers */
+}
+
+static void
+nestedsvm_vcpu_stgi(struct vcpu *v)
+{
+    /* Set the virtual GIF. */
+    vcpu_nestedsvm(v).ns_gif = 1;
+    local_event_delivery_enable(); /* unmask events for PV drivers */
+}
 
 static int
 nestedsvm_vmcb_isvalid(struct vcpu *v, uint64_t vmcxaddr)
@@ -145,6 +162,7 @@ int nsvm_vcpu_reset(struct vcpu *v)
     if (svm->ns_iomap)
         svm->ns_iomap = NULL;
 
+    nestedsvm_vcpu_stgi(v);
     return 0;
 }
 
@@ -601,6 +619,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct
         return ret;
     }
 
+    nestedsvm_vcpu_stgi(v);
    return 0;
 }
 
@@ -646,6 +665,7 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v,
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
     struct vmcb_struct *ns_vmcb;
 
+    ASSERT(svm->ns_gif == 0);
     ns_vmcb = nv->nv_vvmcx;
 
     if (nv->nv_vmexit_pending) {
@@ -1035,6 +1055,34 @@ nsvm_vmcb_hap_enabled(struct vcpu *v)
     return vcpu_nestedsvm(v).ns_hap_enabled;
 }
 
+enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
+{
+    struct nestedsvm *svm = &vcpu_nestedsvm(v);
+    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+
+    ASSERT(nestedhvm_enabled(v->domain));
+
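+    /* A cleared virtual GIF blocks all interrupt sources, including
+     * NMI and #MC, until the guest executes STGI again. */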
+    if ( !nestedsvm_gif_isset(v) )
+        return hvm_intblk_svm_gif;
+
+    if ( nestedhvm_vcpu_in_guestmode(v) ) {
+        if ( svm->ns_hostflags.fields.vintrmask )
+            if ( !svm->ns_hostflags.fields.rflagsif )
+                return hvm_intblk_rflags_ie;
+    }
+
+    if ( nv->nv_vmexit_pending ) {
+        /* hvm_inject_exception() must have run before us;
+         * exceptions have higher priority than interrupts.
+         */
+        return hvm_intblk_rflags_ie;
+    }
+
+    return hvm_intblk_none;
+}
+
 /* MSR handling */
 int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
 {
@@ -1090,6 +1136,7 @@ nestedsvm_vmexit_defer(struct vcpu *v,
 {
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
 
+    nestedsvm_vcpu_clgi(v);
     svm->ns_vmexit.exitcode = exitcode;
     svm->ns_vmexit.exitinfo1 = exitinfo1;
     svm->ns_vmexit.exitinfo2 = exitinfo2;
@@ -1276,4 +1323,100 @@ asmlinkage void nsvm_vcpu_switch(struct
     }
 }
 
+/* Interrupts, Virtual GIF */
+int
+nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack)
+{
+    int ret;
+    enum hvm_intblk intr;
+    uint64_t exitcode = VMEXIT_INTR;
+    uint64_t exitinfo2 = 0;
+    ASSERT(nestedhvm_vcpu_in_guestmode(v));
+
+    intr = nhvm_interrupt_blocked(v);
+    if ( intr != hvm_intblk_none )
+        return NSVM_INTR_MASKED;
+
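+    /* Translate the pending interrupt source into the exitcode and
+     * exitinfo2 that the l1 guest would observe on real hardware. */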
+    switch (intack.source) {
+    case hvm_intsrc_pic:
+    case hvm_intsrc_lapic:
+        exitcode = VMEXIT_INTR;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_nmi:
+        exitcode = VMEXIT_NMI;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_mce:
+        exitcode = VMEXIT_EXCEPTION_MC;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_none:
+        return NSVM_INTR_NOTHANDLED;
+    default:
+        BUG();
+    }
+
+    ret = nsvm_vmcb_guest_intercepts_exitcode(v,
+        guest_cpu_user_regs(), exitcode);
+    if (ret) {
+        nestedsvm_vmexit_defer(v, exitcode, intack.source, exitinfo2);
+        return NSVM_INTR_FORCEVMEXIT;
+    }
+
+    return NSVM_INTR_NOTINTERCEPTED;
+}
+
+bool_t
+nestedsvm_gif_isset(struct vcpu *v)
+{
+    struct nestedsvm *svm = &vcpu_nestedsvm(v);
+
+    return (!!svm->ns_gif);
+}
+
+void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v)
+{
+    unsigned int inst_len;
+
+    if ( !nestedhvm_enabled(v->domain) ) {
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        return;
+    }
+
+    if ( (inst_len = __get_instruction_length(v, INSTR_STGI)) == 0 )
+        return;
+
+    nestedsvm_vcpu_stgi(v);
+
+    __update_guest_eip(regs, inst_len);
+}
+
+void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    unsigned int inst_len;
+    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+    vintr_t intr;
+
+    if ( !nestedhvm_enabled(v->domain) ) {
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        return;
+    }
+
+    if ( (inst_len = __get_instruction_length(v, INSTR_CLGI)) == 0 )
+        return;
+
+    nestedsvm_vcpu_clgi(v);
+
+    /* After CLGI, no interrupts may be delivered until the next STGI. */
+    intr = vmcb_get_vintr(vmcb);
+    intr.fields.irq = 0;
+    general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
+    vmcb_set_vintr(vmcb, intr);
+    vmcb_set_general1_intercepts(vmcb, general1_intercepts);
+
+    __update_guest_eip(regs, inst_len);
+}
diff -r 3df1f127bd4f -r d8a80d4b19aa xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -78,8 +78,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(void *
 
 static bool_t amd_erratum383_found __read_mostly;
 
-static void inline __update_guest_eip(
-    struct cpu_user_regs *regs, unsigned int inst_len)
+void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
 {
     struct vcpu *curr = current;
 
@@ -1618,6 +1617,7 @@ static struct hvm_function_table __read_
     .nhvm_vcpu_asid = nsvm_vcpu_asid,
     .nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap,
     .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
+    .nhvm_intr_blocked = nsvm_intr_blocked,
 };
 
 asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
@@ -1929,7 +1929,11 @@ asmlinkage void svm_vmexit_handler(struc
         svm_vmexit_do_vmsave(vmcb, regs, v, regs->eax);
         break;
     case VMEXIT_STGI:
+        svm_vmexit_do_stgi(regs, v);
+        break;
     case VMEXIT_CLGI:
+        svm_vmexit_do_clgi(regs, v);
+        break;
     case VMEXIT_SKINIT:
         hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
         break;
diff -r 3df1f127bd4f -r d8a80d4b19aa xen/include/asm-x86/hvm/svm/nestedsvm.h
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h
@@ -23,7 +23,12 @@
 #include
 #include
 
+/* SVM specific intblk types; these cannot be enum members because gcc 4.5 complains. */
+/* GIF cleared */
+#define hvm_intblk_svm_gif hvm_intblk_arch
+
 struct nestedsvm {
+    bool_t ns_gif;
     uint64_t ns_msr_hsavepa; /* MSR HSAVE_PA value */
 
     /* l1 guest physical address of virtual vmcb used by prior VMRUN.
@@ -111,11 +116,23 @@ int nsvm_vmcb_guest_intercepts_exitcode(
     struct cpu_user_regs *regs, uint64_t exitcode);
 int nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr);
 bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
+enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
 
 /* MSRs */
 int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
 int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content);
 
+/* Interrupts, vGIF */
+void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
+void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
+bool_t nestedsvm_gif_isset(struct vcpu *v);
+
+#define NSVM_INTR_NOTHANDLED     3
+#define NSVM_INTR_NOTINTERCEPTED 2
+#define NSVM_INTR_FORCEVMEXIT    1
+#define NSVM_INTR_MASKED         0
+int nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack);
+
 #endif /* ASM_X86_HVM_SVM_NESTEDSVM_H__ */
 
 /*
diff -r 3df1f127bd4f -r d8a80d4b19aa xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -61,6 +61,7 @@ static inline void svm_vmsave(void *vmcb
 }
 
 unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
+void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
 
 extern u32 svm_feature_flags;