# HG changeset patch
# User cegger
# Date 1299670577 -3600
# Node ID 4530152eec39df1dfeee77b034cde0661a1235bf
# Parent a5e69b6fdd16a2c16d14afaad7025dfd794a44e1
Implement SVM specific interrupt handling
Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Committed-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
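[Note for readers of this archive, not part of the committed patch: the core of the change is the SVM-specific interrupt-blocking decision added in nsvm_intr_blocked() and the virtual GIF handling via the STGI/CLGI intercepts. Below is a minimal, self-contained C sketch of that blocking decision; the type and helper names (nsvm_state_model, nsvm_intr_blocked_model) are illustrative stand-ins for the real nestedsvm/nestedvcpu state used in the patch, not Xen code.]

/* Illustrative sketch only -- not part of the patch.  A self-contained
 * model of the blocking decision that nsvm_intr_blocked() makes below.
 * The struct fields are simplified stand-ins for the real nestedsvm /
 * nestedvcpu state. */
#include <stdbool.h>
#include <stdio.h>

enum intblk_model { INTBLK_NONE, INTBLK_RFLAGS_IE, INTBLK_SVM_GIF };

struct nsvm_state_model {
    bool gif;              /* virtual global interrupt flag (ns_gif)       */
    bool in_guestmode;     /* vcpu is currently running the l2 guest       */
    bool vintr_masking;    /* l1 set V_INTR_MASKING for the l2 guest       */
    bool host_rflags_if;   /* l1 guest's RFLAGS.IF at VMRUN time           */
    bool vmexit_pending;   /* an emulated VMEXIT to the l1 guest is queued */
};

static enum intblk_model
nsvm_intr_blocked_model(const struct nsvm_state_model *s)
{
    if ( !s->gif )
        return INTBLK_SVM_GIF;      /* CLGI: everything is gated           */

    if ( s->in_guestmode && s->vintr_masking && !s->host_rflags_if )
        return INTBLK_RFLAGS_IE;    /* l1 had interrupts masked at VMRUN   */

    if ( s->vmexit_pending )
        return INTBLK_RFLAGS_IE;    /* deliver the pending VMEXIT first    */

    return INTBLK_NONE;
}

int main(void)
{
    struct nsvm_state_model s = { .gif = false };
    printf("blocked: %d\n", nsvm_intr_blocked_model(&s)); /* 2 == INTBLK_SVM_GIF */
    return 0;
}

[The real nsvm_intr_blocked() below additionally asserts that nested HVM is enabled for the domain before making this decision.]
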
diff -r a5e69b6fdd16 -r 4530152eec39 xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c Wed Mar 09 12:36:05 2011 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c Wed Mar 09 12:36:17 2011 +0100
@@ -33,6 +33,7 @@
#include <asm/hvm/vlapic.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/intr.h>
+#include <asm/hvm/nestedhvm.h> /* for nestedhvm_vcpu_in_guestmode */
#include <xen/event.h>
#include <xen/kernel.h>
#include <public/hvm/ioreq.h>
@@ -74,15 +75,30 @@
ASSERT(vmcb->eventinj.fields.v == 0);
vmcb->eventinj = event;
}
-
+
static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+ uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
vintr_t intr;
ASSERT(intack.source != hvm_intsrc_none);
+    if ( nestedhvm_enabled(v->domain) ) {
+        struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+        if ( nv->nv_vmentry_pending ) {
+            struct vmcb_struct *gvmcb = nv->nv_vvmcx;
+
+            /* Check whether the l1 guest is injecting an interrupt into
+             * the l2 guest via vintr; return here, otherwise the l2 guest
+             * loses interrupts.
+             */
+            ASSERT(gvmcb != NULL);
+            intr = vmcb_get_vintr(gvmcb);
+            if ( intr.fields.irq )
+                return;
+        }
+    }
+
HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1);
@@ -121,6 +137,7 @@
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
struct hvm_intack intack;
+ enum hvm_intblk intblk;
/* Crank the handle on interrupt state. */
pt_update_irq(v);
@@ -130,6 +147,39 @@
if ( likely(intack.source == hvm_intsrc_none) )
return;
+    intblk = hvm_interrupt_blocked(v, intack);
+    if ( intblk == hvm_intblk_svm_gif ) {
+        ASSERT(nestedhvm_enabled(v->domain));
+        return;
+    }
+
+    /* Interrupts for the nested guest are already
+     * in the vmcb.
+     */
+    if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
+    {
+        int rc;
+
+        /* The l2 guest was running when an interrupt for
+         * the l1 guest occurred.
+         */
+        rc = nestedsvm_vcpu_interrupt(v, intack);
+        switch (rc) {
+        case NSVM_INTR_NOTINTERCEPTED:
+            /* Inject the interrupt into the l2 guest directly. */
+            break;
+        case NSVM_INTR_NOTHANDLED:
+        case NSVM_INTR_FORCEVMEXIT:
+            return;
+        case NSVM_INTR_MASKED:
+            /* The guest already enabled an interrupt window. */
+            return;
+        default:
+            panic("%s: nestedsvm_vcpu_interrupt can't handle value 0x%x\n",
+                  __func__, rc);
+        }
+    }
+
/*
* Pending IRQs must be delayed if:
* 1. An event is already pending. This is despite the fact that SVM
@@ -144,8 +194,7 @@
* have cleared the interrupt out of the IRR.
* 2. The IRQ is masked.
*/
- if ( unlikely(vmcb->eventinj.fields.v) ||
- hvm_interrupt_blocked(v, intack) )
+ if ( unlikely(vmcb->eventinj.fields.v) || intblk )
{
enable_intr_window(v, intack);
return;
diff -r a5e69b6fdd16 -r 4530152eec39 xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c Wed Mar 09 12:36:05 2011 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c Wed Mar 09 12:36:17 2011 +0100
@@ -25,6 +25,23 @@
#include <asm/hvm/svm/nestedsvm.h>
#include <asm/hvm/svm/svmdebug.h>
#include <asm/paging.h> /* paging_mode_hap */
+#include <asm/event.h> /* for local_event_delivery_(en|dis)able */
+
+static void
+nestedsvm_vcpu_clgi(struct vcpu *v)
+{
+    /* clear gif flag */
+    vcpu_nestedsvm(v).ns_gif = 0;
+    local_event_delivery_disable(); /* mask events for PV drivers */
+}
+
+static void
+nestedsvm_vcpu_stgi(struct vcpu *v)
+{
+    /* enable gif flag */
+    vcpu_nestedsvm(v).ns_gif = 1;
+    local_event_delivery_enable(); /* unmask events for PV drivers */
+}
static int
nestedsvm_vmcb_isvalid(struct vcpu *v, uint64_t vmcxaddr)
@@ -145,6 +162,7 @@
if (svm->ns_iomap)
svm->ns_iomap = NULL;
+ nestedsvm_vcpu_stgi(v);
return 0;
}
@@ -601,6 +619,7 @@
return ret;
}
+ nestedsvm_vcpu_stgi(v);
return 0;
}
@@ -646,6 +665,7 @@
struct nestedsvm *svm = &vcpu_nestedsvm(v);
struct vmcb_struct *ns_vmcb;
+ ASSERT(svm->ns_gif == 0);
ns_vmcb = nv->nv_vvmcx;
if (nv->nv_vmexit_pending) {
@@ -1035,6 +1055,32 @@
return vcpu_nestedsvm(v).ns_hap_enabled;
}
+enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
+{
+    struct nestedsvm *svm = &vcpu_nestedsvm(v);
+    struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+
+    ASSERT(nestedhvm_enabled(v->domain));
+
+    if ( !nestedsvm_gif_isset(v) )
+        return hvm_intblk_svm_gif;
+
+    if ( nestedhvm_vcpu_in_guestmode(v) ) {
+        if ( svm->ns_hostflags.fields.vintrmask )
+            if ( !svm->ns_hostflags.fields.rflagsif )
+                return hvm_intblk_rflags_ie;
+    }
+
+    if ( nv->nv_vmexit_pending ) {
+        /* hvm_inject_exception() must have run before;
+         * exceptions have higher priority than interrupts.
+         */
+        return hvm_intblk_rflags_ie;
+    }
+
+    return hvm_intblk_none;
+}
+
/* MSR handling */
int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
{
@@ -1090,6 +1136,7 @@
{
struct nestedsvm *svm = &vcpu_nestedsvm(v);
+ nestedsvm_vcpu_clgi(v);
svm->ns_vmexit.exitcode = exitcode;
svm->ns_vmexit.exitinfo1 = exitinfo1;
svm->ns_vmexit.exitinfo2 = exitinfo2;
@@ -1276,4 +1323,98 @@
}
}
+/* Interrupts, Virtual GIF */
+int
+nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack)
+{
+    int ret;
+    enum hvm_intblk intr;
+    uint64_t exitcode = VMEXIT_INTR;
+    uint64_t exitinfo2 = 0;
+    ASSERT(nestedhvm_vcpu_in_guestmode(v));
+    intr = nhvm_interrupt_blocked(v);
+    if ( intr != hvm_intblk_none )
+        return NSVM_INTR_MASKED;
+
+    switch (intack.source) {
+    case hvm_intsrc_pic:
+    case hvm_intsrc_lapic:
+        exitcode = VMEXIT_INTR;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_nmi:
+        exitcode = VMEXIT_NMI;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_mce:
+        exitcode = VMEXIT_EXCEPTION_MC;
+        exitinfo2 = intack.vector;
+        break;
+    case hvm_intsrc_none:
+        return NSVM_INTR_NOTHANDLED;
+    default:
+        BUG();
+    }
+
+    ret = nsvm_vmcb_guest_intercepts_exitcode(v,
+              guest_cpu_user_regs(), exitcode);
+    if (ret) {
+        nestedsvm_vmexit_defer(v, exitcode, intack.source, exitinfo2);
+        return NSVM_INTR_FORCEVMEXIT;
+    }
+
+    return NSVM_INTR_NOTINTERCEPTED;
+}
+
+bool_t
+nestedsvm_gif_isset(struct vcpu *v)
+{
+ struct nestedsvm *svm = &vcpu_nestedsvm(v);
+
+ return (!!svm->ns_gif);
+}
+
+void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v)
+{
+    unsigned int inst_len;
+
+    if ( !nestedhvm_enabled(v->domain) ) {
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        return;
+    }
+
+    if ( (inst_len = __get_instruction_length(v, INSTR_STGI)) == 0 )
+        return;
+
+    nestedsvm_vcpu_stgi(v);
+
+    __update_guest_eip(regs, inst_len);
+}
+
+void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    unsigned int inst_len;
+    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+    vintr_t intr;
+
+    if ( !nestedhvm_enabled(v->domain) ) {
+        hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
+        return;
+    }
+
+    if ( (inst_len = __get_instruction_length(v, INSTR_CLGI)) == 0 )
+        return;
+
+    nestedsvm_vcpu_clgi(v);
+
+    /* After a CLGI no interrupts should be delivered. */
+    intr = vmcb_get_vintr(vmcb);
+    intr.fields.irq = 0;
+    general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
+    vmcb_set_vintr(vmcb, intr);
+    vmcb_set_general1_intercepts(vmcb, general1_intercepts);
+
+    __update_guest_eip(regs, inst_len);
+}
diff -r a5e69b6fdd16 -r 4530152eec39 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Mar 09 12:36:05 2011 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Mar 09 12:36:17 2011 +0100
@@ -78,8 +78,7 @@
static bool_t amd_erratum383_found __read_mostly;
-static void inline __update_guest_eip(
- struct cpu_user_regs *regs, unsigned int inst_len)
+void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
{
struct vcpu *curr = current;
@@ -1618,6 +1617,7 @@
.nhvm_vcpu_asid = nsvm_vcpu_asid,
.nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap,
.nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
+ .nhvm_intr_blocked = nsvm_intr_blocked,
};
asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
@@ -1929,7 +1929,11 @@
svm_vmexit_do_vmsave(vmcb, regs, v, regs->eax);
break;
case VMEXIT_STGI:
+ svm_vmexit_do_stgi(regs, v);
+ break;
case VMEXIT_CLGI:
+ svm_vmexit_do_clgi(regs, v);
+ break;
case VMEXIT_SKINIT:
hvm_inject_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE, 0);
break;
diff -r a5e69b6fdd16 -r 4530152eec39 xen/include/asm-x86/hvm/svm/nestedsvm.h
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h Wed Mar 09 12:36:05 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h Wed Mar 09 12:36:17 2011 +0100
@@ -23,7 +23,12 @@
#include <asm/hvm/hvm.h>
#include <asm/hvm/svm/vmcb.h>
+/* SVM specific intblk types, cannot be an enum because gcc 4.5 complains */
+/* GIF cleared */
+#define hvm_intblk_svm_gif hvm_intblk_arch
+
struct nestedsvm {
+ bool_t ns_gif;
uint64_t ns_msr_hsavepa; /* MSR HSAVE_PA value */
/* l1 guest physical address of virtual vmcb used by prior VMRUN.
@@ -111,11 +116,23 @@
struct cpu_user_regs *regs, uint64_t exitcode);
int nsvm_vmcb_guest_intercepts_trap(struct vcpu *v, unsigned int trapnr);
bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
+enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
/* MSRs */
int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content);
+/* Interrupts, vGIF */
+void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
+void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
+bool_t nestedsvm_gif_isset(struct vcpu *v);
+
+#define NSVM_INTR_NOTHANDLED 3
+#define NSVM_INTR_NOTINTERCEPTED 2
+#define NSVM_INTR_FORCEVMEXIT 1
+#define NSVM_INTR_MASKED 0
+int nestedsvm_vcpu_interrupt(struct vcpu *v, const struct hvm_intack intack);
+
#endif /* ASM_X86_HVM_SVM_NESTEDSVM_H__ */
/*
diff -r a5e69b6fdd16 -r 4530152eec39 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Wed Mar 09 12:36:05 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h Wed Mar 09 12:36:17 2011 +0100
@@ -61,6 +61,7 @@
}
unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
+void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
extern u32 svm_feature_flags;