# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1211187806 -3600
# Node ID 2ada81810ddb73f29dfd1eb00de466eec2881ce6
# Parent 8dce20be0bd5a4d3abaebb84b3c749e8d69bfb48
svm: Better handling of NMI injection -- avoid nested NMIs.
We do this by emulating the NMI mask, which blocks NMI delivery until
the next IRET on native hardware.
Signed-off-by: Gianluca Guida <gianluca.guida@xxxxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
xen/arch/x86/hvm/svm/intr.c | 6 ++++++
xen/arch/x86/hvm/svm/svm.c | 38 ++++++++++++++++++++++++++++++++++++--
2 files changed, 42 insertions(+), 2 deletions(-)
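
To make the mechanism concrete before the diff itself, here is a minimal
standalone C sketch of the idea. All names (vmcb_model, try_inject_nmi,
on_vmexit_iret) are illustrative stand-ins, not Xen's real structures:
injecting an NMI arms an IRET intercept that acts as the virtual NMI
mask, and the intercept firing lifts it again.

#include <stdbool.h>
#include <stdio.h>

struct vmcb_model {
    bool event_pending;     /* models vmcb->eventinj.fields.v     */
    bool iret_intercepted;  /* models GENERAL1_INTERCEPT_IRET bit */
};

/*
 * Injecting an NMI arms the IRET intercept: until the guest executes
 * IRET (ending its NMI handler), no further NMI may be injected.
 */
static bool try_inject_nmi(struct vmcb_model *v)
{
    if ( v->iret_intercepted )
        return false;       /* virtual NMI mask set: hold the NMI */
    v->event_pending = true;
    v->iret_intercepted = true;
    return true;
}

/* The VMEXIT_IRET handler lifts the virtual NMI mask. */
static void on_vmexit_iret(struct vmcb_model *v)
{
    v->iret_intercepted = false;
}

int main(void)
{
    struct vmcb_model v = { 0 };
    printf("first NMI injected:     %d\n", try_inject_nmi(&v));  /* 1 */
    printf("nested NMI held off:    %d\n", !try_inject_nmi(&v)); /* 1 */
    on_vmexit_iret(&v);
    printf("NMI allowed after IRET: %d\n", try_inject_nmi(&v));  /* 1 */
    return 0;
}
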
diff -r 8dce20be0bd5 -r 2ada81810ddb xen/arch/x86/hvm/svm/intr.c
--- a/xen/arch/x86/hvm/svm/intr.c Mon May 19 09:46:02 2008 +0100
+++ b/xen/arch/x86/hvm/svm/intr.c Mon May 19 10:03:26 2008 +0100
@@ -51,6 +51,12 @@ static void svm_inject_nmi(struct vcpu *
ASSERT(vmcb->eventinj.fields.v == 0);
vmcb->eventinj = event;
+
+ /*
+ * SVM does not virtualise the NMI mask, so we emulate it by intercepting
+ * the next IRET and blocking NMI injection until the intercept triggers.
+ */
+ vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
}
static void svm_inject_extint(struct vcpu *v, int vector)
diff -r 8dce20be0bd5 -r 2ada81810ddb xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Mon May 19 09:46:02 2008 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Mon May 19 10:03:26 2008 +0100
@@ -367,15 +367,27 @@ static unsigned int svm_get_interrupt_sh
static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- return (vmcb->interrupt_shadow ?
- (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI) : 0);
+ unsigned int intr_shadow = 0;
+
+ if ( vmcb->interrupt_shadow )
+ intr_shadow |= HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI;
+
+ if ( vmcb->general1_intercepts & GENERAL1_INTERCEPT_IRET )
+ intr_shadow |= HVM_INTR_SHADOW_NMI;
+
+ return intr_shadow;
}
static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
vmcb->interrupt_shadow =
!!(intr_shadow & (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI));
+
+ vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+ if ( intr_shadow & HVM_INTR_SHADOW_NMI )
+ vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IRET;
}
static int svm_guest_x86_mode(struct vcpu *v)
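
Two properties of the pair of functions above are worth spelling out:
the NMI shadow bit survives a set/get round trip (so save/restore
preserves the virtual NMI mask), while SVM's single hardware shadow bit
means the STI and MOV-SS shadows collapse into one. A small sketch, with
invented SHADOW_* values standing in for the real HVM_INTR_SHADOW_*
constants:

#include <assert.h>

#define SHADOW_STI    (1u << 0)  /* illustrative values, not Xen's */
#define SHADOW_MOV_SS (1u << 1)
#define SHADOW_NMI    (1u << 2)

struct vmcb_model {
    unsigned int interrupt_shadow;  /* SVM's single shadow bit        */
    unsigned int iret_intercept;    /* models the IRET intercept bit  */
};

static void set_shadow(struct vmcb_model *v, unsigned int s)
{
    v->interrupt_shadow = !!(s & (SHADOW_STI | SHADOW_MOV_SS));
    v->iret_intercept = !!(s & SHADOW_NMI);
}

static unsigned int get_shadow(const struct vmcb_model *v)
{
    unsigned int s = 0;
    if ( v->interrupt_shadow )
        s |= SHADOW_STI | SHADOW_MOV_SS;
    if ( v->iret_intercept )
        s |= SHADOW_NMI;
    return s;
}

int main(void)
{
    struct vmcb_model v;

    /* The NMI bit round-trips independently of the STI/MOV-SS bits. */
    set_shadow(&v, SHADOW_NMI);
    assert(get_shadow(&v) == SHADOW_NMI);

    /* One hardware bit: STI and MOV-SS shadows collapse together. */
    set_shadow(&v, SHADOW_STI);
    assert(get_shadow(&v) == (SHADOW_STI | SHADOW_MOV_SS));
    return 0;
}
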
@@ -1266,6 +1278,15 @@ asmlinkage void svm_vmexit_handler(struc
reason = TSW_call_or_int;
if ( (vmcb->exitinfo2 >> 44) & 1 )
errcode = (uint32_t)vmcb->exitinfo2;
+
+ /*
+ * Some processors set the EXITINTINFO field when the task switch
+ * is caused by a task gate in the IDT. In this case we will be
+ * emulating the event injection, so we do not want the processor
+ * to re-inject the original event!
+ */
+ vmcb->eventinj.bytes = 0;
+
hvm_task_switch((uint16_t)vmcb->exitinfo1, reason, errcode);
break;
}
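
The comment above can be made concrete with a toy model: if eventinj
were left valid, the guest would see the event twice, once from Xen's
software emulation of the task switch and once re-injected by the
processor on the next VM entry. A hedged sketch, with all names
invented:

#include <stdio.h>

/* Counts how many times the guest sees the event. */
static int deliveries;

struct vmcb_model { int eventinj_valid; };

/* Xen emulates the task switch, delivering the event in software. */
static void emulate_task_switch(void) { deliveries++; }

/* On VM entry, hardware injects whatever eventinj still holds. */
static void vmentry(struct vmcb_model *v)
{
    if ( v->eventinj_valid )
        deliveries++;   /* a second, spurious delivery */
}

int main(void)
{
    struct vmcb_model v = { .eventinj_valid = 1 }; /* set by the CPU */
    v.eventinj_valid = 0;  /* the fix: zero eventinj before emulating */
    emulate_task_switch();
    vmentry(&v);
    printf("event delivered %d time(s)\n", deliveries); /* 1, not 2 */
    return 0;
}
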
@@ -1331,6 +1352,19 @@ asmlinkage void svm_vmexit_handler(struc
svm_do_nested_pgfault(vmcb->exitinfo2, regs);
break;
+ case VMEXIT_IRET:
+ /*
+ * IRET clears the NMI mask. However, because we clear the mask
+ * /before/ executing IRET, we set the interrupt shadow to prevent
+ * a pending NMI from being injected immediately. This will work
+ * perfectly unless the IRET instruction faults: in that case we
+ * may inject an NMI before the NMI handler's IRET instruction is
+ * retired.
+ */
+ vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
+ vmcb->interrupt_shadow = 1;
+ break;
+
default:
exit_and_crash:
gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "
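
Putting the VMEXIT_IRET handling together: the handler lifts the
emulated NMI mask but raises the one-instruction interrupt shadow, so a
pending NMI cannot be delivered until the IRET itself has retired. A
speculative sketch of that timeline, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

struct vmcb_model {
    bool iret_intercepted;  /* the emulated NMI mask              */
    bool interrupt_shadow;  /* one-instruction interrupt shadow   */
};

/*
 * VMEXIT_IRET: lift the NMI mask, but shadow the next instruction
 * (the IRET itself) so a pending NMI cannot land before IRET retires.
 */
static void on_vmexit_iret(struct vmcb_model *v)
{
    v->iret_intercepted = false;
    v->interrupt_shadow = true;
}

static bool nmi_blocked(const struct vmcb_model *v)
{
    return v->iret_intercepted || v->interrupt_shadow;
}

/* Hardware drops the shadow once one guest instruction has retired. */
static void guest_retires_instruction(struct vmcb_model *v)
{
    v->interrupt_shadow = false;
}

int main(void)
{
    struct vmcb_model v = { .iret_intercepted = true };
    on_vmexit_iret(&v);
    printf("NMI blocked while IRET executes: %d\n", nmi_blocked(&v));
    guest_retires_instruction(&v);                 /* IRET retires */
    printf("NMI blocked after IRET retires:  %d\n", nmi_blocked(&v));
    return 0;
}

The caveat in the comment is visible in the model: it assumes the
shadowed instruction is the IRET itself, so if the IRET faults the
shadow is consumed early and an NMI may be injected before the NMI
handler's IRET actually completes.
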