[Xen-changelog] [xen-unstable] nestedsvm: fix fpu context switch

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] nestedsvm: fix fpu context switch
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Wed, 18 May 2011 07:25:18 +0100
Delivery-date: Tue, 17 May 2011 23:26:00 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Christoph Egger <Christoph.Egger@xxxxxxx>
# Date 1305549102 -3600
# Node ID 776b0c3e6544ea91f8b7e94214c9e4b390337003
# Parent  edcf8fc77b64955236a697691b223662354461b5
nestedsvm: fix fpu context switch

Two different VMCBs are used to run the l1 guest and the l2 guest.
When the host Xen switches the physical FPU to a different vCPU while
the l1 or l2 guest is running, the CR0.TS bit needs to be synced in
the other VMCB.

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Reviewed-by: Uwe Dannowski <Uwe.Dannowski@xxxxxxx>
Reviewed-by: Wei Huang <Wei.Huang2@xxxxxxx>
---
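
[Editor's note, not part of the patch: the TS-bit bookkeeping below can
be exercised in isolation. The following standalone model is an
illustration only, not Xen code; it mirrors nestedsvm_fpu_vmentry()
from the patch on plain structs, keeping just the two bits that matter
(CR0.TS and the #NM/TRAP_no_device exception intercept), so the
vmentry case builds and runs as an ordinary user-space C program.]

/* Standalone model (not Xen code) of the CR0.TS sync this patch adds. */
#include <stdint.h>
#include <stdio.h>

#define X86_CR0_TS       (1UL << 3)   /* CR0 Task Switched bit */
#define TRAP_no_device   7            /* #NM exception vector */

struct vmcb {
    uint64_t cr0;
    uint32_t exception_intercepts;
};

/* Mirror of nestedsvm_fpu_vmentry(): if the host set or cleared CR0.TS
 * while the l1 guest was running, carry that change into the l2 view
 * before the l2 guest is entered. */
static uint64_t fpu_vmentry(uint64_t n1cr0, struct vmcb *vvmcb,
                            struct vmcb *n1vmcb, struct vmcb *n2vmcb)
{
    uint64_t vcr0 = vvmcb->cr0;

    if ( !(n1cr0 & X86_CR0_TS) && (n1vmcb->cr0 & X86_CR0_TS) ) {
        /* FPU was taken away while l1 ran: l2 must trap on next FPU use. */
        vcr0 |= X86_CR0_TS;
        n2vmcb->exception_intercepts |= (1U << TRAP_no_device);
    } else if ( !(vcr0 & X86_CR0_TS) && (n2vmcb->cr0 & X86_CR0_TS) ) {
        /* FPU was handed back while l1 ran: let l2 use it directly. */
        vcr0 &= ~X86_CR0_TS;
        n2vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
    }
    return vcr0;
}

int main(void)
{
    /* Scenario: the host took the physical FPU away (set TS in the l1
     * VMCB) while the l1 guest was running; the cached l1 CR0 (ns_cr0)
     * still has TS clear. */
    struct vmcb vvmcb  = { .cr0 = 0 };          /* l1's VMCB for l2 */
    struct vmcb n1vmcb = { .cr0 = X86_CR0_TS }; /* host VMCB for l1 */
    struct vmcb n2vmcb = { .cr0 = 0 };          /* host VMCB for l2 */
    uint64_t cr0 = fpu_vmentry(0, &vvmcb, &n1vmcb, &n2vmcb);

    printf("l2 enters with CR0.TS=%d, #NM intercepted=%d\n",
           (int)!!(cr0 & X86_CR0_TS),
           (int)!!(n2vmcb.exception_intercepts & (1U << TRAP_no_device)));
    return 0;
}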


diff -r edcf8fc77b64 -r 776b0c3e6544 xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c  Mon May 16 13:29:24 2011 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c  Mon May 16 13:31:42 2011 +0100
@@ -165,6 +165,45 @@
     return 0;
 }
 
+static uint64_t nestedsvm_fpu_vmentry(uint64_t n1cr0,
+    struct vmcb_struct *vvmcb,
+    struct vmcb_struct *n1vmcb, struct vmcb_struct *n2vmcb)
+{
+    uint64_t vcr0;
+
+    vcr0 = vvmcb->_cr0;
+    if ( !(n1cr0 & X86_CR0_TS) && (n1vmcb->_cr0 & X86_CR0_TS) ) {
+        /* svm_fpu_leave() ran while the l1 guest was running.
+         * Sync FPU state with the l2 guest.
+         */
+        vcr0 |= X86_CR0_TS;
+        n2vmcb->_exception_intercepts |= (1U << TRAP_no_device);
+    } else if ( !(vcr0 & X86_CR0_TS) && (n2vmcb->_cr0 & X86_CR0_TS) ) {
+        /* svm_fpu_enter() ran while the l1 guest was running.
+         * Sync FPU state with the l2 guest. */
+        vcr0 &= ~X86_CR0_TS;
+        n2vmcb->_exception_intercepts &= ~(1U << TRAP_no_device);
+    }
+
+    return vcr0;
+}
+
+static void nestedsvm_fpu_vmexit(struct vmcb_struct *n1vmcb,
+    struct vmcb_struct *n2vmcb, uint64_t n1cr0, uint64_t guest_cr0)
+{
+    if ( !(guest_cr0 & X86_CR0_TS) && (n2vmcb->_cr0 & X86_CR0_TS) ) {
+        /* svm_fpu_leave() ran while the l2 guest was running.
+         * Sync FPU state with the l1 guest. */
+        n1vmcb->_cr0 |= X86_CR0_TS;
+        n1vmcb->_exception_intercepts |= (1U << TRAP_no_device);
+    } else if ( !(n1cr0 & X86_CR0_TS) && (n1vmcb->_cr0 & X86_CR0_TS) ) {
+        /* svm_fpu_enter() ran while the l2 guest was running.
+         * Sync FPU state with the l1 guest. */
+        n1vmcb->_cr0 &= ~X86_CR0_TS;
+        n1vmcb->_exception_intercepts &= ~(1U << TRAP_no_device);
+    }
+}
+
 static int nsvm_vcpu_hostsave(struct vcpu *v, unsigned int inst_len)
 {
     struct nestedsvm *svm = &vcpu_nestedsvm(v);
@@ -176,6 +215,13 @@
 
     n1vmcb->rip += inst_len;
 
+    /* Save shadowed values. This ensures that the l1 guest
+     * cannot override them to break out. */
+    n1vmcb->_efer = v->arch.hvm_vcpu.guest_efer;
+    n1vmcb->_cr0 = v->arch.hvm_vcpu.guest_cr[0];
+    n1vmcb->_cr2 = v->arch.hvm_vcpu.guest_cr[2];
+    n1vmcb->_cr4 = v->arch.hvm_vcpu.guest_cr[4];
+
     /* Remember the host interrupt flag */
     svm->ns_hostflags.fields.rflagsif =
         (n1vmcb->rflags & X86_EFLAGS_IF) ? 1 : 0;
@@ -186,6 +232,7 @@
 int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
+    struct nestedsvm *svm = &vcpu_nestedsvm(v);
     struct vmcb_struct *n1vmcb, *n2vmcb;
     int rc;
 
@@ -215,11 +262,14 @@
         gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
 
     /* CR0 */
+    nestedsvm_fpu_vmexit(n1vmcb, n2vmcb,
+        svm->ns_cr0, v->arch.hvm_vcpu.guest_cr[0]);
     v->arch.hvm_vcpu.guest_cr[0] = n1vmcb->_cr0 | X86_CR0_PE;
     n1vmcb->rflags &= ~X86_EFLAGS_VM;
     rc = hvm_set_cr0(n1vmcb->_cr0 | X86_CR0_PE);
     if (rc != X86EMUL_OKAY)
         gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
+    svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
 
     /* CR2 */
     v->arch.hvm_vcpu.guest_cr[2] = n1vmcb->_cr2;
@@ -336,6 +386,7 @@
     struct vmcb_struct *ns_vmcb, *n1vmcb, *n2vmcb;
     bool_t vcleanbits_valid;
     int rc;
+    uint64_t cr0;
 
     ns_vmcb = nv->nv_vvmcx;
     n1vmcb = nv->nv_n1vmcx;
@@ -470,8 +521,10 @@
         gdprintk(XENLOG_ERR, "hvm_set_cr4 failed, rc: %u\n", rc);
 
     /* CR0 */
+    svm->ns_cr0 = v->arch.hvm_vcpu.guest_cr[0];
+    cr0 = nestedsvm_fpu_vmentry(svm->ns_cr0, ns_vmcb, n1vmcb, n2vmcb);
     v->arch.hvm_vcpu.guest_cr[0] = ns_vmcb->_cr0;
-    rc = hvm_set_cr0(ns_vmcb->_cr0);
+    rc = hvm_set_cr0(cr0);
     if (rc != X86EMUL_OKAY)
         gdprintk(XENLOG_ERR, "hvm_set_cr0 failed, rc: %u\n", rc);
 
diff -r edcf8fc77b64 -r 776b0c3e6544 xen/include/asm-x86/hvm/svm/nestedsvm.h
--- a/xen/include/asm-x86/hvm/svm/nestedsvm.h   Mon May 16 13:29:24 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/nestedsvm.h   Mon May 16 13:31:42 2011 +0100
@@ -56,6 +56,9 @@
     /* Shadow io permission map */
     unsigned long *ns_iomap;
 
+    uint64_t ns_cr0; /* Cached guest_cr[0] of l1 guest while l2 guest runs.
+                      * Needed to handle FPU context switching */
+
     /* Cache guest cr3/host cr3 the guest sets up for the l2 guest.
      * Used by Shadow-on-Shadow and Nested-on-Nested.
      * ns_vmcb_guestcr3: in l2 guest physical address space and points to
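
[Editor's note: the vmexit direction is symmetric. Continuing the same
standalone model from above (again an illustration, not the Xen
sources), this mirrors nestedsvm_fpu_vmexit() from the patch: whichever
toggle of CR0.TS the host performed while the l2 guest was running is
carried back into the l1 VMCB, using the l1 CR0 value cached in ns_cr0
at vmentry time to tell the two cases apart.]

/* Mirror of nestedsvm_fpu_vmexit(), companion to the model above. */
static void fpu_vmexit(struct vmcb *n1vmcb, struct vmcb *n2vmcb,
                       uint64_t n1cr0, uint64_t guest_cr0)
{
    if ( !(guest_cr0 & X86_CR0_TS) && (n2vmcb->cr0 & X86_CR0_TS) ) {
        /* FPU was taken away while l2 ran: l1 must trap on next FPU use. */
        n1vmcb->cr0 |= X86_CR0_TS;
        n1vmcb->exception_intercepts |= (1U << TRAP_no_device);
    } else if ( !(n1cr0 & X86_CR0_TS) && (n1vmcb->cr0 & X86_CR0_TS) ) {
        /* FPU was handed back while l2 ran: let l1 use it directly. */
        n1vmcb->cr0 &= ~X86_CR0_TS;
        n1vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
    }
}

In both directions the rule is the same: the VMCB that was not live when
svm_fpu_enter()/svm_fpu_leave() ran must have its CR0.TS bit and #NM
intercept brought back in line before that guest context is resumed.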

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
