[Xen-changelog] [xen-unstable] hvm: Unify %cr0 handling.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] hvm: Unify %cr0 handling.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 16 Aug 2007 07:40:15 -0700
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1186585757 -3600
# Node ID 25e5c1b9faad01e03bb3a35b709d79c00697bf30
# Parent  359707941ae84944d757faa1c4c2b76c59fd7333
hvm: Unify %cr0 handling.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            |  103 ++++++++++++++++++++--
 xen/arch/x86/hvm/svm/svm.c        |  144 +++++++++----------------------
 xen/arch/x86/hvm/svm/vmcb.c       |   12 +-
 xen/arch/x86/hvm/vmx/vmx.c        |  176 +++++++-------------------------------
 xen/include/asm-x86/hvm/hvm.h     |    9 +
 xen/include/asm-x86/hvm/support.h |    1 +
 xen/include/asm-x86/hvm/vcpu.h    |    6 -
 7 files changed, 190 insertions(+), 261 deletions(-)
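
In outline: the vendor-specific %cr0 logic duplicated in svm_set_cr0() and
vmx_set_cr0() moves into a single common hvm_set_cr0(), which updates the
cached guest_cr[]/hw_cr[] values and then calls back into SVM/VMX code
through two hooks. A minimal sketch of that callback pattern (types
simplified here, not the exact Xen definitions):

    /* Sketch only: common code caches register state, then asks the
     * vendor implementation to push it into the VMCS/VMCB. */
    struct hvm_function_table {
        void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
        void (*update_guest_efer)(struct vcpu *v);
    };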

diff -r 359707941ae8 -r 25e5c1b9faad xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Aug 08 15:03:40 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed Aug 08 16:09:17 2007 +0100
@@ -76,13 +76,6 @@ void hvm_enable(struct hvm_function_tabl
     hvm_enabled = 1;
 }
 
-void hvm_stts(struct vcpu *v)
-{
-    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
-    if ( !v->fpu_dirtied )
-        hvm_funcs.stts(v);
-}
-
 void hvm_set_guest_time(struct vcpu *v, u64 gtime)
 {
     u64 host_tsc;
@@ -112,7 +105,8 @@ void hvm_do_resume(struct vcpu *v)
 {
     ioreq_t *p;
 
-    hvm_stts(v);
+    if ( !v->fpu_dirtied )
+        hvm_funcs.stts(v);
 
     pt_thaw_time(v);
 
@@ -518,6 +512,99 @@ void hvm_triple_fault(void)
     gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
              "invoking HVM system reset.\n", v->vcpu_id);
     domain_shutdown(v->domain, SHUTDOWN_reboot);
+}
+
+int hvm_set_cr0(unsigned long value)
+{
+    struct vcpu *v = current;
+    unsigned long mfn, old_base_mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
+  
+    HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
+
+    if ( (u32)value != value )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_1,
+                    "Guest attempts to set upper 32 bits in CR0: %lx",
+                    value);
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return 0;
+    }
+
+    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
+
+    /* ET is reserved and should always be 1. */
+    value |= X86_CR0_ET;
+
+    if ( (value & (X86_CR0_PE|X86_CR0_PG)) == X86_CR0_PG )
+    {
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return 0;
+    }
+
+    if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
+    {
+        if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
+        {
+            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
+            {
+                HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
+                hvm_inject_exception(TRAP_gp_fault, 0, 0);
+                return 0;
+            }
+            HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
+            v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
+            hvm_update_guest_efer(v);
+        }
+
+        if ( !paging_mode_hap(v->domain) )
+        {
+            /* The guest CR3 must be pointing to the guest physical. */
+            mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT);
+            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
+            {
+                gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
+                         v->arch.hvm_vcpu.guest_cr[3], mfn);
+                domain_crash(v->domain);
+                return 0;
+            }
+
+            /* Now arch.guest_table points to machine physical. */
+            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+            v->arch.guest_table = pagetable_from_pfn(mfn);
+            if ( old_base_mfn )
+                put_page(mfn_to_page(old_base_mfn));
+
+            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
+                        v->arch.hvm_vcpu.guest_cr[3], mfn);
+        }
+    }
+    else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
+    {
+        /* When CR0.PG is cleared, LMA is cleared immediately. */
+        if ( hvm_long_mode_enabled(v) )
+        {
+            v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
+            hvm_update_guest_efer(v);
+        }
+
+        if ( !paging_mode_hap(v->domain) && v->arch.hvm_vcpu.guest_cr[3] )
+        {
+            put_page(mfn_to_page(get_mfn_from_gpfn(
+                v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
+            v->arch.guest_table = pagetable_null();
+        }
+    }
+
+    v->arch.hvm_vcpu.guest_cr[0] = value;
+    v->arch.hvm_vcpu.hw_cr[0] = value;
+    if ( !paging_mode_hap(v->domain) )
+        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_PG | X86_CR0_WP;
+    hvm_update_guest_cr(v, 0);
+
+    if ( (value ^ old_value) & X86_CR0_PG )
+        paging_update_paging_modes(v);
+
+    return 1;
 }
 
 int hvm_set_cr3(unsigned long value)
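
With the common path above in place, each vendor's set_cr0 handler shrinks
to a thin wrapper, roughly of this shape (a sketch; vendor_set_cr0 is a
hypothetical name, and the real wrappers below additionally clear the
no-device exception intercept):

    static int vendor_set_cr0(unsigned long value)
    {
        struct vcpu *v = current;

        /* Common checks and guest_cr[0]/hw_cr[0] bookkeeping. */
        if ( hvm_set_cr0(value) == 0 )
            return 0;

        /* TS cleared? Then initialise FPU state eagerly. */
        if ( !(value & X86_CR0_TS) )
            setup_fpu(v);

        return 1;
    }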
diff -r 359707941ae8 -r 25e5c1b9faad xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Aug 08 15:03:40 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Aug 08 16:09:17 2007 +0100
@@ -344,7 +344,8 @@ int svm_vmcb_restore(struct vcpu *v, str
     vmcb->rflags = c->rflags;
 
     v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
-    vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;
+    vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = 
+        c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;
 
     v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
 
@@ -392,7 +393,7 @@ int svm_vmcb_restore(struct vcpu *v, str
     }
 
  skip_cr3:
-    vmcb->cr4 = c->cr4 | HVM_CR4_HOST_MASK;
+    vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] = c->cr4 | HVM_CR4_HOST_MASK;
     v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
     
     vmcb->idtr.limit = c->idtr_limit;
@@ -448,10 +449,10 @@ int svm_vmcb_restore(struct vcpu *v, str
 
     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
-        vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
-                     (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
-        vmcb->cr3 = c->cr3;
+        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = v->arch.hvm_vcpu.guest_cr[0];
+        vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] =
+            v->arch.hvm_vcpu.guest_cr[4] | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
+        vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3] = c->cr3;
         vmcb->np_enable = 1;
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
@@ -580,18 +581,38 @@ static void svm_update_host_cr3(struct v
 
 static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
 {
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
     switch ( cr )
     {
+    case 0:
+        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0];
+        break;
+    case 2:
+        vmcb->cr2 = v->arch.hvm_vcpu.hw_cr[2];
+        break;
     case 3:
-        v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
+        vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
         svm_asid_inv_asid(v);
         break;
     case 4:
-        v->arch.hvm_svm.vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4];
+        vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4];
         break;
     default:
         BUG();
     }
+}
+
+static void svm_update_guest_efer(struct vcpu *v)
+{
+#ifdef __x86_64__
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    if ( v->arch.hvm_vcpu.guest_efer & EFER_LMA )
+        vmcb->efer |= EFER_LME | EFER_LMA;
+    else
+        vmcb->efer &= ~(EFER_LME | EFER_LMA);
+#endif
 }
 
 static void svm_flush_guest_tlbs(void)
@@ -703,7 +724,7 @@ static void svm_stts(struct vcpu *v)
     if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
     {
         v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
-        vmcb->cr0 |= X86_CR0_TS;
+        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
     }
 }
 
@@ -928,6 +949,7 @@ static struct hvm_function_table svm_fun
     .get_segment_register = svm_get_segment_register,
     .update_host_cr3      = svm_update_host_cr3,
     .update_guest_cr      = svm_update_guest_cr,
+    .update_guest_efer    = svm_update_guest_efer,
     .flush_guest_tlbs     = svm_flush_guest_tlbs,
     .update_vtpr          = svm_update_vtpr,
     .stts                 = svm_stts,
@@ -1023,7 +1045,7 @@ static void svm_do_no_device_fault(struc
     vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
 
     if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
-        vmcb->cr0 &= ~X86_CR0_TS;
+        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
 }
 
 /* Reserved bits ECX: [31:14], [12:4], [2:1]*/
@@ -1597,31 +1619,11 @@ static int svm_set_cr0(unsigned long val
 static int svm_set_cr0(unsigned long value)
 {
     struct vcpu *v = current;
-    unsigned long mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    unsigned long old_base_mfn;
-  
-    HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
-
-    if ( (u32)value != value )
-    {
-        HVM_DBG_LOG(DBG_LEVEL_1,
-                    "Guest attempts to set upper 32 bits in CR0: %lx",
-                    value);
-        svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    int rc = hvm_set_cr0(value);
+
+    if ( rc == 0 )
         return 0;
-    }
-
-    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
-
-    /* ET is reserved and should be always be 1. */
-    value |= X86_CR0_ET;
-
-    if ( (value & (X86_CR0_PE|X86_CR0_PG)) == X86_CR0_PG )
-    {
-        svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-        return 0;
-    }
 
     /* TS cleared? Then initialise FPU now. */
     if ( !(value & X86_CR0_TS) )
@@ -1629,67 +1631,6 @@ static int svm_set_cr0(unsigned long val
         setup_fpu(v);
         vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
     }
-
-    if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
-    {
-        if ( svm_lme_is_set(v) )
-        {
-            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
-            {
-                HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
-                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-                return 0;
-            }
-            HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode");
-            v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
-            vmcb->efer |= EFER_LMA | EFER_LME;
-        }
-
-        if ( !paging_mode_hap(v->domain) )
-        {
-            /* The guest CR3 must be pointing to the guest physical. */
-            mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
-            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
-            {
-                gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
-                         v->arch.hvm_vcpu.guest_cr[3], mfn);
-                domain_crash(v->domain);
-                return 0;
-            }
-
-            /* Now arch.guest_table points to machine physical. */
-            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-            v->arch.guest_table = pagetable_from_pfn(mfn);
-            if ( old_base_mfn )
-                put_page(mfn_to_page(old_base_mfn));
-
-            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
-                        v->arch.hvm_vcpu.guest_cr[3], mfn);
-        }
-    }
-    else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
-    {
-        /* When CR0.PG is cleared, LMA is cleared immediately. */
-        if ( hvm_long_mode_enabled(v) )
-        {
-            vmcb->efer &= ~(EFER_LME | EFER_LMA);
-            v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
-        }
-
-        if ( !paging_mode_hap(v->domain) && v->arch.hvm_vcpu.guest_cr[3] )
-        {
-            put_page(mfn_to_page(get_mfn_from_gpfn(
-                v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
-            v->arch.guest_table = pagetable_null();
-        }
-    }
-
-    vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0] = value;
-    if ( !paging_mode_hap(v->domain) )
-        vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
-
-    if ( (value ^ old_value) & X86_CR0_PG )
-        paging_update_paging_modes(v);
 
     return 1;
 }
@@ -1833,7 +1774,7 @@ static void svm_cr_access(
         /* TS being cleared means that it's time to restore fpu state. */
         setup_fpu(current);
         vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
-        vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
+        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
         v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
         break;
 
@@ -2144,20 +2085,21 @@ static int svm_reset_to_realmode(struct 
 
     memset(regs, 0, sizeof(struct cpu_user_regs));
 
-    vmcb->cr0 = X86_CR0_ET | X86_CR0_PG | X86_CR0_WP;
+    vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] =
+        X86_CR0_ET | X86_CR0_PG | X86_CR0_WP;
     v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
 
     vmcb->cr2 = 0;
     vmcb->efer = EFER_SVME;
 
-    vmcb->cr4 = HVM_CR4_HOST_MASK;
+    vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
     v->arch.hvm_vcpu.guest_cr[4] = 0;
 
     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
-        vmcb->cr4 = (v->arch.hvm_vcpu.guest_cr[4] |
-                     (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
+        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = v->arch.hvm_vcpu.guest_cr[0];
+        vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] =
+            v->arch.hvm_vcpu.guest_cr[4] | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
     }
 
     /* This will jump to ROMBIOS */
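
A note on the recurring SVM idiom above: the chained assignment updates the
new hw_cr[] cache and the VMCB field in a single statement, so the two
copies cannot drift apart. For example:

    vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;

    /* ...is equivalent to: */
    v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
    vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0];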
diff -r 359707941ae8 -r 25e5c1b9faad xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Wed Aug 08 15:03:40 2007 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Wed Aug 08 16:09:17 2007 +0100
@@ -217,24 +217,26 @@ static int construct_vmcb(struct vcpu *v
     vmcb->tr.limit = 0xff;
 
     /* Guest CR0. */
-    vmcb->cr0 = read_cr0();
-    v->arch.hvm_vcpu.guest_cr[0] = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);
+    vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = read_cr0();
+    v->arch.hvm_vcpu.guest_cr[0] =
+        v->arch.hvm_vcpu.hw_cr[0] & ~(X86_CR0_PG | X86_CR0_TS);
 
     /* Guest CR4. */
     v->arch.hvm_vcpu.guest_cr[4] =
         read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
-    vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;
+    vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] =
+        v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;
 
     paging_update_paging_modes(v);
     vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3]; 
 
     if ( paging_mode_hap(v->domain) )
     {
-        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
+        vmcb->cr0 = v->arch.hvm_vcpu.hw_cr[0] = v->arch.hvm_vcpu.guest_cr[0];
         vmcb->np_enable = 1; /* enable nested paging */
         vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
         vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
-        vmcb->cr4 = v->arch.hvm_vcpu.guest_cr[4] =
+        vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4] = v->arch.hvm_vcpu.guest_cr[4] =
             HVM_CR4_HOST_MASK & ~X86_CR4_PAE;
         vmcb->exception_intercepts = HVM_TRAP_MASK;
 
diff -r 359707941ae8 -r 25e5c1b9faad xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Aug 08 15:03:40 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Aug 08 16:09:17 2007 +0100
@@ -61,6 +61,7 @@ static int  vmx_alloc_vlapic_mapping(str
 static int  vmx_alloc_vlapic_mapping(struct domain *d);
 static void vmx_free_vlapic_mapping(struct domain *d);
 static void vmx_install_vlapic_mapping(struct vcpu *v);
+static void vmx_update_guest_efer(struct vcpu *v);
 
 static int vmx_domain_initialise(struct domain *d)
 {
@@ -101,33 +102,6 @@ static void vmx_vcpu_destroy(struct vcpu
 }
 
 #ifdef __x86_64__
-
-static int vmx_lme_is_set(struct vcpu *v)
-{
-    return v->arch.hvm_vcpu.guest_efer & EFER_LME;
-}
-
-static void vmx_enable_long_mode(struct vcpu *v)
-{
-    unsigned long vm_entry_value;
-
-    vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
-    vm_entry_value |= VM_ENTRY_IA32E_MODE;
-    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
-
-    v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
-}
-
-static void vmx_disable_long_mode(struct vcpu *v)
-{
-    unsigned long vm_entry_value;
-
-    vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
-    vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
-    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
-
-    v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
-}
 
 static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
 
@@ -378,13 +352,6 @@ static void vmx_restore_guest_msrs(struc
 
 #else  /* __i386__ */
 
-static int vmx_lme_is_set(struct vcpu *v)
-{ return 0; }
-static void vmx_enable_long_mode(struct vcpu *v)
-{ BUG(); }
-static void vmx_disable_long_mode(struct vcpu *v)
-{ BUG(); }
-
 #define vmx_save_host_msrs()        ((void)0)
 
 static void vmx_restore_host_msrs(void)
@@ -644,8 +611,7 @@ int vmx_vmcs_restore(struct vcpu *v, str
  skip_cr3:
     v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
 
-    if ( hvm_long_mode_enabled(v) )
-        vmx_enable_long_mode(v);
+    vmx_update_guest_efer(v);
 
     __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
     v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
@@ -1095,6 +1061,14 @@ static void vmx_update_guest_cr(struct v
 
     switch ( cr )
     {
+    case 0:
+        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_PE | X86_CR0_NE;
+        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
+        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
+        break;
+    case 2:
+        /* CR2 is updated in exit stub. */
+        break;
     case 3:
         __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
         break;
@@ -1107,6 +1081,26 @@ static void vmx_update_guest_cr(struct v
     }
 
     vmx_vmcs_exit(v);
+}
+
+static void vmx_update_guest_efer(struct vcpu *v)
+{
+#ifdef __x86_64__
+    unsigned long vm_entry_value;
+
+    ASSERT((v == current) || !vcpu_runnable(v));
+
+    vmx_vmcs_enter(v);
+
+    vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
+    if ( v->arch.hvm_vcpu.guest_efer & EFER_LMA )
+        vm_entry_value |= VM_ENTRY_IA32E_MODE;
+    else
+        vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
+    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
+
+    vmx_vmcs_exit(v);
+#endif
 }
 
 static void vmx_flush_guest_tlbs(void)
@@ -1172,6 +1166,7 @@ static struct hvm_function_table vmx_fun
     .get_segment_register = vmx_get_segment_register,
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
+    .update_guest_efer    = vmx_update_guest_efer,
     .flush_guest_tlbs     = vmx_flush_guest_tlbs,
     .update_vtpr          = vmx_update_vtpr,
     .stts                 = vmx_stts,
@@ -2110,108 +2105,17 @@ static int vmx_set_cr0(unsigned long val
 static int vmx_set_cr0(unsigned long value)
 {
     struct vcpu *v = current;
-    unsigned long mfn;
     unsigned long eip;
-    int paging_enabled;
-    unsigned long old_cr0;
-    unsigned long old_base_mfn;
-
-    HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
-
-    if ( (u32)value != value )
-    {
-        HVM_DBG_LOG(DBG_LEVEL_1,
-                    "Guest attempts to set upper 32 bits in CR0: %lx",
-                    value);
-        vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+    int rc = hvm_set_cr0(value);
+
+    if ( rc == 0 )
         return 0;
-    }
-
-    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
-
-    /* ET is reserved and should be always be 1. */
-    value |= X86_CR0_ET;
-
-    if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PG )
-    {
-        vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-        return 0;
-    }
 
     /* TS cleared? Then initialise FPU now. */
     if ( !(value & X86_CR0_TS) )
     {
         setup_fpu(v);
         __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
-    }
-
-    old_cr0 = v->arch.hvm_vcpu.guest_cr[0];
-    paging_enabled = old_cr0 & X86_CR0_PG;
-
-    v->arch.hvm_vcpu.hw_cr[0] = (value | X86_CR0_PE | X86_CR0_PG |
-                                 X86_CR0_NE | X86_CR0_WP);
-    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
-
-    v->arch.hvm_vcpu.guest_cr[0] = value;
-    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
-
-    /* Trying to enable paging. */
-    if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
-    {
-        if ( vmx_lme_is_set(v) && !hvm_long_mode_enabled(v) )
-        {
-            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
-            {
-                HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
-                            "with EFER.LME set but not CR4.PAE");
-                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-                return 0;
-            }
-
-            HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
-            vmx_enable_long_mode(v);
-        }
-
-        /*
-         * The guest CR3 must be pointing to the guest physical.
-         */
-        mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
-        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
-        {
-            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
-                     v->arch.hvm_vcpu.guest_cr[3], mfn);
-            domain_crash(v->domain);
-            return 0;
-        }
-
-        /*
-         * Now arch.guest_table points to machine physical.
-         */
-        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-        v->arch.guest_table = pagetable_from_pfn(mfn);
-        if ( old_base_mfn )
-            put_page(mfn_to_page(old_base_mfn));
-
-        HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
-                    v->arch.hvm_vcpu.guest_cr[3], mfn);
-
-        paging_update_paging_modes(v);
-    }
-
-    /* Trying to disable paging. */
-    if ( ((value & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) &&
-         paging_enabled )
-    {
-        /* When CR0.PG is cleared, LMA is cleared immediately. */
-        if ( hvm_long_mode_enabled(v) )
-            vmx_disable_long_mode(v);
-
-        if ( v->arch.hvm_vcpu.guest_cr[3] )
-        {
-            put_page(mfn_to_page(get_mfn_from_gpfn(
-                      v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
-            v->arch.guest_table = pagetable_null();
-        }
     }
 
     /*
@@ -2219,14 +2123,8 @@ static int vmx_set_cr0(unsigned long val
      * real-mode by performing a world switch to VMXAssist whenever
      * a partition disables the CR0.PE bit.
      */
-    if ( (value & X86_CR0_PE) == 0 )
-    {
-        if ( value & X86_CR0_PG )
-        {
-            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-            return 0;
-        }
-
+    if ( !(value & X86_CR0_PE) )
+    {
         if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
         {
             eip = __vmread(GUEST_RIP);
@@ -2247,8 +2145,6 @@ static int vmx_set_cr0(unsigned long val
             return 0; /* do not update eip! */
         }
     }
-    else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
-        paging_update_paging_modes(v);
 
     return 1;
 }
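
Note how the two implementations of the new EFER hook differ: SVM writes
EFER.LME/LMA straight into the VMCB (svm_update_guest_efer() above), while
VMX reflects guest EFER.LMA through the VM-entry controls, replacing the
deleted vmx_enable_long_mode()/vmx_disable_long_mode() pair. In sketch form
(field names as in the hunks above):

    /* SVM: direct VMCB field. */
    vmcb->efer |= EFER_LME | EFER_LMA;

    /* VMX: IA-32e mode is requested via a VM-entry control. */
    vm_entry_value |= VM_ENTRY_IA32E_MODE;
    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);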
diff -r 359707941ae8 -r 25e5c1b9faad xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Wed Aug 08 15:03:40 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Wed Aug 08 16:09:17 2007 +0100
@@ -112,9 +112,10 @@ struct hvm_function_table {
     void (*update_host_cr3)(struct vcpu *v);
 
     /*
-     * Called to inform HVM layer that a guest control register has changed.
+     * Called to inform HVM layer that a guest CRn or EFER has changed.
      */
     void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
+    void (*update_guest_efer)(struct vcpu *v);
 
     /*
      * Called to ensure than all guest-specific mappings in a tagged TLB
@@ -225,6 +226,11 @@ static inline void hvm_update_guest_cr(s
     hvm_funcs.update_guest_cr(v, cr);
 }
 
+static inline void hvm_update_guest_efer(struct vcpu *v)
+{
+    hvm_funcs.update_guest_efer(v);
+}
+
 static inline void 
 hvm_flush_guest_tlbs(void)
 {
@@ -250,7 +256,6 @@ hvm_get_segment_register(struct vcpu *v,
 
 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
                                    unsigned int *ecx, unsigned int *edx);
-void hvm_stts(struct vcpu *v);
 void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
 
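
Common code reaches those per-vendor implementations through the inline
wrappers above; for instance, the long-mode enable path in hvm_set_cr0()
earlier in this patch does:

    v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
    hvm_update_guest_efer(v);  /* dispatches to the SVM/VMX hook */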
diff -r 359707941ae8 -r 25e5c1b9faad xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Aug 08 15:03:40 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h Wed Aug 08 16:09:17 2007 +0100
@@ -234,6 +234,7 @@ void hvm_hlt(unsigned long rflags);
 void hvm_hlt(unsigned long rflags);
 void hvm_triple_fault(void);
 
+int hvm_set_cr0(unsigned long value);
 int hvm_set_cr3(unsigned long value);
 int hvm_set_cr4(unsigned long value);
 
diff -r 359707941ae8 -r 25e5c1b9faad xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Wed Aug 08 15:03:40 2007 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h    Wed Aug 08 16:09:17 2007 +0100
@@ -33,11 +33,7 @@ struct hvm_vcpu {
     unsigned long       guest_cr[5];
     unsigned long       guest_efer;
 
-    /*
-     * Processor-visible CR0-4 while guest executes.
-     * Only CR3 is guaranteed to be valid: all other array entries are private
-     * to the specific HVM implementation (e.g., VMX, SVM).
-     */
+    /* Processor-visible control-register values, while guest executes. */
     unsigned long       hw_cr[5];
 
     struct hvm_io_op    io_op;
