[Xen-changelog] [xen-unstable] [HVM][SVM] Turn off long mode (EFER.LMA) when CR0.PG==0.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [HVM][SVM] Turn off long mode (EFER.LMA) when CR0.PG==0.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 07 Nov 2006 12:30:22 +0000
Delivery-date: Tue, 07 Nov 2006 04:31:04 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID a174f9787014e4887d16a3f2685dd31b15c9975a
# Parent  d745f1420d5b98aef8e518ff306b96fbfc7e6708
[HVM][SVM] Turn off long mode (EFER.LMA) when CR0.PG==0.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c         |   22 +++++++++++++---------
 xen/include/asm-x86/hvm/svm/vmcb.h |    4 ----
 2 files changed, 13 insertions(+), 13 deletions(-)

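For context, the architectural rule this patch enforces: on x86-64, EFER.LMA is only active while both EFER.LME and CR0.PG are set, so a guest write that clears CR0.PG must also drop long mode. Below is a minimal, self-contained sketch of that rule for illustration only; the toy struct, constants and function names are not Xen's (the real logic lives in svm_set_cr0() in the diff that follows).

/* Illustrative sketch of the EFER.LMA rule; not Xen code. */
#include <stdint.h>
#include <stdio.h>

#define CR0_PG   (1ULL << 31)   /* paging enable */
#define EFER_LME (1ULL << 8)    /* long mode enable */
#define EFER_LMA (1ULL << 10)   /* long mode active */

struct toy_vcpu {
    uint64_t cr0;
    uint64_t efer;
};

/* Emulated CR0 write: LMA tracks CR0.PG while LME is set. */
static void toy_set_cr0(struct toy_vcpu *v, uint64_t value)
{
    if ( (value & CR0_PG) && (v->efer & EFER_LME) )
        v->efer |= EFER_LMA;     /* paging on with LME set: long mode active */
    else
        v->efer &= ~EFER_LMA;    /* paging off: long mode must be inactive */
    v->cr0 = value;
}

int main(void)
{
    struct toy_vcpu v = { .cr0 = CR0_PG, .efer = EFER_LME | EFER_LMA };
    toy_set_cr0(&v, v.cr0 & ~CR0_PG);   /* guest clears CR0.PG */
    printf("EFER.LMA after CR0.PG=0: %d\n", !!(v.efer & EFER_LMA));   /* prints 0 */
    return 0;
}
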
diff -r d745f1420d5b -r a174f9787014 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Nov 07 10:18:50 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Nov 07 10:19:20 2006 +0000
@@ -264,6 +264,11 @@ static int svm_pae_enabled(struct vcpu *
     return (cr4 & X86_CR4_PAE);
 }
 
+static int svm_long_mode_enabled(struct vcpu *v)
+{
+    return test_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
+}
+
 #define IS_CANO_ADDRESS(add) 1
 
 static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
@@ -369,7 +374,7 @@ static inline int long_mode_do_msr_write
 
     case MSR_FS_BASE:
     case MSR_GS_BASE:
-        if (!(SVM_LONG_GUEST(vc)))
+        if ( !svm_long_mode_enabled(vc) )
             domain_crash_synchronous();
 
         if (!IS_CANO_ADDRESS(msr_content))
@@ -680,11 +685,6 @@ static void svm_load_cpu_guest_regs(
     struct vcpu *v, struct cpu_user_regs *regs)
 {
     svm_load_cpu_user_regs(v, regs);
-}
-
-int svm_long_mode_enabled(struct vcpu *v)
-{
-    return SVM_LONG_GUEST(v);
 }
 
 static void arch_svm_do_launch(struct vcpu *v) 
@@ -1487,9 +1487,8 @@ static int svm_set_cr0(unsigned long val
         {
             /* Here the PAE is should to be opened */
             HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n");
-            set_bit(SVM_CPU_STATE_LMA_ENABLED,
-                    &v->arch.hvm_svm.cpu_state);
-            vmcb->efer |= (EFER_LMA | EFER_LME);
+            set_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
+            vmcb->efer |= EFER_LMA;
         }
 #endif  /* __x86_64__ */
 
@@ -1530,6 +1529,11 @@ static int svm_set_cr0(unsigned long val
     }
     else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
     {
+        if ( svm_long_mode_enabled(v) )
+        {
+            vmcb->efer &= ~EFER_LMA;
+            clear_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
+        }
         /* we should take care of this kind of situation */
         shadow_update_paging_modes(v);
         vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
diff -r d745f1420d5b -r a174f9787014 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Nov 07 10:18:50 2006 +0000
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Nov 07 10:19:20 2006 +0000
@@ -310,10 +310,6 @@ enum {
     SVM_CPU_STATE_LMA_ENABLED,
     SVM_CPU_STATE_ASSIST_ENABLED,
 };  
-    
-#define SVM_LONG_GUEST(ed)    \
-  (test_bit(SVM_CPU_STATE_LMA_ENABLED, &ed->arch.hvm_svm.cpu_state))
-
 
 /* 
  * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
