[Xen-devel] [PATCH 03/17] vmx: nest: wrapper for control update

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 03/17] vmx: nest: wrapper for control update
From: Qing He <qing.he@xxxxxxxxx>
Date: Thu, 22 Apr 2010 17:41:15 +0800
Cc: Qing He <qing.he@xxxxxxxxx>
Delivery-date: Thu, 22 Apr 2010 02:48:42 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <1271929289-18572-1-git-send-email-qing.he@xxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <1271929289-18572-1-git-send-email-qing.he@xxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx

In nested virtualization, the L0 controls may not be the same as the
controls in the physical VMCS.
Explicitly maintain the guest controls in variables and use wrappers
for control updates, rather than relying on the physical control values.

Signed-off-by: Qing He <qing.he@xxxxxxxxx>
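
A minimal, self-contained sketch of the pattern (struct vcpu_ctrls,
hw_vmwrite() and the constants below are illustrative stand-ins, not
Xen's actual types or API): callers modify a software copy of the
control and then push it to the VMCS through a single wrapper.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative VMCS field encoding and control bit. */
    #define CPU_BASED_VM_EXEC_CONTROL      0x4002UL
    #define CPU_BASED_VIRTUAL_INTR_PENDING (1u << 2)

    struct vcpu_ctrls {
        uint32_t exec_control;   /* authoritative software copy */
    };

    /* Stand-in for __vmwrite(): write a value into the physical VMCS. */
    static void hw_vmwrite(unsigned long field, uint32_t val)
    {
        printf("vmwrite %#lx <- %#x\n", field, (unsigned int)val);
    }

    /* Wrapper: the only place this control reaches the hardware. */
    static void update_cpu_exec_control(struct vcpu_ctrls *c)
    {
        hw_vmwrite(CPU_BASED_VM_EXEC_CONTROL, c->exec_control);
    }

    /* Callers flip bits in the cache, then sync through the wrapper. */
    static void enable_intr_window(struct vcpu_ctrls *c)
    {
        c->exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        update_cpu_exec_control(c);
    }

    int main(void)
    {
        struct vcpu_ctrls c = { .exec_control = 0 };
        enable_intr_window(&c);
        return 0;
    }

The software copy stays the authoritative view of the guest's controls,
so later nested-virtualization code can consult it instead of reading
the value back from the physical VMCS.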

---
 arch/x86/hvm/vmx/intr.c        |    4 +-
 arch/x86/hvm/vmx/vmcs.c        |    6 +--
 arch/x86/hvm/vmx/vmx.c         |   72 ++++++++++++++++++++++++-----------------
 include/asm-x86/hvm/vmx/vmcs.h |    1 
 include/asm-x86/hvm/vmx/vmx.h  |    3 +
 5 files changed, 52 insertions(+), 34 deletions(-)

diff -r fe49b7452637 -r a0bbec37b529 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Thu Apr 22 21:49:38 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/intr.c       Thu Apr 22 21:49:38 2010 +0800
@@ -106,7 +106,7 @@
     if ( !(*cpu_exec_control & ctl) )
     {
         *cpu_exec_control |= ctl;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
+        vmx_update_cpu_exec_control(v);
     }
 }
 
@@ -121,7 +121,7 @@
     if ( unlikely(v->arch.hvm_vcpu.single_step) )
     {
         v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         return;
     }
 
diff -r fe49b7452637 -r a0bbec37b529 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Thu Apr 22 21:49:38 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Thu Apr 22 21:49:38 2010 +0800
@@ -737,10 +737,10 @@
     __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
 #endif
 
-    __vmwrite(EXCEPTION_BITMAP,
-              HVM_TRAP_MASK
+    v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
               | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
-              | (1U << TRAP_no_device));
+              | (1U << TRAP_no_device);
+    __vmwrite(EXCEPTION_BITMAP, v->arch.hvm_vmx.exception_bitmap);
 
     v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
     hvm_update_guest_cr(v, 0);
diff -r fe49b7452637 -r a0bbec37b529 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Apr 22 21:49:38 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Apr 22 21:49:38 2010 +0800
@@ -390,6 +390,22 @@
 
 #endif /* __i386__ */
 
+void vmx_update_cpu_exec_control(struct vcpu *v)
+{
+    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+}
+
+void vmx_update_secondary_exec_control(struct vcpu *v)
+{
+    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+              v->arch.hvm_vmx.secondary_exec_control);
+}
+
+void vmx_update_exception_bitmap(struct vcpu *v)
+{
+    __vmwrite(EXCEPTION_BITMAP, v->arch.hvm_vmx.exception_bitmap);
+}
+
 static int vmx_guest_x86_mode(struct vcpu *v)
 {
     unsigned int cs_ar_bytes;
@@ -413,7 +429,7 @@
     /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
     v->arch.hvm_vcpu.flag_dr_dirty = 0;
     v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
 
     v->arch.guest_context.debugreg[0] = read_debugreg(0);
     v->arch.guest_context.debugreg[1] = read_debugreg(1);
@@ -627,7 +643,8 @@
 static void vmx_fpu_enter(struct vcpu *v)
 {
     setup_fpu(v);
-    __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
+    v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device);
+    vmx_update_exception_bitmap(v);
     v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
 }
@@ -653,7 +670,8 @@
     {
         v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
         __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
-        __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
+        v->arch.hvm_vmx.exception_bitmap |= (1u << TRAP_no_device);
+        vmx_update_exception_bitmap(v);
     }
 }
 
@@ -959,7 +977,7 @@
     v->arch.hvm_vmx.exec_control &= ~CPU_BASED_RDTSC_EXITING;
     if ( enable )
         v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
     vmx_vmcs_exit(v);
 }
 
@@ -1052,7 +1070,7 @@
 
 void vmx_update_debug_state(struct vcpu *v)
 {
-    unsigned long intercepts, mask;
+    unsigned long mask;
 
     ASSERT(v == current);
 
@@ -1060,12 +1078,11 @@
     if ( !cpu_has_monitor_trap_flag )
         mask |= 1u << TRAP_debug;
 
-    intercepts = __vmread(EXCEPTION_BITMAP);
     if ( v->arch.hvm_vcpu.debug_state_latch )
-        intercepts |= mask;
+        v->arch.hvm_vmx.exception_bitmap |= mask;
     else
-        intercepts &= ~mask;
-    __vmwrite(EXCEPTION_BITMAP, intercepts);
+        v->arch.hvm_vmx.exception_bitmap &= ~mask;
+    vmx_update_exception_bitmap(v);
 }
 
 static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
@@ -1092,7 +1109,7 @@
             v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
             if ( !hvm_paging_enabled(v) )
                 v->arch.hvm_vmx.exec_control |= cr3_ctls;
-            __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+            vmx_update_cpu_exec_control(v);
 
             /* Changing CR0.PE can change some bits in real CR4. */
             vmx_update_guest_cr(v, 4);
@@ -1127,7 +1144,8 @@
                     vmx_set_segment_register(v, s, &reg[s]);
                 v->arch.hvm_vcpu.hw_cr[4] |= X86_CR4_VME;
                 __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
-                __vmwrite(EXCEPTION_BITMAP, 0xffffffff);
+                v->arch.hvm_vmx.exception_bitmap = 0xffffffff;
+                vmx_update_exception_bitmap(v);
             }
             else 
             {
@@ -1139,11 +1157,11 @@
                     ((v->arch.hvm_vcpu.hw_cr[4] & ~X86_CR4_VME)
                      |(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VME));
                 __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
-                __vmwrite(EXCEPTION_BITMAP, 
-                          HVM_TRAP_MASK
+                v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
                           | (paging_mode_hap(v->domain) ?
                              0 : (1U << TRAP_page_fault))
-                          | (1U << TRAP_no_device));
+                          | (1U << TRAP_no_device);
+                vmx_update_exception_bitmap(v);
                 vmx_update_debug_state(v);
             }
         }
@@ -1556,7 +1574,7 @@
 
     /* Allow guest direct access to DR registers */
     v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
 }
 
 static void vmx_invlpg_intercept(unsigned long vaddr)
@@ -1949,18 +1967,18 @@
 void vmx_vlapic_msr_changed(struct vcpu *v)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
-    uint32_t ctl;
 
     if ( !cpu_has_vmx_virtualize_apic_accesses )
         return;
 
     vmx_vmcs_enter(v);
-    ctl  = __vmread(SECONDARY_VM_EXEC_CONTROL);
-    ctl &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+    v->arch.hvm_vmx.secondary_exec_control
+        &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
     if ( !vlapic_hw_disabled(vlapic) &&
          (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) )
-        ctl |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-    __vmwrite(SECONDARY_VM_EXEC_CONTROL, ctl);
+        v->arch.hvm_vmx.secondary_exec_control
+            |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+    vmx_update_secondary_exec_control(v);
     vmx_vmcs_exit(v);
 }
 
@@ -2497,14 +2515,12 @@
     case EXIT_REASON_PENDING_VIRT_INTR:
         /* Disable the interrupt window. */
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-                  v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         break;
     case EXIT_REASON_PENDING_VIRT_NMI:
         /* Disable the NMI window. */
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-                  v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         break;
     case EXIT_REASON_TASK_SWITCH: {
         const enum hvm_task_switch_reason reasons[] = {
@@ -2644,7 +2660,7 @@
 
     case EXIT_REASON_MONITOR_TRAP_FLAG:
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         if ( v->domain->debugger_attached && v->arch.hvm_vcpu.single_step )
             domain_pause_for_debugger();
         break;
@@ -2694,16 +2710,14 @@
             /* VPID was disabled: now enabled. */
             curr->arch.hvm_vmx.secondary_exec_control |=
                 SECONDARY_EXEC_ENABLE_VPID;
-            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-                      curr->arch.hvm_vmx.secondary_exec_control);
+            vmx_update_secondary_exec_control(curr);
         }
         else if ( old_asid && !new_asid )
         {
             /* VPID was enabled: now disabled. */
             curr->arch.hvm_vmx.secondary_exec_control &=
                 ~SECONDARY_EXEC_ENABLE_VPID;
-            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-                      curr->arch.hvm_vmx.secondary_exec_control);
+            vmx_update_secondary_exec_control(curr);
         }
     }
 
diff -r fe49b7452637 -r a0bbec37b529 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Apr 22 21:49:38 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Apr 22 21:49:38 2010 +0800
@@ -90,6 +90,7 @@
     /* Cache of cpu execution control. */
     u32                  exec_control;
     u32                  secondary_exec_control;
+    u32                  exception_bitmap;
 
     /* PMU */
     struct vpmu_struct   vpmu;
diff -r fe49b7452637 -r a0bbec37b529 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Thu Apr 22 21:49:38 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Thu Apr 22 21:49:38 2010 +0800
@@ -60,6 +60,9 @@
 void vmx_vlapic_msr_changed(struct vcpu *v);
 void vmx_realmode(struct cpu_user_regs *regs);
 void vmx_update_debug_state(struct vcpu *v);
+void vmx_update_cpu_exec_control(struct vcpu *v);
+void vmx_update_secondary_exec_control(struct vcpu *v);
+void vmx_update_exception_bitmap(struct vcpu *v);
 
 /*
  * Exit Reasons

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel