[Xen-devel] [PATCH 02/16] vmx: nest: wrapper for control update

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 02/16] vmx: nest: wrapper for control update
From: Qing He <qing.he@xxxxxxxxx>
Date: Wed, 8 Sep 2010 23:22:10 +0800
Cc: Qing He <qing.he@xxxxxxxxx>
In-reply-to: <1283959344-3837-1-git-send-email-qing.he@xxxxxxxxx>
References: <1283959344-3837-1-git-send-email-qing.he@xxxxxxxxx>
In nested virtualization, the L0 controls may not be the same as the
controls in the physical VMCS.
Explicitly maintain the guest controls in variables and use wrappers
for control updates, rather than relying on the physical control values.

Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>

---
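
Not part of the patch: a minimal standalone sketch of the cache-and-wrapper
pattern this change introduces, for illustration only. The struct, the
fake_vmwrite_exec_control() stub and update_cpu_exec_control() below are
simplified stand-ins for v->arch.hvm_vmx, __vmwrite() and
vmx_update_cpu_exec_control(), not the real Xen types or functions.

    /* Sketch: callers modify a software cache of the control field and
     * then call a wrapper that pushes it to the (virtual) VMCS, instead
     * of writing the hardware field directly. */
    #include <stdint.h>
    #include <stdio.h>

    #define CPU_BASED_MOV_DR_EXITING (1u << 23)   /* example control bit */

    struct fake_vmx {
        uint32_t exec_control;    /* software cache of the control field */
    };

    /* Stand-in for __vmwrite(CPU_BASED_VM_EXEC_CONTROL, ...). */
    static void fake_vmwrite_exec_control(uint32_t val)
    {
        printf("VMWRITE CPU_BASED_VM_EXEC_CONTROL = %#x\n", val);
    }

    /* The wrapper: always writes the cached value, never reads back the
     * physical VMCS, so L1/L2 values can diverge from the hardware copy. */
    static void update_cpu_exec_control(struct fake_vmx *vmx)
    {
        fake_vmwrite_exec_control(vmx->exec_control);
    }

    int main(void)
    {
        struct fake_vmx vmx = { .exec_control = CPU_BASED_MOV_DR_EXITING };

        /* Caller pattern used throughout the patch: update the cache,
         * then call the wrapper. */
        vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
        update_cpu_exec_control(&vmx);
        return 0;
    }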

diff -r 905ca9cc0596 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c       Wed Aug 04 16:30:40 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/intr.c       Thu Aug 05 15:32:24 2010 +0800
@@ -106,7 +106,7 @@
     if ( !(*cpu_exec_control & ctl) )
     {
         *cpu_exec_control |= ctl;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
+        vmx_update_cpu_exec_control(v);
     }
 }
 
@@ -121,7 +121,7 @@
     if ( unlikely(v->arch.hvm_vcpu.single_step) )
     {
         v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         return;
     }
 
diff -r 905ca9cc0596 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Aug 04 16:30:40 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Thu Aug 05 15:32:24 2010 +0800
@@ -839,10 +839,10 @@
     __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
 #endif
 
-    __vmwrite(EXCEPTION_BITMAP,
-              HVM_TRAP_MASK
+    v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
               | (paging_mode_hap(d) ? 0 : (1U << TRAP_page_fault))
-              | (1U << TRAP_no_device));
+              | (1U << TRAP_no_device);
+    vmx_update_exception_bitmap(v);
 
     v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
     hvm_update_guest_cr(v, 0);
diff -r 905ca9cc0596 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Aug 04 16:30:40 2010 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Aug 05 15:32:24 2010 +0800
@@ -385,6 +385,22 @@
 
 #endif /* __i386__ */
 
+void vmx_update_cpu_exec_control(struct vcpu *v)
+{
+    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+}
+
+void vmx_update_secondary_exec_control(struct vcpu *v)
+{
+    __vmwrite(SECONDARY_VM_EXEC_CONTROL,
+              v->arch.hvm_vmx.secondary_exec_control);
+}
+
+void vmx_update_exception_bitmap(struct vcpu *v)
+{
+    __vmwrite(EXCEPTION_BITMAP, v->arch.hvm_vmx.exception_bitmap);
+}
+
 static int vmx_guest_x86_mode(struct vcpu *v)
 {
     unsigned int cs_ar_bytes;
@@ -408,7 +424,7 @@
     /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
     v->arch.hvm_vcpu.flag_dr_dirty = 0;
     v->arch.hvm_vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
 
     v->arch.guest_context.debugreg[0] = read_debugreg(0);
     v->arch.guest_context.debugreg[1] = read_debugreg(1);
@@ -622,7 +638,8 @@
 static void vmx_fpu_enter(struct vcpu *v)
 {
     setup_fpu(v);
-    __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
+    v->arch.hvm_vmx.exception_bitmap &= ~(1u << TRAP_no_device);
+    vmx_update_exception_bitmap(v);
     v->arch.hvm_vmx.host_cr0 &= ~X86_CR0_TS;
     __vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);
 }
@@ -648,7 +665,8 @@
     {
         v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
         __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
-        __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
+        v->arch.hvm_vmx.exception_bitmap |= (1u << TRAP_no_device);
+        vmx_update_exception_bitmap(v);
     }
 }
 
@@ -954,7 +972,7 @@
     v->arch.hvm_vmx.exec_control &= ~CPU_BASED_RDTSC_EXITING;
     if ( enable )
         v->arch.hvm_vmx.exec_control |= CPU_BASED_RDTSC_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
     vmx_vmcs_exit(v);
 }
 
@@ -1047,7 +1065,7 @@
 
 void vmx_update_debug_state(struct vcpu *v)
 {
-    unsigned long intercepts, mask;
+    unsigned long mask;
 
     ASSERT(v == current);
 
@@ -1055,12 +1073,11 @@
     if ( !cpu_has_monitor_trap_flag )
         mask |= 1u << TRAP_debug;
 
-    intercepts = __vmread(EXCEPTION_BITMAP);
     if ( v->arch.hvm_vcpu.debug_state_latch )
-        intercepts |= mask;
+        v->arch.hvm_vmx.exception_bitmap |= mask;
     else
-        intercepts &= ~mask;
-    __vmwrite(EXCEPTION_BITMAP, intercepts);
+        v->arch.hvm_vmx.exception_bitmap &= ~mask;
+    vmx_update_exception_bitmap(v);
 }
 
 static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
@@ -1087,7 +1104,7 @@
             v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
             if ( !hvm_paging_enabled(v) )
                 v->arch.hvm_vmx.exec_control |= cr3_ctls;
-            __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+            vmx_update_cpu_exec_control(v);
 
             /* Changing CR0.PE can change some bits in real CR4. */
             vmx_update_guest_cr(v, 4);
@@ -1122,7 +1139,8 @@
                     vmx_set_segment_register(v, s, &reg[s]);
                 v->arch.hvm_vcpu.hw_cr[4] |= X86_CR4_VME;
                 __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
-                __vmwrite(EXCEPTION_BITMAP, 0xffffffff);
+                v->arch.hvm_vmx.exception_bitmap = 0xffffffff;
+                vmx_update_exception_bitmap(v);
             }
             else 
             {
@@ -1134,11 +1152,11 @@
                     ((v->arch.hvm_vcpu.hw_cr[4] & ~X86_CR4_VME)
                      |(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_VME));
                 __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
-                __vmwrite(EXCEPTION_BITMAP, 
-                          HVM_TRAP_MASK
+                v->arch.hvm_vmx.exception_bitmap = HVM_TRAP_MASK
                           | (paging_mode_hap(v->domain) ?
                              0 : (1U << TRAP_page_fault))
-                          | (1U << TRAP_no_device));
+                          | (1U << TRAP_no_device);
+                vmx_update_exception_bitmap(v);
                 vmx_update_debug_state(v);
             }
         }
@@ -1544,7 +1562,7 @@
 
     /* Allow guest direct access to DR registers */
     v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MOV_DR_EXITING;
-    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+    vmx_update_cpu_exec_control(v);
 }
 
 static void vmx_invlpg_intercept(unsigned long vaddr)
@@ -1928,18 +1946,18 @@
 void vmx_vlapic_msr_changed(struct vcpu *v)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
-    uint32_t ctl;
 
     if ( !cpu_has_vmx_virtualize_apic_accesses )
         return;
 
     vmx_vmcs_enter(v);
-    ctl  = __vmread(SECONDARY_VM_EXEC_CONTROL);
-    ctl &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+    v->arch.hvm_vmx.secondary_exec_control
+        &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
     if ( !vlapic_hw_disabled(vlapic) &&
          (vlapic_base_address(vlapic) == APIC_DEFAULT_PHYS_BASE) )
-        ctl |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-    __vmwrite(SECONDARY_VM_EXEC_CONTROL, ctl);
+        v->arch.hvm_vmx.secondary_exec_control
+            |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+    vmx_update_secondary_exec_control(v);
     vmx_vmcs_exit(v);
 }
 
@@ -2469,14 +2487,12 @@
     case EXIT_REASON_PENDING_VIRT_INTR:
         /* Disable the interrupt window. */
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-                  v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         break;
     case EXIT_REASON_PENDING_VIRT_NMI:
         /* Disable the NMI window. */
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
-                  v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         break;
     case EXIT_REASON_TASK_SWITCH: {
         const enum hvm_task_switch_reason reasons[] = {
@@ -2627,7 +2643,7 @@
 
     case EXIT_REASON_MONITOR_TRAP_FLAG:
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
-        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
+        vmx_update_cpu_exec_control(v);
         if ( v->domain->debugger_attached && v->arch.hvm_vcpu.single_step )
             domain_pause_for_debugger();
         break;
@@ -2677,16 +2693,14 @@
             /* VPID was disabled: now enabled. */
             curr->arch.hvm_vmx.secondary_exec_control |=
                 SECONDARY_EXEC_ENABLE_VPID;
-            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-                      curr->arch.hvm_vmx.secondary_exec_control);
+            vmx_update_secondary_exec_control(curr);
         }
         else if ( old_asid && !new_asid )
         {
             /* VPID was enabled: now disabled. */
             curr->arch.hvm_vmx.secondary_exec_control &=
                 ~SECONDARY_EXEC_ENABLE_VPID;
-            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
-                      curr->arch.hvm_vmx.secondary_exec_control);
+            vmx_update_secondary_exec_control(curr);
         }
     }
 
diff -r 905ca9cc0596 xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h        Wed Aug 04 16:30:40 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h        Thu Aug 05 15:32:24 2010 +0800
@@ -97,6 +97,7 @@
     /* Cache of cpu execution control. */
     u32                  exec_control;
     u32                  secondary_exec_control;
+    u32                  exception_bitmap;
 
 #ifdef __x86_64__
     struct vmx_msr_state msr_state;
diff -r 905ca9cc0596 xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Wed Aug 04 16:30:40 2010 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Thu Aug 05 15:32:24 2010 +0800
@@ -60,6 +60,9 @@
 void vmx_vlapic_msr_changed(struct vcpu *v);
 void vmx_realmode(struct cpu_user_regs *regs);
 void vmx_update_debug_state(struct vcpu *v);
+void vmx_update_cpu_exec_control(struct vcpu *v);
+void vmx_update_secondary_exec_control(struct vcpu *v);
+void vmx_update_exception_bitmap(struct vcpu *v);
 
 /*
  * Exit Reasons

