[Xen-changelog] Removes redundant/unnecessary __vmread/__vmwrite.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Removes redundant/unnecessary __vmread/__vmwrite.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 21 Sep 2005 10:14:11 +0000
Delivery-date: Wed, 21 Sep 2005 10:12:43 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID b8537442f3d6ef953bc7b0bd2b69c674e0038caf
# Parent  21e7935b20252fd9bc7853bb2bdd8269643b615a
Removes redundant/unnecessary __vmread/__vmwrite.

Signed-off-by: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
Signed-off-by: Edwin Zhai <edwin.zhai@xxxxxxxxx>
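
For anyone skimming this changeset: the substance of the patch is that three frequently accessed VMCS fields (GUEST_CR0, CR0_READ_SHADOW and CPU_BASED_VM_EXEC_CONTROL) gain per-vcpu software copies. __vmwrite() mirrors writes of those fields into the copies, and the new __vmread_vcpu() returns the copy, so hot paths such as vmx_stts() and the CR-access handlers no longer need a VMREAD. The stand-alone sketch below models only that write-through idea; every name in it is hypothetical rather than a Xen identifier, and it illustrates the scheme, not the implementation (which follows in the diff). The lazy fallback for copies that were never filled in is sketched separately after the diff.

/*
 * Illustrative model of the write-through caching scheme (hypothetical
 * names).  Writes to a cached field go to the "hardware" VMCS and are
 * mirrored into a per-vcpu copy; reads are then served from memory.
 */
#include <stdio.h>

enum cached_field { F_GUEST_CR0, F_CR0_READ_SHADOW, F_EXEC_CONTROL, F_NUM };

static unsigned long fake_vmcs[F_NUM];   /* stands in for the real VMCS */

struct vcpu_model {
    unsigned long copy[F_NUM];           /* per-vcpu software copies */
};

/* Modelled __vmwrite(): write the VMCS, then mirror into the copy. */
static void vmwrite_model(struct vcpu_model *v, enum cached_field f,
                          unsigned long value)
{
    fake_vmcs[f] = value;
    v->copy[f] = value;
}

/* Modelled __vmread_vcpu(): no VMREAD, just return the copy. */
static unsigned long vmread_vcpu_model(struct vcpu_model *v,
                                       enum cached_field f)
{
    return v->copy[f];
}

int main(void)
{
    struct vcpu_model v = { {0} };

    vmwrite_model(&v, F_GUEST_CR0, 0x80000021UL);
    printf("GUEST_CR0 via copy: %#lx\n", vmread_vcpu_model(&v, F_GUEST_CR0));
    return 0;
}

In the patch itself the copies live in arch_vmx_struct (cpu_cr0, cpu_shadow_cr0 and cpu_based_exec_control, added in vmx_vmcs.h below), and __vmwrite() mirrors only those three fields.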

diff -r 21e7935b2025 -r b8537442f3d6 xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Wed Sep 21 09:58:15 2005
+++ b/xen/arch/x86/vmx.c        Wed Sep 21 10:11:02 2005
@@ -377,12 +377,13 @@
 
 static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
 {
-    unsigned long eip;
     unsigned long gpa; /* FIXME: PAE */
     int result;
 
-#if VMX_DEBUG
+#if 0 /* keep for debugging */
     {
+        unsigned long eip;
+
         __vmread(GUEST_RIP, &eip);
         VMX_DBG_LOG(DBG_LEVEL_VMMU, 
                     "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
@@ -429,9 +430,9 @@
         
     clts();
     setup_fpu(current);
-    __vmread(CR0_READ_SHADOW, &cr0);
+    __vmread_vcpu(CR0_READ_SHADOW, &cr0);
     if (!(cr0 & X86_CR0_TS)) {
-        __vmread(GUEST_CR0, &cr0);
+        __vmread_vcpu(GUEST_CR0, &cr0);
         cr0 &= ~X86_CR0_TS;
         __vmwrite(GUEST_CR0, cr0);
     }
@@ -1129,9 +1130,7 @@
                 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
             }
         }
-        __vmread(GUEST_RIP, &eip);
-        VMX_DBG_LOG(DBG_LEVEL_1,
-                    "Disabling CR0.PE at %%eip 0x%lx\n", eip);
+
         if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
             set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
             __vmread(GUEST_RIP, &eip);
@@ -1370,17 +1369,17 @@
         clts();
         setup_fpu(current);
 
-        __vmread(GUEST_CR0, &value);
+        __vmread_vcpu(GUEST_CR0, &value);
         value &= ~X86_CR0_TS; /* clear TS */
         __vmwrite(GUEST_CR0, value);
 
-        __vmread(CR0_READ_SHADOW, &value);
+        __vmread_vcpu(CR0_READ_SHADOW, &value);
         value &= ~X86_CR0_TS; /* clear TS */
         __vmwrite(CR0_READ_SHADOW, value);
         break;
     case TYPE_LMSW:
         TRACE_VMEXIT(1,TYPE_LMSW);
-        __vmread(CR0_READ_SHADOW, &value);
+        __vmread_vcpu(CR0_READ_SHADOW, &value);
         value = (value & ~0xF) |
             (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
         return vmx_set_cr0(value);
@@ -1456,16 +1455,12 @@
                 (unsigned long)regs->edx);
 }
 
+volatile unsigned long do_hlt_count;
 /*
  * Need to use this exit to reschedule
  */
-static inline void vmx_vmexit_do_hlt(void)
-{
-#if VMX_DEBUG
-    unsigned long eip;
-    __vmread(GUEST_RIP, &eip);
-#endif
-    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%lx", eip);
+void vmx_vmexit_do_hlt(void)
+{
     raise_softirq(SCHEDULE_SOFTIRQ);
 }
 
@@ -1516,13 +1511,9 @@
     }
 }
 
+volatile unsigned long do_mwait_count;
 static inline void vmx_vmexit_do_mwait(void)
 {
-#if VMX_DEBUG
-    unsigned long eip;
-    __vmread(GUEST_RIP, &eip);
-#endif
-    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%lx", eip);
     raise_softirq(SCHEDULE_SOFTIRQ);
 }
 
@@ -1631,9 +1622,13 @@
         return;
     }
 
-    __vmread(GUEST_RIP, &eip);
-    TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
-    TRACE_VMEXIT(0,exit_reason);
+#ifdef TRACE_BUFFER
+    {
+        __vmread(GUEST_RIP, &eip);
+        TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
+        TRACE_VMEXIT(0,exit_reason);
+    }
+#endif
 
     switch (exit_reason) {
     case EXIT_REASON_EXCEPTION_NMI:
diff -r 21e7935b2025 -r b8537442f3d6 xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c     Wed Sep 21 09:58:15 2005
+++ b/xen/arch/x86/vmx_io.c     Wed Sep 21 10:11:02 2005
@@ -891,7 +891,7 @@
     struct vcpu *v = current;
 
     highest_vector = find_highest_pending_irq(v, &intr_type);
-    __vmread(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);
+    __vmread_vcpu(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);
 
     if (highest_vector == -1) {
         disable_irq_window(cpu_exec_control);
@@ -948,14 +948,6 @@
 void vmx_do_resume(struct vcpu *d) 
 {
     vmx_stts();
-    if ( vmx_paging_enabled(d) )
-        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
-    else
-        // paging is not enabled in the guest
-        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
-
-    __vmwrite(HOST_CR3, pagetable_get_paddr(d->arch.monitor_table));
-    __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
 
     if (event_pending(d)) {
         vmx_check_events(d);
diff -r 21e7935b2025 -r b8537442f3d6 xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   Wed Sep 21 09:58:15 2005
+++ b/xen/arch/x86/vmx_vmcs.c   Wed Sep 21 10:11:02 2005
@@ -67,9 +67,6 @@
 
     error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, 
                        MONITOR_PIN_BASED_EXEC_CONTROLS);
-
-    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, 
-                       MONITOR_CPU_BASED_EXEC_CONTROLS);
 
     error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);
 
@@ -117,12 +114,6 @@
     unsigned long fs_base; 
     unsigned long gs_base; 
 #endif 
-
-    /* control registers */
-    unsigned long cr3;
-    unsigned long cr0;
-    unsigned long cr4;
-    unsigned long dr7;
 };
 
 #define round_pgdown(_p) ((_p)&PAGE_MASK) /* copied from domain.c */
@@ -217,8 +208,32 @@
 /* Update CR3, GDT, LDT, TR */
     unsigned int  error = 0;
     unsigned long pfn = 0;
+    unsigned long cr0, cr4;
     struct pfn_info *page;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
+
+    error |= __vmwrite(GUEST_CR0, cr0);
+    cr0 &= ~X86_CR0_PG;
+    error |= __vmwrite(CR0_READ_SHADOW, cr0);
+    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, 
+                       MONITOR_CPU_BASED_EXEC_CONTROLS);
+
+    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );
+
+#ifdef __x86_64__
+    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
+#else
+    error |= __vmwrite(GUEST_CR4, cr4);
+#endif
+
+#ifdef __x86_64__
+    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
+#else
+    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE);
+#endif
+    error |= __vmwrite(CR4_READ_SHADOW, cr4);
 
     vmx_stts();
 
@@ -254,7 +269,7 @@
     int error = 0;
     union vmcs_arbytes arbytes;
     unsigned long dr7;
-    unsigned long eflags, shadow_cr;
+    unsigned long eflags;
 
     /* MSR */
     error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
@@ -326,27 +341,7 @@
 
     arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
     error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
-
-    error |= __vmwrite(GUEST_CR0, host_env->cr0); /* same CR0 */
-
-    /* Initally PG, PE are not set*/
-    shadow_cr = host_env->cr0;
-    shadow_cr &= ~X86_CR0_PG;
-    error |= __vmwrite(CR0_READ_SHADOW, shadow_cr);
     /* CR3 is set in vmx_final_setup_guest */
-#ifdef __x86_64__
-    error |= __vmwrite(GUEST_CR4, host_env->cr4 & ~X86_CR4_PSE);
-#else
-    error |= __vmwrite(GUEST_CR4, host_env->cr4);
-#endif
-    shadow_cr = host_env->cr4;
-
-#ifdef __x86_64__
-    shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
-#else
-    shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE);
-#endif
-    error |= __vmwrite(CR4_READ_SHADOW, shadow_cr);
 
     error |= __vmwrite(GUEST_ES_BASE, host_env->ds_base);
     error |= __vmwrite(GUEST_CS_BASE, host_env->cs_base);
@@ -403,12 +398,10 @@
     host_env->cs_base = 0;
 
     __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : );
-    host_env->cr0 = crn;
     error |= __vmwrite(HOST_CR0, crn); /* same CR0 */
 
     /* CR3 is set in vmx_final_setup_hostos */
     __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : ); 
-    host_env->cr4 = crn;
     error |= __vmwrite(HOST_CR4, crn);
 
     error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
diff -r 21e7935b2025 -r b8537442f3d6 xen/include/asm-x86/vmx.h
--- a/xen/include/asm-x86/vmx.h Wed Sep 21 09:58:15 2005
+++ b/xen/include/asm-x86/vmx.h Wed Sep 21 10:11:02 2005
@@ -314,6 +314,57 @@
     return 0;
 }
 
+
+static always_inline void __vmwrite_vcpu(unsigned long field, unsigned long value)
+{
+    struct vcpu *v = current;
+
+    switch(field) {
+    case CR0_READ_SHADOW:
+       v->arch.arch_vmx.cpu_shadow_cr0 = value;
+       break;
+    case GUEST_CR0:
+       v->arch.arch_vmx.cpu_cr0 = value;
+       break;
+    case CPU_BASED_VM_EXEC_CONTROL:
+       v->arch.arch_vmx.cpu_based_exec_control = value;
+       break;
+    default:
+       printk("__vmwrite_vcpu: invalid field %lx\n", field);
+       break;
+    }
+}
+
+static always_inline void __vmread_vcpu(unsigned long field, unsigned long *value)
+{
+    struct vcpu *v = current;
+
+    switch(field) {
+    case CR0_READ_SHADOW:
+       *value = v->arch.arch_vmx.cpu_shadow_cr0;
+       break;
+    case GUEST_CR0:
+       *value = v->arch.arch_vmx.cpu_cr0;
+       break;
+    case CPU_BASED_VM_EXEC_CONTROL:
+       *value = v->arch.arch_vmx.cpu_based_exec_control;
+       break;
+    default:
+       printk("__vmread_vcpu: invalid field %lx\n", field);
+       break;
+    }
+
+   /*
+    * __vmwrite() can be used for a non-current vcpu, and it is possible
+    * that the vcpu field is not initialized in that case.
+    *
+    */
+    if (!*value) {
+       __vmread(field, value);
+       __vmwrite_vcpu(field, *value);
+    }
+}
+
 static inline int __vmwrite (unsigned long field, unsigned long value)
 {
     unsigned long eflags;
@@ -326,6 +377,15 @@
     __save_flags(eflags);
     if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
         return -1;
+
+    switch(field) {
+    case CR0_READ_SHADOW:
+    case GUEST_CR0:
+    case CPU_BASED_VM_EXEC_CONTROL:
+       __vmwrite_vcpu(field, value);
+       break;
+    }
+
     return 0;
 }
 
@@ -379,11 +439,12 @@
 {
     unsigned long cr0;
 
-    __vmread(GUEST_CR0, &cr0);
-    if (!(cr0 & X86_CR0_TS))
+    __vmread_vcpu(GUEST_CR0, &cr0);
+    if (!(cr0 & X86_CR0_TS)) {
         __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
-
-    __vmread(CR0_READ_SHADOW, &cr0);
+    }
+
+    __vmread_vcpu(CR0_READ_SHADOW, &cr0);
     if (!(cr0 & X86_CR0_TS))
        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
 }
@@ -393,7 +454,7 @@
 {
     unsigned long cr0;
 
-    __vmread(CR0_READ_SHADOW, &cr0);
+    __vmread_vcpu(CR0_READ_SHADOW, &cr0);
     return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
 }
 
diff -r 21e7935b2025 -r b8537442f3d6 xen/include/asm-x86/vmx_vmcs.h
--- a/xen/include/asm-x86/vmx_vmcs.h    Wed Sep 21 09:58:15 2005
+++ b/xen/include/asm-x86/vmx_vmcs.h    Wed Sep 21 10:11:02 2005
@@ -74,9 +74,12 @@
 struct arch_vmx_struct {
     struct vmcs_struct      *vmcs;  /* VMCS pointer in virtual */
     unsigned long           flags;  /* VMCS flags */
+    unsigned long           cpu_cr0; /* copy of guest CR0 */
+    unsigned long           cpu_shadow_cr0; /* copy of guest read shadow CR0 */
     unsigned long           cpu_cr2; /* save CR2 */
     unsigned long           cpu_cr3;
     unsigned long           cpu_state;
+    unsigned long           cpu_based_exec_control;
     struct msr_state        msr_content;
     void                   *io_bitmap_a, *io_bitmap_b;
 };
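
The comment inside the new __vmread_vcpu() above notes that __vmwrite() can be used while some other vcpu is current, in which case the target vcpu's software copy is never filled in; __vmread_vcpu() therefore treats a zero copy as uninitialized, falls back to a real __vmread(), and then populates the copy. A minimal stand-alone model of that fallback follows; the names are hypothetical and it only illustrates the idea, not the Xen code.

/*
 * Hypothetical model of the lazy fallback: a VMCS field written while
 * this vcpu was not current never reached its software copy, so the
 * copy is still zero and the first read re-reads the (modelled) VMCS.
 */
#include <stdio.h>

struct vcpu_sketch {
    unsigned long vmcs_guest_cr0;   /* stands in for the vcpu's VMCS field */
    unsigned long cpu_cr0;          /* per-vcpu software copy, starts at 0 */
};

static unsigned long read_guest_cr0(struct vcpu_sketch *v)
{
    if (!v->cpu_cr0) {                      /* copy never initialized */
        v->cpu_cr0 = v->vmcs_guest_cr0;     /* fall back to the VMCS... */
    }                                       /* ...and repopulate the copy */
    return v->cpu_cr0;
}

int main(void)
{
    struct vcpu_sketch b = { 0, 0 };

    /* An earlier path wrote this vcpu's VMCS directly, bypassing the copy. */
    b.vmcs_guest_cr0 = 0x80000021UL;

    printf("first read:  %#lx\n", read_guest_cr0(&b));  /* falls back */
    printf("second read: %#lx\n", read_guest_cr0(&b));  /* served from copy */
    return 0;
}

This mirrors the "if (!*value)" test in __vmread_vcpu() above.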

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
