WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 

[Xen-changelog] [PATCH] [PATCH] Assorted VMX patches

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [PATCH] [PATCH] Assorted VMX patches
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Mon, 25 Apr 2005 07:55:36 +0000
Delivery-date: Mon, 25 Apr 2005 08:04:02 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
ChangeSet 1.1379, 2005/04/25 08:55:36+01:00, leendert@xxxxxxxxxxxxxx

        [PATCH] [PATCH] Assorted VMX patches
        
        This patch contains the following VMX patches:
        
        - ioemu: Update the address when doing a stosb/stosw/stosl. Without
          this patch stosb writes req->count times at the same location
          req->addr. Not exactly the intended semantics (see the sketch below).
        
        - xc_ptrace.c: Use the page_array to refer to partition memory. This
          allows you to use gdbserver on a VMX partition (and presumably
          partitions that use shadow page tables).
        
        - dom0_ops.c: Preserve ECF_VMX_GUEST flag for gdbserver. Without it you
          cannot (re)set the VMX domain state.
        
        - vmx.c: Added support for lmsw. Unmodified FreeBSD uses this when
          running inside a VMX partition.
        
        Signed-Off-By: Leendert van Doorn <leendert@xxxxxxxxxxxxxx>
        
        ===== tools/ioemu/iodev/cpu.cc 1.8 vs edited =====
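
For reference, the addressing fix in the ioemu hunk below implements the usual
rep stosb/stosw/stosl semantics: the destination advances by the element size
on every iteration, forwards or backwards depending on EFLAGS.DF (which is what
the "sign" factor encodes). A minimal, self-contained C sketch of that behaviour
follows; struct mmio_req, write_phys() and emulate_rep_stos() are illustrative
stand-ins, not the actual ioemu code:

    /* Sketch only: mmio_req mirrors the addr/size/count/data fields used in
     * the patch; write_phys() stands in for BX_MEM_WRITE_PHYSICAL(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct mmio_req {
        uint64_t addr;   /* starting guest-physical address                    */
        uint64_t size;   /* bytes per element: 1 (stosb), 2 (stosw), 4 (stosl) */
        uint64_t count;  /* repetition count (ECX)                             */
        uint64_t data;   /* value being stored                                 */
        int      df;     /* EFLAGS.DF: 0 = forward, 1 = backward               */
    };

    static uint8_t fake_ram[64];                 /* stand-in for guest memory */

    static void write_phys(uint64_t addr, uint64_t size, const void *val)
    {
        memcpy(&fake_ram[addr], val, size);
    }

    static void emulate_rep_stos(const struct mmio_req *req)
    {
        int64_t sign = req->df ? -1 : 1;         /* direction flag picks the sign */
        uint64_t i;

        for (i = 0; i < req->count; i++) {
            /* Advance the destination by "size" each iteration instead of
             * rewriting the same location req->count times. */
            uint64_t addr = req->addr + (uint64_t)(sign * (int64_t)(i * req->size));
            write_phys(addr, req->size, &req->data);
        }
    }

    int main(void)
    {
        struct mmio_req req = { .addr = 0, .size = 1, .count = 8, .data = 0xAB, .df = 0 };
        emulate_rep_stos(&req);
        printf("bytes 0..7: %02x .. %02x\n", fake_ram[0], fake_ram[7]);
        return 0;
    }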



 tools/ioemu/iodev/cpu.cc  |    6 -
 tools/libxc/xc_ptrace.c   |   28 ++++++-
 xen/arch/x86/dom0_ops.c   |    4 +
 xen/arch/x86/vmx.c        |  166 ++++++++++++++++++++++++----------------------
 xen/include/asm-x86/vmx.h |    2 
 5 files changed, 121 insertions(+), 85 deletions(-)


diff -Nru a/tools/ioemu/iodev/cpu.cc b/tools/ioemu/iodev/cpu.cc
--- a/tools/ioemu/iodev/cpu.cc  2005-04-25 04:04:22 -04:00
+++ b/tools/ioemu/iodev/cpu.cc  2005-04-25 04:04:22 -04:00
@@ -128,15 +128,13 @@
                if (!req->pdata_valid) {
                        if(req->dir == IOREQ_READ){//read
                                //BX_INFO(("mmio[value]: <READ> addr:%llx, value:%llx, size: %llx, count: %llx\n", req->addr, req->u.data, req->size, req->count));
-
                                for (i = 0; i < req->count; i++) {
-                                       BX_MEM_READ_PHYSICAL(req->addr, req->size, &req->u.data);
+                                       BX_MEM_READ_PHYSICAL(req->addr + (sign * i * req->size), req->size, &req->u.data);
                                }
                        } else if(req->dir == IOREQ_WRITE) {//write
                                //BX_INFO(("mmio[value]: <WRITE> addr:%llx, value:%llx, size: %llx, count: %llx\n", req->addr, req->u.data, req->size, req->count));
-
                                for (i = 0; i < req->count; i++) {
-                                       BX_MEM_WRITE_PHYSICAL(req->addr, req->size, &req->u.data);
+                                       BX_MEM_WRITE_PHYSICAL(req->addr + (sign * i * req->size), req->size, &req->u.data);
                                }
                        }
                } else {
diff -Nru a/tools/libxc/xc_ptrace.c b/tools/libxc/xc_ptrace.c
--- a/tools/libxc/xc_ptrace.c   2005-04-25 04:04:22 -04:00
+++ b/tools/libxc/xc_ptrace.c   2005-04-25 04:04:22 -04:00
@@ -75,7 +75,7 @@
        int retval = xc_domain_getfullinfo(xc_handle, domid, cpu, NULL, &ctxt[cpu]); \
        if (retval) \
            goto error_out; \
-       cr3[cpu] = ctxt[cpu].pt_base; \
+       cr3[cpu] = ctxt[cpu].pt_base; /* physical address */ \
        regs_valid[cpu] = 1; \
     } \
 
@@ -128,11 +128,12 @@
 
 
 static int                      xc_handle;
+static long                    nr_pages = 0;
+unsigned long                  *page_array = NULL;
 static int                      regs_valid[MAX_VIRT_CPUS];
 static unsigned long            cr3[MAX_VIRT_CPUS];
 static full_execution_context_t ctxt[MAX_VIRT_CPUS];
 
-
 /* --------------------- */
 
 static void *
@@ -140,6 +141,7 @@
 {
     unsigned long pde, page;
     unsigned long va = (unsigned long)guest_va;
+    long npgs = xc_get_tot_pages(xc_handle, domid);
 
     static unsigned long  cr3_phys[MAX_VIRT_CPUS];
     static unsigned long *cr3_virt[MAX_VIRT_CPUS];
@@ -150,6 +152,21 @@
     
     static int            prev_perm[MAX_VIRT_CPUS];
 
+    if (nr_pages != npgs) {
+       if (nr_pages > 0)
+           free(page_array);
+       nr_pages = npgs;
+       if ((page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
+           printf("Could not allocate memory\n");
+           goto error_out;
+       }
+
+       if (xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages) {
+               printf("Could not get the page frame list\n");
+               goto error_out;
+       }
+    }
+
     FETCH_REGS(cpu);
 
     if (cr3[cpu] != cr3_phys[cpu]) 
@@ -162,8 +179,9 @@
                                             cr3_phys[cpu] >> PAGE_SHIFT)) == NULL)
            goto error_out;
     } 
-    if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0)
+    if ((pde = cr3_virt[cpu][vtopdi(va)]) == 0) /* logical address */
        goto error_out;
+    pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
     if (pde != pde_phys[cpu]) 
     {
        pde_phys[cpu] = pde;
@@ -174,8 +192,9 @@
                                             pde_phys[cpu] >> PAGE_SHIFT)) == NULL)
            goto error_out;
     }
-    if ((page = pde_virt[cpu][vtopti(va)]) == 0)
+    if ((page = pde_virt[cpu][vtopti(va)]) == 0) /* logical address */
        goto error_out;
+    page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
     if (page != page_phys[cpu] || perm != prev_perm[cpu]) 
     {
        page_phys[cpu] = page;
@@ -330,6 +349,7 @@
            perror("dom0 op failed");
            goto error_out;
        }
+       /* FALLTHROUGH */
     case PTRACE_CONT:
     case PTRACE_DETACH:
        if (request != PTRACE_SINGLESTEP) {
diff -Nru a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c   2005-04-25 04:04:22 -04:00
+++ b/xen/arch/x86/dom0_ops.c   2005-04-25 04:04:22 -04:00
@@ -402,6 +402,10 @@
         c->flags |= ECF_I387_VALID;
     if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) )
         c->flags |= ECF_IN_KERNEL;
+#ifdef CONFIG_VMX
+    if (VMX_DOMAIN(ed))
+        c->flags |= ECF_VMX_GUEST;
+#endif
     memcpy(&c->fpu_ctxt,
            &ed->arch.i387,
            sizeof(ed->arch.i387));
diff -Nru a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        2005-04-25 04:04:22 -04:00
+++ b/xen/arch/x86/vmx.c        2005-04-25 04:04:22 -04:00
@@ -640,6 +640,84 @@
     return 0;
 }
 
+static int vmx_set_cr0(unsigned long value)
+{
+    struct exec_domain *d = current;
+    unsigned long old_base_mfn, mfn;
+    unsigned long eip;
+
+    /* 
+     * CR0: We don't want to lose PE and PG.
+     */
+    __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
+
+    if (value & (X86_CR0_PE | X86_CR0_PG) &&
+        !test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
+        /*
+         * Enable paging
+         */
+        set_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state);
+        /*
+         * The guest CR3 must be pointing to the guest physical.
+         */
+        if ( !VALID_MFN(mfn = phys_to_machine_mapping(
+                            d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
+             !get_page(pfn_to_page(mfn), d->domain) )
+        {
+            VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx",
+                        d->arch.arch_vmx.cpu_cr3);
+            domain_crash_synchronous(); /* need to take a clean path */
+        }
+        old_base_mfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
+        if (old_base_mfn)
+            put_page(pfn_to_page(old_base_mfn));
+
+        /*
+         * Now arch.guest_table points to machine physical.
+         */
+        d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
+        update_pagetables(d);
+
+        VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
+                (unsigned long) (mfn << PAGE_SHIFT));
+
+        __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
+        /* 
+         * arch->shadow_table should hold the next CR3 for shadow
+         */
+        VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx", 
+                d->arch.arch_vmx.cpu_cr3, mfn);
+    } else {
+        if ((value & X86_CR0_PE) == 0) {
+            __vmread(GUEST_EIP, &eip);
+            VMX_DBG_LOG(DBG_LEVEL_1,
+               "Disabling CR0.PE at %%eip 0x%lx", eip);
+           if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
+               set_bit(VMX_CPU_STATE_ASSIST_ENABLED,
+                                       &d->arch.arch_vmx.cpu_state);
+               __vmread(GUEST_EIP, &eip);
+               VMX_DBG_LOG(DBG_LEVEL_1,
+                   "Transfering control to vmxassist %%eip 0x%lx", eip);
+               return 0; /* do not update eip! */
+           }
+       } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
+                                       &d->arch.arch_vmx.cpu_state)) {
+           __vmread(GUEST_EIP, &eip);
+           VMX_DBG_LOG(DBG_LEVEL_1,
+               "Enabling CR0.PE at %%eip 0x%lx", eip);
+           if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
+               clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
+                                       &d->arch.arch_vmx.cpu_state);
+               __vmread(GUEST_EIP, &eip);
+               VMX_DBG_LOG(DBG_LEVEL_1,
+                   "Restoring to %%eip 0x%lx", eip);
+               return 0; /* do not update eip! */
+           }
+       }
+    }
+    return 1;
+}
+
 #define CASE_GET_REG(REG, reg)  \
     case REG_ ## REG: value = regs->reg; break
 
@@ -650,7 +728,6 @@
 {
     unsigned long value;
     unsigned long old_cr;
-    unsigned long eip;
     struct exec_domain *d = current;
 
     switch (gp) {
@@ -675,80 +752,8 @@
     switch(cr) {
     case 0: 
     {
-        unsigned long old_base_mfn, mfn;
-
-        /* 
-         * CR0:
-         * We don't want to lose PE and PG.
-         */
-        __vmwrite(GUEST_CR0, (value | X86_CR0_PE | X86_CR0_PG));
-        __vmwrite(CR0_READ_SHADOW, value);
-
-        if (value & (X86_CR0_PE | X86_CR0_PG) &&
-            !test_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state)) {
-            /*
-             * Enable paging
-             */
-            set_bit(VMX_CPU_STATE_PG_ENABLED, &d->arch.arch_vmx.cpu_state);
-            /*
-             * The guest CR3 must be pointing to the guest physical.
-             */
-            if ( !VALID_MFN(mfn = phys_to_machine_mapping(
-                                d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
-                 !get_page(pfn_to_page(mfn), d->domain) )
-            {
-                VMX_DBG_LOG(DBG_LEVEL_VMMU, "Invalid CR3 value = %lx",
-                            d->arch.arch_vmx.cpu_cr3);
-                domain_crash_synchronous(); /* need to take a clean path */
-            }
-            old_base_mfn = pagetable_val(d->arch.guest_table) >> PAGE_SHIFT;
-            if ( old_base_mfn )
-                put_page(pfn_to_page(old_base_mfn));
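
The lmsw handling mentioned in the changeset description is not visible in the
truncated diff above, but its effect on CR0 is narrow and well defined: lmsw
loads only bits 0-3 (PE, MP, EM, TS) from its 16-bit operand and can never
clear PE once it is set. A hypothetical sketch of that merge step follows
(illustrative only, not the Xen handler, which presumably feeds the result into
the new vmx_set_cr0() helper shown earlier in this diff):

    /* Hypothetical helper: merge an lmsw operand into an existing CR0 value. */
    #include <stdint.h>
    #include <stdio.h>

    #define CR0_PE 0x1u   /* protection enable     */
    #define CR0_MP 0x2u   /* monitor coprocessor   */
    #define CR0_EM 0x4u   /* emulation             */
    #define CR0_TS 0x8u   /* task switched         */
    #define LMSW_BITS (CR0_PE | CR0_MP | CR0_EM | CR0_TS)

    static uint64_t apply_lmsw(uint64_t cr0, uint16_t operand)
    {
        uint64_t value = (cr0 & ~(uint64_t)LMSW_BITS) | (operand & LMSW_BITS);
        if (cr0 & CR0_PE)
            value |= CR0_PE;   /* lmsw cannot clear PE once it has been set */
        return value;          /* caller would hand this to the CR0 update path */
    }

    int main(void)
    {
        /* e.g. a real-mode guest (CR0 = 0x10) executing lmsw with PE|TS set */
        printf("new CR0 = 0x%llx\n",
               (unsigned long long)apply_lmsw(0x10, CR0_PE | CR0_TS));
        return 0;
    }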

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
