WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] hvm: Clean up AP initialisation. This allows AP bringup into emulated

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] hvm: Clean up AP initialisation. This allows AP bringup into emulated
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 07 Dec 2007 16:30:08 -0800
Delivery-date: Fri, 07 Dec 2007 16:30:36 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1197037687 0
# Node ID 98e9485d8fcf1d3e50f9a0e20e109cdb5ae55384
# Parent  c0f7ba3aa9b2109ca2c5e267152b47924c1bfa45
hvm: Clean up AP initialisation. This allows AP bringup into emulated
real mode when running on VMX, as well as removing 100 LOC.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        |  105 ++++++++++++++++++----------
 xen/arch/x86/hvm/svm/svm.c    |  154 +++++-------------------------------------
 xen/arch/x86/hvm/vmx/vmx.c    |   18 +---
 xen/include/asm-x86/hvm/hvm.h |   10 --
 4 files changed, 95 insertions(+), 192 deletions(-)

diff -r c0f7ba3aa9b2 -r 98e9485d8fcf xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Dec 07 12:48:36 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Fri Dec 07 14:28:07 2007 +0000
@@ -1590,57 +1590,90 @@ void hvm_hypercall_page_initialise(struc
     hvm_funcs.init_hypercall_page(d, hypercall_page);
 }
 
-
-/*
- * only called in HVM domain BSP context
- * when booting, vcpuid is always equal to apic_id
- */
 int hvm_bringup_ap(int vcpuid, int trampoline_vector)
 {
+    struct domain *d = current->domain;
     struct vcpu *v;
-    struct domain *d = current->domain;
     struct vcpu_guest_context *ctxt;
-    int rc = 0;
-
-    BUG_ON(!is_hvm_domain(d));
+    struct segment_register reg;
+
+    ASSERT(is_hvm_domain(d));
 
     if ( (v = d->vcpu[vcpuid]) == NULL )
         return -ENOENT;
 
-    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
-    {
-        gdprintk(XENLOG_ERR,
-                "Failed to allocate memory in hvm_bringup_ap.\n");
-        return -ENOMEM;
-    }
-
-    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);
-
+    v->fpu_initialised = 0;
+    v->arch.flags |= TF_kernel_mode;
+    v->is_initialised = 1;
+
+    ctxt = &v->arch.guest_context;
+    memset(ctxt, 0, sizeof(*ctxt));
+    ctxt->flags = VGCF_online;
+    ctxt->user_regs.eflags = 2;
+
+#ifdef VMXASSIST
+    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+    {
+        ctxt->user_regs.eip = VMXASSIST_BASE;
+        ctxt->user_regs.edx = vcpuid;
+        ctxt->user_regs.ebx = trampoline_vector;
+        goto done;
+    }
+#endif
+
+    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
+    hvm_update_guest_cr(v, 0);
+
+    v->arch.hvm_vcpu.guest_cr[2] = 0;
+    hvm_update_guest_cr(v, 2);
+
+    v->arch.hvm_vcpu.guest_cr[3] = 0;
+    hvm_update_guest_cr(v, 3);
+
+    v->arch.hvm_vcpu.guest_cr[4] = 0;
+    hvm_update_guest_cr(v, 4);
+
+    v->arch.hvm_vcpu.guest_efer = 0;
+    hvm_update_guest_efer(v);
+
+    reg.sel = trampoline_vector << 8;
+    reg.base = (uint32_t)reg.sel << 4;
+    reg.limit = 0xffff;
+    reg.attr.bytes = 0x89b;
+    hvm_set_segment_register(v, x86_seg_cs, &reg);
+
+    reg.sel = reg.base = 0;
+    reg.limit = 0xffff;
+    reg.attr.bytes = 0x893;
+    hvm_set_segment_register(v, x86_seg_ds, &reg);
+    hvm_set_segment_register(v, x86_seg_es, &reg);
+    hvm_set_segment_register(v, x86_seg_fs, &reg);
+    hvm_set_segment_register(v, x86_seg_gs, &reg);
+    hvm_set_segment_register(v, x86_seg_ss, &reg);
+
+    reg.attr.bytes = 0x82; /* LDT */
+    hvm_set_segment_register(v, x86_seg_ldtr, &reg);
+
+    reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
+    hvm_set_segment_register(v, x86_seg_tr, &reg);
+
+    reg.attr.bytes = 0;
+    hvm_set_segment_register(v, x86_seg_gdtr, &reg);
+    hvm_set_segment_register(v, x86_seg_idtr, &reg);
+
+#ifdef VMXASSIST
+ done:
+#endif
     /* Sync AP's TSC with BSP's. */
     v->arch.hvm_vcpu.cache_tsc_offset =
         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 
-    LOCK_BIGLOCK(d);
-    rc = -EEXIST;
-    if ( !v->is_initialised )
-        rc = boot_vcpu(d, vcpuid, ctxt);
-    UNLOCK_BIGLOCK(d);
-
-    if ( rc != 0 )
-    {
-        gdprintk(XENLOG_ERR,
-               "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
-        goto out;
-    }
-
     if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
         vcpu_wake(v);
-    gdprintk(XENLOG_INFO, "AP %d bringup suceeded.\n", vcpuid);
-
- out:
-    xfree(ctxt);
-    return rc;
+
+    gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);
+    return 0;
 }
 
 static int hvmop_set_pci_intx_level(
diff -r c0f7ba3aa9b2 -r 98e9485d8fcf xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Fri Dec 07 12:48:36 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Dec 07 14:28:07 2007 +0000
@@ -62,8 +62,6 @@ int inst_copy_from_guest(unsigned char *
                          int inst_len);
 asmlinkage void do_IRQ(struct cpu_user_regs *);
 
-static int svm_reset_to_realmode(
-    struct vcpu *v, struct cpu_user_regs *regs);
 static void svm_update_guest_cr(struct vcpu *v, unsigned int cr);
 static void svm_update_guest_efer(struct vcpu *v);
 static void svm_inject_exception(
@@ -617,8 +615,24 @@ static void svm_set_segment_register(str
                                      struct segment_register *reg)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    ASSERT(v == current);
+    int sync = 0;
+
+    ASSERT((v == current) || !vcpu_runnable(v));
+
+    switch ( seg )
+    {
+    case x86_seg_fs:
+    case x86_seg_gs:
+    case x86_seg_tr:
+    case x86_seg_ldtr:
+        sync = (v == current);
+        break;
+    default:
+        break;
+    }
+
+    if ( sync )
+        svm_sync_vmcb(v);
 
     switch ( seg )
     {
@@ -632,23 +646,17 @@ static void svm_set_segment_register(str
         memcpy(&vmcb->es, reg, sizeof(*reg));
         break;
     case x86_seg_fs:
-        svm_sync_vmcb(v);
         memcpy(&vmcb->fs, reg, sizeof(*reg));
-        svm_vmload(vmcb);
         break;
     case x86_seg_gs:
-        svm_sync_vmcb(v);
         memcpy(&vmcb->gs, reg, sizeof(*reg));
-        svm_vmload(vmcb);
         break;
     case x86_seg_ss:
         memcpy(&vmcb->ss, reg, sizeof(*reg));
         vmcb->cpl = vmcb->ss.attr.fields.dpl;
         break;
     case x86_seg_tr:
-        svm_sync_vmcb(v);
         memcpy(&vmcb->tr, reg, sizeof(*reg));
-        svm_vmload(vmcb);
         break;
     case x86_seg_gdtr:
         memcpy(&vmcb->gdtr, reg, sizeof(*reg));
@@ -657,13 +665,14 @@ static void svm_set_segment_register(str
         memcpy(&vmcb->idtr, reg, sizeof(*reg));
         break;
     case x86_seg_ldtr:
-        svm_sync_vmcb(v);
         memcpy(&vmcb->ldtr, reg, sizeof(*reg));
-        svm_vmload(vmcb);
         break;
     default:
         BUG();
     }
+
+    if ( sync )
+        svm_vmload(vmcb);
 }
 
 /* Make sure that xen intercepts any FP accesses from current */
@@ -684,45 +693,9 @@ static void svm_stts(struct vcpu *v)
     }
 }
 
-
 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     v->arch.hvm_svm.vmcb->tsc_offset = offset;
-}
-
-
-static void svm_init_ap_context(
-    struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
-{
-    struct vcpu *v;
-    struct vmcb_struct *vmcb;
-    cpu_user_regs_t *regs;
-    u16 cs_sel;
-
-    /* We know this is safe because hvm_bringup_ap() does it */
-    v = current->domain->vcpu[vcpuid];
-    vmcb = v->arch.hvm_svm.vmcb;
-    regs = &v->arch.guest_context.user_regs;
-
-    memset(ctxt, 0, sizeof(*ctxt));
-
-    /*
-     * We execute the trampoline code in real mode. The trampoline vector
-     * passed to us is page alligned and is the physical frame number for
-     * the code. We will execute this code in real mode.
-     */
-    cs_sel = trampoline_vector << 8;
-    ctxt->user_regs.eip = 0x0;
-    ctxt->user_regs.cs = cs_sel;
-
-    /*
-     * This is the launch of an AP; set state so that we begin executing
-     * the trampoline code in real-mode.
-     */
-    svm_reset_to_realmode(v, regs);  
-    /* Adjust the vmcb's hidden register state. */
-    vmcb->cs.sel = cs_sel;
-    vmcb->cs.base = (cs_sel << 4);
 }
 
 static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
@@ -916,7 +889,6 @@ static struct hvm_function_table svm_fun
     .stts                 = svm_stts,
     .set_tsc_offset       = svm_set_tsc_offset,
     .inject_exception     = svm_inject_exception,
-    .init_ap_context      = svm_init_ap_context,
     .init_hypercall_page  = svm_init_hypercall_page,
     .event_pending        = svm_event_pending
 };
@@ -2037,90 +2009,6 @@ void svm_handle_invlpg(const short invlp
     domain_crash(v->domain);
 }
 
-
-/*
- * Reset to realmode causes execution to start at 0xF000:0xFFF0 in
- * 16-bit realmode.  Basically, this mimics a processor reset.
- *
- * returns 0 on success, non-zero otherwise
- */
-static int svm_reset_to_realmode(struct vcpu *v, 
-                                 struct cpu_user_regs *regs)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    memset(regs, 0, sizeof(struct cpu_user_regs));
-
-    regs->eflags = 2;
-
-    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
-    svm_update_guest_cr(v, 0);
-
-    v->arch.hvm_vcpu.guest_cr[2] = 0;
-    svm_update_guest_cr(v, 2);
-
-    v->arch.hvm_vcpu.guest_cr[4] = 0;
-    svm_update_guest_cr(v, 4);
-
-    vmcb->efer = EFER_SVME;
-
-    /* This will jump to ROMBIOS */
-    regs->eip = 0xFFF0;
-
-    /* Set up the segment registers and all their hidden states. */
-    vmcb->cs.sel = 0xF000;
-    vmcb->cs.attr.bytes = 0x089b;
-    vmcb->cs.limit = 0xffff;
-    vmcb->cs.base = 0x000F0000;
-
-    vmcb->ss.sel = 0x00;
-    vmcb->ss.attr.bytes = 0x0893;
-    vmcb->ss.limit = 0xffff;
-    vmcb->ss.base = 0x00;
-
-    vmcb->ds.sel = 0x00;
-    vmcb->ds.attr.bytes = 0x0893;
-    vmcb->ds.limit = 0xffff;
-    vmcb->ds.base = 0x00;
-    
-    vmcb->es.sel = 0x00;
-    vmcb->es.attr.bytes = 0x0893;
-    vmcb->es.limit = 0xffff;
-    vmcb->es.base = 0x00;
-    
-    vmcb->fs.sel = 0x00;
-    vmcb->fs.attr.bytes = 0x0893;
-    vmcb->fs.limit = 0xffff;
-    vmcb->fs.base = 0x00;
-    
-    vmcb->gs.sel = 0x00;
-    vmcb->gs.attr.bytes = 0x0893;
-    vmcb->gs.limit = 0xffff;
-    vmcb->gs.base = 0x00;
-
-    vmcb->ldtr.sel = 0x00;
-    vmcb->ldtr.attr.bytes = 0x0000;
-    vmcb->ldtr.limit = 0x0;
-    vmcb->ldtr.base = 0x00;
-
-    vmcb->gdtr.sel = 0x00;
-    vmcb->gdtr.attr.bytes = 0x0000;
-    vmcb->gdtr.limit = 0x0;
-    vmcb->gdtr.base = 0x00;
-    
-    vmcb->tr.sel = 0;
-    vmcb->tr.attr.bytes = 0;
-    vmcb->tr.limit = 0x0;
-    vmcb->tr.base = 0;
-
-    vmcb->idtr.sel = 0x00;
-    vmcb->idtr.attr.bytes = 0x0000;
-    vmcb->idtr.limit = 0x3ff;
-    vmcb->idtr.base = 0x00;
-
-    return 0;
-}
-
 asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
 {
     unsigned int exit_reason;
diff -r c0f7ba3aa9b2 -r 98e9485d8fcf xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Dec 07 12:48:36 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Dec 07 14:28:07 2007 +0000
@@ -863,7 +863,7 @@ static void vmx_set_segment_register(str
 {
     uint32_t attr;
 
-    ASSERT(v == current);
+    ASSERT((v == current) || !vcpu_runnable(v));
 
     attr = reg->attr.bytes;
     attr = ((attr & 0xf00) << 4) | (attr & 0xff);
@@ -871,6 +871,8 @@ static void vmx_set_segment_register(str
     /* Not-present must mean unusable. */
     if ( !reg->attr.fields.p )
         attr |= (1u << 16);
+
+    vmx_vmcs_enter(v);
 
     switch ( seg )
     {
@@ -933,6 +935,8 @@ static void vmx_set_segment_register(str
     default:
         BUG();
     }
+
+    vmx_vmcs_exit(v);
 }
 
 /* Make sure that xen intercepts any FP accesses from current */
@@ -963,17 +967,6 @@ static void vmx_set_tsc_offset(struct vc
     __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
 #endif
     vmx_vmcs_exit(v);
-}
-
-static void vmx_init_ap_context(
-    struct vcpu_guest_context *ctxt, int vcpuid, int trampoline_vector)
-{
-    memset(ctxt, 0, sizeof(*ctxt));
-#ifdef VMXASSIST
-    ctxt->user_regs.eip = VMXASSIST_BASE;
-    ctxt->user_regs.edx = vcpuid;
-    ctxt->user_regs.ebx = trampoline_vector;
-#endif
 }
 
 void do_nmi(struct cpu_user_regs *);
@@ -1159,7 +1152,6 @@ static struct hvm_function_table vmx_fun
     .stts                 = vmx_stts,
     .set_tsc_offset       = vmx_set_tsc_offset,
     .inject_exception     = vmx_inject_exception,
-    .init_ap_context      = vmx_init_ap_context,
     .init_hypercall_page  = vmx_init_hypercall_page,
     .event_pending        = vmx_event_pending,
     .cpu_up               = vmx_cpu_up,
diff -r c0f7ba3aa9b2 -r 98e9485d8fcf xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri Dec 07 12:48:36 2007 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Fri Dec 07 14:28:07 2007 +0000
@@ -121,9 +121,6 @@ struct hvm_function_table {
     void (*inject_exception)(unsigned int trapnr, int errcode,
                              unsigned long cr2);
 
-    void (*init_ap_context)(struct vcpu_guest_context *ctxt,
-                            int vcpuid, int trampoline_vector);
-
     void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
 
     int  (*event_pending)(struct vcpu *v);
@@ -237,13 +234,6 @@ void hvm_cpuid(unsigned int input, unsig
                                    unsigned int *ecx, unsigned int *edx);
 void hvm_migrate_timers(struct vcpu *v);
 void hvm_do_resume(struct vcpu *v);
-
-static inline void
-hvm_init_ap_context(struct vcpu_guest_context *ctxt,
-                    int vcpuid, int trampoline_vector)
-{
-    return hvm_funcs.init_ap_context(ctxt, vcpuid, trampoline_vector);
-}
 
 static inline void
 hvm_inject_exception(unsigned int trapnr, int errcode, unsigned long cr2)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] hvm: Clean up AP initialisation. This allows AP bringup into emulated, Xen patchbot-unstable <=