[Xen-changelog] [xen-unstable] [HVM] Replace relinquish_resources() destructor hook with

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [HVM] Replace relinquish_resources() destructor hook with
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 06 Nov 2006 16:50:39 +0000
Delivery-date: Mon, 06 Nov 2006 08:54:19 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 8eb8c0085604f3671e062c8d7f0054b42a20986b
# Parent  a910bf123e5875681d925ad9981129485b1c8eaa
[HVM] Replace relinquish_resources() destructor hook with
separate vcpu and domain destructors that are called at the
point the domain is finally destroyed.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c            |   14 +++++++++----
 xen/arch/x86/hvm/hvm.c           |   40 +++++++++++++++++++++++++++++--------
 xen/arch/x86/hvm/svm/svm.c       |   42 +++++----------------------------------
 xen/arch/x86/hvm/svm/vmcb.c      |    3 --
 xen/arch/x86/hvm/vlapic.c        |   17 ++++++++++++++-
 xen/arch/x86/hvm/vmx/vmcs.c      |   11 ----------
 xen/arch/x86/hvm/vmx/vmx.c       |   35 +++-----------------------------
 xen/include/asm-x86/hvm/hvm.h    |   13 ++++--------
 xen/include/asm-x86/hvm/io.h     |    1 -
 xen/include/asm-x86/hvm/vlapic.h |    3 +-
 10 files changed, 76 insertions(+), 103 deletions(-)

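For orientation before the full diff, here is a condensed sketch of the teardown path this change introduces (simplified from the domain.c and hvm.c hunks below; not a drop-in excerpt):

    /* Sketch only: unrelated teardown and error handling omitted. */
    void arch_domain_destroy(struct domain *d)
    {
        struct vcpu *v;

        if ( is_hvm_domain(d) )
        {
            /* Per-VCPU HVM state first... */
            for_each_vcpu ( d, v )
                hvm_vcpu_destroy(v);   /* hlt_timer, vlapic_destroy(),
                                          hvm_funcs.vcpu_destroy() */

            /* ...then per-domain HVM state. */
            hvm_domain_destroy(d);     /* periodic timer, RTC, PM timer,
                                          shared/buffered-io mappings */
        }

        shadow_final_teardown(d);
        /* remaining arch teardown unchanged */
    }

Both destructors run at final domain destruction rather than from domain_relinquish_resources(), so the old per-vendor relinquish_guest_resources() hook in SVM and VMX shrinks to a simple vcpu_destroy() hook.
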
diff -r a910bf123e58 -r 8eb8c0085604 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/arch/x86/domain.c     Mon Nov 06 16:36:51 2006 +0000
@@ -235,7 +235,7 @@ int arch_domain_create(struct domain *d)
             virt_to_page(d->shared_info), d, XENSHARE_writable);
     }
 
-    return hvm_domain_initialise(d);
+    return is_hvm_domain(d) ? hvm_domain_initialise(d) : 0;
 
  fail:
     free_xenheap_page(d->shared_info);
@@ -249,6 +249,15 @@ int arch_domain_create(struct domain *d)
 
 void arch_domain_destroy(struct domain *d)
 {
+    struct vcpu *v;
+
+    if ( is_hvm_domain(d) )
+    {
+        for_each_vcpu ( d, v )
+            hvm_vcpu_destroy(v);
+        hvm_domain_destroy(d);
+    }
+
     shadow_final_teardown(d);
 
     free_xenheap_pages(
@@ -974,9 +983,6 @@ void domain_relinquish_resources(struct 
 #endif
     }
 
-    if ( is_hvm_domain(d) )
-        hvm_relinquish_guest_resources(d);
-
     /* Tear down shadow mode stuff. */
     shadow_teardown(d);
 
diff -r a910bf123e58 -r 8eb8c0085604 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Mon Nov 06 16:36:51 2006 +0000
@@ -110,18 +110,10 @@ void hvm_do_resume(struct vcpu *v)
     }
 }
 
-void hvm_release_assist_channel(struct vcpu *v)
-{
-    free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);
-}
-
 int hvm_domain_initialise(struct domain *d)
 {
     struct hvm_domain *platform = &d->arch.hvm_domain;
     int rc;
-
-    if ( !is_hvm_domain(d) )
-        return 0;
 
     if ( !hvm_enabled )
     {
@@ -146,6 +138,20 @@ int hvm_domain_initialise(struct domain 
     return 0;
 }
 
+void hvm_domain_destroy(struct domain *d)
+{
+    kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer);
+    rtc_deinit(d);
+    pmtimer_deinit(d);
+
+    if ( d->arch.hvm_domain.shared_page_va )
+        unmap_domain_page_global(
+            (void *)d->arch.hvm_domain.shared_page_va);
+
+    if ( d->arch.hvm_domain.buffered_io_va )
+        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
+}
+
 int hvm_vcpu_initialise(struct vcpu *v)
 {
     struct hvm_domain *platform;
@@ -153,6 +159,12 @@ int hvm_vcpu_initialise(struct vcpu *v)
 
     if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
         return rc;
+
+    if ( (rc = vlapic_init(v)) != 0 )
+    {
+        hvm_funcs.vcpu_destroy(v);
+        return rc;
+    }
 
     /* Create ioreq event channel. */
     v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
@@ -160,6 +172,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
         get_vio(v->domain, v->vcpu_id)->vp_eport =
             v->arch.hvm_vcpu.xen_port;
 
+    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
+
     if ( v->vcpu_id != 0 )
         return 0;
 
@@ -176,6 +190,16 @@ int hvm_vcpu_initialise(struct vcpu *v)
     hvm_set_guest_time(v, 0);
 
     return 0;
+}
+
+void hvm_vcpu_destroy(struct vcpu *v)
+{
+    kill_timer(&v->arch.hvm_vcpu.hlt_timer);
+    vlapic_destroy(v);
+    hvm_funcs.vcpu_destroy(v);
+
+    /* Event channel is already freed by evtchn_destroy(). */
+    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
 }
 
 void pic_irq_request(void *data, int level)
diff -r a910bf123e58 -r 8eb8c0085604 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Mon Nov 06 16:36:51 2006 +0000
@@ -61,7 +61,6 @@ extern int svm_dbg_on;
 extern int svm_dbg_on;
 void svm_dump_regs(const char *from, struct cpu_user_regs *regs);
 
-static void svm_relinquish_guest_resources(struct domain *d);
 static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
                                             struct cpu_user_regs *regs);
 
@@ -777,6 +776,11 @@ static int svm_vcpu_initialise(struct vc
     return 0;
 }
 
+static void svm_vcpu_destroy(struct vcpu *v)
+{
+    destroy_vmcb(&v->arch.hvm_svm);
+}
+
 int start_svm(void)
 {
     u32 eax, ecx, edx;
@@ -825,7 +829,7 @@ int start_svm(void)
     hvm_funcs.disable = stop_svm;
 
     hvm_funcs.vcpu_initialise = svm_vcpu_initialise;
-    hvm_funcs.relinquish_guest_resources = svm_relinquish_guest_resources;
+    hvm_funcs.vcpu_destroy    = svm_vcpu_destroy;
 
     hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
     hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
@@ -848,40 +852,6 @@ int start_svm(void)
     hvm_enabled = 1;
 
     return 1;
-}
-
-
-static void svm_relinquish_guest_resources(struct domain *d)
-{
-    struct vcpu *v;
-
-    for_each_vcpu ( d, v )
-    {
-        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
-            continue;
-
-        destroy_vmcb(&v->arch.hvm_svm);
-        kill_timer(&v->arch.hvm_vcpu.hlt_timer);
-        if ( VLAPIC(v) != NULL )
-        {
-            kill_timer(&VLAPIC(v)->vlapic_timer);
-            unmap_domain_page_global(VLAPIC(v)->regs);
-            free_domheap_page(VLAPIC(v)->regs_page);
-            xfree(VLAPIC(v));
-        }
-        hvm_release_assist_channel(v);
-    }
-
-    kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer);
-    rtc_deinit(d);
-    pmtimer_deinit(d);
-
-    if ( d->arch.hvm_domain.shared_page_va )
-        unmap_domain_page_global(
-            (void *)d->arch.hvm_domain.shared_page_va);
-
-    if ( d->arch.hvm_domain.buffered_io_va )
-        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
 }
 
 
diff -r a910bf123e58 -r 8eb8c0085604 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Mon Nov 06 16:36:51 2006 +0000
@@ -350,9 +350,6 @@ void svm_do_launch(struct vcpu *v)
     clear_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
     if ( !asidpool_assign_next( vmcb, 0, core, core ))
         BUG();
-
-    vlapic_init(v);
-    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
 
     vmcb->ldtr.sel = 0;
     vmcb->ldtr.base = 0;
diff -r a910bf123e58 -r 8eb8c0085604 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/arch/x86/hvm/vlapic.c Mon Nov 06 16:36:51 2006 +0000
@@ -1016,7 +1016,7 @@ static int vlapic_reset(struct vlapic *v
 
 int vlapic_init(struct vcpu *v)
 {
-    struct vlapic *vlapic = NULL;
+    struct vlapic *vlapic;
 
     HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "vlapic_init %d", v->vcpu_id);
 
@@ -1047,3 +1047,18 @@ int vlapic_init(struct vcpu *v)
 
     return 0;
 }
+
+void vlapic_destroy(struct vcpu *v)
+{
+    struct vlapic *vlapic = VLAPIC(v);
+    
+    if ( vlapic == NULL )
+        return;
+
+    VLAPIC(v) = NULL;
+
+    kill_timer(&vlapic->vlapic_timer);
+    unmap_domain_page_global(vlapic->regs);
+    free_domheap_page(vlapic->regs_page);
+    xfree(vlapic);
+}
diff -r a910bf123e58 -r 8eb8c0085604 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Mon Nov 06 16:36:51 2006 +0000
@@ -193,11 +193,7 @@ void vmx_vmcs_enter(struct vcpu *v)
 {
     /*
      * NB. We must *always* run an HVM VCPU on its own VMCS, except for
-     * vmx_vmcs_enter/exit critical regions. This leads to some TODOs:
-     *  1. VMPTRLD as soon as we context-switch to a HVM VCPU.
-     *  2. VMCS destruction needs to happen later (from domain_destroy()).
-     * We can relax this a bit if a paused VCPU always commits its
-     * architectural state to a software structure.
+     * vmx_vmcs_enter/exit critical regions.
      */
     if ( v == current )
         return;
@@ -416,11 +412,6 @@ static int construct_vmcs(struct vcpu *v
         cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
     error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
 
-    /* XXX Move this out. */
-    init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
-    if ( vlapic_init(v) != 0 )
-        return -1;
-
 #ifdef __x86_64__ 
     /* VLAPIC TPR optimisation. */
     v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
diff -r a910bf123e58 -r 8eb8c0085604 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Nov 06 16:36:51 2006 +0000
@@ -74,36 +74,9 @@ static int vmx_vcpu_initialise(struct vc
     return 0;
 }
 
-static void vmx_relinquish_guest_resources(struct domain *d)
-{
-    struct vcpu *v;
-
-    for_each_vcpu ( d, v )
-    {
-        vmx_destroy_vmcs(v);
-        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
-            continue;
-        kill_timer(&v->arch.hvm_vcpu.hlt_timer);
-        if ( VLAPIC(v) != NULL )
-        {
-            kill_timer(&VLAPIC(v)->vlapic_timer);
-            unmap_domain_page_global(VLAPIC(v)->regs);
-            free_domheap_page(VLAPIC(v)->regs_page);
-            xfree(VLAPIC(v));
-        }
-        hvm_release_assist_channel(v);
-    }
-
-    kill_timer(&d->arch.hvm_domain.pl_time.periodic_tm.timer);
-    rtc_deinit(d);
-    pmtimer_deinit(d);
-
-    if ( d->arch.hvm_domain.shared_page_va )
-        unmap_domain_page_global(
-            (void *)d->arch.hvm_domain.shared_page_va);
-
-    if ( d->arch.hvm_domain.buffered_io_va )
-        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
+static void vmx_vcpu_destroy(struct vcpu *v)
+{
+    vmx_destroy_vmcs(v);
 }
 
 #ifdef __x86_64__
@@ -674,7 +647,7 @@ static void vmx_setup_hvm_funcs(void)
     hvm_funcs.disable = stop_vmx;
 
     hvm_funcs.vcpu_initialise = vmx_vcpu_initialise;
-    hvm_funcs.relinquish_guest_resources = vmx_relinquish_guest_resources;
+    hvm_funcs.vcpu_destroy    = vmx_vcpu_destroy;
 
     hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
     hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
diff -r a910bf123e58 -r 8eb8c0085604 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Mon Nov 06 16:36:51 2006 +0000
@@ -33,10 +33,10 @@ struct hvm_function_table {
     void (*disable)(void);
 
     /*
-     * Initialize/relinguish HVM guest resources
+     * Initialise/destroy HVM VCPU resources
      */
     int  (*vcpu_initialise)(struct vcpu *v);
-    void (*relinquish_guest_resources)(struct domain *d);
+    void (*vcpu_destroy)(struct vcpu *v);
 
     /*
      * Store and load guest state:
@@ -92,13 +92,10 @@ hvm_disable(void)
 }
 
 int hvm_domain_initialise(struct domain *d);
+void hvm_domain_destroy(struct domain *d);
+
 int hvm_vcpu_initialise(struct vcpu *v);
-
-static inline void
-hvm_relinquish_guest_resources(struct domain *d)
-{
-    hvm_funcs.relinquish_guest_resources(d);
-}
+void hvm_vcpu_destroy(struct vcpu *v);
 
 static inline void
 hvm_store_cpu_guest_regs(
diff -r a910bf123e58 -r 8eb8c0085604 xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h      Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/include/asm-x86/hvm/io.h      Mon Nov 06 16:36:51 2006 +0000
@@ -151,7 +151,6 @@ extern void hvm_pic_assist(struct vcpu *
 extern void hvm_pic_assist(struct vcpu *v);
 extern int cpu_get_interrupt(struct vcpu *v, int *type);
 extern int cpu_has_pending_irq(struct vcpu *v);
-extern void hvm_release_assist_channel(struct vcpu *v);
 
 // XXX - think about this, maybe use bit 30 of the mfn to signify an MMIO frame.
 #define mmio_space(gpa) (!VALID_MFN(get_mfn_from_gpfn((gpa) >> PAGE_SHIFT)))
diff -r a910bf123e58 -r 8eb8c0085604 xen/include/asm-x86/hvm/vlapic.h
--- a/xen/include/asm-x86/hvm/vlapic.h  Mon Nov 06 15:46:28 2006 +0000
+++ b/xen/include/asm-x86/hvm/vlapic.h  Mon Nov 06 16:36:51 2006 +0000
@@ -77,7 +77,8 @@ int vlapic_find_highest_irr(struct vlapi
 
 int cpu_get_apic_interrupt(struct vcpu *v, int *mode);
 
-int vlapic_init(struct vcpu *vc);
+int  vlapic_init(struct vcpu *v);
+void vlapic_destroy(struct vcpu *v);
 
 void vlapic_msr_set(struct vlapic *vlapic, uint64_t value);
 

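On the initialisation side, the vlapic and HLT-timer setup that previously lived in the vendor code (svm_do_launch() and construct_vmcs()) now happens in the common hvm_vcpu_initialise(), which unwinds the vendor state if vlapic_init() fails. A condensed sketch of that error path (simplified from the hvm.c hunk above; not a drop-in excerpt):

    /* Sketch only: event-channel and vcpu0-only platform setup omitted. */
    int hvm_vcpu_initialise(struct vcpu *v)
    {
        int rc;

        if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
            return rc;

        if ( (rc = vlapic_init(v)) != 0 )
        {
            hvm_funcs.vcpu_destroy(v);  /* undo vendor-specific setup */
            return rc;
        }

        init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
        /* ... ioreq event channel, vcpu0 platform init ... */
        return 0;
    }

The matching hvm_vcpu_destroy() deliberately does not free the ioreq event channel, since evtchn_destroy() has already released it by the time the domain destructor runs.
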
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
