[Xen-changelog] SVM patch to cleanup the host save area allocation and deallocation

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] SVM patch to cleanup the host save area allocation and deallocation,
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 04 May 2006 14:12:09 +0000
Delivery-date: Thu, 04 May 2006 07:13:42 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID e1a47a2696004087852cb9f2e09fe4eb8ad1b928
# Parent  bbce4d11518910328380f6a3325268acfa5b3aff
SVM patch to clean up the host save area allocation and deallocation,
including removing memory leaks concerning these areas. Also fixes a
problem where the HSA MSR was not initialized properly for cores > 0.

Signed-off-by: Tom Woller <thomas.woller@xxxxxxx>
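
In short, the patch below moves the host save area (HSA) from a per-VCPU
allocation to one per physical core: start_svm() allocates the core's HSA
and programs the VM_HSAVE_PA MSR with its physical address, and stop_svm()
frees it. A minimal sketch of the MSR programming, paraphrasing the
start_svm() hunk below (not a verbatim excerpt):

    /* Sketch only -- mirrors the start_svm() change in the diff below. */
    int cpu = smp_processor_id();
    u64 phys_hsa;

    host_save_area[cpu] = alloc_host_save_area();        /* one HSA per core */
    phys_hsa = (u64)virt_to_maddr(host_save_area[cpu]);  /* physical address */

    /* MSR_K8_VM_HSAVE_PA tells the processor where to save host state on
     * VMRUN; wrmsr() takes the low and high 32 bits as separate arguments. */
    wrmsr(MSR_K8_VM_HSAVE_PA, (u32)phys_hsa, (u32)(phys_hsa >> 32));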

diff -r bbce4d115189 -r e1a47a269600 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu May 04 10:25:27 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu May 04 11:14:45 2006 +0100
@@ -79,6 +79,8 @@ void svm_dump_regs(const char *from, str
 
 static void svm_relinquish_guest_resources(struct domain *d);
 
+/* Host save area */
+struct host_save_area *host_save_area[ NR_CPUS ] = {0};
 static struct asid_pool ASIDpool[NR_CPUS];
 
 /*
@@ -185,11 +187,16 @@ void stop_svm(void)
 void stop_svm(void)
 {
     u32 eax, edx;    
+    int cpu = smp_processor_id();
 
     /* We turn off the EFER_SVME bit. */
     rdmsr(MSR_EFER, eax, edx);
     eax &= ~EFER_SVME;
     wrmsr(MSR_EFER, eax, edx);
+ 
+    /* release the HSA */
+    free_host_save_area( host_save_area[ cpu ] );
+    host_save_area[ cpu ] = NULL;
 
     printk("AMD SVM Extension is disabled.\n");
 }
@@ -431,8 +438,11 @@ int start_svm(void)
 int start_svm(void)
 {
     u32 eax, ecx, edx;
-    
-    /* Xen does not fill x86_capability words except 0. */
+    u32 phys_hsa_lo, phys_hsa_hi;   
+    u64 phys_hsa;
+    int cpu = smp_processor_id();
+ 
+   /* Xen does not fill x86_capability words except 0. */
     ecx = cpuid_ecx(0x80000001);
     boot_cpu_data.x86_capability[5] = ecx;
     
@@ -443,7 +453,14 @@ int start_svm(void)
     eax |= EFER_SVME;
     wrmsr(MSR_EFER, eax, edx);
     asidpool_init(smp_processor_id());    
-    printk("AMD SVM Extension is enabled for cpu %d.\n", smp_processor_id());
+    printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
+
+    /* Initialize the HSA for this core */
+    host_save_area[ cpu ] = alloc_host_save_area();
+    phys_hsa = (u64) virt_to_maddr( host_save_area[ cpu ] ); 
+    phys_hsa_lo = (u32) phys_hsa;
+    phys_hsa_hi = (u32) (phys_hsa >> 32);    
+    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
     
     /* Setup HVM interfaces */
     hvm_funcs.disable = stop_svm;
@@ -546,20 +563,6 @@ void save_svm_cpu_user_regs(struct vcpu 
     ctxt->ds = vmcb->ds.sel;
 }
 
-#if defined (__x86_64__)
-void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v )
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    regs->rip    = vmcb->rip;
-    regs->rsp    = vmcb->rsp;
-    regs->rflags = vmcb->rflags;
-    regs->cs     = vmcb->cs.sel;
-    regs->ds     = vmcb->ds.sel;
-    regs->es     = vmcb->es.sel;
-    regs->ss     = vmcb->ss.sel;
-}
-#elif defined (__i386__)
 void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -571,11 +574,11 @@ void svm_store_cpu_user_regs(struct cpu_
     regs->ds     = vmcb->ds.sel;
     regs->es     = vmcb->es.sel;
     regs->ss     = vmcb->ss.sel;
-}
-#endif
+    regs->fs     = vmcb->fs.sel;
+    regs->gs     = vmcb->gs.sel;
+}
 
 /* XXX Use svm_load_cpu_guest_regs instead */
-#if defined (__i386__)
 void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
 { 
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -588,30 +591,17 @@ void svm_load_cpu_user_regs(struct vcpu 
     vmcb->rflags   = regs->eflags;
     vmcb->cs.sel   = regs->cs;
     vmcb->rip      = regs->eip;
+
+    vmcb->ds.sel   = regs->ds;
+    vmcb->es.sel   = regs->es;
+    vmcb->fs.sel   = regs->fs;
+    vmcb->gs.sel   = regs->gs;
+
     if (regs->eflags & EF_TF)
         *intercepts |= EXCEPTION_BITMAP_DB;
     else
         *intercepts &= ~EXCEPTION_BITMAP_DB;
 }
-#else /* (__i386__) */
-void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts;
-    
-    /* Write the guest register value into VMCB */
-    vmcb->rax      = regs->rax;
-    vmcb->ss.sel   = regs->ss;
-    vmcb->rsp      = regs->rsp;   
-    vmcb->rflags   = regs->rflags;
-    vmcb->cs.sel   = regs->cs;
-    vmcb->rip      = regs->rip;
-    if (regs->rflags & EF_TF)
-        *intercepts |= EXCEPTION_BITMAP_DB;
-    else
-        *intercepts &= ~EXCEPTION_BITMAP_DB;
-}
-#endif /* !(__i386__) */
 
 int svm_paging_enabled(struct vcpu *v)
 {
@@ -735,10 +725,6 @@ static void svm_relinquish_guest_resourc
     {
         if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
             continue;
-#if 0
-        /* Memory leak by not freeing this. XXXKAF: *Why* is not per core?? */
-        free_host_save_area(v->arch.hvm_svm.host_save_area);
-#endif
 
         destroy_vmcb(&v->arch.hvm_svm);
         free_monitor_pagetable(v);
diff -r bbce4d115189 -r e1a47a269600 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Thu May 04 10:25:27 2006 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Thu May 04 11:14:45 2006 +0100
@@ -36,9 +36,11 @@
 #include <xen/kernel.h>
 #include <xen/domain_page.h>
 
+extern struct host_save_area *host_save_area[];
 extern int svm_dbg_on;
 extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
                                   int oldcore, int newcore);
+extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
 
 #define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
 
@@ -309,8 +311,6 @@ int construct_vmcb(struct arch_svm_struc
 {
     int error;
     long rc=0;
-    struct host_save_area *hsa = NULL;
-    u64 phys_hsa;
 
     memset(arch_svm, 0, sizeof(struct arch_svm_struct));
 
@@ -320,36 +320,9 @@ int construct_vmcb(struct arch_svm_struc
         goto err_out;
     }
 
-    /* 
-     * The following code is for allocating host_save_area.
-     * Note: We either allocate a Host Save Area per core or per VCPU. 
-     * However, we do not want a global data structure 
-     * for HSA per core, we decided to implement a HSA for each VCPU. 
-     * It will waste space since VCPU number is larger than core number. 
-     * But before we find a better place for HSA for each core, we will 
-     * stay will this solution.
-     */
-
-    if (!(hsa = alloc_host_save_area())) 
-    {
-        printk("Failed to allocate Host Save Area\n");
-        rc = -ENOMEM;
-        goto err_out;
-    }
-
-    phys_hsa = (u64) virt_to_maddr(hsa);
-    arch_svm->host_save_area = hsa;
-    arch_svm->host_save_pa   = phys_hsa;
-
+    /* update the HSA for the current Core */
+    set_hsa_to_guest( arch_svm );
     arch_svm->vmcb_pa  = (u64) virt_to_maddr(arch_svm->vmcb);
-
-    if ((error = load_vmcb(arch_svm, arch_svm->host_save_pa))) 
-    {
-        printk("construct_vmcb: load_vmcb failed: VMCB = %lx\n",
-               (unsigned long) arch_svm->host_save_pa);
-        rc = -EINVAL;         
-        goto err_out;
-    }
 
     if ((error = construct_vmcb_controls(arch_svm))) 
     {
@@ -458,18 +431,11 @@ void svm_do_launch(struct vcpu *v)
 }
 
 
-int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa) 
-{
-    u32 phys_hsa_lo, phys_hsa_hi;
-    
-    phys_hsa_lo = (u32) phys_hsa;
-    phys_hsa_hi = (u32) (phys_hsa >> 32);
-    
-    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
-    set_bit(ARCH_SVM_VMCB_LOADED, &arch_svm->flags); 
-    return 0;
-}
-
+void set_hsa_to_guest( struct arch_svm_struct *arch_svm ) 
+{
+    arch_svm->host_save_area = host_save_area[ smp_processor_id() ];
+    arch_svm->host_save_pa   = (u64)virt_to_maddr( arch_svm->host_save_area );
+}
 
 /* 
  * Resume the guest.
@@ -481,6 +447,9 @@ void svm_do_resume(struct vcpu *v)
     struct hvm_time_info *time_info = &vpit->time_info;
 
     svm_stts(v);
+
+    /* make sure the HSA is set for the current core */
+    set_hsa_to_guest( &v->arch.hvm_svm );
     
     /* pick up the elapsed PIT ticks and re-enable pit_timer */
     if ( time_info->first_injected ) {
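
A closing note on the svm_do_resume() hunk above: set_hsa_to_guest() is
re-run on every resume because the scheduler may have moved the VCPU to a
different core since it last ran, so the cached host_save_pa must follow
smp_processor_id(). A hedged sketch of the intended flow (the function name
and any detail beyond the hunk above are illustrative assumptions):

    /* Sketch of the resume path as implied by the hunk above. */
    void svm_do_resume_sketch(struct vcpu *v)
    {
        svm_stts(v);

        /* The VCPU may now be running on a different core than before,
         * so repoint host_save_area/host_save_pa at this core's HSA. */
        set_hsa_to_guest(&v->arch.hvm_svm);

        /* ... elapsed PIT tick handling continues as in the hunk above ... */
    }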
