[Xen-changelog] Fix SVM guest pin timer migration logic.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Fix SVM guest pin timer migration logic.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 28 Feb 2006 11:28:07 +0000
Delivery-date: Tue, 28 Feb 2006 11:28:27 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 0820df08a996d332d2edab73fb0524346772ee0f
# Parent  985879d9d935c7de0b93ec6ed8b61d280569382b
Fix SVM guest pin timer migration logic.

Signed-off-by: Tom Woller <thomas.woller@xxxxxxx>

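For readers skimming the diff below: the gist of the fix is in the resume path. A VCPU records the physical core it last launched on; if it resumes on a different core (for example after being pinned elsewhere), the domain's PIT timer, the HLT timer and, where present, the local APIC timer are migrated to the new core before the VMRUN, and the recorded launch core is updated. The standalone sketch below only models that decision; struct vcpu_sketch, migrate_timer_sketch and resume_sketch are hypothetical stand-ins, not the Xen API.

#include <stdio.h>

/* Illustrative stand-ins for the Xen structures and helpers this patch
 * touches; they are NOT the real Xen API, just enough to model the logic. */
struct vcpu_sketch {
    int launch_core;     /* core the VCPU last performed a VMRUN on */
    int pit_timer_cpu;   /* CPU the domain's PIT timer currently fires on */
    int hlt_timer_cpu;
    int apic_timer_cpu;
};

/* Stand-in for Xen's migrate_timer(): rebind a timer to another CPU. */
static void migrate_timer_sketch(int *timer_cpu, int new_cpu)
{
    *timer_cpu = new_cpu;
}

/* Models arch_svm_do_resume(): only when the VCPU resumes on a core other
 * than the one it launched on are the guest timers migrated first. */
static void resume_sketch(struct vcpu_sketch *v, int current_core)
{
    if (v->launch_core != current_core) {
        printf("VCPU core pinned: %d to %d\n", v->launch_core, current_core);
        v->launch_core = current_core;
        migrate_timer_sketch(&v->pit_timer_cpu, current_core);
        migrate_timer_sketch(&v->hlt_timer_cpu, current_core);
        migrate_timer_sketch(&v->apic_timer_cpu, current_core);
    }
    /* ...followed by the normal resume path (svm_do_resume + VMRUN). */
}

int main(void)
{
    struct vcpu_sketch v = { 0, 0, 0, 0 };
    resume_sketch(&v, 2);   /* pretend the VCPU was pinned to core 2 */
    printf("timers now bound to core %d\n", v.pit_timer_cpu);
    return 0;
}
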
diff -r 985879d9d935 -r 0820df08a996 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue Feb 28 09:12:29 2006
+++ b/xen/arch/x86/hvm/svm/svm.c        Tue Feb 28 09:13:26 2006
@@ -820,8 +820,29 @@
 
 void arch_svm_do_resume(struct vcpu *v) 
 {
-    svm_do_resume(v);
-    reset_stack_and_jump(svm_asm_do_resume);
+    /* pinning VCPU to a different core? */
+    if ( v->arch.hvm_svm.launch_core == smp_processor_id()) {
+        svm_do_resume( v );
+        reset_stack_and_jump( svm_asm_do_resume );
+    }
+    else {
+        printk("VCPU core pinned: %d to %d\n", v->arch.hvm_svm.launch_core, 
smp_processor_id() );
+        v->arch.hvm_svm.launch_core = smp_processor_id();
+        svm_migrate_timers( v );
+        svm_do_resume( v );
+        reset_stack_and_jump( svm_asm_do_resume );
+    }
+}
+
+
+void svm_migrate_timers(struct vcpu *v)
+{
+    struct hvm_virpit *vpit = &(v->domain->arch.hvm_domain.vpit);
+
+    migrate_timer( &vpit->pit_timer, v->processor );
+    migrate_timer( &v->arch.hvm_svm.hlt_timer, v->processor );
+    if ( hvm_apic_support(v->domain) && VLAPIC( v ))
+        migrate_timer( &(VLAPIC(v)->vlapic_timer ), v->processor );
 }
 
 
@@ -2668,26 +2689,23 @@
 {
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    int core = smp_processor_id();
-    int oldcore = v->arch.hvm_svm.core; 
-    /* 
-     * if need to assign new asid or if switching cores, 
-     * then retire asid for old core, and assign new for new core.
-     */
-    if( v->arch.hvm_svm.core != core ) {
-        if (svm_dbg_on)
-            printk("old core %d new core 
%d\n",(int)v->arch.hvm_svm.core,(int)core);
-        v->arch.hvm_svm.core = core;
-    }
-    if( test_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags) ||
-          (oldcore != core)) {
-        if(!asidpool_assign_next(vmcb, 1, 
-                   oldcore, core)) {
+
+   /*
+    * if need to assign new asid, or if switching cores,
+    * retire asid for the old core, and assign a new asid to the current core.
+    */
+    if ( test_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags ) ||
+       ( v->arch.hvm_svm.asid_core != v->arch.hvm_svm.launch_core )) {
+        /* recycle asid */
+        if ( !asidpool_assign_next( vmcb, 1,
+            v->arch.hvm_svm.asid_core, v->arch.hvm_svm.launch_core )) {
             /* If we get here, we have a major problem */
             domain_crash_synchronous();
         }
-    }
-    clear_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
+
+        v->arch.hvm_svm.asid_core = v->arch.hvm_svm.launch_core;
+        clear_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags );
+    }
 }
 
 /*
diff -r 985879d9d935 -r 0820df08a996 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Tue Feb 28 09:12:29 2006
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Tue Feb 28 09:13:26 2006
@@ -306,7 +306,7 @@
 {
     if(arch_svm->vmcb != NULL)
     {
-        asidpool_retire(arch_svm->vmcb, arch_svm->core);
+        asidpool_retire(arch_svm->vmcb, arch_svm->asid_core);
          free_vmcb(arch_svm->vmcb);
     }
     if(arch_svm->iopm != NULL) {
@@ -404,18 +404,17 @@
 
 void svm_do_launch(struct vcpu *v)
 {
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    int core = smp_processor_id();
+    ASSERT(vmcb);
+
     /* Update CR3, GDT, LDT, TR */
-    struct vmcb_struct *vmcb;
-    int core = smp_processor_id();
-    vmcb = v->arch.hvm_svm.vmcb;
-    ASSERT(vmcb);
-
     svm_stts(v);
 
-    /* current core is the one we will perform the vmrun on */
-    v->arch.hvm_svm.core = core;
+    /* current core is the one we intend to perform the VMRUN on */
+    v->arch.hvm_svm.launch_core = v->arch.hvm_svm.asid_core = core;
     clear_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
-    if ( !asidpool_assign_next(vmcb, 0, core, core) )
+    if ( !asidpool_assign_next( vmcb, 0, core, core ))
         BUG();
 
     if (v->vcpu_id == 0)
diff -r 985879d9d935 -r 0820df08a996 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Tue Feb 28 09:12:29 2006
+++ b/xen/include/asm-x86/hvm/svm/svm.h Tue Feb 28 09:13:26 2006
@@ -54,6 +54,8 @@
 /* For debugging. Remove when no longer needed. */
 extern void svm_dump_host_regs(const char *from);
 
+extern void svm_migrate_timers(struct vcpu *v);
+
 /* ASID API */
 enum {
     ASID_AVAILABLE = 0,
diff -r 985879d9d935 -r 0820df08a996 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Feb 28 09:12:29 2006
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Tue Feb 28 09:13:26 2006
@@ -457,7 +457,8 @@
     u64                 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
     int                 injecting_event;
     int                 saved_irq_vector;
-    u32                 core;        /* cpu of last vmexit */
+    u32                 launch_core;
+    u32                 asid_core;
     
     unsigned long       flags;      /* VMCB flags */
     unsigned long       cpu_shadow_cr0; /* copy of guest read shadow CR0 */
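
A companion note on the ASID bookkeeping in the diff above: the former single "core" field is split into launch_core (where the next VMRUN will happen) and asid_core (which core's ASID pool currently owns the VCPU's ASID), and an ASID is retired and re-assigned whenever the ASSIGN_ASID flag is set or the two cores differ. Below is a minimal sketch of that rule, again with hypothetical stand-ins (asid_sketch, asid_reassign_sketch) rather than the real asidpool API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the per-VCPU state the patch introduces. */
struct asid_sketch {
    bool need_new_asid;   /* models the ARCH_SVM_VMCB_ASSIGN_ASID flag */
    int  launch_core;     /* core the next VMRUN will happen on */
    int  asid_core;       /* core whose ASID pool owns the current ASID */
};

/* Stand-in for asidpool_assign_next(): retire the ASID held on old_core's
 * pool and take a fresh one from new_core's pool. */
static bool asid_reassign_sketch(int old_core, int new_core)
{
    printf("retire ASID on core %d, assign new ASID on core %d\n",
           old_core, new_core);
    return true;   /* the real pool can fail; the patch then crashes the domain */
}

static void update_asid_sketch(struct asid_sketch *s)
{
    if (s->need_new_asid || s->asid_core != s->launch_core) {
        if (!asid_reassign_sketch(s->asid_core, s->launch_core))
            return;                  /* real code: domain_crash_synchronous() */
        s->asid_core = s->launch_core;
        s->need_new_asid = false;    /* real code: clear_bit(ASSIGN_ASID, ...) */
    }
}

int main(void)
{
    struct asid_sketch s = { false, 2, 0 };
    update_asid_sketch(&s);   /* cores differ, so the ASID is recycled */
    return 0;
}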

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
