[Xen-changelog] Support VCPU migration

# HG changeset patch
# User adsharma@xxxxxxxxxxxxxxxxxxxx
# Node ID e4ad3feadd4e706b630858652de2c601616ce6d0
# Parent  b370beb3e107c21aa848da11b6d15dff8e70453c
Support VCPU migration

Reorganize the low-level asm code to support relaunching a VMCS on a different
logical CPU.

Signed-off-by: Yunhong Jiang <yunhong.jiang@xxxxxxxxx>
Signed-off-by: Arun Sharma <arun.sharma@xxxxxxxxx>
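
In outline, the patch turns v->arch.schedule_tail into a small state machine:
steady-state runs use arch_vmx_do_resume (VMRESUME), and a migration arms a
one-shot arch_vmx_do_relaunch that reloads the VMCS on the new CPU before
falling back to the resume path. A minimal, runnable C model of that hand-off
follows; every name in it (vcpu_model, migrate, ...) is an illustrative
stand-in, not the real Xen API:

    #include <stdio.h>

    /* Illustrative model of the schedule_tail hand-off this patch adds.
     * All names are hypothetical stand-ins for the Xen structures. */

    struct vcpu_model {
        void (*schedule_tail)(struct vcpu_model *v);
        int processor;
    };

    static void do_resume(struct vcpu_model *v);

    static void do_relaunch(struct vcpu_model *v)
    {
        /* First run on the new CPU: reload the VMCS, rebuild per-CPU
         * host state, then revert to the normal resume path. */
        printf("cpu%d: load_vmcs + vmx_set_host_env, then VMLAUNCH\n",
               v->processor);
        v->schedule_tail = do_resume;
    }

    static void do_resume(struct vcpu_model *v)
    {
        printf("cpu%d: VMRESUME\n", v->processor);
    }

    static void migrate(struct vcpu_model *v, int newcpu)
    {
        if (v->processor != newcpu) {
            /* __vmpclear flushes the VMCS so another CPU may load it. */
            printf("cpu%d: __vmpclear\n", v->processor);
            v->processor = newcpu;
            v->schedule_tail = do_relaunch;
        }
    }

    int main(void)
    {
        struct vcpu_model v = { do_resume, 0 };
        v.schedule_tail(&v);   /* steady state on cpu0: VMRESUME */
        migrate(&v, 1);        /* arm the one-shot relaunch path */
        v.schedule_tail(&v);   /* first run on cpu1: VMLAUNCH    */
        v.schedule_tail(&v);   /* back to plain VMRESUME         */
        return 0;
    }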

diff -r b370beb3e107 -r e4ad3feadd4e xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Aug  9 19:06:44 2005
+++ b/xen/arch/x86/domain.c     Tue Aug  9 19:06:44 2005
@@ -303,6 +303,7 @@
     if ( VMX_DOMAIN(v) && (v->processor != newcpu) ){
         u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
         __vmpclear(vmcs_phys_ptr);
+        v->arch.schedule_tail = arch_vmx_do_relaunch;
     }
 }
 
@@ -325,6 +326,18 @@
     load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
     vmx_do_launch(v);
     reset_stack_and_jump(vmx_asm_do_launch);
+}
+
+void arch_vmx_do_relaunch(struct vcpu *v)
+{
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(v->arch.arch_vmx.vmcs);
+
+    load_vmcs(&v->arch.arch_vmx, vmcs_phys_ptr);
+    vmx_do_resume(v);
+    vmx_set_host_env(v);
+    v->arch.schedule_tail = arch_vmx_do_resume;
+
+    reset_stack_and_jump(vmx_asm_do_relaunch);
 }
 
 static int vmx_final_setup_guest(
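
Two details in arch_vmx_do_relaunch() above are worth calling out. load_vmcs()
must run before vmx_set_host_env(), because VMWRITE operates on whichever VMCS
is current on the logical CPU. And resetting v->arch.schedule_tail to
arch_vmx_do_resume makes the relaunch a one-shot trampoline: subsequent runs on
the same CPU take the plain VMRESUME path.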
diff -r b370beb3e107 -r e4ad3feadd4e xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   Tue Aug  9 19:06:44 2005
+++ b/xen/arch/x86/vmx_vmcs.c   Tue Aug  9 19:06:44 2005
@@ -198,7 +198,7 @@
     host_env.idtr_limit = desc.size;
     host_env.idtr_base = desc.address;
     error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
- 
+
     __asm__ __volatile__ ("sgdt  (%0) \n" :: "a"(&desc) : "memory");
     host_env.gdtr_limit = desc.size;
     host_env.gdtr_base = desc.address;
@@ -210,7 +210,6 @@
     host_env.tr_base = (unsigned long) &init_tss[cpu];
     error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
     error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
-
 }
 
 void vmx_do_launch(struct vcpu *v) 
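
The hunks above are whitespace-only, but vmx_set_host_env() (now exported; see
the vmx_vmcs.h hunk below) is central to migration: the host GDTR/IDTR bases
and the TR base (&init_tss[cpu]) it writes into the VMCS are all per-CPU, so
they must be rewritten once the VMCS lands on a new CPU. As a sketch of why,
the same SGDT instruction the function uses can be run standalone; this assumes
an x86 build, and SGDT will fault from user mode if CR4.UMIP is set:

    #include <stdint.h>
    #include <stdio.h>

    /* The GDTR read below is specific to the CPU this thread happens
     * to run on, which is why the VMCS host-state fields go stale
     * after a migration.  Layout matches the SGDT memory operand. */
    struct __attribute__((packed)) desc_ptr {
        uint16_t size;         /* limit */
        unsigned long address; /* base  */
    };

    int main(void)
    {
        struct desc_ptr gdtr;
        __asm__ __volatile__ ("sgdt %0" : "=m"(gdtr));
        printf("GDTR base=%#lx limit=%#x\n", gdtr.address, gdtr.size);
        return 0;
    }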
diff -r b370beb3e107 -r e4ad3feadd4e xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S       Tue Aug  9 19:06:44 2005
+++ b/xen/arch/x86/x86_32/entry.S       Tue Aug  9 19:06:44 2005
@@ -108,31 +108,26 @@
         pushl %ecx; \
         pushl %ebx;
 
+#define VMX_RESTORE_ALL_NOSEGREGS   \
+        popl %ebx;  \
+        popl %ecx;  \
+        popl %edx;  \
+        popl %esi;  \
+        popl %edi;  \
+        popl %ebp;  \
+        popl %eax;  \
+        addl $(NR_SKIPPED_REGS*4), %esp
+
 ENTRY(vmx_asm_vmexit_handler)
         /* selectors are restored/saved by VMX */
         VMX_SAVE_ALL_NOSEGREGS
         call vmx_vmexit_handler
         jmp vmx_asm_do_resume
 
-ENTRY(vmx_asm_do_launch)
-        popl %ebx
-        popl %ecx
-        popl %edx
-        popl %esi
-        popl %edi
-        popl %ebp
-        popl %eax
-        addl $(NR_SKIPPED_REGS*4), %esp
-        /* VMLUANCH */
-        .byte 0x0f,0x01,0xc2
-        pushf
-        call vm_launch_fail
-        hlt
-        
-        ALIGN
-        
-ENTRY(vmx_asm_do_resume)
-vmx_test_all_events:
+.macro vmx_asm_common launch initialized
+1:
+/* vmx_test_all_events */
+        .if \initialized
         GET_CURRENT(%ebx)
 /*test_all_events:*/
         xorl %ecx,%ecx
@@ -142,37 +137,52 @@
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
         test %ecx,irq_stat(%eax,1)
-        jnz  vmx_process_softirqs
-
-vmx_restore_all_guest:
+        jnz 2f
+
+/* vmx_restore_all_guest */
         call load_cr2
+        .endif
+        VMX_RESTORE_ALL_NOSEGREGS
         /* 
          * Check if we are going back to VMX-based VM
          * By this time, all the setups in the VMCS must be complete.
          */
-        popl %ebx
-        popl %ecx
-        popl %edx
-        popl %esi
-        popl %edi
-        popl %ebp
-        popl %eax
-        addl $(NR_SKIPPED_REGS*4), %esp
+        .if \launch
+        /* VMLAUNCH */
+        .byte 0x0f,0x01,0xc2
+        pushf
+        call vm_launch_fail
+        .else
         /* VMRESUME */
         .byte 0x0f,0x01,0xc3
         pushf
         call vm_resume_fail
+        .endif
         /* Should never reach here */
         hlt
 
         ALIGN
-vmx_process_softirqs:
+        .if \initialized
+2:
+/* vmx_process_softirqs */
         sti       
         call do_softirq
-        jmp  vmx_test_all_events
+        jmp 1b
+        ALIGN
+        .endif
+.endm
+
+ENTRY(vmx_asm_do_launch)
+    vmx_asm_common 1 0
+
+ENTRY(vmx_asm_do_resume)
+    vmx_asm_common 0 1
+
+ENTRY(vmx_asm_do_relaunch)
+    vmx_asm_common 1 1
+
 #endif
 
-        ALIGN
 restore_all_guest:
         testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
         jnz  restore_all_vm86
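
The rewrite above replaces two near-duplicate entry points with one
vmx_asm_common macro taking launch and initialized flags; vmx_asm_do_launch,
vmx_asm_do_resume, and vmx_asm_do_relaunch become three instantiations of the
same body (the x86_64 version below follows the same shape). The control flow
the macro generates corresponds roughly to this C sketch, with stand-in names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in: the real code tests irq_stat for the current vcpu. */
    static bool softirq_pending(void) { return false; }

    static void vmx_asm_common(bool launch, bool initialized)
    {
        if (initialized) {
            while (softirq_pending())      /* the 1:/2: retry loop */
                puts("sti; call do_softirq");
            puts("call load_cr2");         /* vmx_restore_all_guest */
        }
        puts("VMX_RESTORE_ALL_NOSEGREGS");
        if (launch)
            puts("VMLAUNCH  -> vm_launch_fail on error");
        else
            puts("VMRESUME  -> vm_resume_fail on error");
    }

    int main(void)
    {
        vmx_asm_common(true,  false);  /* vmx_asm_do_launch   */
        vmx_asm_common(false, true);   /* vmx_asm_do_resume   */
        vmx_asm_common(true,  true);   /* vmx_asm_do_relaunch */
        return 0;
    }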
diff -r b370beb3e107 -r e4ad3feadd4e xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Tue Aug  9 19:06:44 2005
+++ b/xen/arch/x86/x86_64/entry.S       Tue Aug  9 19:06:44 2005
@@ -194,39 +194,34 @@
         pushq %r14; \
         pushq %r15; \
 
+#define VMX_RESTORE_ALL_NOSEGREGS \
+        popq %r15; \
+        popq %r14; \
+        popq %r13; \
+        popq %r12; \
+        popq %rbp; \
+        popq %rbx; \
+        popq %r11; \
+        popq %r10; \
+        popq %r9;  \
+        popq %r8;  \
+        popq %rax; \
+        popq %rcx; \
+        popq %rdx; \
+        popq %rsi; \
+        popq %rdi; \
+        addq $(NR_SKIPPED_REGS*8), %rsp; \
+
 ENTRY(vmx_asm_vmexit_handler)
         /* selectors are restored/saved by VMX */
         VMX_SAVE_ALL_NOSEGREGS
         call vmx_vmexit_handler
         jmp vmx_asm_do_resume
 
-ENTRY(vmx_asm_do_launch)
-        popq %r15
-        popq %r14
-        popq %r13
-        popq %r12
-        popq %rbp
-        popq %rbx
-        popq %r11
-        popq %r10
-        popq %r9
-        popq %r8
-        popq %rax
-        popq %rcx
-        popq %rdx
-        popq %rsi
-        popq %rdi
-        addq $(NR_SKIPPED_REGS*8), %rsp
-        /* VMLUANCH */
-        .byte 0x0f,0x01,0xc2
-        pushfq
-        call vm_launch_fail
-        hlt
-        
-        ALIGN
-        
-ENTRY(vmx_asm_do_resume)
-vmx_test_all_events:
+.macro vmx_asm_common launch initialized 
+1:
+        .if \initialized
+/* vmx_test_all_events */
         GET_CURRENT(%rbx)
 /* test_all_events: */
         cli                             # tests must not race interrupts
@@ -235,42 +230,51 @@
         shl   $IRQSTAT_shift,%rax
         leaq  irq_stat(%rip), %rdx
         testl $~0,(%rdx,%rax,1)
-        jnz   vmx_process_softirqs
-
-vmx_restore_all_guest:
+        jnz  2f 
+
+/* vmx_restore_all_guest */
         call load_cr2
+        .endif
         /* 
          * Check if we are going back to VMX-based VM
          * By this time, all the setups in the VMCS must be complete.
          */
-        popq %r15
-        popq %r14
-        popq %r13
-        popq %r12
-        popq %rbp
-        popq %rbx
-        popq %r11
-        popq %r10
-        popq %r9
-        popq %r8
-        popq %rax
-        popq %rcx
-        popq %rdx
-        popq %rsi
-        popq %rdi
-        addq $(NR_SKIPPED_REGS*8), %rsp
+        VMX_RESTORE_ALL_NOSEGREGS
+        .if \launch
+        /* VMLAUNCH */
+        .byte 0x0f,0x01,0xc2
+        pushfq
+        call vm_launch_fail
+        .else
         /* VMRESUME */
         .byte 0x0f,0x01,0xc3
         pushfq
         call vm_resume_fail
+        .endif
         /* Should never reach here */
         hlt
 
         ALIGN
-vmx_process_softirqs:
+
+        .if \initialized
+2:
+/* vmx_process_softirqs */
         sti       
         call do_softirq
-        jmp  vmx_test_all_events
+        jmp 1b
+        ALIGN
+        .endif
+.endm
+
+ENTRY(vmx_asm_do_launch)
+      vmx_asm_common 1 0
+
+ENTRY(vmx_asm_do_resume)
+      vmx_asm_common 0 1
+
+ENTRY(vmx_asm_do_relaunch)
+      vmx_asm_common 1 1
+
 #endif
 
         ALIGN
diff -r b370beb3e107 -r e4ad3feadd4e xen/include/asm-x86/vmx.h
--- a/xen/include/asm-x86/vmx.h Tue Aug  9 19:06:44 2005
+++ b/xen/include/asm-x86/vmx.h Tue Aug  9 19:06:44 2005
@@ -35,6 +35,7 @@
 
 extern void arch_vmx_do_launch(struct vcpu *);
 extern void arch_vmx_do_resume(struct vcpu *);
+extern void arch_vmx_do_relaunch(struct vcpu *);
 
 extern int vmcs_size;
 extern unsigned int cpu_rev;
diff -r b370beb3e107 -r e4ad3feadd4e xen/include/asm-x86/vmx_vmcs.h
--- a/xen/include/asm-x86/vmx_vmcs.h    Tue Aug  9 19:06:44 2005
+++ b/xen/include/asm-x86/vmx_vmcs.h    Tue Aug  9 19:06:44 2005
@@ -93,6 +93,7 @@
 
 void vmx_do_launch(struct vcpu *); 
 void vmx_do_resume(struct vcpu *); 
+void vmx_set_host_env(struct vcpu *);
 
 struct vmcs_struct *alloc_vmcs(void);
 void free_vmcs(struct vmcs_struct *);
