[Xen-devel] [PATCH] x86/svm: adjust VM entry/exit code

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] x86/svm: adjust VM entry/exit code
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Mon, 14 May 2007 17:40:59 +0200
Move the saving of rAX into assembly code, to match its restoration. Simplify
the assembly code, at the same time reducing the delta between the 32- and
64-bit variants. Eliminate save_svm_cpu_user_regs(); as on VMX, all consumers
are now required to call hvm_store_cpu_guest_regs() instead.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
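
For reference, the consumer pattern implied by the description above looks
roughly like the sketch below. This is an illustration only, not part of the
patch: example_consumer() is a hypothetical helper, while
guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES and hvm_store_cpu_guest_regs()
are the names already used in the svm_io_instruction() hunk further down.

/*
 * Illustrative sketch (hypothetical helper, relying on the usual Xen-internal
 * headers): with save_svm_cpu_user_regs() gone, a caller that needs current
 * guest state refreshes it from the VMCB explicitly.
 */
static void example_consumer(struct vcpu *v)
{
    struct cpu_user_regs regs;

    /* Start from the register frame on the hypervisor stack... */
    memcpy(&regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);

    /* ...then pull the VMCB-held guest state (rip, rsp, rflags, segment
     * selectors); rAX itself is now kept current by the entry/exit stubs. */
    hvm_store_cpu_guest_regs(v, &regs, NULL);
}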

Index: 2007-05-14/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/svm/svm.c  2007-05-14 14:28:19.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/svm/svm.c       2007-05-14 14:33:08.000000000 +0200
@@ -747,28 +747,10 @@ static void svm_init_hypercall_page(stru
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
-static void save_svm_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *ctxt)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    ctxt->eax = vmcb->rax;
-    ctxt->ss = vmcb->ss.sel;
-    ctxt->esp = vmcb->rsp;
-    ctxt->eflags = vmcb->rflags;
-    ctxt->cs = vmcb->cs.sel;
-    ctxt->eip = vmcb->rip;
-    
-    ctxt->gs = vmcb->gs.sel;
-    ctxt->fs = vmcb->fs.sel;
-    ctxt->es = vmcb->es.sel;
-    ctxt->ds = vmcb->ds.sel;
-}
-
 static void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     
-    vmcb->rax      = regs->eax;
     vmcb->ss.sel   = regs->ss;
     vmcb->rsp      = regs->esp;   
     vmcb->rflags   = regs->eflags | 2UL;
@@ -1408,7 +1390,7 @@ static void svm_io_instruction(struct vc
 
     /* Copy current guest state into io instruction state structure. */
     memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
-    hvm_store_cpu_guest_regs(v, regs, NULL);
+    svm_store_cpu_guest_regs(v, regs, NULL);
 
     info.bytes = vmcb->exitinfo1;
 
@@ -2236,7 +2218,6 @@ asmlinkage void svm_vmexit_handler(struc
     int inst_len, rc;
 
     exit_reason = vmcb->exitcode;
-    save_svm_cpu_user_regs(v, regs);
 
     HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason);
 
Index: 2007-05-14/xen/arch/x86/hvm/svm/x86_32/exits.S
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/svm/x86_32/exits.S 2007-04-23 10:01:41.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/svm/x86_32/exits.S      2007-05-14 14:33:08.000000000 +0200
@@ -61,8 +61,11 @@
 #define HVM_SAVE_ALL_NOSEGREGS \
         pushl $HVM_MONITOR_EFLAGS; \
         popf; \
-        subl $(NR_SKIPPED_REGS*4), %esp; \
-        pushl %eax; \
+        /* \
+         * Skip %eax, we need to have vmcb address in there. \
+         * Don't worry, EAX is saved during #VMEXIT. \
+         */ \
+        subl $4+(NR_SKIPPED_REGS*4), %esp; \
         pushl %ebp; \
         pushl %edi; \
         pushl %esi; \
@@ -77,8 +80,11 @@
         popl %esi;  \
         popl %edi;  \
         popl %ebp;  \
-        popl %eax;  \
-        addl $(NR_SKIPPED_REGS*4), %esp
+        /* \
+         * Skip %eax, we need to have vmcb address in there. \
+         * Don't worry, EAX is restored through the VMRUN instruction. \
+         */ \
+        addl $4+(NR_SKIPPED_REGS*4), %esp
 
 #define VMRUN  .byte 0x0F,0x01,0xD8
 #define VMLOAD .byte 0x0F,0x01,0xDA
@@ -88,63 +94,53 @@
 
 ENTRY(svm_asm_do_resume)
         GET_CURRENT(%ebx)
-        xorl %ecx,%ecx
-        notl %ecx
+.Lresume:
         cli                             # tests must not race interrupts
         movl VCPU_processor(%ebx),%eax
         shl  $IRQSTAT_shift,%eax
-        test %ecx,irq_stat(%eax,1)
-        jnz  svm_process_softirqs
+        cmpl $0,irq_stat(%eax)
+        jne  svm_process_softirqs
         call svm_intr_assist
         call svm_load_cr2
 
         CLGI                
         sti
-        GET_CURRENT(%ebx)
         movl VCPU_svm_vmcb(%ebx), %ecx
-        movl 24(%esp), %eax
+        movl UREGS_eax(%esp), %eax
+        movl VCPU_processor(%ebx), %edx
         movl %eax, VMCB_rax(%ecx)
-        movl VCPU_processor(%ebx), %eax
-        movl root_vmcb_pa(,%eax,8), %eax
+        movl root_vmcb_pa(,%edx,8), %eax
         VMSAVE
 
         movl VCPU_svm_vmcb_pa(%ebx), %eax
-        popl %ebx
-        popl %ecx
-        popl %edx
-        popl %esi
-        popl %edi
-        popl %ebp
-
-        /* 
-         * Skip %eax, we need to have vmcb address in there.
-         * Don't worry, EAX is restored through the VMRUN instruction.
-         */
-        addl $4, %esp       
-        addl $(NR_SKIPPED_REGS*4), %esp
+        HVM_RESTORE_ALL_NOSEGREGS
+
         VMLOAD
         VMRUN
         VMSAVE
-        /* eax is the only register we're allowed to touch here... */
 
-        GET_CURRENT(%eax)
+        HVM_SAVE_ALL_NOSEGREGS
 
-        movl VCPU_processor(%eax), %eax
-        movl root_vmcb_pa(,%eax,8), %eax
+        GET_CURRENT(%ebx)
+        movl VCPU_processor(%ebx), %ecx
+        movl VCPU_svm_vmcb(%ebx), %edx
+        movl root_vmcb_pa(,%ecx,8), %eax
         VMLOAD
+        movl VMCB_rax(%edx), %eax
 
-        HVM_SAVE_ALL_NOSEGREGS
         STGI
 .globl svm_stgi_label;
 svm_stgi_label:
+
+        movl %eax, UREGS_eax(%esp)
         movl %esp,%eax
         push %eax
         call svm_vmexit_handler
         addl $4,%esp
-        jmp  svm_asm_do_resume
+        jmp  .Lresume
 
         ALIGN
 svm_process_softirqs:
         sti       
         call do_softirq
-        jmp  svm_asm_do_resume
+        jmp  .Lresume
Index: 2007-05-14/xen/arch/x86/hvm/svm/x86_64/exits.S
===================================================================
--- 2007-05-14.orig/xen/arch/x86/hvm/svm/x86_64/exits.S 2007-04-23 10:01:41.000000000 +0200
+++ 2007-05-14/xen/arch/x86/hvm/svm/x86_64/exits.S      2007-05-14 14:33:08.000000000 +0200
@@ -85,7 +85,11 @@
         popq %r10; \
         popq %r9;  \
         popq %r8;  \
-        popq %rax; \
+        /* \
+         * Discard %rax, we need to have vmcb address in there. \
+         * Don't worry, RAX is restored through the VMRUN instruction. \
+         */ \
+        addq $8, %rsp; \
         popq %rcx; \
         popq %rdx; \
         popq %rsi; \
@@ -100,68 +104,54 @@
 
 ENTRY(svm_asm_do_resume)
         GET_CURRENT(%rbx)
+.Lresume:
         cli                             # tests must not race interrupts
         movl VCPU_processor(%rbx),%eax
         shl  $IRQSTAT_shift, %rax
         leaq irq_stat(%rip), %rdx
-        testl $~0, (%rdx, %rax, 1)
-        jnz  svm_process_softirqs
+        cmpl $0, (%rdx, %rax)
+        jne  svm_process_softirqs
         call svm_intr_assist
         call svm_load_cr2
 
         CLGI                
         sti
-        GET_CURRENT(%rbx)
         movq VCPU_svm_vmcb(%rbx), %rcx
         movq UREGS_rax(%rsp), %rax
+        movl VCPU_processor(%rbx), %edx
         movq %rax, VMCB_rax(%rcx)
         leaq root_vmcb_pa(%rip), %rax
-        movl VCPU_processor(%rbx), %ecx
-        movq (%rax,%rcx,8), %rax
+        movq (%rax,%rdx,8), %rax
         VMSAVE
 
         movq VCPU_svm_vmcb_pa(%rbx), %rax
-        popq %r15
-        popq %r14
-        popq %r13
-        popq %r12
-        popq %rbp
-        popq %rbx
-        popq %r11
-        popq %r10
-        popq %r9
-        popq %r8
-        /*
-         * Skip %rax, we need to have vmcb address in there.
-         * Don't worry, RAX is restored through the VMRUN instruction.
-         */
-        addq $8, %rsp
-        popq %rcx
-        popq %rdx
-        popq %rsi
-        popq %rdi
-        addq $(NR_SKIPPED_REGS*8), %rsp
+        HVM_RESTORE_ALL_NOSEGREGS
 
         VMLOAD
         VMRUN
         VMSAVE
+
         HVM_SAVE_ALL_NOSEGREGS
 
         GET_CURRENT(%rbx)
-        leaq root_vmcb_pa(%rip), %rax
         movl VCPU_processor(%rbx), %ecx
+        leaq root_vmcb_pa(%rip), %rax
+        movq VCPU_svm_vmcb(%rbx), %rdx
         movq (%rax,%rcx,8), %rax
         VMLOAD
+        movq VMCB_rax(%rdx), %rax
 
         STGI
 .globl svm_stgi_label;
 svm_stgi_label:
+
+        movq %rax, UREGS_rax(%rsp)
         movq %rsp,%rdi
         call svm_vmexit_handler
-        jmp  svm_asm_do_resume
+        jmp  .Lresume
 
         ALIGN
 svm_process_softirqs:
         sti
         call do_softirq
-        jmp  svm_asm_do_resume
+        jmp  .Lresume
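
For readers skimming the assembly, the net rAX handling after this patch can
be summarised in C-like form as below. This is a sketch only (the helper name
is made up); the real flow lives in the exits.S stubs above, with UREGS_eax
and VMCB_rax being the asm-offsets constants for the respective fields.

/* Hypothetical summary of the rAX round trip around VMRUN. */
static void svm_rax_round_trip(struct vmcb_struct *vmcb,
                               struct cpu_user_regs *regs)
{
    /* Entry stub: copy the frame's eax into the VMCB before VMRUN. */
    vmcb->rax = regs->eax;

    /* VMRUN loads guest rAX from the VMCB; on #VMEXIT the hardware saves
     * the guest's rAX back into the VMCB, and %eax once again holds the
     * VMCB address - which is why the stack slot is merely skipped. */

    /* Exit stub: copy the VMCB value back into the frame, so that
     * svm_vmexit_handler() sees a current regs->eax without a separate
     * save_svm_cpu_user_regs() pass. */
    regs->eax = vmcb->rax;
}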


