WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, preserved to ensure that existing links into the archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

[Xen-changelog] [xen-unstable] x86, hvm: Merge 32-bit and 64-bit asm stubs into common files.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86, hvm: Merge 32-bit and 64-bit asm stubs into common files.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 22 Apr 2008 07:10:37 -0700
Delivery-date: Tue, 22 Apr 2008 07:13:38 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1208786115 -3600
# Node ID d03f1c098a1e24757cf033cc5494494bd04ba04c
# Parent  491074885dcb2e463ad9ae4397f395b2b1ae7792
x86, hvm: Merge 32-bit and 64-bit asm stubs into common files.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/x86_32/Makefile |    1 
 xen/arch/x86/hvm/svm/x86_32/exits.S  |  126 ----------------------
 xen/arch/x86/hvm/svm/x86_64/Makefile |    1 
 xen/arch/x86/hvm/svm/x86_64/exits.S  |  143 -------------------------
 xen/arch/x86/hvm/vmx/x86_32/Makefile |    1 
 xen/arch/x86/hvm/vmx/x86_32/exits.S  |  147 -------------------------
 xen/arch/x86/hvm/vmx/x86_64/Makefile |    1 
 xen/arch/x86/hvm/vmx/x86_64/exits.S  |  165 -----------------------------
 xen/arch/x86/hvm/svm/Makefile        |    4 
 xen/arch/x86/hvm/svm/entry.S         |  178 +++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/vmx/Makefile        |    4 
 xen/arch/x86/hvm/vmx/entry.S         |  198 +++++++++++++++++++++++++++++++++++
 12 files changed, 378 insertions(+), 591 deletions(-)
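
The new entry.S files below are written once and assembled for either word
size through a few C-preprocessor helpers. As an illustrative sketch (not
part of the patch itself), the r() macro pastes the architecture prefix onto
a bare register name, and call_with_regs() hides the two calling conventions
used to hand the saved-register frame to a C handler:

/* Sketch only: the width-abstraction helpers shared by both entry.S files. */
#if defined(__x86_64__)
#define r(reg) %r##reg          /* r(bx) expands to %rbx */
#define call_with_regs(fn) mov %rsp,%rdi; call fn;      /* regs ptr in %rdi */
#else /* defined(__i386__) */
#define r(reg) %e##reg          /* r(bx) expands to %ebx */
#define call_with_regs(fn) mov %esp,%eax; push %eax; call fn; add $4,%esp;
#endif

        mov  (r(sp)),r(bx)      /* assembles as movl or movq, as appropriate */

get_current() builds on the same idea: each CPU stack is STACK_SIZE-aligned
and its topmost word holds the pointer to the current vcpu, so ORing the
stack pointer into STACK_SIZE-BYTES_PER_LONG and masking off the low bits
addresses that word directly.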

diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/svm/Makefile
--- a/xen/arch/x86/hvm/svm/Makefile     Mon Apr 21 12:23:55 2008 +0100
+++ b/xen/arch/x86/hvm/svm/Makefile     Mon Apr 21 14:55:15 2008 +0100
@@ -1,8 +1,6 @@ subdir-$(x86_32) += x86_32
-subdir-$(x86_32) += x86_32
-subdir-$(x86_64) += x86_64
-
 obj-y += asid.o
 obj-y += emulate.o
+obj-y += entry.o
 obj-y += intr.o
 obj-y += svm.o
 obj-y += vmcb.o
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/svm/entry.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/svm/entry.S      Mon Apr 21 14:55:15 2008 +0100
@@ -0,0 +1,178 @@
+/*
+ * entry.S: SVM architecture-specific entry/exit handling.
+ * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
+ * Copyright (c) 2004, Intel Corporation.
+ * Copyright (c) 2008, Citrix Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <xen/softirq.h>
+#include <asm/types.h>
+#include <asm/asm_defns.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#include <public/xen.h>
+
+#define VMRUN  .byte 0x0F,0x01,0xD8
+#define STGI   .byte 0x0F,0x01,0xDC
+#define CLGI   .byte 0x0F,0x01,0xDD
+
+#define get_current(reg)                        \
+        mov $STACK_SIZE-BYTES_PER_LONG, r(reg); \
+        or  r(sp), r(reg);                      \
+        and $~(BYTES_PER_LONG-1),r(reg);        \
+        mov (r(reg)),r(reg);
+
+#if defined(__x86_64__)
+#define r(reg) %r##reg
+#define addr_of(lbl) lbl(%rip)
+#define call_with_regs(fn)                      \
+        mov  %rsp,%rdi;                         \
+        call fn;
+#else /* defined(__i386__) */
+#define r(reg) %e##reg
+#define addr_of(lbl) lbl
+#define UREGS_rax UREGS_eax
+#define UREGS_rip UREGS_eip
+#define UREGS_rsp UREGS_esp
+#define call_with_regs(fn)                      \
+        mov  %esp,%eax;                         \
+        push %eax;                              \
+        call fn;                                \
+        add  $4,%esp;
+#endif
+
+ENTRY(svm_asm_do_resume)
+        get_current(bx)
+        CLGI
+
+        mov  VCPU_processor(r(bx)),%eax
+        shl  $IRQSTAT_shift,r(ax)
+        lea  addr_of(irq_stat),r(dx)
+        testl $~0,(r(dx),r(ax),1)
+        jnz  .Lsvm_process_softirqs
+
+        call svm_asid_handle_vmrun
+        call svm_intr_assist
+
+        cmpb $0,addr_of(tb_init_done)
+        jnz  .Lsvm_trace
+.Lsvm_trace_done:
+
+        mov  VCPU_svm_vmcb(r(bx)),r(cx)
+        mov  UREGS_rax(r(sp)),r(ax)
+        mov  r(ax),VMCB_rax(r(cx))
+        mov  UREGS_rip(r(sp)),r(ax)
+        mov  r(ax),VMCB_rip(r(cx))
+        mov  UREGS_rsp(r(sp)),r(ax)
+        mov  r(ax),VMCB_rsp(r(cx))
+        mov  UREGS_eflags(r(sp)),r(ax)
+        mov  r(ax),VMCB_rflags(r(cx))
+
+        mov  VCPU_svm_vmcb_pa(r(bx)),r(ax)
+
+#if defined(__x86_64__)
+        pop  %r15
+        pop  %r14
+        pop  %r13
+        pop  %r12
+        pop  %rbp
+        pop  %rbx
+        pop  %r11
+        pop  %r10
+        pop  %r9
+        pop  %r8
+        add  $8,%rsp /* Skip %rax: restored by VMRUN. */
+        pop  %rcx
+        pop  %rdx
+        pop  %rsi
+        pop  %rdi
+#else /* defined(__i386__) */
+        pop  %ebx
+        pop  %ecx
+        pop  %edx
+        pop  %esi
+        pop  %edi
+        pop  %ebp
+#endif
+
+        VMRUN
+
+#if defined(__x86_64__)
+        push %rdi
+        push %rsi
+        push %rdx
+        push %rcx
+        push %rax
+        push %r8
+        push %r9
+        push %r10
+        push %r11
+        push %rbx
+        push %rbp
+        push %r12
+        push %r13
+        push %r14
+        push %r15
+#else /* defined(__i386__) */
+        push %ebp
+        push %edi
+        push %esi
+        push %edx
+        push %ecx
+        push %ebx
+#endif
+
+        get_current(bx)
+        movb $0,VCPU_svm_vmcb_in_sync(r(bx))
+        mov  VCPU_svm_vmcb(r(bx)),r(cx)
+        mov  VMCB_rax(r(cx)),r(ax)
+        mov  r(ax),UREGS_rax(r(sp))
+        mov  VMCB_rip(r(cx)),r(ax)
+        mov  r(ax),UREGS_rip(r(sp))
+        mov  VMCB_rsp(r(cx)),r(ax)
+        mov  r(ax),UREGS_rsp(r(sp))
+        mov  VMCB_rflags(r(cx)),r(ax)
+        mov  r(ax),UREGS_eflags(r(sp))
+
+#ifndef NDEBUG
+        mov  $0xbeef,%ax
+        mov  %ax,UREGS_error_code(r(sp))
+        mov  %ax,UREGS_entry_vector(r(sp))
+        mov  %ax,UREGS_saved_upcall_mask(r(sp))
+        mov  %ax,UREGS_cs(r(sp))
+        mov  %ax,UREGS_ds(r(sp))
+        mov  %ax,UREGS_es(r(sp))
+        mov  %ax,UREGS_fs(r(sp))
+        mov  %ax,UREGS_gs(r(sp))
+        mov  %ax,UREGS_ss(r(sp))
+#endif
+
+        STGI
+.globl svm_stgi_label
+svm_stgi_label:
+        call_with_regs(svm_vmexit_handler)
+        jmp  svm_asm_do_resume
+
+.Lsvm_process_softirqs:
+        STGI
+        call do_softirq
+        jmp  svm_asm_do_resume
+
+.Lsvm_trace:
+        call svm_trace_vmentry
+        jmp  .Lsvm_trace_done
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/svm/x86_32/Makefile
--- a/xen/arch/x86/hvm/svm/x86_32/Makefile      Mon Apr 21 12:23:55 2008 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-obj-y += exits.o
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S       Mon Apr 21 12:23:55 2008 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-/*
- * exits.S: SVM architecture-specific exit handling.
- * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <xen/config.h>
-#include <xen/errno.h>
-#include <xen/softirq.h>
-#include <asm/asm_defns.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#include <public/xen.h>
-
-#define GET_CURRENT(reg)         \
-        movl $STACK_SIZE-4,reg;  \
-        orl  %esp,reg;           \
-        andl $~3,reg;            \
-        movl (reg),reg;
-
-#define VMRUN  .byte 0x0F,0x01,0xD8
-#define STGI   .byte 0x0F,0x01,0xDC
-#define CLGI   .byte 0x0F,0x01,0xDD
-
-ENTRY(svm_asm_do_resume)
-        GET_CURRENT(%ebx)
-        CLGI
-
-        movl VCPU_processor(%ebx),%eax
-        shl  $IRQSTAT_shift,%eax
-        testl $~0,irq_stat(%eax,1)
-        jnz  .Lsvm_process_softirqs
-
-        call svm_asid_handle_vmrun
-        call svm_intr_assist
-
-        /* Check if the trace buffer is initialized. 
-         * Because the below condition is unlikely, we jump out of line
-         * instead of having a mostly taken branch over the unlikely code.
-         */
-        cmpb $0,tb_init_done
-        jnz  .Lsvm_trace
-.Lsvm_trace_done:
-
-        movl VCPU_svm_vmcb(%ebx),%ecx
-        movl UREGS_eax(%esp),%eax
-        movl %eax,VMCB_rax(%ecx)
-        movl UREGS_eip(%esp),%eax
-        movl %eax,VMCB_rip(%ecx)
-        movl UREGS_esp(%esp),%eax
-        movl %eax,VMCB_rsp(%ecx)
-        movl UREGS_eflags(%esp),%eax
-        movl %eax,VMCB_rflags(%ecx)
-
-        movl VCPU_svm_vmcb_pa(%ebx),%eax
-        popl %ebx
-        popl %ecx
-        popl %edx
-        popl %esi
-        popl %edi
-        popl %ebp
-
-        VMRUN
-
-        pushl %ebp
-        pushl %edi
-        pushl %esi
-        pushl %edx
-        pushl %ecx
-        pushl %ebx
-
-        GET_CURRENT(%ebx)
-        movb $0,VCPU_svm_vmcb_in_sync(%ebx)
-        movl VCPU_svm_vmcb(%ebx),%ecx
-        movl VMCB_rax(%ecx),%eax
-        movl %eax,UREGS_eax(%esp)
-        movl VMCB_rip(%ecx),%eax
-        movl %eax,UREGS_eip(%esp)
-        movl VMCB_rsp(%ecx),%eax
-        movl %eax,UREGS_esp(%esp)
-        movl VMCB_rflags(%ecx),%eax
-        movl %eax,UREGS_eflags(%esp)
-
-#ifndef NDEBUG
-        movw $0xbeef,%ax
-        movw %ax,UREGS_error_code(%esp)
-        movw %ax,UREGS_entry_vector(%esp)
-        movw %ax,UREGS_saved_upcall_mask(%esp)
-        movw %ax,UREGS_cs(%esp)
-        movw %ax,UREGS_ds(%esp)
-        movw %ax,UREGS_es(%esp)
-        movw %ax,UREGS_fs(%esp)
-        movw %ax,UREGS_gs(%esp)
-        movw %ax,UREGS_ss(%esp)
-#endif
-
-        STGI
-.globl svm_stgi_label
-svm_stgi_label:
-        movl %esp,%eax
-        push %eax
-        call svm_vmexit_handler
-        addl $4,%esp
-        jmp  svm_asm_do_resume
-
-.Lsvm_process_softirqs:
-        STGI
-        call do_softirq
-        jmp  svm_asm_do_resume
-
-.Lsvm_trace:
-        call svm_trace_vmentry
-        jmp  .Lsvm_trace_done
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/svm/x86_64/Makefile
--- a/xen/arch/x86/hvm/svm/x86_64/Makefile      Mon Apr 21 12:23:55 2008 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-obj-y += exits.o
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S       Mon Apr 21 12:23:55 2008 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,143 +0,0 @@
-/*
- * exits.S: AMD-V architecture-specific exit handling.
- * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <xen/config.h>
-#include <xen/errno.h>
-#include <xen/softirq.h>
-#include <asm/asm_defns.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#include <public/xen.h>
-
-#define GET_CURRENT(reg)         \
-        movq $STACK_SIZE-8,reg;  \
-        orq  %rsp,reg;           \
-        andq $~7,reg;            \
-        movq (reg),reg;
-
-#define VMRUN  .byte 0x0F,0x01,0xD8
-#define STGI   .byte 0x0F,0x01,0xDC
-#define CLGI   .byte 0x0F,0x01,0xDD
-
-ENTRY(svm_asm_do_resume)
-        GET_CURRENT(%rbx)
-        CLGI
-
-        movl VCPU_processor(%rbx),%eax
-        shl  $IRQSTAT_shift,%rax
-        leaq irq_stat(%rip),%rdx
-        testl $~0,(%rdx,%rax,1)
-        jnz  .Lsvm_process_softirqs
-
-        call svm_asid_handle_vmrun
-        call svm_intr_assist
-
-        /* Check if the trace buffer is initialized. 
-         * Because the below condition is unlikely, we jump out of line
-         * instead of having a mostly taken branch over the unlikely code.
-         */
-        cmpb $0,tb_init_done(%rip)
-        jnz  .Lsvm_trace
-.Lsvm_trace_done:
-
-        movq VCPU_svm_vmcb(%rbx),%rcx
-        movq UREGS_rax(%rsp),%rax
-        movq %rax,VMCB_rax(%rcx)
-        movq UREGS_rip(%rsp),%rax
-        movq %rax,VMCB_rip(%rcx)
-        movq UREGS_rsp(%rsp),%rax
-        movq %rax,VMCB_rsp(%rcx)
-        movq UREGS_eflags(%rsp),%rax
-        movq %rax,VMCB_rflags(%rcx)
-
-        movq VCPU_svm_vmcb_pa(%rbx),%rax
-        popq %r15
-        popq %r14
-        popq %r13
-        popq %r12
-        popq %rbp
-        popq %rbx
-        popq %r11
-        popq %r10
-        popq %r9
-        popq %r8
-        addq $8,%rsp /* Skip %rax: restored by VMRUN. */
-        popq %rcx
-        popq %rdx
-        popq %rsi
-        popq %rdi
-
-        VMRUN
-
-        pushq %rdi
-        pushq %rsi
-        pushq %rdx
-        pushq %rcx
-        pushq %rax
-        pushq %r8
-        pushq %r9
-        pushq %r10
-        pushq %r11
-        pushq %rbx
-        pushq %rbp
-        pushq %r12
-        pushq %r13
-        pushq %r14
-        pushq %r15
-
-        GET_CURRENT(%rbx)
-        movb $0,VCPU_svm_vmcb_in_sync(%rbx)
-        movq VCPU_svm_vmcb(%rbx),%rcx
-        movq VMCB_rax(%rcx),%rax
-        movq %rax,UREGS_rax(%rsp)
-        movq VMCB_rip(%rcx),%rax
-        movq %rax,UREGS_rip(%rsp)
-        movq VMCB_rsp(%rcx),%rax
-        movq %rax,UREGS_rsp(%rsp)
-        movq VMCB_rflags(%rcx),%rax
-        movq %rax,UREGS_eflags(%rsp)
-
-#ifndef NDEBUG
-        movw $0xbeef,%ax
-        movw %ax,UREGS_error_code(%rsp)
-        movw %ax,UREGS_entry_vector(%rsp)
-        movw %ax,UREGS_saved_upcall_mask(%rsp)
-        movw %ax,UREGS_cs(%rsp)
-        movw %ax,UREGS_ds(%rsp)
-        movw %ax,UREGS_es(%rsp)
-        movw %ax,UREGS_fs(%rsp)
-        movw %ax,UREGS_gs(%rsp)
-        movw %ax,UREGS_ss(%rsp)
-#endif
-
-        STGI
-.globl svm_stgi_label
-svm_stgi_label:
-        movq %rsp,%rdi
-        call svm_vmexit_handler
-        jmp  svm_asm_do_resume
-
-.Lsvm_process_softirqs:
-        STGI
-        call do_softirq
-        jmp  svm_asm_do_resume
-
-.Lsvm_trace:
-        call svm_trace_vmentry
-        jmp  .Lsvm_trace_done
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/vmx/Makefile
--- a/xen/arch/x86/hvm/vmx/Makefile     Mon Apr 21 12:23:55 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/Makefile     Mon Apr 21 14:55:15 2008 +0100
@@ -1,6 +1,4 @@ subdir-$(x86_32) += x86_32
-subdir-$(x86_32) += x86_32
-subdir-$(x86_64) += x86_64
-
+obj-y += entry.o
 obj-y += intr.o
 obj-y += realmode.o
 obj-y += vmcs.o
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/vmx/entry.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/vmx/entry.S      Mon Apr 21 14:55:15 2008 +0100
@@ -0,0 +1,198 @@
+/*
+ * entry.S: VMX architecture-specific entry/exit handling.
+ * Copyright (c) 2004, Intel Corporation.
+ * Copyright (c) 2008, Citrix Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <xen/softirq.h>
+#include <asm/types.h>
+#include <asm/asm_defns.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#include <public/xen.h>
+
+#define VMRESUME     .byte 0x0f,0x01,0xc3
+#define VMLAUNCH     .byte 0x0f,0x01,0xc2
+#define VMREAD(off)  .byte 0x0f,0x78,0x47,((off)-UREGS_rip)
+#define VMWRITE(off) .byte 0x0f,0x79,0x47,((off)-UREGS_rip)
+
+/* VMCS field encodings */
+#define GUEST_RSP    0x681c
+#define GUEST_RIP    0x681e
+#define GUEST_RFLAGS 0x6820
+
+#define get_current(reg)                        \
+        mov $STACK_SIZE-BYTES_PER_LONG, r(reg); \
+        or  r(sp), r(reg);                      \
+        and $~(BYTES_PER_LONG-1),r(reg);        \
+        mov (r(reg)),r(reg);
+
+#if defined(__x86_64__)
+#define r(reg) %r##reg
+#define addr_of(lbl) lbl(%rip)
+#define call_with_regs(fn)                      \
+        mov  %rsp,%rdi;                         \
+        call fn;
+#else /* defined(__i386__) */
+#define r(reg) %e##reg
+#define addr_of(lbl) lbl
+#define UREGS_rip UREGS_eip
+#define UREGS_rsp UREGS_esp
+#define call_with_regs(fn)                      \
+        mov  %esp,%eax;                         \
+        push %eax;                              \
+        call fn;                                \
+        add  $4,%esp;
+#endif
+
+        ALIGN
+.globl vmx_asm_vmexit_handler
+vmx_asm_vmexit_handler:
+#if defined(__x86_64__)
+        push %rdi
+        push %rsi
+        push %rdx
+        push %rcx
+        push %rax
+        push %r8
+        push %r9
+        push %r10
+        push %r11
+        push %rbx
+        push %rbp
+        push %r12
+        push %r13
+        push %r14
+        push %r15
+#else /* defined(__i386__) */
+        push %eax
+        push %ebp
+        push %edi
+        push %esi
+        push %edx
+        push %ecx
+        push %ebx
+#endif
+
+        get_current(bx)
+
+        movb $1,VCPU_vmx_launched(r(bx))
+
+        lea  UREGS_rip(r(sp)),r(di)
+        mov  $GUEST_RIP,%eax
+        /*VMREAD(UREGS_rip)*/
+        .byte 0x0f,0x78,0x07  /* vmread r(ax),(r(di)) */
+        mov  $GUEST_RSP,%eax
+        VMREAD(UREGS_rsp)
+        mov  $GUEST_RFLAGS,%eax
+        VMREAD(UREGS_eflags)
+
+        mov  %cr2,r(ax)
+        mov  r(ax),VCPU_hvm_guest_cr2(r(bx))
+
+#ifndef NDEBUG
+        mov  $0xbeef,%ax
+        mov  %ax,UREGS_error_code(r(sp))
+        mov  %ax,UREGS_entry_vector(r(sp))
+        mov  %ax,UREGS_saved_upcall_mask(r(sp))
+        mov  %ax,UREGS_cs(r(sp))
+        mov  %ax,UREGS_ds(r(sp))
+        mov  %ax,UREGS_es(r(sp))
+        mov  %ax,UREGS_fs(r(sp))
+        mov  %ax,UREGS_gs(r(sp))
+        mov  %ax,UREGS_ss(r(sp))
+#endif
+
+        call_with_regs(vmx_vmexit_handler)
+
+.globl vmx_asm_do_vmentry
+vmx_asm_do_vmentry:
+        get_current(bx)
+        cli
+
+        mov  VCPU_processor(r(bx)),%eax
+        shl  $IRQSTAT_shift,r(ax)
+        lea  addr_of(irq_stat),r(dx)
+        cmpl $0,(r(dx),r(ax),1)
+        jnz  .Lvmx_process_softirqs
+
+        call vmx_intr_assist
+
+        testb $0xff,VCPU_vmx_emul(r(bx))
+        jnz  .Lvmx_goto_realmode
+
+        mov  VCPU_hvm_guest_cr2(r(bx)),r(ax)
+        mov  r(ax),%cr2
+        call vmx_trace_vmentry
+
+        lea  UREGS_rip(r(sp)),r(di)
+        mov  $GUEST_RIP,%eax
+        /*VMWRITE(UREGS_rip)*/
+        .byte 0x0f,0x79,0x07  /* vmwrite (r(di)),r(ax) */
+        mov  $GUEST_RSP,%eax
+        VMWRITE(UREGS_rsp)
+        mov  $GUEST_RFLAGS,%eax
+        VMWRITE(UREGS_eflags)
+
+        cmpb $0,VCPU_vmx_launched(r(bx))
+#if defined(__x86_64__)
+        pop  %r15
+        pop  %r14
+        pop  %r13
+        pop  %r12
+        pop  %rbp
+        pop  %rbx
+        pop  %r11
+        pop  %r10
+        pop  %r9
+        pop  %r8
+        pop  %rax
+        pop  %rcx
+        pop  %rdx
+        pop  %rsi
+        pop  %rdi
+#else /* defined(__i386__) */
+        pop  %ebx
+        pop  %ecx
+        pop  %edx
+        pop  %esi
+        pop  %edi
+        pop  %ebp
+        pop  %eax
+#endif
+        je   .Lvmx_launch
+
+/*.Lvmx_resume:*/
+        VMRESUME
+        call vm_resume_fail
+        ud2
+
+.Lvmx_launch:
+        VMLAUNCH
+        call vm_launch_fail
+        ud2
+
+.Lvmx_goto_realmode:
+        sti
+        call_with_regs(vmx_realmode)
+        jmp  vmx_asm_do_vmentry
+
+.Lvmx_process_softirqs:
+        sti
+        call do_softirq
+        jmp  vmx_asm_do_vmentry
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/vmx/x86_32/Makefile
--- a/xen/arch/x86/hvm/vmx/x86_32/Makefile      Mon Apr 21 12:23:55 2008 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-obj-y += exits.o
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/vmx/x86_32/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_32/exits.S       Mon Apr 21 12:23:55 2008 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,147 +0,0 @@
-/*
- * exits.S: VMX architecture-specific exit handling.
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-#include <xen/config.h>
-#include <xen/errno.h>
-#include <xen/softirq.h>
-#include <asm/asm_defns.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#include <public/xen.h>
-
-#define VMRESUME     .byte 0x0f,0x01,0xc3
-#define VMLAUNCH     .byte 0x0f,0x01,0xc2
-#define VMREAD(off)  .byte 0x0f,0x78,0x44,0x24,off
-#define VMWRITE(off) .byte 0x0f,0x79,0x44,0x24,off
-
-/* VMCS field encodings */
-#define GUEST_RSP    0x681c
-#define GUEST_RIP    0x681e
-#define GUEST_RFLAGS 0x6820
-
-#define GET_CURRENT(reg)         \
-        movl $STACK_SIZE-4, reg; \
-        orl  %esp, reg;          \
-        andl $~3,reg;            \
-        movl (reg),reg;
-
-#define HVM_SAVE_ALL_NOSEGREGS                                              \
-        pushl %eax;                                                         \
-        pushl %ebp;                                                         \
-        pushl %edi;                                                         \
-        pushl %esi;                                                         \
-        pushl %edx;                                                         \
-        pushl %ecx;                                                         \
-        pushl %ebx;
-
-#define HVM_RESTORE_ALL_NOSEGREGS               \
-        popl %ebx;                              \
-        popl %ecx;                              \
-        popl %edx;                              \
-        popl %esi;                              \
-        popl %edi;                              \
-        popl %ebp;                              \
-        popl %eax
-
-        ALIGN
-.globl vmx_asm_vmexit_handler
-vmx_asm_vmexit_handler:
-        HVM_SAVE_ALL_NOSEGREGS
-        GET_CURRENT(%ebx)
-
-        movb $1,VCPU_vmx_launched(%ebx)
-
-        movl $GUEST_RIP,%eax
-        VMREAD(UREGS_eip)
-        movl $GUEST_RSP,%eax
-        VMREAD(UREGS_esp)
-        movl $GUEST_RFLAGS,%eax
-        VMREAD(UREGS_eflags)
-
-        movl %cr2,%eax
-        movl %eax,VCPU_hvm_guest_cr2(%ebx)
-
-#ifndef NDEBUG
-        movw $0xbeef,%ax
-        movw %ax,UREGS_error_code(%esp)
-        movw %ax,UREGS_entry_vector(%esp)
-        movw %ax,UREGS_saved_upcall_mask(%esp)
-        movw %ax,UREGS_cs(%esp)
-        movw %ax,UREGS_ds(%esp)
-        movw %ax,UREGS_es(%esp)
-        movw %ax,UREGS_fs(%esp)
-        movw %ax,UREGS_gs(%esp)
-        movw %ax,UREGS_ss(%esp)
-#endif
-
-        movl %esp,%eax
-        push %eax
-        call vmx_vmexit_handler
-        addl $4,%esp
-
-.globl vmx_asm_do_vmentry
-vmx_asm_do_vmentry:
-        GET_CURRENT(%ebx)
-        cli                             # tests must not race interrupts
-
-        movl VCPU_processor(%ebx),%eax
-        shl  $IRQSTAT_shift,%eax
-        cmpl $0,irq_stat(%eax,1)
-        jnz  .Lvmx_process_softirqs
-
-        call vmx_intr_assist
-
-        testb $0xff,VCPU_vmx_emul(%ebx)
-        jnz  .Lvmx_goto_realmode
-
-        movl VCPU_hvm_guest_cr2(%ebx),%eax
-        movl %eax,%cr2
-        call vmx_trace_vmentry
-
-        movl $GUEST_RIP,%eax
-        VMWRITE(UREGS_eip)
-        movl $GUEST_RSP,%eax
-        VMWRITE(UREGS_esp)
-        movl $GUEST_RFLAGS,%eax
-        VMWRITE(UREGS_eflags)
-
-        cmpb $0,VCPU_vmx_launched(%ebx)
-        HVM_RESTORE_ALL_NOSEGREGS
-        je   .Lvmx_launch
-
-/*.Lvmx_resume:*/
-        VMRESUME
-        call vm_resume_fail
-        ud2
-
-.Lvmx_launch:
-        VMLAUNCH
-        call vm_launch_fail
-        ud2
-
-.Lvmx_goto_realmode:
-        sti
-        movl %esp,%eax
-        push %eax
-        call vmx_realmode
-        addl $4,%esp
-        jmp vmx_asm_do_vmentry
-
-.Lvmx_process_softirqs:
-        sti
-        call do_softirq
-        jmp vmx_asm_do_vmentry
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/vmx/x86_64/Makefile
--- a/xen/arch/x86/hvm/vmx/x86_64/Makefile      Mon Apr 21 12:23:55 2008 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-obj-y += exits.o
diff -r 491074885dcb -r d03f1c098a1e xen/arch/x86/hvm/vmx/x86_64/exits.S
--- a/xen/arch/x86/hvm/vmx/x86_64/exits.S       Mon Apr 21 12:23:55 2008 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,165 +0,0 @@
-/*
- * exits.S: VMX architecture-specific exit handling.
- * Copyright (c) 2004, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-#include <xen/config.h>
-#include <xen/errno.h>
-#include <xen/softirq.h>
-#include <asm/asm_defns.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#include <public/xen.h>
-
-#define VMRESUME     .byte 0x0f,0x01,0xc3
-#define VMLAUNCH     .byte 0x0f,0x01,0xc2
-#define VMREAD(off)  .byte 0x0f,0x78,0x47,((off)-UREGS_rip)
-#define VMWRITE(off) .byte 0x0f,0x79,0x47,((off)-UREGS_rip)
-
-/* VMCS field encodings */
-#define GUEST_RSP    0x681c
-#define GUEST_RIP    0x681e
-#define GUEST_RFLAGS 0x6820
-
-#define GET_CURRENT(reg)         \
-        movq $STACK_SIZE-8, reg; \
-        orq  %rsp, reg;          \
-        andq $~7,reg;            \
-        movq (reg),reg;
-
-#define HVM_SAVE_ALL_NOSEGREGS                  \
-        pushq %rdi;                             \
-        pushq %rsi;                             \
-        pushq %rdx;                             \
-        pushq %rcx;                             \
-        pushq %rax;                             \
-        pushq %r8;                              \
-        pushq %r9;                              \
-        pushq %r10;                             \
-        pushq %r11;                             \
-        pushq %rbx;                             \
-        pushq %rbp;                             \
-        pushq %r12;                             \
-        pushq %r13;                             \
-        pushq %r14;                             \
-        pushq %r15;
-
-#define HVM_RESTORE_ALL_NOSEGREGS               \
-        popq %r15;                              \
-        popq %r14;                              \
-        popq %r13;                              \
-        popq %r12;                              \
-        popq %rbp;                              \
-        popq %rbx;                              \
-        popq %r11;                              \
-        popq %r10;                              \
-        popq %r9;                               \
-        popq %r8;                               \
-        popq %rax;                              \
-        popq %rcx;                              \
-        popq %rdx;                              \
-        popq %rsi;                              \
-        popq %rdi
-
-        ALIGN
-.globl vmx_asm_vmexit_handler
-vmx_asm_vmexit_handler:
-        HVM_SAVE_ALL_NOSEGREGS
-        GET_CURRENT(%rbx)
-
-        movb $1,VCPU_vmx_launched(%rbx)
-
-        leaq UREGS_rip(%rsp),%rdi
-        movl $GUEST_RIP,%eax
-        /*VMREAD(UREGS_rip)*/
-        .byte 0x0f,0x78,0x07  /* vmread %rax,(%rdi) */
-        movl $GUEST_RSP,%eax
-        VMREAD(UREGS_rsp)
-        movl $GUEST_RFLAGS,%eax
-        VMREAD(UREGS_eflags)
-
-        movq %cr2,%rax
-        movq %rax,VCPU_hvm_guest_cr2(%rbx)
-
-#ifndef NDEBUG
-        movw $0xbeef,%ax
-        movw %ax,UREGS_error_code(%rsp)
-        movw %ax,UREGS_entry_vector(%rsp)
-        movw %ax,UREGS_saved_upcall_mask(%rsp)
-        movw %ax,UREGS_cs(%rsp)
-        movw %ax,UREGS_ds(%rsp)
-        movw %ax,UREGS_es(%rsp)
-        movw %ax,UREGS_fs(%rsp)
-        movw %ax,UREGS_gs(%rsp)
-        movw %ax,UREGS_ss(%rsp)
-#endif
-
-        movq %rsp,%rdi
-        call vmx_vmexit_handler
-
-.globl vmx_asm_do_vmentry
-vmx_asm_do_vmentry:
-        GET_CURRENT(%rbx)
-        cli                             # tests must not race interrupts
-
-        movl  VCPU_processor(%rbx),%eax
-        shl   $IRQSTAT_shift,%rax
-        leaq  irq_stat(%rip),%rdx
-        cmpl  $0,(%rdx,%rax,1)
-        jnz   .Lvmx_process_softirqs
-
-        call vmx_intr_assist
-
-        testb $0xff,VCPU_vmx_emul(%rbx)
-        jnz  .Lvmx_goto_realmode
-
-        movq VCPU_hvm_guest_cr2(%rbx),%rax
-        movq %rax,%cr2
-        call vmx_trace_vmentry
-
-        leaq UREGS_rip(%rsp),%rdi
-        movl $GUEST_RIP,%eax
-        /*VMWRITE(UREGS_rip)*/
-        .byte 0x0f,0x79,0x07  /* vmwrite (%rdi),%rax */
-        movl $GUEST_RSP,%eax
-        VMWRITE(UREGS_rsp)
-        movl $GUEST_RFLAGS,%eax
-        VMWRITE(UREGS_eflags)
-
-        cmpb $0,VCPU_vmx_launched(%rbx)
-        HVM_RESTORE_ALL_NOSEGREGS
-        je   .Lvmx_launch
-
-/*.Lvmx_resume:*/
-        VMRESUME
-        call vm_resume_fail
-        ud2
-
-.Lvmx_launch:
-        VMLAUNCH
-        call vm_launch_fail
-        ud2
-
-.Lvmx_goto_realmode:
-        sti
-        movq %rsp,%rdi
-        call vmx_realmode
-        jmp vmx_asm_do_vmentry
-        jmp vmx_asm_do_vmentry
-
-.Lvmx_process_softirqs:
-        sti
-        call do_softirq
-        jmp vmx_asm_do_vmentry
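
A note on the raw opcode bytes used throughout these stubs: the SVM and VMX
instructions are hand-encoded, most likely so the files assemble with
toolchains that predate mnemonic support for them. For reference, a sketch of
what the main encodings correspond to (the mnemonics are the standard names
for these byte sequences, not text from the patch):

        .byte 0x0F,0x01,0xD8    /* vmrun    - enter the guest; rAX holds the VMCB address */
        .byte 0x0F,0x01,0xDC    /* stgi     - set the global interrupt flag     */
        .byte 0x0F,0x01,0xDD    /* clgi     - clear the global interrupt flag   */
        .byte 0x0f,0x01,0xc2    /* vmlaunch - first entry on the current VMCS   */
        .byte 0x0f,0x01,0xc3    /* vmresume - later entries on the current VMCS */

The VMREAD()/VMWRITE() wrappers similarly hand-encode vmread/vmwrite in the
disp8 ModRM form relative to UREGS_rip, while the inline .byte 0x0f,0x78,0x07
and .byte 0x0f,0x79,0x07 sequences use the register-indirect form, as their
/* vmread */ and /* vmwrite */ comments note.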

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
