[Xen-changelog] [xen-unstable] Add entry points for handling hypercalls from and returning to compatibility mode guests

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Add entry points for handling hypercalls from and returning to compatibility mode guests
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 05 Jan 2007 12:55:17 -0800
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Emmanuel Ackaouy <ack@xxxxxxxxxxxxx>
# Date 1168018467 0
# Node ID 5a690aa51fb5d67e72c1cca442758b214f98dedd
# Parent  7c5eea5feebd78eb314a87338e8632ce206d6634
Add entry points for handling hypercalls from and returning to
compatibility mode guests.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/x86/traps.c               |    6 
 xen/arch/x86/x86_64/Makefile       |    6 
 xen/arch/x86/x86_64/asm-offsets.c  |    6 
 xen/arch/x86/x86_64/compat/entry.S |  395 +++++++++++++++++++++++++++++++++++++
 xen/arch/x86/x86_64/compat/traps.c |  312 +++++++++++++++++++++++++++++
 xen/arch/x86/x86_64/entry.S        |   29 ++
 xen/arch/x86/x86_64/traps.c        |   12 +
 xen/include/asm-x86/processor.h    |    6 
 8 files changed, 769 insertions(+), 3 deletions(-)
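
For orientation (a sketch, not part of this changeset): a compatibility-mode
(32-bit PV) guest reaches the new compat_hypercall entry point through soft
interrupt vector 0x82, with the hypercall number in %eax and up to six
arguments in ebx/ecx/edx/esi/edi/ebp. A minimal, hypothetical one-argument
wrapper as it might appear in 32-bit guest code:

    /* Illustrative only -- not part of this patch. The 0x82 vector and
     * register ABI follow the hypercall-page template and entry code below;
     * the IDT gate is installed with DPL 1, so only the guest kernel
     * (ring 1), not ring-3 userspace, may use it. */
    static inline long compat_hypercall1(unsigned int nr, unsigned long arg1)
    {
        long ret;
        asm volatile ( "int $0x82"
                       : "=a" (ret)
                       : "a" (nr), "b" (arg1)
                       : "memory" );
        return ret;
    }
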

diff -r 7c5eea5feebd -r 5a690aa51fb5 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Fri Jan 05 17:32:00 2007 +0000
+++ b/xen/arch/x86/traps.c      Fri Jan 05 17:34:27 2007 +0000
@@ -123,6 +123,12 @@ static void show_guest_stack(struct cpu_
     if ( is_hvm_vcpu(current) )
         return;
 
+    if ( IS_COMPAT(container_of(regs, struct cpu_info, guest_cpu_user_regs)->current_vcpu->domain) )
+    {
+        compat_show_guest_stack(regs, debug_stack_lines);
+        return;
+    }
+
     if ( vm86_mode(regs) )
     {
         stack = (unsigned long *)((regs->ss << 4) + (regs->esp & 0xffff));
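
A note on the hunk above: regs is the guest_cpu_user_regs member embedded in
the per-CPU struct cpu_info at the top of the hypervisor stack, so stepping
back over the member offset recovers the enclosing cpu_info and, from it,
current_vcpu->domain. A minimal sketch of the container_of() idiom (Xen's
actual definition may differ in detail):

    #include <stddef.h>

    /* Given a pointer to a member, recover the enclosing structure. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
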
diff -r 7c5eea5feebd -r 5a690aa51fb5 xen/arch/x86/x86_64/Makefile
--- a/xen/arch/x86/x86_64/Makefile      Fri Jan 05 17:32:00 2007 +0000
+++ b/xen/arch/x86/x86_64/Makefile      Fri Jan 05 17:34:27 2007 +0000
@@ -2,3 +2,9 @@ obj-y += gpr_switch.o
 obj-y += gpr_switch.o
 obj-y += mm.o
 obj-y += traps.o
+
+ifeq ($(CONFIG_COMPAT),y)
+# extra dependencies
+entry.o:       compat/entry.S
+traps.o:       compat/traps.c
+endif
diff -r 7c5eea5feebd -r 5a690aa51fb5 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Fri Jan 05 17:32:00 2007 +0000
+++ b/xen/arch/x86/x86_64/asm-offsets.c Fri Jan 05 17:34:27 2007 +0000
@@ -53,6 +53,7 @@ void __dummy__(void)
     BLANK();
 
     OFFSET(VCPU_processor, struct vcpu, processor);
+    OFFSET(VCPU_domain, struct vcpu, domain);
     OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
     OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
     OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
@@ -87,6 +88,10 @@ void __dummy__(void)
     OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
     BLANK();
 
+    OFFSET(DOMAIN_domain_flags, struct domain, domain_flags);
+    DEFINE(_DOMF_compat, _DOMF_compat);
+    BLANK();
+
     OFFSET(VMCB_rax, struct vmcb_struct, rax);
     OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
     BLANK();
@@ -95,6 +100,7 @@ void __dummy__(void)
     OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
     BLANK();
 
+    OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
     DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
     BLANK();
 
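
The OFFSET()/DEFINE() lines above export structure offsets (e.g. VCPU_domain,
CPUINFO_current_vcpu) for use by the assembly in entry.S. A sketch of the
usual asm-offsets mechanism, assuming Xen follows the common pattern (not
quoted from the Xen source): the compiler embeds each constant as an
immediate in the generated assembly, and a post-processing step turns the
marker lines into #defines in a generated header:

    /* Sketch of the common asm-offsets idiom -- assumed, not quoted. */
    #define DEFINE(_sym, _val) \
        asm volatile ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
    #define OFFSET(_sym, _str, _mem)  DEFINE(_sym, offsetof(_str, _mem))
    #define BLANK()  asm volatile ( "\n->" : : )
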
diff -r 7c5eea5feebd -r 5a690aa51fb5 xen/arch/x86/x86_64/compat/entry.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/x86_64/compat/entry.S        Fri Jan 05 17:34:27 2007 +0000
@@ -0,0 +1,395 @@
+/*
+ * Compatibility hypercall routines.
+ */
+
+#include <asm/desc.h>
+
+.text
+
+ENTRY(compat_hypercall)
+        pushq $0
+        movl  $TRAP_syscall,4(%rsp)
+        SAVE_ALL
+        GET_CURRENT(%rbx)
+
+        cmpl  $NR_hypercalls,%eax
+        jae   compat_bad_hypercall
+#ifndef NDEBUG
+        /* Deliberately corrupt parameter regs not used by this hypercall. */
+        pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi; pushq UREGS_rbp+5*8(%rsp)
+        leaq  compat_hypercall_args_table(%rip),%r10
+        movq  $6,%rcx
+        subb  (%r10,%rax,1),%cl
+        movq  %rsp,%rdi
+        movl  $0xDEADBEEF,%eax
+        rep   stosq
+        popq  %r9 ; popq  %r8 ; popq  %rcx; popq  %rdx; popq  %rsi; popq  %rdi
+        movl  UREGS_rax(%rsp),%eax
+        pushq %rax
+        pushq UREGS_rip+8(%rsp)
+#else
+        movl  %eax,%eax
+        movl  %ebp,%r9d
+        movl  %edi,%r8d
+        xchgl  %ecx,%esi
+        movl  UREGS_rbx(%rsp),%edi
+#endif
+        leaq  compat_hypercall_table(%rip),%r10
+        PERFC_INCR(PERFC_hypercalls, %rax)
+        callq *(%r10,%rax,8)
+#ifndef NDEBUG
+        /* Deliberately corrupt parameter regs used by this hypercall. */
+        popq  %r10         # Shadow RIP
+        cmpq  %r10,UREGS_rip+8(%rsp)
+        popq  %rcx         # Shadow hypercall index
+        jne   compat_skip_clobber /* If RIP has changed then don't clobber. */
+        leaq  compat_hypercall_args_table(%rip),%r10
+        movb  (%r10,%rcx,1),%cl
+        movl  $0xDEADBEEF,%r10d
+        testb %cl,%cl; jz compat_skip_clobber; movl %r10d,UREGS_rbx(%rsp)
+        cmpb  $2, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rcx(%rsp)
+        cmpb  $3, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdx(%rsp)
+        cmpb  $4, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rsi(%rsp)
+        cmpb  $5, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdi(%rsp)
+        cmpb  $6, %cl; jb compat_skip_clobber; movl %r10d,UREGS_rbp(%rsp)
+compat_skip_clobber:
+#endif
+        movl  %eax,UREGS_rax(%rsp)       # save the return value
+
+/* %rbx: struct vcpu */
+compat_test_all_events:
+        cli                             # tests must not race interrupts
+/*compat_test_softirqs:*/
+        movl  VCPU_processor(%rbx),%eax
+        shlq  $IRQSTAT_shift,%rax
+        leaq  irq_stat(%rip),%rcx
+        testl $~0,(%rcx,%rax,1)
+        jnz   compat_process_softirqs
+        btrq  $_VCPUF_nmi_pending,VCPU_flags(%rbx)
+        jc    compat_process_nmi
+compat_test_guest_events:
+        movq  VCPU_vcpu_info(%rbx),%rax
+        testb $0xFF,VCPUINFO_upcall_mask(%rax)
+        jnz   compat_restore_all_guest
+        testb $0xFF,VCPUINFO_upcall_pending(%rax)
+        jz    compat_restore_all_guest
+/*compat_process_guest_events:*/
+        sti
+        leaq  VCPU_trap_bounce(%rbx),%rdx
+        movl  VCPU_event_addr(%rbx),%eax
+        movl  %eax,TRAPBOUNCE_eip(%rdx)
+        movl  VCPU_event_sel(%rbx),%eax
+        movl  %eax,TRAPBOUNCE_cs(%rdx)
+        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+        call  compat_create_bounce_frame
+        jmp   compat_test_all_events
+
+        ALIGN
+/* %rbx: struct vcpu */
+compat_process_softirqs:
+        sti
+        call  do_softirq
+        jmp   compat_test_all_events
+
+       ALIGN
+/* %rbx: struct vcpu */
+compat_process_nmi:
+        movl  VCPU_nmi_addr(%rbx),%eax
+        testl %eax,%eax
+        jz    compat_test_all_events
+        btsq  $_VCPUF_nmi_masked,VCPU_flags(%rbx)
+        jc    1f
+        sti
+        leaq  VCPU_trap_bounce(%rbx),%rdx
+        movl  %eax,TRAPBOUNCE_eip(%rdx)
+        movl  $FLAT_COMPAT_KERNEL_CS,TRAPBOUNCE_cs(%rdx)
+        movw  $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+        call  compat_create_bounce_frame
+        jmp   compat_test_all_events
+1:
+        btsq  $_VCPUF_nmi_pending,VCPU_flags(%rbx)
+        jmp   compat_test_guest_events
+
+compat_bad_hypercall:
+        movl $-ENOSYS,UREGS_rax(%rsp)
+        jmp  compat_test_all_events
+
+/* %rbx: struct vcpu, interrupts disabled */
+compat_restore_all_guest:
+        RESTORE_ALL
+        addq  $8,%rsp
+CFLT0:  iretq
+
+.section .fixup,"ax"
+CFIX0:  popq  -15*8-8(%rsp)            # error_code/entry_vector
+        SAVE_ALL                       # 15*8 bytes pushed
+        movq  -8(%rsp),%rsi            # error_code/entry_vector
+        sti                            # after stack abuse (-1024(%rsp))
+        pushq $__HYPERVISOR_DS         # SS
+        leaq  8(%rsp),%rax
+        pushq %rax                     # RSP
+        pushfq                         # RFLAGS
+        pushq $__HYPERVISOR_CS         # CS
+        leaq  CDBLFLT0(%rip),%rax
+        pushq %rax                     # RIP
+        pushq %rsi                     # error_code/entry_vector
+        jmp   handle_exception
+CDBLFLT0:GET_CURRENT(%rbx)
+        jmp   compat_test_all_events
+compat_failsafe_callback:
+        GET_CURRENT(%rbx)
+        leaq  VCPU_trap_bounce(%rbx),%rdx
+        movl  VCPU_failsafe_addr(%rbx),%eax
+        movl  %eax,TRAPBOUNCE_eip(%rdx)
+        movl  VCPU_failsafe_sel(%rbx),%eax
+        movl  %eax,TRAPBOUNCE_cs(%rdx)
+        movw  $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
+        btq   $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
+        jnc   1f
+        orw   $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+1:
+        call  compat_create_bounce_frame
+        jmp   compat_test_all_events
+.previous
+.section __pre_ex_table,"a"
+       .quad CFLT0,CFIX0
+.previous
+.section __ex_table,"a"
+        .quad CDBLFLT0,compat_failsafe_callback
+.previous
+
+/* %rdx: trap_bounce, %rbx: struct vcpu */
+compat_post_handle_exception:
+        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
+        jz    compat_test_all_events
+        call  compat_create_bounce_frame
+        jmp   compat_test_all_events
+
+/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
+/*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
+/* %rdx: trap_bounce, %rbx: struct vcpu                                  */
+/* On return only %rbx is guaranteed non-clobbered.                      */
+compat_create_bounce_frame:
+        mov   %fs,%edi
+        testb $2,UREGS_cs+8(%rsp)
+        jz    1f
+        /* Push new frame at registered guest-OS stack base. */
+        movl  VCPU_kernel_sp(%rbx),%esi
+CFLT1:  mov   VCPU_kernel_ss(%rbx),%fs
+        subl  $2*4,%esi
+        movl  UREGS_rsp+8(%rsp),%eax
+CFLT2:  movl  %eax,%fs:(%rsi)
+        movl  UREGS_ss+8(%rsp),%eax
+CFLT3:  movl  %eax,%fs:4(%rsi)
+        jmp   2f
+1:      /* In kernel context already: push new frame at existing %rsp. */
+        movl  UREGS_rsp+8(%rsp),%esi
+CFLT4:  mov   UREGS_ss+8(%rsp),%fs
+2:
+        movb  TRAPBOUNCE_flags(%rdx),%cl
+        subl  $3*4,%esi
+        movq  VCPU_vcpu_info(%rbx),%rax
+        pushq VCPUINFO_upcall_mask(%rax)
+        testb $TBF_INTERRUPT,%cl
+        setnz %ch                       # TBF_INTERRUPT -> set upcall mask
+        orb   %ch,VCPUINFO_upcall_mask(%rax)
+        popq  %rax
+        shll  $16,%eax                  # Bits 16-23: saved_upcall_mask
+        movw  UREGS_cs+8(%rsp),%ax      # Bits  0-15: CS
+CFLT5:  movl  %eax,%fs:4(%rsi)          # CS / saved_upcall_mask
+        shrl  $16,%eax
+        testb %al,%al                   # Bits 0-7: saved_upcall_mask
+        setz  %ch                       # %ch == !saved_upcall_mask
+        movl  UREGS_eflags+8(%rsp),%eax
+        andl  $~X86_EFLAGS_IF,%eax
+        shlb  $1,%ch                    # Bit 9 (EFLAGS.IF)
+        orb   %ch,%ah                   # Fold EFLAGS.IF into %eax
+CFLT6:  movl  %eax,%fs:2*4(%rsi)        # EFLAGS
+        movl  UREGS_rip+8(%rsp),%eax
+CFLT7:  movl  %eax,%fs:(%rsi)           # EIP
+        testb $TBF_EXCEPTION_ERRCODE,%cl
+        jz    1f
+        subl  $4,%esi
+        movl  TRAPBOUNCE_error_code(%rdx),%eax
+CFLT8:  movl  %eax,%fs:(%rsi)           # ERROR CODE
+1:
+        testb $TBF_FAILSAFE,%cl
+        jz    2f
+        subl  $4*4,%esi
+        movl  %gs,%eax
+CFLT9:  movl  %eax,%fs:3*4(%rsi)        # GS
+CFLT10: movl  %edi,%fs:2*4(%rsi)        # FS
+        movl  %es,%eax
+CFLT11: movl  %eax,%fs:1*4(%rsi)        # ES
+        movl  %ds,%eax
+CFLT12: movl  %eax,%fs:0*4(%rsi)        # DS
+2:
+        /* Rewrite our stack frame and return to guest-OS mode. */
+        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
+        movl  $TRAP_syscall,UREGS_entry_vector+8(%rsp)
+        andl  $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
+                 X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+8(%rsp)
+        mov   %fs,UREGS_ss+8(%rsp)
+        movl  %esi,UREGS_rsp+8(%rsp)
+CFLT13: mov   %edi,%fs
+        movzwl TRAPBOUNCE_cs(%rdx),%eax
+        /* Null selectors (0-3) are not allowed. */
+        testl $~3,%eax
+        jz    domain_crash_synchronous
+        movl  %eax,UREGS_cs+8(%rsp)
+        movl  TRAPBOUNCE_eip(%rdx),%eax
+        movl  %eax,UREGS_rip+8(%rsp)
+        movb  $0,TRAPBOUNCE_flags(%rdx)
+        ret
+.section .fixup,"ax"
+CFIX13:
+        xorl  %edi,%edi
+        jmp   CFLT13
+.previous
+.section __ex_table,"a"
+        .quad  CFLT1,domain_crash_synchronous  ,  CFLT2,compat_crash_page_fault
+        .quad  CFLT3,compat_crash_page_fault_4 ,  CFLT4,domain_crash_synchronous
+        .quad  CFLT5,compat_crash_page_fault_4 ,  CFLT6,compat_crash_page_fault_8
+        .quad  CFLT7,compat_crash_page_fault   ,  CFLT8,compat_crash_page_fault
+        .quad  CFLT9,compat_crash_page_fault_12, CFLT10,compat_crash_page_fault_8
+        .quad CFLT11,compat_crash_page_fault_4 , CFLT12,compat_crash_page_fault
+        .quad CFLT13,CFIX13
+.previous
+
+compat_crash_page_fault_12:
+        addl  $4,%esi
+compat_crash_page_fault_8:
+        addl  $4,%esi
+compat_crash_page_fault_4:
+        addl  $4,%esi
+compat_crash_page_fault:
+CFLT14: mov   %edi,%fs
+        movl  %esi,%edi
+        call  show_page_walk
+        jmp   domain_crash_synchronous
+.section .fixup,"ax"
+CFIX14:
+        xorl  %edi,%edi
+        jmp   CFLT14
+.previous
+.section __ex_table,"a"
+        .quad CFLT14,CFIX14
+.previous
+
+.section .rodata, "a", @progbits
+
+#define compat_set_trap_table domain_crash_synchronous
+#define compat_mmu_update domain_crash_synchronous
+#define compat_set_gdt domain_crash_synchronous
+#define compat_stack_switch domain_crash_synchronous
+#define compat_fpu_taskswitch domain_crash_synchronous
+#define compat_arch_sched_op_compat domain_crash_synchronous
+#define compat_platform_op domain_crash_synchronous
+#define compat_set_debugreg domain_crash_synchronous
+#define compat_get_debugreg domain_crash_synchronous
+#define compat_update_descriptor domain_crash_synchronous
+#define compat_memory_op domain_crash_synchronous
+#define compat_multicall domain_crash_synchronous
+#define compat_update_va_mapping domain_crash_synchronous
+#define compat_set_timer_op domain_crash_synchronous
+#define compat_event_channel_op_compat domain_crash_synchronous
+#define compat_xen_version domain_crash_synchronous
+#define compat_console_io domain_crash_synchronous
+#define compat_physdev_op_compat domain_crash_synchronous
+#define compat_grant_table_op domain_crash_synchronous
+#define compat_vm_assist domain_crash_synchronous
+#define compat_update_va_mapping_otherdomain domain_crash_synchronous
+#define compat_vcpu_op domain_crash_synchronous
+#define compat_mmuext_op domain_crash_synchronous
+#define compat_acm_op domain_crash_synchronous
+#define compat_nmi_op domain_crash_synchronous
+#define compat_arch_sched_op domain_crash_synchronous
+#define compat_xenoprof_op domain_crash_synchronous
+#define compat_event_channel_op domain_crash_synchronous
+#define compat_physdev_op domain_crash_synchronous
+#define compat_sysctl domain_crash_synchronous
+#define compat_domctl domain_crash_synchronous
+
+ENTRY(compat_hypercall_table)
+        .quad compat_set_trap_table     /*  0 */
+        .quad compat_mmu_update
+        .quad compat_set_gdt
+        .quad compat_stack_switch
+        .quad compat_set_callbacks
+        .quad compat_fpu_taskswitch     /*  5 */
+        .quad compat_arch_sched_op_compat
+        .quad compat_platform_op
+        .quad compat_set_debugreg
+        .quad compat_get_debugreg
+        .quad compat_update_descriptor  /* 10 */
+        .quad do_ni_hypercall
+        .quad compat_memory_op
+        .quad compat_multicall
+        .quad compat_update_va_mapping
+        .quad compat_set_timer_op       /* 15 */
+        .quad compat_event_channel_op_compat
+        .quad compat_xen_version
+        .quad compat_console_io
+        .quad compat_physdev_op_compat
+        .quad compat_grant_table_op     /* 20 */
+        .quad compat_vm_assist
+        .quad compat_update_va_mapping_otherdomain
+        .quad compat_iret
+        .quad compat_vcpu_op
+        .quad do_ni_hypercall           /* 25 */
+        .quad compat_mmuext_op
+        .quad compat_acm_op
+        .quad compat_nmi_op
+        .quad compat_arch_sched_op
+        .quad compat_callback_op        /* 30 */
+        .quad compat_xenoprof_op
+        .quad compat_event_channel_op
+        .quad compat_physdev_op
+        .quad do_ni_hypercall
+        .quad compat_sysctl             /* 35 */
+        .quad compat_domctl
+        .rept NR_hypercalls-((.-compat_hypercall_table)/8)
+        .quad do_ni_hypercall
+        .endr
+
+ENTRY(compat_hypercall_args_table)
+        .byte 1 /* compat_set_trap_table    */  /*  0 */
+        .byte 4 /* compat_mmu_update        */
+        .byte 2 /* compat_set_gdt           */
+        .byte 2 /* compat_stack_switch      */
+        .byte 4 /* compat_set_callbacks     */
+        .byte 1 /* compat_fpu_taskswitch    */  /*  5 */
+        .byte 2 /* compat_arch_sched_op_compat */
+        .byte 1 /* compat_platform_op       */
+        .byte 2 /* compat_set_debugreg      */
+        .byte 1 /* compat_get_debugreg      */
+        .byte 4 /* compat_update_descriptor */  /* 10 */
+        .byte 0 /* do_ni_hypercall          */
+        .byte 2 /* compat_memory_op         */
+        .byte 2 /* compat_multicall         */
+        .byte 4 /* compat_update_va_mapping */
+        .byte 2 /* compat_set_timer_op      */  /* 15 */
+        .byte 1 /* compat_event_channel_op_compat */
+        .byte 2 /* compat_xen_version       */
+        .byte 3 /* compat_console_io        */
+        .byte 1 /* compat_physdev_op_compat */
+        .byte 3 /* compat_grant_table_op    */  /* 20 */
+        .byte 2 /* compat_vm_assist         */
+        .byte 5 /* compat_update_va_mapping_otherdomain */
+        .byte 0 /* compat_iret              */
+        .byte 3 /* compat_vcpu_op           */
+        .byte 0 /* do_ni_hypercall          */  /* 25 */
+        .byte 4 /* compat_mmuext_op         */
+        .byte 1 /* compat_acm_op            */
+        .byte 2 /* compat_nmi_op            */
+        .byte 2 /* compat_arch_sched_op     */
+        .byte 2 /* compat_callback_op       */  /* 30 */
+        .byte 2 /* compat_xenoprof_op       */
+        .byte 2 /* compat_event_channel_op  */
+        .byte 2 /* compat_physdev_op        */
+        .byte 0 /* do_ni_hypercall          */
+        .byte 1 /* compat_sysctl            */  /* 35 */
+        .byte 1 /* compat_domctl            */
+        .rept NR_hypercalls-(.-compat_hypercall_args_table)
+        .byte 0 /* do_ni_hypercall          */
+        .endr
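
In C terms, the register shuffle and `callq *(%r10,%rax,8)` dispatch in
compat_hypercall above amount to the following analogy (illustrative, not
actual Xen source): the six 32-bit PV argument registers become the six
SysV-ABI C arguments in order, and the handler is fetched from the 8-byte
table entry indexed by the hypercall number:

    #include <stdint.h>

    typedef int64_t (*compat_fn_t)(uint32_t, uint32_t, uint32_t,
                                   uint32_t, uint32_t, uint32_t);

    /* Stand-in table -- the real one is compat_hypercall_table above. */
    static int64_t ni_hypercall(uint32_t a, uint32_t b, uint32_t c,
                                uint32_t d, uint32_t e, uint32_t f)
    {
        (void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
        return -38;  /* -ENOSYS, as in compat_bad_hypercall */
    }
    static compat_fn_t table[1] = { ni_hypercall };

    /* ebx..ebp land in rdi,rsi,rdx,rcx,r8,r9, matching the entry code's
     * movl/xchgl sequence (rcx takes the old esi via the xchg). */
    static int64_t compat_dispatch(uint32_t eax, uint32_t ebx, uint32_t ecx,
                                   uint32_t edx, uint32_t esi, uint32_t edi,
                                   uint32_t ebp)
    {
        return table[eax](ebx, ecx, edx, esi, edi, ebp);
    }
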
diff -r 7c5eea5feebd -r 5a690aa51fb5 xen/arch/x86/x86_64/compat/traps.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/x86_64/compat/traps.c        Fri Jan 05 17:34:27 2007 +0000
@@ -0,0 +1,312 @@
+#ifdef CONFIG_COMPAT
+
+#if 0 /* XXX */
+#include <compat/callback.h>
+#else
+struct compat_xen_callback {
+    unsigned int cs;
+    unsigned int eip;
+};
+typedef struct compat_xen_callback xen_callback_compat_t;
+
+struct compat_callback_register {
+    uint16_t type;
+    uint16_t flags;
+    xen_callback_compat_t address;
+};
+
+struct compat_callback_unregister {
+    uint16_t type;
+    uint16_t _unused;
+};
+#endif
+
+void compat_show_guest_stack(struct cpu_user_regs *regs, int debug_stack_lines)
+{
+    unsigned int i, *stack, addr;
+
+    stack = (unsigned int *)(unsigned long)regs->_esp;
+    printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);
+
+    for ( i = 0; i < debug_stack_lines * 8; i++ )
+    {
+        if ( (((long)stack + 3) & (STACK_SIZE - 4)) == 0 )
+            break;
+        if ( get_user(addr, stack) )
+        {
+            if ( i != 0 )
+                printk("\n    ");
+            printk("Fault while accessing guest memory.");
+            i = 1;
+            break;
+        }
+        if ( (i != 0) && ((i % 8) == 0) )
+            printk("\n ");
+        printk(" %08x", addr);
+        stack++;
+    }
+    if ( i == 0 )
+        printk("Stack empty.");
+    printk("\n");
+}
+
+unsigned int compat_iret(void)
+{
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    u32 eflags;
+
+    /* Restore EAX (clobbered by hypercall). */
+    if ( unlikely(__get_user(regs->_eax, (u32 __user *)regs->rsp)) )
+        goto exit_and_crash;
+
+    /* Restore CS and EIP. */
+    if ( unlikely(__get_user(regs->_eip, (u32 __user *)regs->rsp + 1)) ||
+        unlikely(__get_user(regs->cs, (u32 __user *)regs->rsp + 2)) )
+        goto exit_and_crash;
+
+    /*
+     * Fix up and restore EFLAGS. We fix up in a local staging area
+     * to avoid firing the BUG_ON(IOPL) check in arch_getdomaininfo_ctxt.
+     */
+    if ( unlikely(__get_user(eflags, (u32 __user *)regs->rsp + 3)) )
+        goto exit_and_crash;
+    regs->_eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
+
+    if ( unlikely(eflags & X86_EFLAGS_VM) )
+    {
+        /*
+         * Cannot return to VM86 mode: inject a GP fault instead. Note that
+         * the GP fault is reported on the first VM86 mode instruction, not on
+         * the IRET (which is why we can simply leave the stack frame as-is
+         * (except for perhaps having to copy it), which in turn seems better
+         * than teaching create_bounce_frame() to needlessly deal with vm86
+         * mode frames).
+         */
+        const struct trap_info *ti;
+        u32 x, ksp = current->arch.guest_context.kernel_sp - 40;
+        unsigned int i;
+        int rc = 0;
+
+        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
+                 regs->_esp, ksp);
+        if ( ksp < regs->_esp )
+        {
+            for (i = 1; i < 10; ++i)
+            {
+                rc |= __get_user(x, (u32 __user *)regs->rsp + i);
+                rc |= __put_user(x, (u32 __user *)(unsigned long)ksp + i);
+            }
+        }
+        else if ( ksp > regs->_esp )
+        {
+            for (i = 9; i > 0; --i)
+            {
+                rc |= __get_user(x, (u32 __user *)regs->rsp + i);
+                rc |= __put_user(x, (u32 __user *)(unsigned long)ksp + i);
+            }
+        }
+        if ( rc )
+            goto exit_and_crash;
+        regs->_esp = ksp;
+        regs->ss = current->arch.guest_context.kernel_ss;
+
+        ti = &current->arch.guest_context.trap_ctxt[13];
+        if ( TI_GET_IF(ti) )
+            eflags &= ~X86_EFLAGS_IF;
+        regs->_eflags = eflags & ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
+                                   X86_EFLAGS_NT|X86_EFLAGS_TF);
+
+        if ( unlikely(__put_user(0, (u32 __user *)regs->rsp)) )
+            goto exit_and_crash;
+        regs->_eip = ti->address;
+        regs->cs = ti->cs;
+    }
+    else if ( unlikely(ring_0(regs)) )
+        goto exit_and_crash;
+    else if ( !ring_1(regs) )
+    {
+        /* Return to ring 2/3: restore ESP and SS. */
+        if ( __get_user(regs->ss, (u32 __user *)regs->rsp + 5)
+            || __get_user(regs->_esp, (u32 __user *)regs->rsp + 4))
+            goto exit_and_crash;
+    }
+    else
+        regs->_esp += 16;
+
+    /* No longer in NMI context. */
+    clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
+
+    /* Restore upcall mask from supplied EFLAGS.IF. */
+    current->vcpu_info->evtchn_upcall_mask = !(eflags & X86_EFLAGS_IF);
+
+    /*
+     * The hypercall exit path will overwrite EAX with this return
+     * value.
+     */
+    return regs->_eax;
+
+ exit_and_crash:
+    gdprintk(XENLOG_ERR, "Fatal error\n");
+    domain_crash(current->domain);
+    return 0;
+}
+
+static long compat_register_guest_callback(struct compat_callback_register *reg)
+{
+    long ret = 0;
+    struct vcpu *v = current;
+
+    fixup_guest_code_selector(v->domain, reg->address.cs);
+
+    switch ( reg->type )
+    {
+    case CALLBACKTYPE_event:
+        v->arch.guest_context.event_callback_cs     = reg->address.cs;
+        v->arch.guest_context.event_callback_eip    = reg->address.eip;
+        break;
+
+    case CALLBACKTYPE_failsafe:
+        v->arch.guest_context.failsafe_callback_cs  = reg->address.cs;
+        v->arch.guest_context.failsafe_callback_eip = reg->address.eip;
+        if ( reg->flags & CALLBACKF_mask_events )
+            set_bit(_VGCF_failsafe_disables_events,
+                    &v->arch.guest_context.flags);
+        else
+            clear_bit(_VGCF_failsafe_disables_events,
+                      &v->arch.guest_context.flags);
+        break;
+
+    case CALLBACKTYPE_nmi:
+        ret = register_guest_nmi_callback(reg->address.eip);
+        break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
+static long compat_unregister_guest_callback(struct compat_callback_unregister *unreg)
+{
+    long ret;
+
+    switch ( unreg->type )
+    {
+    case CALLBACKTYPE_nmi:
+        ret = unregister_guest_nmi_callback();
+        break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
+
+long compat_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+    long ret;
+
+    switch ( cmd )
+    {
+    case CALLBACKOP_register:
+    {
+        struct compat_callback_register reg;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&reg, arg, 1) )
+            break;
+
+        ret = compat_register_guest_callback(&reg);
+    }
+    break;
+
+    case CALLBACKOP_unregister:
+    {
+        struct compat_callback_unregister unreg;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&unreg, arg, 1) )
+            break;
+
+        ret = compat_unregister_guest_callback(&unreg);
+    }
+    break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
+long compat_set_callbacks(unsigned long event_selector,
+                          unsigned long event_address,
+                          unsigned long failsafe_selector,
+                          unsigned long failsafe_address)
+{
+    struct compat_callback_register event = {
+        .type = CALLBACKTYPE_event,
+        .address = {
+            .cs = event_selector,
+            .eip = event_address
+        }
+    };
+    struct compat_callback_register failsafe = {
+        .type = CALLBACKTYPE_failsafe,
+        .address = {
+            .cs = failsafe_selector,
+            .eip = failsafe_address
+        }
+    };
+
+    compat_register_guest_callback(&event);
+    compat_register_guest_callback(&failsafe);
+
+    return 0;
+}
+
+#endif /* CONFIG_COMPAT */
+
+static void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
+{
+    char *p;
+    int i;
+
+    /* Fill in all the transfer points with template machine code. */
+
+    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
+    {
+        p = (char *)(hypercall_page + (i * 32));
+        *(u8  *)(p+ 0) = 0xb8;    /* mov  $<i>,%eax */
+        *(u32 *)(p+ 1) = i;
+        *(u16 *)(p+ 5) = 0x82cd;  /* int  $0x82 */
+        *(u8  *)(p+ 7) = 0xc3;    /* ret */
+    }
+
+    /*
+     * HYPERVISOR_iret is special because it doesn't return and expects a
+     * special stack frame. Guests jump at this transfer point instead of
+     * calling it.
+     */
+    p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
+    *(u8  *)(p+ 0) = 0x50;    /* push %eax */
+    *(u8  *)(p+ 1) = 0xb8;    /* mov  $__HYPERVISOR_iret,%eax */
+    *(u32 *)(p+ 2) = __HYPERVISOR_iret;
+    *(u16 *)(p+ 6) = 0x82cd;  /* int  $0x82 */
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
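
The frame compat_iret() consumes at regs->rsp is a run of 32-bit words; a
hypothetical struct for orientation only (the code above indexes the words
directly). Note the VM86 relocation path copies words 9..1 downward when the
destination kernel stack lies above the source, so overlapping regions are
not clobbered mid-copy (hence the descending loop):

    #include <stdint.h>

    /* Illustrative layout only -- not a structure from this patch. */
    struct compat_iret_frame {
        uint32_t eax;     /* +0:  clobbered by the hypercall, restored    */
        uint32_t eip;     /* +4                                           */
        uint32_t cs;      /* +8                                           */
        uint32_t eflags;  /* +12: IOPL stripped, IF -> upcall mask        */
        uint32_t esp;     /* +16: restored only when returning to ring 2/3 */
        uint32_t ss;      /* +20                                          */
    };
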
diff -r 7c5eea5feebd -r 5a690aa51fb5 xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Fri Jan 05 17:32:00 2007 +0000
+++ b/xen/arch/x86/x86_64/entry.S       Fri Jan 05 17:34:27 2007 +0000
@@ -324,7 +324,16 @@ domain_crash_synchronous:
         GET_GUEST_REGS(%rax)
         movq  %rax,%rsp
         # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
+#ifdef CONFIG_COMPAT
+        movq  CPUINFO_current_vcpu(%rax),%rax
+        movq  VCPU_domain(%rax),%rax
+        btl   $_DOMF_compat,DOMAIN_domain_flags(%rax)
+        setnc %al
+        leal  (%rax,%rax,2),%eax
+        orb   %al,UREGS_cs(%rsp)
+#else
         orb   $3,UREGS_cs(%rsp)
+#endif
         # printk(domain_crash_synchronous_string)
         leaq  domain_crash_synchronous_string(%rip),%rdi
         xorl  %eax,%eax
@@ -336,8 +345,15 @@ ENTRY(ret_from_intr)
 ENTRY(ret_from_intr)
         GET_CURRENT(%rbx)
         testb $3,UREGS_cs(%rsp)
-        jnz   test_all_events
-        jmp   restore_all_xen
+        jz    restore_all_xen
+#ifndef CONFIG_COMPAT
+        jmp   test_all_events
+#else
+        movq  VCPU_domain(%rbx),%rax
+        btl   $_DOMF_compat,DOMAIN_domain_flags(%rax)
+        jnc   test_all_events
+        jmp   compat_test_all_events
+#endif
 
         ALIGN
 /* No special register assumptions. */
@@ -355,6 +371,11 @@ handle_exception:
         testb $3,UREGS_cs(%rsp)
         jz    restore_all_xen
         leaq  VCPU_trap_bounce(%rbx),%rdx
+#ifdef CONFIG_COMPAT
+        movq  VCPU_domain(%rbx),%rax
+        btl   $_DOMF_compat,DOMAIN_domain_flags(%rax)
+        jc    compat_post_handle_exception
+#endif
         testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
         jz    test_all_events
         call  create_bounce_frame
@@ -612,3 +633,7 @@ ENTRY(hypercall_args_table)
         .rept NR_hypercalls-(.-hypercall_args_table)
         .byte 0 /* do_ni_hypercall      */
         .endr
+
+#ifdef CONFIG_COMPAT
+#include "compat/entry.S"
+#endif
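
The branchless CS.RPL fix-up in domain_crash_synchronous above reads, in C
terms (a sketch, not Xen source): after btl loads the DOMF_compat bit into
CF, setnc+lea yield 3 for a native 64-bit PV guest (whose kernel runs in
ring 3) and 0 for a compat guest (whose ring-1 selector is left untouched):

    #include <stdint.h>

    /* Mirrors: setnc %al; leal (%rax,%rax,2),%eax -- illustrative only. */
    static inline uint8_t cs_rpl_fix(int is_compat /* CF after btl */)
    {
        uint8_t al = !is_compat;   /* setnc %al           */
        return al + 2 * al;        /* lea -> 3 or 0       */
    }
    /* ... then the saved CS gets: regs->cs |= cs_rpl_fix(is_compat);
     * corresponding to orb %al,UREGS_cs(%rsp). */
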
diff -r 7c5eea5feebd -r 5a690aa51fb5 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Fri Jan 05 17:32:00 2007 +0000
+++ b/xen/arch/x86/x86_64/traps.c       Fri Jan 05 17:34:27 2007 +0000
@@ -246,6 +246,7 @@ unsigned long do_iret(void)
 }
 
 asmlinkage void syscall_enter(void);
+asmlinkage void compat_hypercall(void);
 void __init percpu_traps_init(void)
 {
     char *stack_bottom, *stack;
@@ -257,6 +258,11 @@ void __init percpu_traps_init(void)
         set_intr_gate(TRAP_double_fault, &double_fault);
         idt_table[TRAP_double_fault].a |= 1UL << 32; /* IST1 */
         idt_table[TRAP_nmi].a          |= 2UL << 32; /* IST2 */
+
+#ifdef CONFIG_COMPAT
+        /* The hypercall entry vector is only accessible from ring 1. */
+        _set_gate(idt_table+HYPERCALL_VECTOR, 15, 1, &compat_hypercall);
+#endif
     }
 
     stack_bottom = (char *)get_stack_bottom();
@@ -503,12 +509,16 @@ static void hypercall_page_initialise_ri
     *(u16 *)(p+ 9) = 0x050f;  /* syscall */
 }
 
+#include "compat/traps.c"
+
 void hypercall_page_initialise(struct domain *d, void *hypercall_page)
 {
     if ( is_hvm_domain(d) )
         hvm_hypercall_page_initialise(d, hypercall_page);
+    else if ( !IS_COMPAT(d) )
+        hypercall_page_initialise_ring3_kernel(hypercall_page);
     else
-        hypercall_page_initialise_ring3_kernel(hypercall_page);
+        hypercall_page_initialise_ring1_kernel(hypercall_page);
 }
 
 /*
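
For orientation, this is how a compat guest consumes the page initialised by
hypercall_page_initialise_ring1_kernel() above: every hypercall owns a
32-byte slot, so slot addresses are hypercall_page + nr*32 (a sketch; the
macro name is made up here):

    /* Illustrative only. Guests call into the slot for hypercall <nr>;
     * __HYPERVISOR_iret is jumped to instead, with the 32-bit iret frame
     * already on the stack, since its stub never returns. */
    #define COMPAT_HYPERCALL_ENTRY(page, nr) \
        ((void *)((char *)(page) + (nr) * 32))
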
diff -r 7c5eea5feebd -r 5a690aa51fb5 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Fri Jan 05 17:32:00 2007 +0000
+++ b/xen/include/asm-x86/processor.h   Fri Jan 05 17:34:27 2007 +0000
@@ -559,6 +559,12 @@ void show_page_walk(unsigned long addr);
 void show_page_walk(unsigned long addr);
 asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);
 
+#ifdef CONFIG_COMPAT
+void compat_show_guest_stack(struct cpu_user_regs *, int lines);
+#else
+#define compat_show_guest_stack(regs, lines) ((void)0)
+#endif
+
 /* Dumps current register and stack state. */
 #define dump_execution_state()                                              \
     /* NB. Needs interrupts enabled else we end up in fatal_trap(). */      \
