To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Some fixes to IRET hypercall and failsafe callback handlers:
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 29 Mar 2006 14:26:08 +0000
Delivery-date: Wed, 29 Mar 2006 14:27:56 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID ae6af19f47d3805fd07f320a8d2132963768c67a
# Parent  e84b1185013ff6784ce94c557ce710d78c828de0
Some fixes to IRET hypercall and failsafe callback handlers:
 1. IRET hypercall must restore the event callback mask. This
    was missing on x86/64, and both subarchitectures now restore
    from EFLAGS.IF.
 2. Failsafe callbacks are fixed to detect whether the fault is due to
    a bad segment or due to IRET. In the latter case we now
    immediately kill the process. This avoids infinite looping
    between the IRET hypercall and the failsafe callback handler in the guest.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
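
For reference, the mask-restore rule that both subarchitectures now share can
be sketched in C (an illustrative sketch, not code from the patch; the helper
name is invented here, and X86_EFLAGS_IF is the architectural IF bit, 1 << 9):

    /* Illustrative: how the IRET hypercall now derives the event mask from
     * the supplied EFLAGS image (mirrors the traps.c hunks below). */
    #include <assert.h>
    #include <stdint.h>

    #define X86_EFLAGS_IF (1u << 9)

    static uint8_t upcall_mask_from_eflags(uint32_t eflags)
    {
        /* Events are masked exactly when the frame has interrupts disabled. */
        return !(eflags & X86_EFLAGS_IF);
    }

    int main(void)
    {
        assert(upcall_mask_from_eflags(0x200) == 0); /* IF set: unmasked */
        assert(upcall_mask_from_eflags(0x000) == 1); /* IF clear: masked */
        return 0;
    }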

diff -r e84b1185013f -r ae6af19f47d3 linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S
--- a/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S Wed Mar 29 09:46:36 2006
+++ b/linux-2.6-xen-sparse/arch/i386/kernel/entry-xen.S Wed Mar 29 11:02:45 2006
@@ -65,7 +65,6 @@
 ORIG_EAX       = 0x24
 EIP            = 0x28
 CS             = 0x2C
-EVENT_MASK     = 0x2E
 EFLAGS         = 0x30
 OLDESP         = 0x34
 OLDSS          = 0x38
@@ -290,14 +289,14 @@
 restore_nocheck:
 #else
 restore_nocheck:
-       testl $(VM_MASK|NMI_MASK), EFLAGS(%esp)
+       movl EFLAGS(%esp), %eax
+       testl $(VM_MASK|NMI_MASK), %eax
        jnz hypervisor_iret
-       movb EVENT_MASK(%esp), %al
-       notb %al                        # %al == ~saved_mask
+       shr $9, %eax                    # EAX[0] == IRET_EFLAGS.IF
        GET_VCPU_INFO
        andb evtchn_upcall_mask(%esi),%al
-       andb $1,%al                     # %al == mask & ~saved_mask
-       jnz restore_all_enable_events   #     != 0 => reenable event delivery
+       andb $1,%al                     # EAX[0] == IRET_EFLAGS.IF & event_mask
+       jnz restore_all_enable_events   #        != 0 => enable event delivery
 #endif
        RESTORE_REGS
        addl $4, %esp
@@ -555,14 +554,9 @@
        RESTORE_REGS
        addl $4, %esp
 1:     iret
-.section .fixup,"ax"
-2:     pushl $0
-       pushl $do_iret_error
-       jmp error_code
-.previous
 .section __ex_table,"a"
        .align 4
-       .long 1b,2b
+       .long 1b,iret_exc
 .previous
 14:    __DISABLE_INTERRUPTS
        jmp  11b
@@ -614,30 +608,51 @@
        .byte 0x00,0x00                 # jmp  11b
 
 # Hypervisor uses this for application faults while it executes.
+# We get here for two reasons:
+#  1. Fault while reloading DS, ES, FS or GS
+#  2. Fault while executing IRET
+# Category 1 we fix up by reattempting the load, and zeroing the segment
+# register if the load fails.
+# Category 2 we fix up by jumping to do_iret_error. We cannot use the
+# normal Linux return path in this case because if we use the IRET hypercall
+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+# We distinguish between categories by maintaining a status value in EAX.
 ENTRY(failsafe_callback)
-1:     popl %ds
-2:     popl %es
-3:     popl %fs
-4:     popl %gs
-       subl $4,%esp
-       SAVE_ALL
-       jmp  ret_from_exception
-.section .fixup,"ax";  \
-6:     movl $0,(%esp); \
-       jmp 1b;         \
-7:     movl $0,(%esp); \
-       jmp 2b;         \
-8:     movl $0,(%esp); \
-       jmp 3b;         \
-9:     movl $0,(%esp); \
-       jmp 4b;         \
-.previous;             \
-.section __ex_table,"a";\
-       .align 4;       \
-       .long 1b,6b;    \
-       .long 2b,7b;    \
-       .long 3b,8b;    \
-       .long 4b,9b;    \
+       pushl %eax
+       movl $1,%eax
+1:     mov 4(%esp),%ds
+2:     mov 8(%esp),%es
+3:     mov 12(%esp),%fs
+4:     mov 16(%esp),%gs
+       testl %eax,%eax
+       popl %eax
+       jz 5f
+       addl $16,%esp           # EAX != 0 => Category 2 (Bad IRET)
+       jmp iret_exc
+5:     addl $16,%esp           # EAX == 0 => Category 1 (Bad segment)
+       pushl $0
+       SAVE_ALL
+       jmp ret_from_exception
+.section .fixup,"ax";          \
+6:     xorl %eax,%eax;         \
+       movl %eax,4(%esp);      \
+       jmp 1b;                 \
+7:     xorl %eax,%eax;         \
+       movl %eax,8(%esp);      \
+       jmp 2b;                 \
+8:     xorl %eax,%eax;         \
+       movl %eax,12(%esp);     \
+       jmp 3b;                 \
+9:     xorl %eax,%eax;         \
+       movl %eax,16(%esp);     \
+       jmp 4b;                 \
+.previous;                     \
+.section __ex_table,"a";       \
+       .align 4;               \
+       .long 1b,6b;            \
+       .long 2b,7b;            \
+       .long 3b,8b;            \
+       .long 4b,9b;            \
 .previous
 #endif
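
The new restore_nocheck test above can be read as the following C predicate
(an illustrative sketch; the function and parameter names mirror the assembly
and are not a real kernel API):

    /* After "shr $9, %eax", EAX[0] holds the IF bit of the frame being
     * restored; ANDing with evtchn_upcall_mask leaves a nonzero result only
     * when the frame wants interrupts on while upcalls are still masked,
     * i.e. event delivery must be re-enabled via the slow path. */
    #include <stdint.h>

    static int must_reenable_events(uint32_t iret_eflags,
                                    uint8_t evtchn_upcall_mask)
    {
        return (((iret_eflags >> 9) & evtchn_upcall_mask) & 1) != 0;
    }

    int main(void)
    {
        /* IF set (0x200) and upcalls masked (1): must take the slow path. */
        return must_reenable_events(0x200, 1) ? 0 : 1;
    }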
 
diff -r e84b1185013f -r ae6af19f47d3 linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S       Wed Mar 29 09:46:36 2006
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/entry-xen.S       Wed Mar 29 11:02:45 2006
@@ -520,13 +520,13 @@
        CFI_REMEMBER_STATE
        jnz  retint_careful
 retint_restore_args:
-        movb EVENT_MASK-REST_SKIP(%rsp), %al
-        notb %al                       # %al == ~saved_mask
-        XEN_GET_VCPU_INFO(%rsi)
-        andb evtchn_upcall_mask(%rsi),%al
-       andb $1,%al                     # %al == mask & ~saved_mask
-       jnz restore_all_enable_events   # != 0 => reenable event delivery      
-        XEN_PUT_VCPU_INFO(%rsi)
+       movl EFLAGS-REST_SKIP(%rsp), %eax
+       shr $9, %eax                    # EAX[0] == IRET_EFLAGS.IF
+       XEN_GET_VCPU_INFO(%rsi)
+       andb evtchn_upcall_mask(%rsi),%al
+       andb $1,%al                     # EAX[0] == IRET_EFLAGS.IF & event_mask
+       jnz restore_all_enable_events   #        != 0 => enable event delivery
+       XEN_PUT_VCPU_INFO(%rsi)
                
        RESTORE_ARGS 0,8,0
        HYPERVISOR_IRET 0
@@ -803,12 +803,11 @@
 # So, on entry to the handler we detect whether we interrupted an
 # existing activation in its critical region -- if so, we pop the current
 # activation and restart the handler using the previous one.
-ENTRY(do_hypervisor_callback)   # do_hyperviosr_callback(struct *pt_regs)
+ENTRY(do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
 # Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
 # see the correct pointer to the pt_regs
        movq %rdi, %rsp            # we don't return, adjust the stack frame
-11:    movb $0, EVENT_MASK(%rdi)
-       movq %gs:pda_irqstackptr,%rax
+11:    movq %gs:pda_irqstackptr,%rax
        incl %gs:pda_irqcount
        cmovzq %rax,%rsp
        pushq %rdi
@@ -853,15 +852,44 @@
 # When the kernel is interrupted in the critical section, the kernel 
 # will do IRET in that case, and everything will be restored at that point, 
 # i.e. it just resumes from the next instruction interrupted with the same context. 
-       
+
 # Hypervisor uses this for application faults while it executes.
-# Unlike i386 there is no need to reload the saved segment selectors:
-# Xen already reloaded all valid ones and zeroed the others.
+# We get here for two reasons:
+#  1. Fault while reloading DS, ES, FS or GS
+#  2. Fault while executing IRET
+# Category 1 we do not need to fix up as Xen has already reloaded all segment
+# registers that could be reloaded and zeroed the others.
+# Category 2 we fix up by killing the current process. We cannot use the
+# normal Linux return path in this case because if we use the IRET hypercall
+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+# We distinguish between categories by comparing each saved segment register
+# with its current contents: any discrepancy means we are in category 1.
 ENTRY(failsafe_callback)
-       addq $0x30,%rsp /* skip %rcx,%r11,%ds,%es,%fs,%gs */
+       movw %ds,%cx
+       cmpw %cx,0x10(%rsp)
+       jne 1f
+       movw %es,%cx
+       cmpw %cx,0x18(%rsp)
+       jne 1f
+       movw %fs,%cx
+       cmpw %cx,0x20(%rsp)
+       jne 1f
+       movw %gs,%cx
+       cmpw %cx,0x28(%rsp)
+       jne 1f
+       /* All segments match their saved values => Category 2 (Bad IRET). */
+       movq (%rsp),%rcx
+       movq 8(%rsp),%r11
+       addq $0x30,%rsp
+       movq $-9999,%rdi        /* better code? */
+       jmp do_exit                     
+1:     /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
+       movq (%rsp),%rcx
+       movq 8(%rsp),%r11
+       addq $0x30,%rsp
+       pushq $0
        SAVE_ALL
-       jmp  error_exit
- 
+       jmp error_exit
 #if 0        
         .section __ex_table,"a"
         .align 8
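
The category test in the new x86_64 failsafe_callback can be expressed in C
roughly as below (a sketch under the comment's stated assumption that Xen has
already reloaded or zeroed every loadable selector; seg_saved/seg_now are
stand-ins for the stack slots and live segment registers the assembly compares):

    #include <stdint.h>

    enum failsafe_category { BAD_SEGMENT = 1, BAD_IRET = 2 };

    /* Compare each saved selector (DS, ES, FS, GS) with its current value.
     * Any mismatch means a selector failed to reload => category 1.
     * If all four match, the fault must have come from IRET itself =>
     * category 2, and the process is killed rather than retrying via the
     * IRET hypercall (which would loop back into failsafe_callback). */
    static enum failsafe_category classify_failsafe(const uint16_t seg_saved[4],
                                                    const uint16_t seg_now[4])
    {
        for (int i = 0; i < 4; i++)
            if (seg_saved[i] != seg_now[i])
                return BAD_SEGMENT;
        return BAD_IRET;
    }

    int main(void)
    {
        /* All selectors match their saved values => bad IRET. */
        uint16_t saved[4] = { 0x23, 0x23, 0x23, 0x23 };
        uint16_t now[4]   = { 0x23, 0x23, 0x23, 0x23 };
        return classify_failsafe(saved, now) == BAD_IRET ? 0 : 1;
    }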
diff -r e84b1185013f -r ae6af19f47d3 linux-2.6-xen-sparse/arch/x86_64/kernel/xen_entry.S
--- a/linux-2.6-xen-sparse/arch/x86_64/kernel/xen_entry.S       Wed Mar 29 09:46:36 2006
+++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/xen_entry.S       Wed Mar 29 11:02:45 2006
@@ -2,7 +2,7 @@
  * Copied from arch/xen/i386/kernel/entry.S
  */                        
 /* Offsets into shared_info_t. */                
-#define evtchn_upcall_pending          0
+#define evtchn_upcall_pending          /* 0 */
 #define evtchn_upcall_mask             1
 
 #define sizeof_vcpu_shift              6
@@ -35,7 +35,6 @@
                                XEN_PUT_VCPU_INFO(reg)
 #define XEN_TEST_PENDING(reg)  testb $0xFF,evtchn_upcall_pending(reg)
 
-EVENT_MASK      = (CS+4)
 VGCF_IN_SYSCALL = (1<<8)
         
        
diff -r e84b1185013f -r ae6af19f47d3 xen/arch/x86/x86_32/asm-offsets.c
--- a/xen/arch/x86/x86_32/asm-offsets.c Wed Mar 29 09:46:36 2006
+++ b/xen/arch/x86/x86_32/asm-offsets.c Wed Mar 29 11:02:45 2006
@@ -44,7 +44,6 @@
     OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
     OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
     OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
-    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
     OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
     DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
     BLANK();
diff -r e84b1185013f -r ae6af19f47d3 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Wed Mar 29 09:46:36 2006
+++ b/xen/arch/x86/x86_32/traps.c       Wed Mar 29 11:02:45 2006
@@ -230,8 +230,8 @@
     /* No longer in NMI context. */
     clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
 
-    /* Restore upcall mask from saved value. */
-    current->vcpu_info->evtchn_upcall_mask = regs->saved_upcall_mask;
+    /* Restore upcall mask from supplied EFLAGS.IF. */
+    current->vcpu_info->evtchn_upcall_mask = !(eflags & X86_EFLAGS_IF);
 
     /*
      * The hypercall exit path will overwrite EAX with this return
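
As a cross-reference between the C and assembly halves: X86_EFLAGS_IF is bit 9
of EFLAGS, which is why the entry.S paths above use "shr $9". An illustrative
check (not part of the patch):

    /* !(eflags & X86_EFLAGS_IF) and !((eflags >> 9) & 1) agree for all
     * values; X86_EFLAGS_IF is the standard 0x200. */
    #include <assert.h>

    #define X86_EFLAGS_IF 0x200u

    int main(void)
    {
        for (unsigned e = 0; e < 0x1000; e++)
            assert(!(e & X86_EFLAGS_IF) == !((e >> 9) & 1));
        return 0;
    }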
diff -r e84b1185013f -r ae6af19f47d3 xen/arch/x86/x86_64/asm-offsets.c
--- a/xen/arch/x86/x86_64/asm-offsets.c Wed Mar 29 09:46:36 2006
+++ b/xen/arch/x86/x86_64/asm-offsets.c Wed Mar 29 11:02:45 2006
@@ -48,7 +48,6 @@
     OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
     OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
     OFFSET(UREGS_ss, struct cpu_user_regs, ss);
-    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
     OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
     DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
     BLANK();
diff -r e84b1185013f -r ae6af19f47d3 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Wed Mar 29 09:46:36 2006
+++ b/xen/arch/x86/x86_64/traps.c       Wed Mar 29 11:02:45 2006
@@ -210,6 +210,9 @@
     /* No longer in NMI context. */
     clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
 
+    /* Restore upcall mask from supplied EFLAGS.IF. */
+    current->vcpu_info->evtchn_upcall_mask = !(iret_saved.rflags & EF_IE);
+
     /* Saved %rax gets written back to regs->rax in entry.S. */
     return iret_saved.rax;
 }
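
Putting the two halves together, the round trip for a frame with IF set looks
like this (an end-to-end sketch combining the rules illustrated above; the
constant name is assumed, not taken from the patch):

    #include <assert.h>

    #define IF_BIT 0x200u

    int main(void)
    {
        unsigned guest_eflags = IF_BIT;                /* frame wants IRQs on */
        unsigned char mask = !(guest_eflags & IF_BIT); /* hypervisor IRET path */
        assert(mask == 0);                             /* upcalls left enabled */
        /* Guest return path: re-enable events only when IF is set AND upcalls
         * are currently masked; here mask == 0, so the fast path is taken. */
        assert((((guest_eflags >> 9) & mask) & 1) == 0);
        return 0;
    }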

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
