[Xen-devel] [PATCH] i386: remove NMI deferral

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] i386: remove NMI deferral
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: Tue, 19 Jun 2007 12:01:46 +0200
Delivery-date: Tue, 19 Jun 2007 02:59:09 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
... by instead making sure the selector registers are always stored/
restored correctly despite the potential for an NMI (and, with a
subsequent patch, also an MCE) to kick in.

The idea is to always check the values read from %ds and %es against
__HYPERVISOR_DS, and to store into the current frame (all normal
handlers) or the outermost one (NMI and MCE) only if the value read
differs. That way, any NMI or MCE occurring during frame setup will
store, on behalf of the interrupted handler, whatever selectors have
not been saved so far. The interrupted handler will either have
managed to read the guest selector (in which case it can store it
regardless of whether the NMI/MCE kicked in between the read and the
store), or it will find __HYPERVISOR_DS already in the register, in
which case it knows not to store (as the nested handler will have
done the store already).

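For illustration, here is a rough C rendering of that store-side rule
for a single selector (a sketch only: read_ds()/write_ds() are
hypothetical stand-ins for the mov-from/to-%ds instructions, which
SAVE_ALL below actually implements branchlessly with cmovel, and regs
is the frame being set up):

    /* Sketch of the store-side rule, shown for %ds. */
    static void save_guest_ds(struct cpu_user_regs *regs)
    {
        unsigned int sel = read_ds();    /* mov %ds,%edi */
        write_ds(__HYPERVISOR_DS);       /* mov %ecx,%ds */
        if ( sel != __HYPERVISOR_DS )    /* cmpw %cx,%di */
            regs->ds = sel;              /* read the guest value in time */
        /*
         * Otherwise a nested NMI/MCE had already loaded __HYPERVISOR_DS
         * before we read %ds, and it stored the guest selector into the
         * frame on our behalf, so we must not overwrite it.
         */
    }
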
For the restore portion this makes use of the fact that there is
exactly one such code sequence: by moving the selector restores past
all other restores (including all stack pointer adjustments), the
NMI/MCE handlers can safely detect whether any selector has already
been restored (by range checking the saved EIP) and move EIP back to
the beginning of the selector restore sequence, without having to
play with the stack pointer itself or any other GPR.

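Expressed as a hedged C sketch (restore_sregs_guest and
restore_iret_guest stand for the .Lrestore_sregs_guest and
.Lrestore_iret_guest labels in the patch; nmi_regs is the NMI's own
exception frame):

    /* Sketch of the EIP range check done on the .Lnmi_xen path. */
    extern const char restore_sregs_guest[], restore_iret_guest[];

    static void nmi_rewind_sreg_restore(struct cpu_user_regs *nmi_regs)
    {
        unsigned long eip = nmi_regs->eip;

        /*
         * Interrupted strictly after the first selector reload but no
         * later than the iret label: some selectors may already hold
         * guest values, so restart the whole sequence.
         */
        if ( eip > (unsigned long)restore_sregs_guest &&
             eip <= (unsigned long)restore_iret_guest )
            nmi_regs->eip = (unsigned long)restore_sregs_guest;
    }

Rewinding is safe because reloading a selector from the frame is
idempotent, and no GPR or stack pointer adjustment happens inside
that window.
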
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Index: 2007-06-18/xen/arch/x86/x86_32/entry.S
===================================================================
--- 2007-06-18.orig/xen/arch/x86/x86_32/entry.S 2007-06-18 11:46:54.000000000 +0200
+++ 2007-06-18/xen/arch/x86/x86_32/entry.S      2007-06-18 11:51:56.000000000 +0200
@@ -77,14 +77,29 @@
 restore_all_guest:
         ASSERT_INTERRUPTS_DISABLED
         testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
-        jnz  restore_all_vm86
-#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
+#ifndef CONFIG_X86_SUPERVISOR_MODE_KERNEL
+        popl %ebx
+        popl %ecx
+        popl %edx
+        popl %esi
+        popl %edi
+        popl %ebp
+        popl %eax
+        leal 4(%esp),%esp
+        jnz  .Lrestore_iret_guest
+.Lrestore_sregs_guest:
+.Lft1:  mov  UREGS_ds-UREGS_eip(%esp),%ds
+.Lft2:  mov  UREGS_es-UREGS_eip(%esp),%es
+.Lft3:  mov  UREGS_fs-UREGS_eip(%esp),%fs
+.Lft4:  mov  UREGS_gs-UREGS_eip(%esp),%gs
+.Lrestore_iret_guest:
+#else
+        jnz   restore_all_vm86
         testl $2,UREGS_cs(%esp)
         jnz   1f
         call  restore_ring0_guest
         jmp   restore_all_vm86
 1:
-#endif
 .Lft1:  mov  UREGS_ds(%esp),%ds
 .Lft2:  mov  UREGS_es(%esp),%es
 .Lft3:  mov  UREGS_fs(%esp),%fs
@@ -98,6 +113,7 @@ restore_all_vm86:
         popl %ebp
         popl %eax
         addl $4,%esp
+#endif
 .Lft5:  iret
 .section .fixup,"ax"
 .Lfx5:  subl  $28,%esp
@@ -109,9 +125,13 @@ restore_all_vm86:
         movl  %edx,UREGS_edx+4(%esp)
         movl  %ecx,UREGS_ecx+4(%esp)
         movl  %ebx,UREGS_ebx+4(%esp)
+#ifndef CONFIG_X86_SUPERVISOR_MODE_KERNEL
+.equ .Lfx1, .Lfx5
+#else
 .Lfx1:  SET_XEN_SEGMENTS(a)
         movl  %eax,%fs
         movl  %eax,%gs
+#endif
         sti
         popl  %esi
         pushfl                         # EFLAGS
@@ -169,8 +189,8 @@ restore_all_xen:
 ENTRY(hypercall)
         subl $4,%esp
         FIXUP_RING0_GUEST_STACK
-        SAVE_ALL(b)
-        sti
+        SAVE_ALL(1f,1f)
+1:      sti
         GET_CURRENT(%ebx)
         cmpl  $NR_hypercalls,%eax
         jae   bad_hypercall
@@ -420,9 +440,13 @@ ENTRY(divide_error)
         ALIGN
 handle_exception:
         FIXUP_RING0_GUEST_STACK
-        SAVE_ALL_NOSEGREGS(a)
-        SET_XEN_SEGMENTS(a)
-        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
+        SAVE_ALL(1f,2f)
+        .text 1
+1:      mov   %ecx,%ds
+        mov   %ecx,%es
+        jmp   2f
+        .previous
+2:      testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
         jz    exception_with_ints_disabled
         sti                             # re-enable interrupts
 1:      xorl  %eax,%eax
@@ -542,9 +566,9 @@ ENTRY(spurious_interrupt_bug)
         jmp   handle_exception
 
 ENTRY(early_page_fault)
-        SAVE_ALL_NOSEGREGS(a)
-        movl  %esp,%edx
-        pushl %edx
+        SAVE_ALL(1f,1f)
+1:      movl  %esp,%eax
+        pushl %eax
         call  do_early_page_fault
         addl  $4,%esp
         jmp   restore_all_xen
@@ -555,49 +579,44 @@ ENTRY(nmi)
         iret
 #else
         # Save state but do not trash the segment registers!
-        # We may otherwise be unable to reload them or copy them to ring 1. 
+        pushl $TRAP_nmi<<16
+        SAVE_ALL(.Lnmi_xen,.Lnmi_common)
+.Lnmi_common:
+        movl  %esp,%eax
         pushl %eax
-        SAVE_ALL_NOSEGREGS(a)
-
-        # We can only process the NMI if:
-        #  A. We are the outermost Xen activation (in which case we have
-        #     the selectors safely saved on our stack)
-        #  B. DS and ES contain sane Xen values.
-        # In all other cases we bail without touching DS-GS, as we have
-        # interrupted an enclosing Xen activation in tricky prologue or
-        # epilogue code.
-        movl  UREGS_eflags(%esp),%eax
-        movb  UREGS_cs(%esp),%al
-        testl $(3|X86_EFLAGS_VM),%eax
-        jnz   continue_nmi
-        movl  %ds,%eax
-        cmpw  $(__HYPERVISOR_DS),%ax
-        jne   defer_nmi
-        movl  %es,%eax
-        cmpw  $(__HYPERVISOR_DS),%ax
-        jne   defer_nmi
-
-continue_nmi:
-        SET_XEN_SEGMENTS(d)
-        movl  %esp,%edx
-        pushl %edx
         call  do_nmi
         addl  $4,%esp
         jmp   ret_from_intr
-
-defer_nmi:
-        movl  $FIXMAP_apic_base,%eax
-        # apic_wait_icr_idle()
-1:      movl  %ss:APIC_ICR(%eax),%ebx
-        testl $APIC_ICR_BUSY,%ebx
-        jnz   1b
-        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
-        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_PHYSICAL | \
-                TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
-        jmp   restore_all_xen
+.Lnmi_xen:
+        GET_GUEST_REGS(%ebx)
+        testl $X86_EFLAGS_VM,%ss:UREGS_eflags(%ebx)
+        mov   %ds,%eax
+        mov   %es,%edx
+        jnz   .Lnmi_vm86
+        cmpw  %ax,%cx
+        mov   %ecx,%ds
+        cmovel UREGS_ds(%ebx),%eax
+        cmpw  %dx,%cx
+        movl  %eax,UREGS_ds(%ebx)
+        cmovel UREGS_es(%ebx),%edx
+        mov   %ecx,%es
+        movl  $.Lrestore_sregs_guest,%ecx
+        movl  %edx,UREGS_es(%ebx)
+        cmpl  %ecx,UREGS_eip(%esp)
+        jbe   .Lnmi_common
+        cmpl  $.Lrestore_iret_guest,UREGS_eip(%esp)
+        ja    .Lnmi_common
+        movl  %ecx,UREGS_eip(%esp)
+        jmp   .Lnmi_common
+.Lnmi_vm86:
+        mov   %ecx,%ds
+        mov   %ecx,%es
+        jmp   .Lnmi_common
 #endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
 
 ENTRY(setup_vm86_frame)
+        mov %ecx,%ds
+        mov %ecx,%es
         # Copies the entire stack frame forwards by 16 bytes.
         .macro copy_vm86_words count=18
         .if \count
Index: 2007-06-18/xen/arch/x86/x86_32/traps.c
===================================================================
--- 2007-06-18.orig/xen/arch/x86/x86_32/traps.c 2007-06-18 11:46:54.000000000 +0200
+++ 2007-06-18/xen/arch/x86/x86_32/traps.c      2007-06-18 11:47:30.000000000 +0200
@@ -232,15 +232,6 @@ unsigned long do_iret(void)
     return 0;
 }
 
-#include <asm/asm_defns.h>
-BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
-fastcall void smp_deferred_nmi(struct cpu_user_regs *regs)
-{
-    asmlinkage void do_nmi(struct cpu_user_regs *);
-    ack_APIC_irq();
-    do_nmi(regs);
-}
-
 void __init percpu_traps_init(void)
 {
     struct tss_struct *tss = &doublefault_tss;
@@ -252,8 +243,6 @@ void __init percpu_traps_init(void)
     /* The hypercall entry vector is only accessible from ring 1. */
     _set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall);
 
-    set_intr_gate(TRAP_deferred_nmi, &deferred_nmi);
-
     /*
      * Make a separate task for double faults. This will get us debug output if
      * we blow the kernel stack.
Index: 2007-06-18/xen/include/asm-x86/processor.h
===================================================================
--- 2007-06-18.orig/xen/include/asm-x86/processor.h     2007-06-18 11:46:54.000000000 +0200
+++ 2007-06-18/xen/include/asm-x86/processor.h  2007-06-18 11:48:56.000000000 +0200
@@ -104,7 +104,6 @@
 #define TRAP_alignment_check  17
 #define TRAP_machine_check    18
 #define TRAP_simd_error       19
-#define TRAP_deferred_nmi     31
 
 /* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
 /* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
Index: 2007-06-18/xen/include/asm-x86/x86_32/asm_defns.h
===================================================================
--- 2007-06-18.orig/xen/include/asm-x86/x86_32/asm_defns.h      2007-06-18 11:46:54.000000000 +0200
+++ 2007-06-18/xen/include/asm-x86/x86_32/asm_defns.h   2007-06-18 11:47:30.000000000 +0200
@@ -26,7 +26,7 @@
 #define ASSERT_INTERRUPTS_ENABLED  ASSERT_INTERRUPT_STATUS(nz)
 #define ASSERT_INTERRUPTS_DISABLED ASSERT_INTERRUPT_STATUS(z)
 
-#define __SAVE_ALL_PRE                                  \
+#define SAVE_ALL(xen_lbl, vm86_lbl)                     \
         cld;                                            \
         pushl %eax;                                     \
         pushl %ebp;                                     \
@@ -37,31 +37,32 @@
         pushl %ecx;                                     \
         pushl %ebx;                                     \
         testl $(X86_EFLAGS_VM),UREGS_eflags(%esp);      \
-        jz 2f;                                          \
-        call setup_vm86_frame;                          \
-        jmp 3f;                                         \
-        2:testb $3,UREGS_cs(%esp);                      \
-        jz 1f;                                          \
-        mov %ds,UREGS_ds(%esp);                         \
-        mov %es,UREGS_es(%esp);                         \
+        mov %ds,%edi;                                   \
+        mov %es,%esi;                                   \
+        movl $(__HYPERVISOR_DS),%ecx;                   \
+        jnz 86f;                                        \
+        .text 1;                                        \
+        86:call setup_vm86_frame;                       \
+        jmp vm86_lbl;                                   \
+        .previous;                                      \
+        testb $3,UREGS_cs(%esp);                        \
+        jz xen_lbl;                                     \
+        cmpw %cx,%di;                                   \
+        mov %ecx,%ds;                                   \
         mov %fs,UREGS_fs(%esp);                         \
+        cmovel UREGS_ds(%esp),%edi;                     \
+        cmpw %cx,%si;                                   \
+        mov %edi,UREGS_ds(%esp);                        \
+        cmovel UREGS_es(%esp),%esi;                     \
+        mov %ecx,%es;                                   \
         mov %gs,UREGS_gs(%esp);                         \
-        3:
-
-#define SAVE_ALL_NOSEGREGS(_reg)                \
-        __SAVE_ALL_PRE                          \
-        1:
+        mov %esi,UREGS_es(%esp)
 
 #define SET_XEN_SEGMENTS(_reg)                          \
         movl $(__HYPERVISOR_DS),%e ## _reg ## x;        \
         mov %e ## _reg ## x,%ds;                        \
         mov %e ## _reg ## x,%es;
 
-#define SAVE_ALL(_reg)                          \
-        __SAVE_ALL_PRE                          \
-        SET_XEN_SEGMENTS(_reg)                  \
-        1:
-
 #ifdef PERF_COUNTERS
 #define PERFC_INCR(_name,_idx,_cur)                     \
         pushl _cur;                                     \
@@ -97,8 +98,8 @@ __asm__(                                
     STR(x) ":\n\t"                              \
     "pushl $"#v"<<16\n\t"                       \
     STR(FIXUP_RING0_GUEST_STACK)                \
-    STR(SAVE_ALL(a))                            \
-    "movl %esp,%eax\n\t"                        \
+    STR(SAVE_ALL(1f,1f)) "\n\t"                 \
+    "1:movl %esp,%eax\n\t"                      \
     "pushl %eax\n\t"                            \
     "call "STR(smp_##x)"\n\t"                   \
     "addl $4,%esp\n\t"                          \
@@ -109,8 +110,8 @@ __asm__(                                
     "\n" __ALIGN_STR"\n"                        \
     "common_interrupt:\n\t"                     \
     STR(FIXUP_RING0_GUEST_STACK)                \
-    STR(SAVE_ALL(a))                            \
-    "movl %esp,%eax\n\t"                        \
+    STR(SAVE_ALL(1f,1f)) "\n\t"                 \
+    "1:movl %esp,%eax\n\t"                      \
     "pushl %eax\n\t"                            \
     "call " STR(do_IRQ) "\n\t"                  \
     "addl $4,%esp\n\t"                          \



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
