[Xen-devel] [PATCH 1/5] x86: move init_tss into per-CPU space

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 1/5] x86: move init_tss into per-CPU space
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Fri, 10 Jul 2009 15:42:14 +0100
Delivery-date: Fri, 10 Jul 2009 07:42:52 -0700
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
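
For reference, every hunk below applies the same conversion, sketched here with
a hypothetical per-CPU variable "foo" (DEFINE_PER_CPU, DECLARE_PER_CPU,
this_cpu() and per_cpu() are the existing Xen per-CPU primitives; the sketch is
illustrative only, not something the patch adds):

    #include <xen/percpu.h>   /* DEFINE_PER_CPU, this_cpu(), per_cpu() */

    struct foo_struct { unsigned long field; };

    /* Old style: a statically sized array with one slot per possible CPU.
     *     struct foo_struct foo[NR_CPUS];
     *     foo[smp_processor_id()].field;   foo[cpu].field;
     */

    /* New style: a per-CPU variable (emitted as the symbol per_cpu__foo). */
    DEFINE_PER_CPU(struct foo_struct, foo);

    static void example(unsigned int cpu)
    {
        this_cpu(foo).field = 1;      /* the current CPU's instance */
        per_cpu(foo, cpu).field = 2;  /* a specific CPU's instance  */
    }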

--- 2009-07-10.orig/xen/arch/x86/acpi/suspend.c 2009-04-09 14:05:35.000000000 +0200
+++ 2009-07-10/xen/arch/x86/acpi/suspend.c      2009-07-10 13:57:21.000000000 +0200
@@ -57,7 +57,7 @@ void restore_rest_processor_state(void)
     }
 #else /* !defined(CONFIG_X86_64) */
     if ( supervisor_mode_kernel && cpu_has_sep )
-        wrmsr(MSR_IA32_SYSENTER_ESP, &init_tss[smp_processor_id()].esp1, 0);
+        wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0);
 #endif
 
     /* Maybe load the debug registers. */
--- 2009-07-10.orig/xen/arch/x86/cpu/common.c   2009-03-24 09:04:02.000000000 +0100
+++ 2009-07-10/xen/arch/x86/cpu/common.c        2009-07-10 13:57:21.000000000 +0200
@@ -576,7 +576,7 @@ void __init early_cpu_init(void)
 void __cpuinit cpu_init(void)
 {
        int cpu = smp_processor_id();
-       struct tss_struct *t = &init_tss[cpu];
+       struct tss_struct *t = &this_cpu(init_tss);
        struct desc_ptr gdt_desc = {
                .base = (unsigned long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
                .limit = LAST_RESERVED_GDT_BYTE
--- 2009-07-10.orig/xen/arch/x86/domain.c       2009-07-03 10:20:57.000000000 +0200
+++ 2009-07-10/xen/arch/x86/domain.c    2009-07-10 13:57:21.000000000 +0200
@@ -1223,7 +1223,7 @@ static void save_segments(struct vcpu *v
 
 static inline void switch_kernel_stack(struct vcpu *v)
 {
-    struct tss_struct *tss = &init_tss[smp_processor_id()];
+    struct tss_struct *tss = &this_cpu(init_tss);
     tss->esp1 = v->arch.guest_context.kernel_sp;
     tss->ss1  = v->arch.guest_context.kernel_ss;
 }
--- 2009-07-10.orig/xen/arch/x86/hvm/vmx/vmcs.c 2009-07-10 08:51:30.000000000 +0200
+++ 2009-07-10/xen/arch/x86/hvm/vmx/vmcs.c      2009-07-10 13:57:21.000000000 +0200
@@ -502,7 +502,7 @@ static void vmx_set_host_env(struct vcpu
     __vmwrite(HOST_IDTR_BASE, (unsigned long)idt_tables[cpu]);
 
     __vmwrite(HOST_TR_SELECTOR, TSS_ENTRY << 3);
-    __vmwrite(HOST_TR_BASE, (unsigned long)&init_tss[cpu]);
+    __vmwrite(HOST_TR_BASE, (unsigned long)&per_cpu(init_tss, cpu));
 
     __vmwrite(HOST_SYSENTER_ESP, get_stack_bottom());
 
--- 2009-07-10.orig/xen/arch/x86/setup.c        2009-07-10 08:51:30.000000000 +0200
+++ 2009-07-10/xen/arch/x86/setup.c     2009-07-10 13:57:21.000000000 +0200
@@ -117,7 +117,7 @@ DEFINE_PER_CPU(struct desc_struct *, com
     = boot_cpu_compat_gdt_table;
 #endif
 
-struct tss_struct init_tss[NR_CPUS];
+DEFINE_PER_CPU(struct tss_struct, init_tss);
 
 char __attribute__ ((__section__(".bss.stack_aligned"))) cpu0_stack[STACK_SIZE];
 
--- 2009-07-10.orig/xen/arch/x86/traps.c        2009-06-19 11:11:23.000000000 +0200
+++ 2009-07-10/xen/arch/x86/traps.c     2009-07-10 13:57:21.000000000 +0200
@@ -326,7 +326,7 @@ void show_stack_overflow(unsigned int cp
 
     printk("Valid stack range: %p-%p, sp=%p, tss.esp0=%p\n",
            (void *)esp_top, (void *)esp_bottom, (void *)esp,
-           (void *)init_tss[cpu].esp0);
+           (void *)per_cpu(init_tss, cpu).esp0);
 
     /* Trigger overflow trace if %esp is within 512 bytes of the guard page. */
     if ( ((unsigned long)(esp - esp_top) > 512) &&
@@ -3066,7 +3066,7 @@ void set_intr_gate(unsigned int n, void 
 
 void load_TR(void)
 {
-    struct tss_struct *tss = &init_tss[smp_processor_id()];
+    struct tss_struct *tss = &this_cpu(init_tss);
     struct desc_ptr old_gdt, tss_gdt = {
         .base = (long)(this_cpu(gdt_table) - FIRST_RESERVED_GDT_ENTRY),
         .limit = LAST_RESERVED_GDT_BYTE
--- 2009-07-10.orig/xen/arch/x86/x86_32/mm.c    2009-06-19 11:11:23.000000000 +0200
+++ 2009-07-10/xen/arch/x86/x86_32/mm.c 2009-07-10 13:57:21.000000000 +0200
@@ -227,8 +227,7 @@ long subarch_memory_op(int op, XEN_GUEST
 
 long do_stack_switch(unsigned long ss, unsigned long esp)
 {
-    int nr = smp_processor_id();
-    struct tss_struct *t = &init_tss[nr];
+    struct tss_struct *t = &this_cpu(init_tss);
 
     fixup_guest_stack_selector(current->domain, ss);
 
--- 2009-07-10.orig/xen/arch/x86/x86_32/supervisor_mode_kernel.S        2009-06-05 11:59:48.000000000 +0200
+++ 2009-07-10/xen/arch/x86/x86_32/supervisor_mode_kernel.S     2009-07-10 13:57:21.000000000 +0200
@@ -102,8 +102,8 @@ ENTRY(fixup_ring0_guest_stack)
 
         movl  $PER_CPU_GDT_ENTRY*8,%ecx
         lsll  %ecx,%ecx
-        shll  $7,%ecx                                   # Each TSS entry is 0x80 bytes
-        addl  $init_tss,%ecx
+        shll  $PERCPU_SHIFT,%ecx
+        addl  $per_cpu__init_tss,%ecx
 
         # Load Xen stack from TSS.
         movw  TSS_ss0(%ecx),%ax
--- 2009-07-10.orig/xen/arch/x86/x86_32/traps.c 2009-05-27 13:54:05.000000000 +0200
+++ 2009-07-10/xen/arch/x86/x86_32/traps.c      2009-07-10 13:57:21.000000000 +0200
@@ -204,7 +204,7 @@ asmlinkage void do_double_fault(void)
     asm ( "lsll %1, %0" : "=r" (cpu) : "rm" (PER_CPU_GDT_ENTRY << 3) );
 
     /* Find information saved during fault and dump it to the console. */
-    tss = &init_tss[cpu];
+    tss = &per_cpu(init_tss, cpu);
     printk("*** DOUBLE FAULT ***\n");
     print_xen_info();
     printk("CPU:    %d\nEIP:    %04x:[<%08x>]",
--- 2009-07-10.orig/xen/arch/x86/x86_64/traps.c 2009-07-03 10:20:57.000000000 +0200
+++ 2009-07-10/xen/arch/x86/x86_64/traps.c      2009-07-10 13:57:21.000000000 +0200
@@ -433,13 +433,13 @@ void __devinit subarch_percpu_traps_init
     BUILD_BUG_ON((IST_MAX + 2) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
 
     /* Machine Check handler has its own per-CPU 4kB stack. */
-    init_tss[cpu].ist[IST_MCE] = (unsigned long)&stack[IST_MCE * PAGE_SIZE];
+    this_cpu(init_tss).ist[IST_MCE] = (unsigned long)&stack[IST_MCE * PAGE_SIZE];
 
     /* Double-fault handler has its own per-CPU 4kB stack. */
-    init_tss[cpu].ist[IST_DF] = (unsigned long)&stack[IST_DF * PAGE_SIZE];
+    this_cpu(init_tss).ist[IST_DF] = (unsigned long)&stack[IST_DF * PAGE_SIZE];
 
     /* NMI handler has its own per-CPU 4kB stack. */
-    init_tss[cpu].ist[IST_NMI] = (unsigned long)&stack[IST_NMI * PAGE_SIZE];
+    this_cpu(init_tss).ist[IST_NMI] = (unsigned long)&stack[IST_NMI * PAGE_SIZE];
 
     /* Trampoline for SYSCALL entry from long mode. */
     stack = &stack[IST_MAX * PAGE_SIZE]; /* Skip the IST stacks. */
--- 2009-07-10.orig/xen/include/asm-x86/processor.h     2009-06-19 11:11:23.000000000 +0200
+++ 2009-07-10/xen/include/asm-x86/processor.h  2009-07-10 13:57:21.000000000 +0200
@@ -456,7 +456,7 @@ struct tss_struct {
 extern idt_entry_t idt_table[];
 extern idt_entry_t *idt_tables[];
 
-extern struct tss_struct init_tss[NR_CPUS];
+DECLARE_PER_CPU(struct tss_struct, init_tss);
 
 extern void init_int80_direct_trap(struct vcpu *v);
 
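A note on the supervisor_mode_kernel.S hunk: in C-like terms the TSS address it
computes changes roughly as below (a sketch only, assuming per-CPU data blocks
are spaced 1 << PERCPU_SHIFT bytes apart, which is the layout per_cpu() relies
on; "cpu" comes from the PER_CPU_GDT_ENTRY segment limit, exactly as in the
existing code):

    /* old: index a packed array of 0x80-byte TSS entries */
    struct tss_struct *old_tss =
        (struct tss_struct *)((cpu << 7) + (unsigned long)init_tss);

    /* new: add this CPU's per-CPU block offset to the base symbol */
    struct tss_struct *new_tss =
        (struct tss_struct *)((cpu << PERCPU_SHIFT) +
                              (unsigned long)&per_cpu__init_tss);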



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
