
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [XEN] Assorted further PER_CPU- or read_mostly-ifications.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 08 Aug 2006 16:00:42 +0000
Delivery-date: Tue, 08 Aug 2006 09:03:38 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID b3dd6ceda9bc931be8424a4cf2ca1ab4a42c53c1
# Parent  6b821e3105977b393c261a6f474d991256264484
[XEN] Assorted further PER_CPU- or read_mostly-ifications.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vmx.c  |   10 +++++-----
 xen/arch/x86/irq.c          |   26 +++++++++++++-------------
 xen/arch/x86/nmi.c          |   38 ++++++++++++++++++--------------------
 xen/arch/x86/x86_32/traps.c |    2 +-
 xen/common/domain.c         |   10 +++++-----
 xen/common/schedule.c       |   17 ++++++++---------
 6 files changed, 50 insertions(+), 53 deletions(-)
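
[Note: the conversion pattern is the same in every file touched by this patch: a statically sized NR_CPUS-indexed array becomes a per-CPU variable declared with DEFINE_PER_CPU(), accessed via this_cpu() on the executing processor or per_cpu() when a specific (possibly remote) CPU is named, while pointers that are written once at boot but read from all CPUs (idt_tables, idle_vcpu) keep their NR_CPUS arrays and are instead annotated __read_mostly. A minimal sketch of that pattern, using a hypothetical variable name and the xen/percpu.h and timer interfaces already used in this patch:

/* Before: static struct timer example_timer[NR_CPUS]; */

/* After: one copy per CPU, laid out in each CPU's own per-CPU data area. */
static DEFINE_PER_CPU(struct timer, example_timer);

static void example_timer_fn(void *unused)
{
    /* this_cpu() resolves to the copy belonging to the executing CPU. */
    set_timer(&this_cpu(example_timer), NOW() + MILLISECS(1000));
}

static int __init example_init(void)
{
    unsigned int cpu;

    /* per_cpu(var, cpu) names a particular CPU's copy explicitly. */
    for_each_online_cpu ( cpu )
    {
        init_timer(&per_cpu(example_timer, cpu), example_timer_fn, NULL, cpu);
        set_timer(&per_cpu(example_timer, cpu), NOW() + MILLISECS(1000));
    }

    return 0;
}
__initcall(example_init);

This is only an illustration of the idiom, not code from the patch itself; the real conversions follow below.]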

diff -r 6b821e310597 -r b3dd6ceda9bc xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue Aug 08 15:14:43 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Tue Aug 08 15:43:54 2006 +0100
@@ -156,7 +156,7 @@ static void vmx_relinquish_guest_resourc
 
 #ifdef __x86_64__
 
-static struct vmx_msr_state percpu_msr[NR_CPUS];
+static DEFINE_PER_CPU(struct vmx_msr_state, percpu_msr);
 
 static u32 msr_data_index[VMX_MSR_COUNT] =
 {
@@ -177,7 +177,7 @@ static void vmx_save_segments(struct vcp
  */
 static void vmx_load_msrs(void)
 {
-    struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
+    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
     int i;
 
     while ( host_state->flags )
@@ -190,7 +190,7 @@ static void vmx_load_msrs(void)
 
 static void vmx_save_init_msrs(void)
 {
-    struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
+    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
     int i;
 
     for ( i = 0; i < VMX_MSR_COUNT; i++ )
@@ -279,7 +279,7 @@ static inline int long_mode_do_msr_write
     u64 msr_content = regs->eax | ((u64)regs->edx << 32);
     struct vcpu *v = current;
     struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
-    struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
+    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
 
     HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%lx msr_content 0x%"PRIx64"\n",
                 (unsigned long)regs->ecx, msr_content);
@@ -361,7 +361,7 @@ static void vmx_restore_msrs(struct vcpu
     unsigned long guest_flags ;
 
     guest_state = &v->arch.hvm_vmx.msr_content;;
-    host_state = &percpu_msr[smp_processor_id()];
+    host_state = &this_cpu(percpu_msr);
 
     wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
     guest_flags = guest_state->flags;
diff -r 6b821e310597 -r b3dd6ceda9bc xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Tue Aug 08 15:14:43 2006 +0100
+++ b/xen/arch/x86/irq.c        Tue Aug 08 15:43:54 2006 +0100
@@ -673,7 +673,7 @@ static int __init setup_dump_irqs(void)
 }
 __initcall(setup_dump_irqs);
 
-static struct timer end_irq_timer[NR_CPUS];
+static DEFINE_PER_CPU(struct timer, end_irq_timer);
 
 /*
  * force_intack: Forcibly emit all pending EOIs on each CPU every second.
@@ -682,22 +682,13 @@ static struct timer end_irq_timer[NR_CPU
 
 static void end_irq_timeout(void *unused)
 {
-    int cpu = smp_processor_id();
-
     local_irq_disable();
     flush_all_pending_eoi(NULL);
     local_irq_enable();
 
     on_selected_cpus(cpu_online_map, flush_ready_eoi, NULL, 1, 0);
 
-    set_timer(&end_irq_timer[cpu], NOW() + MILLISECS(1000));
-}
-
-static void __init __setup_irq_timeout(void *unused)
-{
-    int cpu = smp_processor_id();
-    init_timer(&end_irq_timer[cpu], end_irq_timeout, NULL, cpu);
-    set_timer(&end_irq_timer[cpu], NOW() + MILLISECS(1000));
+    set_timer(&this_cpu(end_irq_timer), NOW() + MILLISECS(1000));
 }
 
 static int force_intack;
@@ -705,8 +696,17 @@ boolean_param("force_intack", force_inta
 
 static int __init setup_irq_timeout(void)
 {
-    if ( force_intack )
-        on_each_cpu(__setup_irq_timeout, NULL, 1, 1);
+    unsigned int cpu;
+
+    if ( !force_intack )
+        return 0;
+
+    for_each_online_cpu ( cpu )
+    {
+        init_timer(&per_cpu(end_irq_timer, cpu), end_irq_timeout, NULL, cpu);
+        set_timer(&per_cpu(end_irq_timer, cpu), NOW() + MILLISECS(1000));
+    }
+
     return 0;
 }
 __initcall(setup_irq_timeout);
diff -r 6b821e310597 -r b3dd6ceda9bc xen/arch/x86/nmi.c
--- a/xen/arch/x86/nmi.c        Tue Aug 08 15:14:43 2006 +0100
+++ b/xen/arch/x86/nmi.c        Tue Aug 08 15:43:54 2006 +0100
@@ -36,8 +36,8 @@ static unsigned int nmi_hz = HZ;
 static unsigned int nmi_hz = HZ;
 static unsigned int nmi_perfctr_msr;   /* the MSR to reset in NMI handler */
 static unsigned int nmi_p4_cccr_val;
-static struct timer nmi_timer[NR_CPUS];
-static unsigned int nmi_timer_ticks[NR_CPUS];
+static DEFINE_PER_CPU(struct timer, nmi_timer);
+static DEFINE_PER_CPU(unsigned int, nmi_timer_ticks);
 
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -132,9 +132,8 @@ int __init check_nmi_watchdog (void)
 
 static void nmi_timer_fn(void *unused)
 {
-    int cpu = smp_processor_id();
-    nmi_timer_ticks[cpu]++;
-    set_timer(&nmi_timer[cpu], NOW() + MILLISECS(1000));
+    this_cpu(nmi_timer_ticks)++;
+    set_timer(&this_cpu(nmi_timer), NOW() + MILLISECS(1000));
 }
 
 static void disable_lapic_nmi_watchdog(void)
@@ -340,9 +339,8 @@ void __pminit setup_apic_nmi_watchdog(vo
     nmi_active = 1;
 }
 
-static unsigned int
-last_irq_sums [NR_CPUS],
-    alert_counter [NR_CPUS];
+static DEFINE_PER_CPU(unsigned int, last_irq_sums);
+static DEFINE_PER_CPU(unsigned int, alert_counter);
 
 static atomic_t watchdog_disable_count = ATOMIC_INIT(1);
 
@@ -366,35 +364,35 @@ void watchdog_enable(void)
      */
     for_each_online_cpu ( cpu )
     {
-        init_timer(&nmi_timer[cpu], nmi_timer_fn, NULL, cpu);
-        set_timer(&nmi_timer[cpu], NOW());
+        init_timer(&per_cpu(nmi_timer, cpu), nmi_timer_fn, NULL, cpu);
+        set_timer(&per_cpu(nmi_timer, cpu), NOW());
     }
 }
 
 void nmi_watchdog_tick(struct cpu_user_regs * regs)
 {
-    int sum, cpu = smp_processor_id();
-
-    sum = nmi_timer_ticks[cpu];
-
-    if ( (last_irq_sums[cpu] == sum) && !atomic_read(&watchdog_disable_count) )
+    unsigned int sum = this_cpu(nmi_timer_ticks);
+
+    if ( (this_cpu(last_irq_sums) == sum) &&
+         !atomic_read(&watchdog_disable_count) )
     {
         /*
          * Ayiee, looks like this CPU is stuck ... wait a few IRQs (5 seconds) 
          * before doing the oops ...
          */
-        alert_counter[cpu]++;
-        if ( alert_counter[cpu] == 5*nmi_hz )
+        this_cpu(alert_counter)++;
+        if ( this_cpu(alert_counter) == 5*nmi_hz )
         {
             console_force_unlock();
-            printk("Watchdog timer detects that CPU%d is stuck!\n", cpu);
+            printk("Watchdog timer detects that CPU%d is stuck!\n",
+                   smp_processor_id());
             fatal_trap(TRAP_nmi, regs);
         }
     } 
     else 
     {
-        last_irq_sums[cpu] = sum;
-        alert_counter[cpu] = 0;
+        this_cpu(last_irq_sums) = sum;
+        this_cpu(alert_counter) = 0;
     }
 
     if ( nmi_perfctr_msr )
diff -r 6b821e310597 -r b3dd6ceda9bc xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Tue Aug 08 15:14:43 2006 +0100
+++ b/xen/arch/x86/x86_32/traps.c       Tue Aug 08 15:43:54 2006 +0100
@@ -19,7 +19,7 @@
 #include <public/callback.h>
 
 /* All CPUs have their own IDT to allow int80 direct trap. */
-idt_entry_t *idt_tables[NR_CPUS] = { 0 };
+idt_entry_t *idt_tables[NR_CPUS] __read_mostly;
 
 void show_registers(struct cpu_user_regs *regs)
 {
diff -r 6b821e310597 -r b3dd6ceda9bc xen/common/domain.c
--- a/xen/common/domain.c       Tue Aug 08 15:14:43 2006 +0100
+++ b/xen/common/domain.c       Tue Aug 08 15:43:54 2006 +0100
@@ -33,7 +33,7 @@ struct domain *domain_list;
 
 struct domain *dom0;
 
-struct vcpu *idle_vcpu[NR_CPUS];
+struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
 
 struct domain *alloc_domain(domid_t domid)
 {
@@ -245,15 +245,15 @@ void __domain_crash_synchronous(void)
 }
 
 
-static struct domain *domain_shuttingdown[NR_CPUS];
+static DEFINE_PER_CPU(struct domain *, domain_shuttingdown);
 
 static void domain_shutdown_finalise(void)
 {
     struct domain *d;
     struct vcpu *v;
 
-    d = domain_shuttingdown[smp_processor_id()];
-    domain_shuttingdown[smp_processor_id()] = NULL;
+    d = this_cpu(domain_shuttingdown);
+    this_cpu(domain_shuttingdown) = NULL;
 
     BUG_ON(d == NULL);
     BUG_ON(d == current->domain);
@@ -302,7 +302,7 @@ void domain_shutdown(struct domain *d, u
         vcpu_sleep_nosync(v);
 
     get_knownalive_domain(d);
-    domain_shuttingdown[smp_processor_id()] = d;
+    this_cpu(domain_shuttingdown) = d;
     raise_softirq(DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ);
 }
 
diff -r 6b821e310597 -r b3dd6ceda9bc xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Aug 08 15:14:43 2006 +0100
+++ b/xen/common/schedule.c     Tue Aug 08 15:43:54 2006 +0100
@@ -67,7 +67,7 @@ static struct scheduler ops;
           : (typeof(ops.fn(__VA_ARGS__)))0 )
 
 /* Per-CPU periodic timer sends an event to the currently-executing domain. */
-static struct timer t_timer[NR_CPUS]; 
+static DEFINE_PER_CPU(struct timer, t_timer);
 
 static inline void vcpu_runstate_change(
     struct vcpu *v, int new_state, s_time_t new_entry_time)
@@ -593,10 +593,9 @@ static void s_timer_fn(void *unused)
 /* Periodic tick timer: send timer event to current domain */
 static void t_timer_fn(void *unused)
 {
-    struct vcpu  *v   = current;
-    unsigned int  cpu = smp_processor_id();
-
-    per_cpu(schedule_data, cpu).tick++;
+    struct vcpu *v   = current;
+
+    this_cpu(schedule_data).tick++;
 
     if ( !is_idle_vcpu(v) )
     {
@@ -606,9 +605,9 @@ static void t_timer_fn(void *unused)
 
     page_scrub_schedule_work();
 
-    SCHED_OP(tick, cpu);
-
-    set_timer(&t_timer[cpu], NOW() + MILLISECS(10));
+    SCHED_OP(tick, smp_processor_id());
+
+    set_timer(&this_cpu(t_timer), NOW() + MILLISECS(10));
 }
 
 /* Per-VCPU timer function: sends a virtual timer interrupt. */
@@ -637,7 +636,7 @@ void __init scheduler_init(void)
     {
         spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
         init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
-        init_timer(&t_timer[i], t_timer_fn, NULL, i);
+        init_timer(&per_cpu(t_timer, i), t_timer_fn, NULL, i);
     }
 
     for ( i = 0; schedulers[i] != NULL; i++ )

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
