[Xen-changelog] [xen-unstable] Eliminate unnecessary NR_CPUS-sized arrays from 't' key handler

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Eliminate unnecessary NR_CPUS-sized arrays from 't' key handler
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 09 Jul 2010 04:50:44 -0700
Delivery-date: Fri, 09 Jul 2010 04:52:56 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1278674707 -3600
# Node ID 5a0f99137e6beac8d1f756ed8e564adb8a503e62
# Parent  2cb6d755a08b2f51ac018bfd82ead1faff82ab52
Eliminate unnecessary NR_CPUS-sized arrays from 't' key handler

Replace them with per-CPU data.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/common/keyhandler.c |   32 ++++++++++++++++++--------------
 1 files changed, 18 insertions(+), 14 deletions(-)

diff -r 2cb6d755a08b -r 5a0f99137e6b xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Fri Jul 09 12:24:41 2010 +0100
+++ b/xen/common/keyhandler.c   Fri Jul 09 12:25:07 2010 +0100
@@ -307,8 +307,8 @@ static struct keyhandler dump_domains_ke
 };
 
 static cpumask_t read_clocks_cpumask = CPU_MASK_NONE;
-static s_time_t read_clocks_time[NR_CPUS];
-static u64 read_cycles_time[NR_CPUS];
+static DEFINE_PER_CPU(s_time_t, read_clocks_time);
+static DEFINE_PER_CPU(u64, read_cycles_time);
 
 static void read_clocks_slave(void *unused)
 {
@@ -316,8 +316,8 @@ static void read_clocks_slave(void *unus
     local_irq_disable();
     while ( !cpu_isset(cpu, read_clocks_cpumask) )
         cpu_relax();
-    read_clocks_time[cpu] = NOW();
-    read_cycles_time[cpu] = get_cycles();
+    per_cpu(read_clocks_time, cpu) = NOW();
+    per_cpu(read_cycles_time, cpu) = get_cycles();
     cpu_clear(cpu, read_clocks_cpumask);
     local_irq_enable();
 }
@@ -339,8 +339,8 @@ static void read_clocks(unsigned char ke
 
     local_irq_disable();
     read_clocks_cpumask = cpu_online_map;
-    read_clocks_time[cpu] = NOW();
-    read_cycles_time[cpu] = get_cycles();
+    per_cpu(read_clocks_time, cpu) = NOW();
+    per_cpu(read_cycles_time, cpu) = get_cycles();
     cpu_clear(cpu, read_clocks_cpumask);
     local_irq_enable();
 
@@ -350,20 +350,24 @@ static void read_clocks(unsigned char ke
     min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu;
     for_each_online_cpu ( cpu )
     {
-        if ( read_clocks_time[cpu] < read_clocks_time[min_stime_cpu] )
+        if ( per_cpu(read_clocks_time, cpu) <
+             per_cpu(read_clocks_time, min_stime_cpu) )
             min_stime_cpu = cpu;
-        if ( read_clocks_time[cpu] > read_clocks_time[max_stime_cpu] )
+        if ( per_cpu(read_clocks_time, cpu) >
+             per_cpu(read_clocks_time, max_stime_cpu) )
             max_stime_cpu = cpu;
-        if ( read_cycles_time[cpu] < read_cycles_time[min_cycles_cpu] )
+        if ( per_cpu(read_cycles_time, cpu) <
+             per_cpu(read_cycles_time, min_cycles_cpu) )
             min_cycles_cpu = cpu;
-        if ( read_cycles_time[cpu] > read_cycles_time[max_cycles_cpu] )
+        if ( per_cpu(read_cycles_time, cpu) >
+             per_cpu(read_cycles_time, max_cycles_cpu) )
             max_cycles_cpu = cpu;
     }
 
-    min_stime = read_clocks_time[min_stime_cpu];
-    max_stime = read_clocks_time[max_stime_cpu];
-    min_cycles = read_cycles_time[min_cycles_cpu];
-    max_cycles = read_cycles_time[max_cycles_cpu];
+    min_stime = per_cpu(read_clocks_time, min_stime_cpu);
+    max_stime = per_cpu(read_clocks_time, max_stime_cpu);
+    min_cycles = per_cpu(read_cycles_time, min_cycles_cpu);
+    max_cycles = per_cpu(read_cycles_time, max_cycles_cpu);
 
     spin_unlock(&lock);
 

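For readers unfamiliar with the pattern the patch switches to: a static NR_CPUS-sized array reserves a slot for every CPU the build could ever support, whereas DEFINE_PER_CPU()/per_cpu() (the Xen accessors visible in the hunks above) give each CPU its own instance of the variable, so storage tracks the CPUs that are actually present. The standalone toy program below is only a sketch of that idea; the percpu_area struct, NR_ONLINE, and the local per_cpu() macro are illustrative stand-ins, not the real Xen implementation.

/*
 * Toy model, not Xen's real macros.  The patch above replaces two
 * NR_CPUS-sized arrays with DEFINE_PER_CPU() variables accessed via
 * per_cpu(var, cpu); this sketch contrasts the two layouts.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS   256          /* compile-time worst case */
#define NR_ONLINE 4            /* CPUs actually present in this toy run */

typedef int64_t s_time_t;      /* stand-in for Xen's s_time_t */

/* Old style: always reserves NR_CPUS slots, even on a 4-CPU box. */
static s_time_t read_clocks_time_old[NR_CPUS];

/* Toy per-CPU area: one block per CPU that is actually online. */
struct percpu_area {
    s_time_t read_clocks_time;
    uint64_t read_cycles_time;
};
static struct percpu_area percpu[NR_ONLINE];

/* Toy stand-in for Xen's per_cpu(var, cpu) accessor. */
#define per_cpu(field, cpu) (percpu[(cpu)].field)

int main(void)
{
    for (int cpu = 0; cpu < NR_ONLINE; cpu++)
        per_cpu(read_clocks_time, cpu) = 1000 * cpu;   /* fake timestamps */

    printf("old array footprint (one array): %zu bytes\n",
           sizeof(read_clocks_time_old));
    printf("per-CPU footprint (both fields): %zu bytes\n",
           sizeof(percpu));
    printf("cpu2 stime: %lld\n",
           (long long)per_cpu(read_clocks_time, 2));
    return 0;
}

On a 4-CPU machine the per-CPU layout in this sketch needs 4 * sizeof(struct percpu_area) bytes, versus NR_CPUS * sizeof(s_time_t) for each of the two old arrays; shrinking that static footprint is the point of the changeset.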
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
