[Xen-devel] [PATCH 6/6] cpuidle: redefine cpumask_lock as rwlock_t

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 6/6] cpuidle: redefine cpumask_lock as rwlock_t
From: "Wei, Gang" <gang.wei@xxxxxxxxx>
Date: Thu, 17 Jun 2010 15:38:47 +0800
Accept-language: zh-CN, en-US
Cc: Keir Fraser <keir.fraser@xxxxxxxxxxxxx>, "Wei, Gang" <gang.wei@xxxxxxxxx>
Delivery-date: Thu, 17 Jun 2010 00:46:22 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Thread-index: AcsN8B7SdiTVQlHOTHeQMmuOrbv2Nw==
Thread-topic: [PATCH 6/6] cpuidle: redefine cpumask_lock as rwlock_t
cpuidle: redefine cpumask_lock as rwlock_t

It is safe for multiple CPUs to clear their bits in an HPET channel's cpumask
simultaneously, so cpumask_lock can be changed from a spinlock to an rwlock_t:
hpet_broadcast_exit() now only takes the lock for reading when clearing the
caller's bit, while handle_hpet_broadcast(), which scans the whole mask, takes
it for writing.
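
For illustration only (not part of the patch), below is a minimal userspace model of
the intended locking pattern, using POSIX rwlocks and a plain bitmask in place of
Xen's rwlock_t and cpumask_t; all names and the threaded setup are hypothetical.

/* build: cc -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS 8

static pthread_rwlock_t cpumask_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_ulong cpumask = (1UL << NCPUS) - 1;  /* every "CPU" starts in the mask */

/* Models hpet_broadcast_exit(): each CPU clears only its own bit, and the
 * clear is atomic, so concurrent callers are fine with a shared (read)
 * lock; they only need to be excluded while the scanner holds the lock
 * for writing. */
static void *cpu_exit_broadcast(void *arg)
{
    unsigned long cpu = (unsigned long)(uintptr_t)arg;

    pthread_rwlock_rdlock(&cpumask_lock);
    atomic_fetch_and(&cpumask, ~(1UL << cpu));
    pthread_rwlock_unlock(&cpumask_lock);
    return NULL;
}

/* Models the scan in handle_hpet_broadcast(): checking a bit must not
 * race with a concurrent clear, so take the lock exclusively (write). */
static void scan_mask(void)
{
    pthread_rwlock_wrlock(&cpumask_lock);
    printf("remaining mask = %#lx\n", (unsigned long)atomic_load(&cpumask));
    pthread_rwlock_unlock(&cpumask_lock);
}

int main(void)
{
    pthread_t t[NCPUS];

    for (unsigned long i = 0; i < NCPUS; i++)
        pthread_create(&t[i], NULL, cpu_exit_broadcast, (void *)(uintptr_t)i);
    for (unsigned long i = 0; i < NCPUS; i++)
        pthread_join(t[i], NULL);

    scan_mask();
    return 0;
}

Each thread clears only its own bit, and the clear itself is atomic, so any number of
them may hold the lock for reading at once; the scanner takes the lock for writing so
that its check of a bit (and the per-CPU state behind it) cannot race with a concurrent
clear, which is the same split the patch applies to hpet_broadcast_exit() and
handle_hpet_broadcast().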

Signed-off-by: Wei Gang <gang.wei@xxxxxxxxx>

diff -r b3acfa50b013 xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Sun Jun 13 07:45:17 2010 +0800
+++ b/xen/arch/x86/hpet.c       Sun Jun 13 07:46:32 2010 +0800
@@ -34,7 +34,7 @@ struct hpet_event_channel
     int           shift;
     s_time_t      next_event;
     cpumask_t     cpumask;
-    spinlock_t    cpumask_lock;
+    rwlock_t      cpumask_lock;
     spinlock_t    lock;
     void          (*event_handler)(struct hpet_event_channel *);
 
@@ -197,7 +197,7 @@ again:
     /* find all expired events */
     for_each_cpu_mask(cpu, ch->cpumask)
     {
-        spin_lock_irq(&ch->cpumask_lock);
+        write_lock_irq(&ch->cpumask_lock);
 
         if ( cpumask_test_cpu(cpu, ch->cpumask) )
         {
@@ -207,7 +207,7 @@ again:
                 next_event = per_cpu(timer_deadline_end, cpu);
         }
 
-        spin_unlock_irq(&ch->cpumask_lock);
+        write_unlock_irq(&ch->cpumask_lock);
     }
 
     /* wakeup the cpus which have an expired event. */
@@ -580,7 +580,7 @@ void hpet_broadcast_init(void)
             hpet_events[i].next_event = STIME_MAX;
             hpet_events[i].event_handler = handle_hpet_broadcast;
             spin_lock_init(&hpet_events[i].lock);
-            spin_lock_init(&hpet_events[i].cpumask_lock);
+            rwlock_init(&hpet_events[i].cpumask_lock);
         }
 
         return;
@@ -615,7 +615,7 @@ void hpet_broadcast_init(void)
     legacy_hpet_event.idx = 0;
     legacy_hpet_event.flags = 0;
     spin_lock_init(&legacy_hpet_event.lock);
-    spin_lock_init(&legacy_hpet_event.cpumask_lock);
+    rwlock_init(&legacy_hpet_event.cpumask_lock);
 
     if ( !force_hpet_broadcast )
         pv_rtc_handler = handle_rtc_once;
@@ -692,9 +692,9 @@ void hpet_broadcast_exit(void)
     if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
         raise_softirq(TIMER_SOFTIRQ);
 
-    spin_lock_irq(&ch->cpumask_lock);
+    read_lock_irq(&ch->cpumask_lock);
     cpu_clear(cpu, ch->cpumask);
-    spin_unlock_irq(&ch->cpumask_lock);
+    read_unlock_irq(&ch->cpumask_lock);
 
     if ( ch != &legacy_hpet_event )
     {

Attachment: 6-deepc-wakeup-optimize-3.patch
Description: 6-deepc-wakeup-optimize-3.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel