[Xen-devel] [PATCH] CPUIDLE: shorten hpet spin_lock holding time

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] CPUIDLE: shorten hpet spin_lock holding time
From: "Wei, Gang" <gang.wei@xxxxxxxxx>
Date: Tue, 20 Apr 2010 13:39:37 +0800
Accept-language: zh-CN, en-US
Cc: Keir Fraser <keir.fraser@xxxxxxxxxxxxx>
Delivery-date: Mon, 19 Apr 2010 22:40:34 -0700
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Thread-index: AcrgS919AC3RHXCET4295IJeXmmgRw==
Thread-topic: [PATCH] CPUIDLE: shorten hpet spin_lock holding time
CPUIDLE: shorten hpet spin_lock holding time

Try to reduce spin_lock overhead for deep C-state entry/exit. This will benefit
systems with many CPUs that need the HPET broadcast to wake up from deep
C-states.

Signed-off-by: Wei Gang <gang.wei@xxxxxxxxx>
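
For illustration only (not part of the patch): a minimal, compilable C sketch
of the lock-narrowing pattern applied to handle_hpet_broadcast() below.
struct channel, scan_deadlines() and reprogram() are hypothetical stand-ins
for the Xen types and helpers, and pthread spinlocks stand in for Xen's
spin_lock_irq()/spin_unlock_irq().

#include <pthread.h>
#include <stdint.h>

#define STIME_MAX INT64_MAX

struct channel {
    pthread_spinlock_t lock;
    int64_t next_event;               /* protected by lock */
};

/* Placeholder bodies for the hypothetical helpers. */
static int64_t scan_deadlines(void) { return STIME_MAX; }

static int reprogram(struct channel *ch, int64_t when)
{
    ch->next_event = when;            /* caller holds ch->lock */
    return 0;                         /* nonzero: deadline already in the past */
}

/* Called with ch->lock held, like handle_hpet_broadcast(); returns unlocked. */
static void handle_broadcast(struct channel *ch)
{
    int64_t next_event;

again:
    ch->next_event = STIME_MAX;
    pthread_spin_unlock(&ch->lock);   /* the long scan runs with the lock dropped */

    next_event = scan_deadlines();

    if ( next_event != STIME_MAX )
    {
        pthread_spin_lock(&ch->lock);
        /* Another CPU may have set an earlier deadline while we scanned. */
        if ( next_event < ch->next_event && reprogram(ch, next_event) )
            goto again;               /* lock still held, as 'again' expects */
        pthread_spin_unlock(&ch->lock);
    }
}

int main(void)
{
    struct channel ch = { .next_event = STIME_MAX };

    pthread_spin_init(&ch.lock, PTHREAD_PROCESS_PRIVATE);
    pthread_spin_lock(&ch.lock);      /* handle_broadcast() expects the lock held */
    handle_broadcast(&ch);            /* returns with the lock released */
    pthread_spin_destroy(&ch.lock);
    return 0;
}

The re-check of next_event under the re-taken lock is what makes dropping the
lock during the scan safe: a concurrent CPU may have programmed an earlier
deadline in the meantime, in which case the reprogram must not be done.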

diff -r 7ee8bb40200a xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Thu Apr 15 19:11:16 2010 +0100
+++ b/xen/arch/x86/hpet.c       Fri Apr 16 15:05:28 2010 +0800
@@ -186,6 +186,9 @@ static void handle_hpet_broadcast(struct
 
 again:
     ch->next_event = STIME_MAX;
+
+    spin_unlock_irq(&ch->lock);
+
     next_event = STIME_MAX;
     mask = (cpumask_t)CPU_MASK_NONE;
     now = NOW();
@@ -204,10 +207,14 @@ again:
 
     if ( next_event != STIME_MAX )
     {
-        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
+        spin_lock_irq(&ch->lock);
+
+        if ( next_event < ch->next_event &&
+             reprogram_hpet_evt_channel(ch, next_event, now, 0) )
             goto again;
-    }
-    spin_unlock_irq(&ch->lock);
+
+        spin_unlock_irq(&ch->lock);
+    }
 }
 
 static void hpet_interrupt_handler(int irq, void *data,
@@ -656,10 +663,15 @@ void hpet_broadcast_enter(void)
     BUG_ON( !ch );
 
     ASSERT(!local_irq_is_enabled());
-    spin_lock(&ch->lock);
 
     if ( hpet_attach_channel )
+    {
+        spin_lock(&ch->lock);
+
         hpet_attach_channel(cpu, ch);
+
+        spin_unlock(&ch->lock);
+    }
 
     /* Cancel any outstanding LAPIC timer event and disable interrupts. */
     reprogram_timer(0);
@@ -667,6 +679,8 @@ void hpet_broadcast_enter(void)
 
     cpu_set(cpu, ch->cpumask);
 
+    spin_lock(&ch->lock);
+
     /* reprogram if current cpu expire time is nearer */
     if ( this_cpu(timer_deadline_end) < ch->next_event )
         reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
@@ -683,8 +697,6 @@ void hpet_broadcast_exit(void)
         return;
 
     BUG_ON( !ch );
-
-    spin_lock_irq(&ch->lock);
 
     if ( cpu_test_and_clear(cpu, ch->cpumask) )
     {
@@ -693,14 +705,22 @@ void hpet_broadcast_exit(void)
         if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
             raise_softirq(TIMER_SOFTIRQ);
 
+        spin_lock_irq(&ch->lock);
+
         if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
             reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
+
+        spin_unlock_irq(&ch->lock);
     }
 
     if ( hpet_detach_channel )
+    {
+        spin_lock_irq(&ch->lock);
+
         hpet_detach_channel(cpu);
 
-    spin_unlock_irq(&ch->lock);
+        spin_unlock_irq(&ch->lock);
+    }
 }
 
 int hpet_broadcast_is_available(void)
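
Likewise (again illustration only, continuing the sketch above), the enter
path now takes the lock just twice: once around the attach bookkeeping and
once around the read-compare-reprogram of ch->next_event, leaving the purely
per-CPU LAPIC work unlocked. attach_needed, attach_channel(),
cancel_local_timer() and timer_deadline() are more hypothetical stand-ins:

static int  attach_needed;                               /* cf. hpet_attach_channel */
static void attach_channel(int cpu, struct channel *ch) { (void)cpu; (void)ch; }
static void cancel_local_timer(int cpu)                  { (void)cpu; }
static int64_t timer_deadline(int cpu)  { (void)cpu; return STIME_MAX; }

static void broadcast_enter(struct channel *ch, int cpu)
{
    /* Lock only around the shared attach bookkeeping... */
    if ( attach_needed )
    {
        pthread_spin_lock(&ch->lock);
        attach_channel(cpu, ch);
        pthread_spin_unlock(&ch->lock);
    }

    cancel_local_timer(cpu);          /* purely per-CPU work, runs unlocked */
    /* (the real code also sets this CPU's bit in ch->cpumask here) */

    /* ...and around the read-compare-reprogram of ch->next_event. */
    pthread_spin_lock(&ch->lock);
    if ( timer_deadline(cpu) < ch->next_event )
        reprogram(ch, timer_deadline(cpu));
    pthread_spin_unlock(&ch->lock);
}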

Attachment: hpet_short_lock.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel