WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] timers: Simplify implementation logic.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] timers: Simplify implementation logic.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 26 Aug 2010 03:30:20 -0700
Delivery-date: Thu, 26 Aug 2010 03:30:36 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1282139761 -3600
# Node ID 28546f5ec0eb066a5e3bc536fd07f13566282df9
# Parent  d20cbccb6feaa4e3fab303a1080273a3de8fb613
timers: Simplify implementation logic.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/acpi/cpu_idle.c     |    2 -
 xen/arch/x86/acpi/cpuidle_menu.c |    2 -
 xen/arch/x86/hpet.c              |   22 ++++++-------
 xen/arch/x86/time.c              |    2 -
 xen/common/timer.c               |   65 ++++++++-------------------------------
 xen/include/xen/timer.h          |    9 +----
 6 files changed, 30 insertions(+), 72 deletions(-)

diff -r d20cbccb6fea -r 28546f5ec0eb xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c      Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c      Wed Aug 18 14:56:01 2010 +0100
@@ -252,7 +252,7 @@ static void mwait_idle_with_hints(unsign
 static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
     unsigned int cpu = smp_processor_id();
-    s_time_t expires = per_cpu(timer_deadline_start, cpu);
+    s_time_t expires = per_cpu(timer_deadline, cpu);
 
     __monitor((void *)&mwait_wakeup(cpu), 0, 0);
     smp_mb();
diff -r d20cbccb6fea -r 28546f5ec0eb xen/arch/x86/acpi/cpuidle_menu.c
--- a/xen/arch/x86/acpi/cpuidle_menu.c  Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/arch/x86/acpi/cpuidle_menu.c  Wed Aug 18 14:56:01 2010 +0100
@@ -173,7 +173,7 @@ static inline s_time_t avg_intr_interval
 
 static unsigned int get_sleep_length_us(void)
 {
-    s_time_t us = (this_cpu(timer_deadline_start) - NOW()) / 1000;
+    s_time_t us = (this_cpu(timer_deadline) - NOW()) / 1000;
     /*
      * while us < 0 or us > (u32)-1, return a large u32,
      * choose (unsigned int)-2000 to avoid wrapping while added with exit
diff -r d20cbccb6fea -r 28546f5ec0eb xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/arch/x86/hpet.c       Wed Aug 18 14:56:01 2010 +0100
@@ -36,14 +36,14 @@ struct hpet_event_channel
     cpumask_t     cpumask;
     /*
      * cpumask_lock is used to prevent hpet intr handler from accessing other
-     * cpu's timer_deadline_start/end after the other cpu's mask was cleared --
-     * mask cleared means cpu waken up, then accessing timer_deadline_xxx from
+     * cpu's timer_deadline after the other cpu's mask was cleared --
+     * mask cleared means cpu waken up, then accessing timer_deadline from
      * other cpu is not safe.
      * It is not used for protecting cpumask, so set ops needn't take it.
      * Multiple cpus clear cpumask simultaneously is ok due to the atomic
      * feature of cpu_clear, so hpet_broadcast_exit() can take read lock for 
      * clearing cpumask, and handle_hpet_broadcast() have to take write lock 
-     * for read cpumask & access timer_deadline_xxx.
+     * for read cpumask & access timer_deadline.
      */
     rwlock_t      cpumask_lock;
     spinlock_t    lock;
@@ -212,10 +212,10 @@ again:
 
         if ( cpu_isset(cpu, ch->cpumask) )
         {
-            if ( per_cpu(timer_deadline_start, cpu) <= now )
+            if ( per_cpu(timer_deadline, cpu) <= now )
                 cpu_set(cpu, mask);
-            else if ( per_cpu(timer_deadline_end, cpu) < next_event )
-                next_event = per_cpu(timer_deadline_end, cpu);
+            else if ( per_cpu(timer_deadline, cpu) < next_event )
+                next_event = per_cpu(timer_deadline, cpu);
         }
 
         write_unlock_irq(&ch->cpumask_lock);
@@ -661,7 +661,7 @@ void hpet_broadcast_enter(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline_start) == 0 )
+    if ( this_cpu(timer_deadline) == 0 )
         return;
 
     if ( !ch )
@@ -682,8 +682,8 @@ void hpet_broadcast_enter(void)
 
     spin_lock(&ch->lock);
     /* reprogram if current cpu expire time is nearer */
-    if ( this_cpu(timer_deadline_end) < ch->next_event )
-        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
+    if ( this_cpu(timer_deadline) < ch->next_event )
+        reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);
     spin_unlock(&ch->lock);
 }
 
@@ -692,7 +692,7 @@ void hpet_broadcast_exit(void)
     int cpu = smp_processor_id();
     struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
 
-    if ( this_cpu(timer_deadline_start) == 0 )
+    if ( this_cpu(timer_deadline) == 0 )
         return;
 
     if ( !ch )
@@ -700,7 +700,7 @@ void hpet_broadcast_exit(void)
 
     /* Reprogram the deadline; trigger timer work now if it has passed. */
     enable_APIC_timer();
-    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+    if ( !reprogram_timer(this_cpu(timer_deadline)) )
         raise_softirq(TIMER_SOFTIRQ);
 
     read_lock_irq(&ch->cpumask_lock);
diff -r d20cbccb6fea -r 28546f5ec0eb xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/arch/x86/time.c       Wed Aug 18 14:56:01 2010 +0100
@@ -1488,7 +1488,7 @@ void pit_broadcast_exit(void)
     int cpu = smp_processor_id();
 
     if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
-        reprogram_timer(per_cpu(timer_deadline_start, cpu));
+        reprogram_timer(this_cpu(timer_deadline));
 }
 
 int pit_broadcast_is_available(void)
diff -r d20cbccb6fea -r 28546f5ec0eb xen/common/timer.c
--- a/xen/common/timer.c        Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/common/timer.c        Wed Aug 18 14:56:01 2010 +0100
@@ -23,16 +23,12 @@
 #include <asm/system.h>
 #include <asm/desc.h>
 
-/*
- * We pull handlers off the timer list this far in future,
- * rather than reprogramming the time hardware.
- */
+/* We program the time hardware this far behind the closest deadline. */
 static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
 integer_param("timer_slop", timer_slop);
 
 struct timers {
     spinlock_t     lock;
-    bool_t         overflow;
     struct timer **heap;
     struct timer  *list;
     struct timer  *running;
@@ -43,8 +39,7 @@ static DEFINE_PER_CPU(struct timers, tim
 
 static cpumask_t timer_valid_cpumask;
 
-DEFINE_PER_CPU(s_time_t, timer_deadline_start);
-DEFINE_PER_CPU(s_time_t, timer_deadline_end);
+DEFINE_PER_CPU(s_time_t, timer_deadline);
 
 /****************************************************************************
  * HEAP OPERATIONS.
@@ -210,7 +205,6 @@ static int add_entry(struct timer *t)
         return rc;
 
     /* Fall back to adding to the slower linked list. */
-    timers->overflow = 1;
     t->status = TIMER_STATUS_in_list;
     return add_to_list(&timers->list, t);
 }
@@ -311,7 +305,6 @@ void set_timer(struct timer *timer, s_ti
         deactivate_timer(timer);
 
     timer->expires = expires;
-    timer->expires_end = expires + timer_slop;
 
     activate_timer(timer);
 
@@ -427,13 +420,13 @@ static void timer_softirq_action(void)
 {
     struct timer  *t, **heap, *next;
     struct timers *ts;
-    s_time_t       now;
+    s_time_t       now, deadline;
 
     ts = &this_cpu(timers);
     heap = ts->heap;
 
     /* If we overflowed the heap, try to allocate a larger heap. */
-    if ( unlikely(ts->overflow) )
+    if ( unlikely(ts->list != NULL) )
     {
         /* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
         int old_limit = GET_HEAP_LIMIT(heap);
@@ -481,46 +474,16 @@ static void timer_softirq_action(void)
         add_entry(t);
     }
 
-    ts->overflow = (ts->list != NULL);
-    if ( unlikely(ts->overflow) )
-    {
-        /* Find earliest deadline at head of list or top of heap. */
-        this_cpu(timer_deadline_start) = ts->list->expires;
-        if ( (GET_HEAP_SIZE(heap) != 0) &&
-             ((t = heap[1])->expires < this_cpu(timer_deadline_start)) )
-            this_cpu(timer_deadline_start) = t->expires;
-        this_cpu(timer_deadline_end) = this_cpu(timer_deadline_start);
-    }
-    else
-    {
-        /*
-         * Find the earliest deadline that encompasses largest number of timers
-         * on the heap. To do this we take timers from the heap while their
-         * valid deadline ranges continue to intersect.
-         */
-        s_time_t start = 0, end = STIME_MAX;
-        struct timer **list_tail = &ts->list;
-
-        while ( (GET_HEAP_SIZE(heap) != 0) &&
-                ((t = heap[1])->expires <= end) )
-        {
-            remove_entry(t);
-
-            t->status = TIMER_STATUS_in_list;
-            t->list_next = NULL;
-            *list_tail = t;
-            list_tail = &t->list_next;
-
-            start = t->expires;
-            if ( end > t->expires_end )
-                end = t->expires_end;
-        }
-
-        this_cpu(timer_deadline_start) = start;
-        this_cpu(timer_deadline_end) = end;
-    }
-
-    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+    /* Find earliest deadline from head of linked list and top of heap. */
+    deadline = STIME_MAX;
+    if ( GET_HEAP_SIZE(heap) != 0 )
+        deadline = heap[1]->expires;
+    if ( (ts->list != NULL) && (ts->list->expires < deadline) )
+        deadline = ts->list->expires;
+    this_cpu(timer_deadline) =
+        (deadline == STIME_MAX) ? 0 : deadline + timer_slop;
+
+    if ( !reprogram_timer(this_cpu(timer_deadline)) )
         raise_softirq(TIMER_SOFTIRQ);
 
     spin_unlock_irq(&ts->lock);
diff -r d20cbccb6fea -r 28546f5ec0eb xen/include/xen/timer.h
--- a/xen/include/xen/timer.h   Wed Aug 18 14:22:48 2010 +0100
+++ b/xen/include/xen/timer.h   Wed Aug 18 14:56:01 2010 +0100
@@ -16,7 +16,6 @@ struct timer {
 struct timer {
     /* System time expiry value (nanoseconds since boot). */
     s_time_t expires;
-    s_time_t expires_end;
 
     /* Position in active-timer data structure. */
     union {
@@ -82,12 +81,8 @@ void kill_timer(struct timer *timer);
 /* Bootstrap initialisation. Must be called before any other timer function. */
 void timer_init(void);
 
-/*
- * Next timer deadline for each CPU.
- * Modified only by the local CPU and never in interrupt context.
- */
-DECLARE_PER_CPU(s_time_t, timer_deadline_start);
-DECLARE_PER_CPU(s_time_t, timer_deadline_end);
+/* Next timer deadline for each CPU. */
+DECLARE_PER_CPU(s_time_t, timer_deadline);
 
 /* Arch-defined function to reprogram timer hardware for new deadline. */
 int reprogram_timer(s_time_t timeout);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] timers: Simplify implementation logic., Xen patchbot-unstable <=