# HG changeset patch
# User Ian Jackson <Ian.Jackson@xxxxxxxxxxxxx>
# Date 1282147873 -3600
# Node ID 49e17551ef903e34490ece2d44c166d488719c06
# Parent 9a4acc688e9fcb87d1b92e658c33a240182ebc06
# Parent 28546f5ec0eb066a5e3bc536fd07f13566282df9
Merge
---
xen/arch/x86/acpi/cpu_idle.c | 2
xen/arch/x86/acpi/cpuidle_menu.c | 2
xen/arch/x86/hpet.c | 22 ++++-----
xen/arch/x86/time.c | 2
xen/common/timer.c | 90 +++++++++++----------------------------
xen/include/xen/timer.h | 9 ---
6 files changed, 42 insertions(+), 85 deletions(-)
diff -r 9a4acc688e9f -r 49e17551ef90 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c Wed Aug 18 17:09:59 2010 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c Wed Aug 18 17:11:13 2010 +0100
@@ -252,7 +252,7 @@ static void mwait_idle_with_hints(unsign
static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
unsigned int cpu = smp_processor_id();
- s_time_t expires = per_cpu(timer_deadline_start, cpu);
+ s_time_t expires = per_cpu(timer_deadline, cpu);
__monitor((void *)&mwait_wakeup(cpu), 0, 0);
smp_mb();
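
The hunk above only renames the deadline variable, but the surrounding sequence is the standard monitor/mwait idle handshake, and the smp_mb() exists to order arming the monitor against re-checking the wakeup flag. A minimal sketch of that handshake, with illustrative inline asm standing in for Xen's __monitor()/__mwait() wrappers (sketch only; the real wrappers and flag layout differ):

    static void mwait_idle_sketch(volatile unsigned int *wakeup_flag,
                                  unsigned long eax, unsigned long ecx)
    {
        /* Arm the monitor on the wakeup flag's cache line (monitor
         * takes the address in eax, extensions in ecx, hints in edx). */
        asm volatile ( "monitor" : : "a" (wakeup_flag), "c" (0), "d" (0) );

        /* Re-check after arming: a wakeup written just before the
         * monitor was armed would otherwise be missed and the cpu
         * could sleep past its deadline. */
        if ( !*wakeup_flag )
            asm volatile ( "mwait" : : "a" (eax), "c" (ecx) );
    }
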
diff -r 9a4acc688e9f -r 49e17551ef90 xen/arch/x86/acpi/cpuidle_menu.c
--- a/xen/arch/x86/acpi/cpuidle_menu.c Wed Aug 18 17:09:59 2010 +0100
+++ b/xen/arch/x86/acpi/cpuidle_menu.c Wed Aug 18 17:11:13 2010 +0100
@@ -173,7 +173,7 @@ static inline s_time_t avg_intr_interval
static unsigned int get_sleep_length_us(void)
{
- s_time_t us = (this_cpu(timer_deadline_start) - NOW()) / 1000;
+ s_time_t us = (this_cpu(timer_deadline) - NOW()) / 1000;
/*
* If us < 0 or us > (u32)-1, return a large u32 instead; the value
* (unsigned int)-2000 is chosen to avoid wrapping when added to the exit
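
The truncated comment above describes a clamp on the computed sleep length. A self-contained restatement of that logic (function and type stand-ins are hypothetical; s_time_t is Xen's signed 64-bit nanosecond count):

    typedef long long s_time_t;   /* stand-in for Xen's s_time_t */

    static unsigned int sleep_length_us_sketch(s_time_t deadline, s_time_t now)
    {
        s_time_t us = (deadline - now) / 1000;

        /* A negative length (deadline already passed) or one that does
         * not fit in 32 bits becomes a large u32, with 2000us of
         * headroom so that adding an exit latency cannot wrap. */
        if ( (us < 0) || (us > (s_time_t)(unsigned int)-1) )
            return (unsigned int)-2000;
        return (unsigned int)us;
    }
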
diff -r 9a4acc688e9f -r 49e17551ef90 xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c Wed Aug 18 17:09:59 2010 +0100
+++ b/xen/arch/x86/hpet.c Wed Aug 18 17:11:13 2010 +0100
@@ -36,14 +36,14 @@ struct hpet_event_channel
cpumask_t cpumask;
/*
* cpumask_lock is used to prevent the hpet interrupt handler from accessing
- * another cpu's timer_deadline_start/end after that cpu's mask bit was
- * cleared -- a cleared bit means the cpu has woken up, so reading its timer_deadline_xxx from
+ * another cpu's timer_deadline after that cpu's mask bit was
+ * cleared -- a cleared bit means the cpu has woken up, so reading its timer_deadline from
* another cpu is no longer safe.
* It does not protect cpumask itself, so set operations need not take it.
* Multiple cpus clearing cpumask bits simultaneously is fine, since
* cpu_clear is atomic; hence hpet_broadcast_exit() can take the read lock
* to clear its cpumask bit, while handle_hpet_broadcast() has to take the
- * write lock to read cpumask and access timer_deadline_xxx.
+ * write lock to read cpumask and access timer_deadline.
*/
rwlock_t cpumask_lock;
spinlock_t lock;
@@ -212,10 +212,10 @@ again:
if ( cpu_isset(cpu, ch->cpumask) )
{
- if ( per_cpu(timer_deadline_start, cpu) <= now )
+ if ( per_cpu(timer_deadline, cpu) <= now )
cpu_set(cpu, mask);
- else if ( per_cpu(timer_deadline_end, cpu) < next_event )
- next_event = per_cpu(timer_deadline_end, cpu);
+ else if ( per_cpu(timer_deadline, cpu) < next_event )
+ next_event = per_cpu(timer_deadline, cpu);
}
write_unlock_irq(&ch->cpumask_lock);
@@ -661,7 +661,7 @@ void hpet_broadcast_enter(void)
int cpu = smp_processor_id();
struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
- if ( this_cpu(timer_deadline_start) == 0 )
+ if ( this_cpu(timer_deadline) == 0 )
return;
if ( !ch )
@@ -682,8 +682,8 @@ void hpet_broadcast_enter(void)
spin_lock(&ch->lock);
/* Reprogram if the current cpu's expiry time is nearer. */
- if ( this_cpu(timer_deadline_end) < ch->next_event )
- reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
+ if ( this_cpu(timer_deadline) < ch->next_event )
+ reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline), NOW(), 1);
spin_unlock(&ch->lock);
}
@@ -692,7 +692,7 @@ void hpet_broadcast_exit(void)
int cpu = smp_processor_id();
struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
- if ( this_cpu(timer_deadline_start) == 0 )
+ if ( this_cpu(timer_deadline) == 0 )
return;
if ( !ch )
@@ -700,7 +700,7 @@ void hpet_broadcast_exit(void)
/* Reprogram the deadline; trigger timer work now if it has passed. */
enable_APIC_timer();
- if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+ if ( !reprogram_timer(this_cpu(timer_deadline)) )
raise_softirq(TIMER_SOFTIRQ);
read_lock_irq(&ch->cpumask_lock);
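
The rwlock discipline described at the top of this file is the interesting part of these hunks: waking cpus only need to exclude the broadcast handler, not one another, so they all share the read side. Schematically (condensed from the two functions above, not their complete bodies):

    /* Waking cpu (hpet_broadcast_exit): cpu_clear() is atomic, so any
     * number of cpus may clear their own bits concurrently under the
     * read lock. */
    read_lock_irq(&ch->cpumask_lock);
    cpu_clear(cpu, ch->cpumask);
    read_unlock_irq(&ch->cpumask_lock);

    /* Broadcast handler (handle_hpet_broadcast): needs a consistent
     * (mask bit, timer_deadline) pair for each cpu, so it takes the
     * write lock to exclude all wakers while it scans. */
    write_lock_irq(&ch->cpumask_lock);
    if ( cpu_isset(cpu, ch->cpumask) )
    {
        if ( per_cpu(timer_deadline, cpu) <= now )
            cpu_set(cpu, mask);                 /* expired: kick it */
        else if ( per_cpu(timer_deadline, cpu) < next_event )
            next_event = per_cpu(timer_deadline, cpu);
    }
    write_unlock_irq(&ch->cpumask_lock);
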
diff -r 9a4acc688e9f -r 49e17551ef90 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c Wed Aug 18 17:09:59 2010 +0100
+++ b/xen/arch/x86/time.c Wed Aug 18 17:11:13 2010 +0100
@@ -1488,7 +1488,7 @@ void pit_broadcast_exit(void)
int cpu = smp_processor_id();
if ( cpu_test_and_clear(cpu, pit_broadcast_mask) )
- reprogram_timer(per_cpu(timer_deadline_start, cpu));
+ reprogram_timer(this_cpu(timer_deadline));
}
int pit_broadcast_is_available(void)
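
Replacing per_cpu(timer_deadline_start, cpu) with this_cpu(timer_deadline) is safe here because cpu was just read from smp_processor_id(): this_cpu(var) is, schematically, per_cpu(var, smp_processor_id()). Shown for illustration only (not Xen's actual percpu definition):

    #define this_cpu(var) per_cpu(var, smp_processor_id())

    /* Hence, inside pit_broadcast_exit(), these two calls are
     * equivalent when cpu == smp_processor_id(): */
    reprogram_timer(per_cpu(timer_deadline, cpu));
    reprogram_timer(this_cpu(timer_deadline));
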
diff -r 9a4acc688e9f -r 49e17551ef90 xen/common/timer.c
--- a/xen/common/timer.c Wed Aug 18 17:09:59 2010 +0100
+++ b/xen/common/timer.c Wed Aug 18 17:11:13 2010 +0100
@@ -19,19 +19,16 @@
#include <xen/keyhandler.h>
#include <xen/percpu.h>
#include <xen/cpu.h>
+#include <xen/symbols.h>
#include <asm/system.h>
#include <asm/desc.h>
-/*
- * We pull handlers off the timer list this far in future,
- * rather than reprogramming the time hardware.
- */
+/* We program the time hardware this far behind the closest deadline. */
static unsigned int timer_slop __read_mostly = 50000; /* 50 us */
integer_param("timer_slop", timer_slop);
struct timers {
spinlock_t lock;
- bool_t overflow;
struct timer **heap;
struct timer *list;
struct timer *running;
@@ -42,8 +39,7 @@ static DEFINE_PER_CPU(struct timers, tim
static cpumask_t timer_valid_cpumask;
-DEFINE_PER_CPU(s_time_t, timer_deadline_start);
-DEFINE_PER_CPU(s_time_t, timer_deadline_end);
+DEFINE_PER_CPU(s_time_t, timer_deadline);
/****************************************************************************
* HEAP OPERATIONS.
@@ -209,7 +205,6 @@ static int add_entry(struct timer *t)
return rc;
/* Fall back to adding to the slower linked list. */
- timers->overflow = 1;
t->status = TIMER_STATUS_in_list;
return add_to_list(&timers->list, t);
}
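
With the overflow flag gone, a non-empty ts->list is itself the signal that the heap filled up; timer_softirq_action() below tests exactly that before growing the heap. A sketch of the two-tier add path under this convention (heap_has_room() and add_to_heap() are hypothetical stand-ins for the real heap primitives):

    static int add_entry_sketch(struct timers *ts, struct timer *t)
    {
        /* Fast path: a 1-based binary min-heap keyed on expiry, so
         * ts->heap[1] is always the earliest pending timer. */
        if ( heap_has_room(ts->heap) )
        {
            t->status = TIMER_STATUS_in_heap;
            return add_to_heap(ts->heap, t);
        }

        /* Heap full: park the timer on the unsorted list. The softirq
         * handler sees ts->list != NULL and reallocates a larger heap. */
        t->status = TIMER_STATUS_in_list;
        return add_to_list(&ts->list, t);
    }
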
@@ -310,7 +305,6 @@ void set_timer(struct timer *timer, s_ti
deactivate_timer(timer);
timer->expires = expires;
- timer->expires_end = expires + timer_slop;
activate_timer(timer);
@@ -426,13 +420,13 @@ static void timer_softirq_action(void)
{
struct timer *t, **heap, *next;
struct timers *ts;
- s_time_t now;
+ s_time_t now, deadline;
ts = &this_cpu(timers);
heap = ts->heap;
/* If we overflowed the heap, try to allocate a larger heap. */
- if ( unlikely(ts->overflow) )
+ if ( unlikely(ts->list != NULL) )
{
/* old_limit == (2^n)-1; new_limit == (2^(n+4))-1 */
int old_limit = GET_HEAP_LIMIT(heap);
@@ -480,46 +474,16 @@ static void timer_softirq_action(void)
add_entry(t);
}
- ts->overflow = (ts->list != NULL);
- if ( unlikely(ts->overflow) )
- {
- /* Find earliest deadline at head of list or top of heap. */
- this_cpu(timer_deadline_start) = ts->list->expires;
- if ( (GET_HEAP_SIZE(heap) != 0) &&
- ((t = heap[1])->expires < this_cpu(timer_deadline_start)) )
- this_cpu(timer_deadline_start) = t->expires;
- this_cpu(timer_deadline_end) = this_cpu(timer_deadline_start);
- }
- else
- {
- /*
- * Find the earliest deadline that encompasses largest number of timers
- * on the heap. To do this we take timers from the heap while their
- * valid deadline ranges continue to intersect.
- */
- s_time_t start = 0, end = STIME_MAX;
- struct timer **list_tail = &ts->list;
-
- while ( (GET_HEAP_SIZE(heap) != 0) &&
- ((t = heap[1])->expires <= end) )
- {
- remove_entry(t);
-
- t->status = TIMER_STATUS_in_list;
- t->list_next = NULL;
- *list_tail = t;
- list_tail = &t->list_next;
-
- start = t->expires;
- if ( end > t->expires_end )
- end = t->expires_end;
- }
-
- this_cpu(timer_deadline_start) = start;
- this_cpu(timer_deadline_end) = end;
- }
-
- if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+ /* Find earliest deadline from head of linked list and top of heap. */
+ deadline = STIME_MAX;
+ if ( GET_HEAP_SIZE(heap) != 0 )
+ deadline = heap[1]->expires;
+ if ( (ts->list != NULL) && (ts->list->expires < deadline) )
+ deadline = ts->list->expires;
+ this_cpu(timer_deadline) =
+ (deadline == STIME_MAX) ? 0 : deadline + timer_slop;
+
+ if ( !reprogram_timer(this_cpu(timer_deadline)) )
raise_softirq(TIMER_SOFTIRQ);
spin_unlock_irq(&ts->lock);
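
The replacement logic collapses the old range-coalescing into a single rule: take the earliest expiry across both containers and add a fixed slop. A standalone restatement (select_deadline() is a hypothetical name; 0 stands for "no pending timer", matching the checks against 0 in hpet.c above):

    static s_time_t select_deadline(struct timers *ts, s_time_t slop)
    {
        s_time_t deadline = STIME_MAX;

        if ( GET_HEAP_SIZE(ts->heap) != 0 )
            deadline = ts->heap[1]->expires;          /* top of min-heap */
        if ( (ts->list != NULL) && (ts->list->expires < deadline) )
            deadline = ts->list->expires;             /* overflow list   */

        /* Program the hardware up to 'slop' ns late, so timers with
         * nearby deadlines are batched into a single interrupt. */
        return (deadline == STIME_MAX) ? 0 : deadline + slop;
    }
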
@@ -531,6 +495,13 @@ s_time_t align_timer(s_time_t firsttick,
return firsttick;
return firsttick + (period - 1) - ((firsttick - 1) % period);
+}
+
+static void dump_timer(struct timer *t, s_time_t now)
+{
+ printk(" ex=%8ldus timer=%p cb=%p(%p)",
+ (t->expires - now) / 1000, t, t->function, t->data);
+ print_symbol(" %s\n", (unsigned long)t->function);
}
static void dump_timerq(unsigned char key)
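
The align_timer() expression rounds firsttick up to the next multiple of period while leaving an already-aligned value untouched. A quick check of that identity with period = 100 (hypothetical test harness):

    #include <assert.h>

    int main(void)
    {
        long long period = 100;
        /* 1050 rounds up: 1050 + 99 - (1049 % 100) = 1100. */
        assert(1050 + (period - 1) - ((1050 - 1) % period) == 1100);
        /* 1000 is already aligned: 1000 + 99 - (999 % 100) = 1000. */
        assert(1000 + (period - 1) - ((1000 - 1) % period) == 1000);
        return 0;
    }
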
@@ -541,28 +512,19 @@ static void dump_timerq(unsigned char ke
s_time_t now = NOW();
int i, j;
- printk("Dumping timer queues: NOW=0x%08X%08X\n",
- (u32)(now>>32), (u32)now);
+ printk("Dumping timer queues:\n");
for_each_online_cpu( i )
{
ts = &per_cpu(timers, i);
- printk("CPU[%02d] ", i);
+ printk("CPU%02d:\n", i);
spin_lock_irqsave(&ts->lock, flags);
for ( j = 1; j <= GET_HEAP_SIZE(ts->heap); j++ )
- {
- t = ts->heap[j];
- printk (" %d : %p ex=0x%08X%08X %p %p\n",
- j, t, (u32)(t->expires>>32), (u32)t->expires,
- t->data, t->function);
- }
+ dump_timer(ts->heap[j], now);
for ( t = ts->list, j = 0; t != NULL; t = t->list_next, j++ )
- printk (" L%d : %p ex=0x%08X%08X %p %p\n",
- j, t, (u32)(t->expires>>32), (u32)t->expires,
- t->data, t->function);
+ dump_timer(t, now);
spin_unlock_irqrestore(&ts->lock, flags);
- printk("\n");
}
}
diff -r 9a4acc688e9f -r 49e17551ef90 xen/include/xen/timer.h
--- a/xen/include/xen/timer.h Wed Aug 18 17:09:59 2010 +0100
+++ b/xen/include/xen/timer.h Wed Aug 18 17:11:13 2010 +0100
@@ -16,7 +16,6 @@ struct timer {
struct timer {
/* System time expiry value (nanoseconds since boot). */
s_time_t expires;
- s_time_t expires_end;
/* Position in active-timer data structure. */
union {
@@ -82,12 +81,8 @@ void kill_timer(struct timer *timer);
/* Bootstrap initialisation. Must be called before any other timer function. */
void timer_init(void);
-/*
- * Next timer deadline for each CPU.
- * Modified only by the local CPU and never in interrupt context.
- */
-DECLARE_PER_CPU(s_time_t, timer_deadline_start);
-DECLARE_PER_CPU(s_time_t, timer_deadline_end);
+/* Next timer deadline for each CPU. */
+DECLARE_PER_CPU(s_time_t, timer_deadline);
/* Arch-defined function to reprogram timer hardware for new deadline. */
int reprogram_timer(s_time_t timeout);
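
Taken together, the interface after this merge is one per-cpu deadline, published by the common timer code and consumed by the arch idle/broadcast paths. A minimal consumer sketch under that contract (a zero deadline means no pending timer, and a failed reprogram_timer() means the deadline has already passed, as the callers above assume):

    static void idle_reprogram_sketch(void)
    {
        s_time_t deadline = this_cpu(timer_deadline);

        if ( deadline == 0 )                /* nothing pending here    */
            return;

        if ( !reprogram_timer(deadline) )   /* deadline already passed */
            raise_softirq(TIMER_SOFTIRQ);
    }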