WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
[Xen-changelog] [xen-unstable] timers: Track inactive timers and migrate them on cpu offline.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] timers: Track inactive timers and migrate them on cpu offline.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 02 Jun 2010 05:15:23 -0700
Delivery-date: Wed, 02 Jun 2010 05:16:41 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1275480791 -3600
# Node ID 5aabc6f94df5f275647d55caa24780eff0c81355
# Parent  a3bdee5a20daf590ae7a440dad4e3b104b99c620
timers: Track inactive timers and migrate them on cpu offline.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/irq.c      |   22 +++-----
 xen/common/domain.c     |    7 --
 xen/common/schedule.c   |   23 --------
 xen/common/timer.c      |  129 +++++++++++++++++++++++++++++++++---------------
 xen/include/xen/timer.h |   85 +++++++++++--------------------
 5 files changed, 131 insertions(+), 135 deletions(-)
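
For readers skimming the diff below: the caller-visible change is that code which used to stop a timer and reassign timer->cpu by hand now calls migrate_timer(), while init_timer(), kill_timer() and migrate_timer() additionally link and unlink the timer on a new per-CPU list of inactive timers, so that inactive timers can also be moved off a CPU being offlined. A minimal, abbreviated sketch of the caller-side change, using the singleshot-timer names from the xen/common/schedule.c hunk below (not compilable on its own):

    /* Before this changeset: stop the timer and reassign its CPU by hand. */
    if ( v->singleshot_timer.cpu != smp_processor_id() )
    {
        stop_timer(&v->singleshot_timer);
        v->singleshot_timer.cpu = smp_processor_id();
    }
    set_timer(&v->singleshot_timer, timeout);

    /* After this changeset: migrate_timer() performs the stop / CPU
     * reassignment / restart, and also keeps the timer on the correct
     * per-CPU inactive list. */
    migrate_timer(&v->singleshot_timer, smp_processor_id());
    set_timer(&v->singleshot_timer, timeout);
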

diff -r a3bdee5a20da -r 5aabc6f94df5 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Wed Jun 02 10:54:32 2010 +0100
+++ b/xen/arch/x86/irq.c        Wed Jun 02 13:13:11 2010 +0100
@@ -46,8 +46,6 @@ static DECLARE_BITMAP(used_vectors, NR_V
 
 struct irq_cfg __read_mostly *irq_cfg = NULL;
 
-static struct timer *__read_mostly irq_guest_eoi_timer;
-
 static DEFINE_SPINLOCK(vector_lock);
 
 DEFINE_PER_CPU(vector_irq_t, vector_irq);
@@ -275,18 +273,15 @@ int init_irq_data(void)
     irq_desc = xmalloc_array(struct irq_desc, nr_irqs);
     irq_cfg = xmalloc_array(struct irq_cfg, nr_irqs);
     irq_status = xmalloc_array(int, nr_irqs);
-    irq_guest_eoi_timer = xmalloc_array(struct timer, nr_irqs);
     irq_vector = xmalloc_array(u8, nr_irqs_gsi);
     
-    if (!irq_desc || !irq_cfg || !irq_status ||! irq_vector ||
-        !irq_guest_eoi_timer)
+    if ( !irq_desc || !irq_cfg || !irq_status ||! irq_vector )
         return -ENOMEM;
 
     memset(irq_desc, 0,  nr_irqs * sizeof(*irq_desc));
     memset(irq_cfg, 0,  nr_irqs * sizeof(*irq_cfg));
     memset(irq_status, 0,  nr_irqs * sizeof(*irq_status));
     memset(irq_vector, 0, nr_irqs_gsi * sizeof(*irq_vector));
-    memset(irq_guest_eoi_timer, 0, nr_irqs * sizeof(*irq_guest_eoi_timer));
     
     for (irq = 0; irq < nr_irqs; irq++) {
         desc = irq_to_desc(irq);
@@ -741,6 +736,7 @@ typedef struct {
 #define ACKTYPE_UNMASK 1     /* Unmask PIC hardware (from any CPU)   */
 #define ACKTYPE_EOI    2     /* EOI on the CPU that was interrupted  */
     cpumask_t cpu_eoi_map;   /* CPUs that need to EOI this interrupt */
+    struct timer eoi_timer;
     struct domain *guest[IRQ_MAX_GUESTS];
 } irq_guest_action_t;
 
@@ -850,7 +846,7 @@ static void __do_IRQ_guest(int irq)
 
     if ( already_pending == action->nr_guests )
     {
-        stop_timer(&irq_guest_eoi_timer[irq]);
+        stop_timer(&action->eoi_timer);
         desc->handler->disable(irq);
         desc->status |= IRQ_GUEST_EOI_PENDING;
         for ( i = 0; i < already_pending; ++i )
@@ -866,9 +862,8 @@ static void __do_IRQ_guest(int irq)
              * - skip the timer setup below.
              */
         }
-        init_timer(&irq_guest_eoi_timer[irq],
-                   irq_guest_eoi_timer_fn, desc, smp_processor_id());
-        set_timer(&irq_guest_eoi_timer[irq], NOW() + MILLISECS(1));
+        migrate_timer(&action->eoi_timer, smp_processor_id());
+        set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
     }
 }
 
@@ -979,7 +974,7 @@ static void __pirq_guest_eoi(struct doma
     if ( action->ack_type == ACKTYPE_NONE )
     {
         ASSERT(!test_bit(pirq, d->pirq_mask));
-        stop_timer(&irq_guest_eoi_timer[irq]);
+        stop_timer(&action->eoi_timer);
         _irq_guest_eoi(desc);
     }
 
@@ -1163,6 +1158,7 @@ int pirq_guest_bind(struct vcpu *v, int 
         action->shareable   = will_share;
         action->ack_type    = pirq_acktype(v->domain, pirq);
         cpus_clear(action->cpu_eoi_map);
+        init_timer(&action->eoi_timer, irq_guest_eoi_timer_fn, desc, 0);
 
         desc->depth = 0;
         desc->status |= IRQ_GUEST;
@@ -1267,7 +1263,7 @@ static irq_guest_action_t *__pirq_guest_
         }
         break;
     case ACKTYPE_NONE:
-        stop_timer(&irq_guest_eoi_timer[irq]);
+        stop_timer(&action->eoi_timer);
         _irq_guest_eoi(desc);
         break;
     }
@@ -1309,7 +1305,7 @@ static irq_guest_action_t *__pirq_guest_
     desc->action = NULL;
     desc->status &= ~IRQ_GUEST;
     desc->status &= ~IRQ_INPROGRESS;
-    kill_timer(&irq_guest_eoi_timer[irq]);
+    kill_timer(&action->eoi_timer);
     desc->handler->shutdown(irq);
 
     /* Caller frees the old guest descriptor block. */
diff -r a3bdee5a20da -r 5aabc6f94df5 xen/common/domain.c
--- a/xen/common/domain.c       Wed Jun 02 10:54:32 2010 +0100
+++ b/xen/common/domain.c       Wed Jun 02 13:13:11 2010 +0100
@@ -847,12 +847,7 @@ long do_vcpu_op(int cmd, int vcpuid, XEN
              (set.timeout_abs_ns < NOW()) )
             return -ETIME;
 
-        if ( v->singleshot_timer.cpu != smp_processor_id() )
-        {
-            stop_timer(&v->singleshot_timer);
-            v->singleshot_timer.cpu = smp_processor_id();
-        }
-
+        migrate_timer(&v->singleshot_timer, smp_processor_id());
         set_timer(&v->singleshot_timer, set.timeout_abs_ns);
 
         break;
diff -r a3bdee5a20da -r 5aabc6f94df5 xen/common/schedule.c
--- a/xen/common/schedule.c     Wed Jun 02 10:54:32 2010 +0100
+++ b/xen/common/schedule.c     Wed Jun 02 13:13:11 2010 +0100
@@ -475,18 +475,6 @@ int cpu_disable_scheduler(unsigned int c
                 cpus_setall(v->cpu_affinity);
             }
 
-            /*
-             * Migrate single-shot timers to CPU0. A new cpu will automatically
-             * be chosen when the timer is next re-set.
-             */
-            if ( v->singleshot_timer.cpu == cpu )
-            {
-                int cpu_mig = first_cpu(c->cpu_valid);
-                if ( cpu_mig == cpu )
-                    cpu_mig = next_cpu(cpu_mig, c->cpu_valid);
-                migrate_timer(&v->singleshot_timer, cpu_mig);
-            }
-
             if ( v->processor == cpu )
             {
                 set_bit(_VPF_migrating, &v->pause_flags);
@@ -804,12 +792,7 @@ long do_set_timer_op(s_time_t timeout)
     }
     else
     {
-        if ( v->singleshot_timer.cpu != smp_processor_id() )
-        {
-            stop_timer(&v->singleshot_timer);
-            v->singleshot_timer.cpu = smp_processor_id();
-        }
-
+        migrate_timer(&v->singleshot_timer, smp_processor_id());
         set_timer(&v->singleshot_timer, timeout);
     }
 
@@ -890,8 +873,6 @@ static void vcpu_periodic_timer_work(str
     s_time_t now = NOW();
     uint64_t periodic_next_event;
 
-    ASSERT(!active_timer(&v->periodic_timer));
-
     if ( v->periodic_period == 0 )
         return;
 
@@ -904,7 +885,7 @@ static void vcpu_periodic_timer_work(str
         periodic_next_event = now + v->periodic_period;
     }
 
-    v->periodic_timer.cpu = smp_processor_id();
+    migrate_timer(&v->periodic_timer, smp_processor_id());
     set_timer(&v->periodic_timer, periodic_next_event);
 }
 
diff -r a3bdee5a20da -r 5aabc6f94df5 xen/common/timer.c
--- a/xen/common/timer.c        Wed Jun 02 10:54:32 2010 +0100
+++ b/xen/common/timer.c        Wed Jun 02 13:13:11 2010 +0100
@@ -35,6 +35,7 @@ struct timers {
     struct timer **heap;
     struct timer  *list;
     struct timer  *running;
+    struct list_head inactive;
 } __cacheline_aligned;
 
 static DEFINE_PER_CPU(struct timers, timers);
@@ -169,8 +170,9 @@ static int add_to_list(struct timer **pp
  * TIMER OPERATIONS.
  */
 
-static int remove_entry(struct timers *timers, struct timer *t)
-{
+static int remove_entry(struct timer *t)
+{
+    struct timers *timers = &per_cpu(timers, t->cpu);
     int rc;
 
     switch ( t->status )
@@ -186,15 +188,16 @@ static int remove_entry(struct timers *t
         BUG();
     }
 
-    t->status = TIMER_STATUS_inactive;
+    t->status = TIMER_STATUS_invalid;
     return rc;
 }
 
-static int add_entry(struct timers *timers, struct timer *t)
-{
+static int add_entry(struct timer *t)
+{
+    struct timers *timers = &per_cpu(timers, t->cpu);
     int rc;
 
-    ASSERT(t->status == TIMER_STATUS_inactive);
+    ASSERT(t->status == TIMER_STATUS_invalid);
 
     /* Try to add to heap. t->heap_offset indicates whether we succeed. */
     t->heap_offset = 0;
@@ -209,18 +212,23 @@ static int add_entry(struct timers *time
     return add_to_list(&timers->list, t);
 }
 
-static inline void __add_timer(struct timer *timer)
-{
-    int cpu = timer->cpu;
-    if ( add_entry(&per_cpu(timers, cpu), timer) )
-        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
-}
-
-static inline void __stop_timer(struct timer *timer)
-{
-    int cpu = timer->cpu;
-    if ( remove_entry(&per_cpu(timers, cpu), timer) )
-        cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
+static inline void activate_timer(struct timer *timer)
+{
+    ASSERT(timer->status == TIMER_STATUS_inactive);
+    timer->status = TIMER_STATUS_invalid;
+    list_del(&timer->inactive);
+
+    if ( add_entry(timer) )
+        cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ);
+}
+
+static inline void deactivate_timer(struct timer *timer)
+{
+    if ( remove_entry(timer) )
+        cpu_raise_softirq(timer->cpu, TIMER_SOFTIRQ);
+
+    timer->status = TIMER_STATUS_inactive;
+    list_add(&timer->inactive, &per_cpu(timers, timer->cpu).inactive);
 }
 
 static inline void timer_lock(struct timer *timer)
@@ -253,6 +261,32 @@ static inline void timer_unlock(struct t
     do { timer_unlock(t); local_irq_restore(flags); } while ( 0 )
 
 
+static bool_t active_timer(struct timer *timer)
+{
+    ASSERT(timer->status >= TIMER_STATUS_inactive);
+    ASSERT(timer->status <= TIMER_STATUS_in_list);
+    return (timer->status >= TIMER_STATUS_in_heap);
+}
+
+
+void init_timer(
+    struct timer *timer,
+    void        (*function)(void *),
+    void         *data,
+    unsigned int  cpu)
+{
+    unsigned long flags;
+    memset(timer, 0, sizeof(*timer));
+    timer->function = function;
+    timer->data = data;
+    timer->cpu = cpu;
+    timer->status = TIMER_STATUS_inactive;
+    timer_lock_irqsave(timer, flags);
+    list_add(&timer->inactive, &per_cpu(timers, cpu).inactive);
+    timer_unlock_irqrestore(timer, flags);
+}
+
+
 void set_timer(struct timer *timer, s_time_t expires)
 {
     unsigned long flags;
@@ -260,13 +294,13 @@ void set_timer(struct timer *timer, s_ti
     timer_lock_irqsave(timer, flags);
 
     if ( active_timer(timer) )
-        __stop_timer(timer);
+        deactivate_timer(timer);
 
     timer->expires = expires;
     timer->expires_end = expires + timer_slop;
 
     if ( likely(timer->status != TIMER_STATUS_killed) )
-        __add_timer(timer);
+        activate_timer(timer);
 
     timer_unlock_irqrestore(timer, flags);
 }
@@ -279,7 +313,7 @@ void stop_timer(struct timer *timer)
     timer_lock_irqsave(timer, flags);
 
     if ( active_timer(timer) )
-        __stop_timer(timer);
+        deactivate_timer(timer);
 
     timer_unlock_irqrestore(timer, flags);
 }
@@ -287,7 +321,8 @@ void stop_timer(struct timer *timer)
 
 void migrate_timer(struct timer *timer, unsigned int new_cpu)
 {
-    int           old_cpu;
+    int old_cpu;
+    bool_t active;
     unsigned long flags;
 
     for ( ; ; )
@@ -313,16 +348,16 @@ void migrate_timer(struct timer *timer, 
         spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
     }
 
-    if ( active_timer(timer) )
-    {
-        __stop_timer(timer);
-        timer->cpu = new_cpu;
-        __add_timer(timer);
-    }
-    else
-    {
-        timer->cpu = new_cpu;
-    }
+    active = active_timer(timer);
+    if ( active )
+        deactivate_timer(timer);
+
+    list_del(&timer->inactive);
+    timer->cpu = new_cpu;
+    list_add(&timer->inactive, &per_cpu(timers, new_cpu).inactive);
+
+    if ( active )
+        activate_timer(timer);
 
     spin_unlock(&per_cpu(timers, old_cpu).lock);
     spin_unlock_irqrestore(&per_cpu(timers, new_cpu).lock, flags);
@@ -339,7 +374,9 @@ void kill_timer(struct timer *timer)
     timer_lock_irqsave(timer, flags);
 
     if ( active_timer(timer) )
-        __stop_timer(timer);
+        deactivate_timer(timer);
+
+    list_del(&timer->inactive);
     timer->status = TIMER_STATUS_killed;
 
     timer_unlock_irqrestore(timer, flags);
@@ -354,6 +391,9 @@ static void execute_timer(struct timers 
 {
     void (*fn)(void *) = t->function;
     void *data = t->data;
+
+    t->status = TIMER_STATUS_inactive;
+    list_add(&t->inactive, &ts->inactive);
 
     ts->running = t;
     spin_unlock_irq(&ts->lock);
@@ -401,7 +441,6 @@ static void timer_softirq_action(void)
             ((t = heap[1])->expires < now) )
     {
         remove_from_heap(heap, t);
-        t->status = TIMER_STATUS_inactive;
         execute_timer(ts, t);
     }
 
@@ -409,7 +448,6 @@ static void timer_softirq_action(void)
     while ( ((t = ts->list) != NULL) && (t->expires < now) )
     {
         ts->list = t->list_next;
-        t->status = TIMER_STATUS_inactive;
         execute_timer(ts, t);
     }
 
@@ -419,8 +457,8 @@ static void timer_softirq_action(void)
     while ( unlikely((t = next) != NULL) )
     {
         next = t->list_next;
-        t->status = TIMER_STATUS_inactive;
-        add_entry(ts, t);
+        t->status = TIMER_STATUS_invalid;
+        add_entry(t);
     }
 
     ts->overflow = (ts->list != NULL);
@@ -446,7 +484,7 @@ static void timer_softirq_action(void)
         while ( (GET_HEAP_SIZE(heap) != 0) &&
                 ((t = heap[1])->expires <= end) )
         {
-            remove_entry(ts, t);
+            remove_entry(t);
 
             t->status = TIMER_STATUS_in_list;
             t->list_next = NULL;
@@ -529,13 +567,23 @@ static void migrate_timers_from_cpu(unsi
 
     while ( (t = GET_HEAP_SIZE(ts->heap) ? ts->heap[1] : ts->list) != NULL )
     {
-        remove_entry(ts, t);
+        remove_entry(t);
         t->cpu = 0;
-        __add_timer(t);
+        add_entry(t);
+    }
+
+    while ( !list_empty(&ts->inactive) )
+    {
+        t = list_entry(ts->inactive.next, struct timer, inactive);
+        list_del(&t->inactive);
+        t->cpu = 0;
+        list_add(&t->inactive, &per_cpu(timers, 0).inactive);
     }
 
     spin_unlock(&ts->lock);
     spin_unlock_irq(&per_cpu(timers, 0).lock);
+
+    cpu_raise_softirq(0, TIMER_SOFTIRQ);
 }
 
 static struct timer *dummy_heap;
@@ -549,6 +597,7 @@ static int cpu_callback(
     switch ( action )
     {
     case CPU_UP_PREPARE:
+        INIT_LIST_HEAD(&ts->inactive);
         spin_lock_init(&ts->lock);
         ts->heap = &dummy_heap;
         break;
diff -r a3bdee5a20da -r 5aabc6f94df5 xen/include/xen/timer.h
--- a/xen/include/xen/timer.h   Wed Jun 02 10:54:32 2010 +0100
+++ b/xen/include/xen/timer.h   Wed Jun 02 13:13:11 2010 +0100
@@ -11,6 +11,7 @@
 #include <xen/spinlock.h>
 #include <xen/time.h>
 #include <xen/string.h>
+#include <xen/list.h>
 
 struct timer {
     /* System time expiry value (nanoseconds since boot). */
@@ -19,10 +20,12 @@ struct timer {
 
     /* Position in active-timer data structure. */
     union {
-        /* Timer-heap offset. */
+        /* Timer-heap offset (TIMER_STATUS_in_heap). */
         unsigned int heap_offset;
-        /* Linked list. */
+        /* Linked list (TIMER_STATUS_in_list). */
         struct timer *list_next;
+        /* Linked list of inactive timers (TIMER_STATUS_inactive). */
+        struct list_head inactive;
     };
 
     /* On expiry, '(*function)(data)' will be executed in softirq context. */
@@ -33,10 +36,11 @@ struct timer {
     uint16_t cpu;
 
     /* Timer status. */
-#define TIMER_STATUS_inactive 0 /* Not in use; can be activated.    */
-#define TIMER_STATUS_killed   1 /* Not in use; canot be activated.  */
-#define TIMER_STATUS_in_heap  2 /* In use; on timer heap.           */
-#define TIMER_STATUS_in_list  3 /* In use; on overflow linked list. */
+#define TIMER_STATUS_invalid  0 /* Should never see this.           */
+#define TIMER_STATUS_inactive 1 /* Not in use; can be activated.    */
+#define TIMER_STATUS_killed   2 /* Not in use; cannot be activated. */
+#define TIMER_STATUS_in_heap  3 /* In use; on timer heap.           */
+#define TIMER_STATUS_in_list  4 /* In use; on overflow linked list. */
     uint8_t status;
 };
 
@@ -45,67 +49,38 @@ struct timer {
  */
 
 /*
- * Returns TRUE if the given timer is on a timer list.
- * The timer must *previously* have been initialised by init_timer(), or its
- * structure initialised to all-zeroes.
+ * Initialise a timer structure with an initial callback CPU, callback
+ * function and callback data pointer. This function must only be called on
+ * a brand new timer, or a killed timer. It must *never* execute concurrently
+ * with any other operation on the same timer.
  */
-static inline int active_timer(struct timer *timer)
-{
-    return (timer->status >= TIMER_STATUS_in_heap);
-}
+void init_timer(
+    struct timer *timer,
+    void        (*function)(void *),
+    void         *data,
+    unsigned int  cpu);
 
-/*
- * Initialise a timer structure with an initial callback CPU, callback
- * function and callback data pointer. This function may be called at any
- * time (and multiple times) on an inactive timer. It must *never* execute
- * concurrently with any other operation on the same timer.
- */
-static inline void init_timer(
-    struct timer *timer,
-    void           (*function)(void *),
-    void            *data,
-    unsigned int     cpu)
-{
-    memset(timer, 0, sizeof(*timer));
-    timer->function = function;
-    timer->data     = data;
-    timer->cpu      = cpu;
-}
-
-/*
- * Set the expiry time and activate a timer. The timer must *previously* have
- * been initialised by init_timer() (so that callback details are known).
- */
-extern void set_timer(struct timer *timer, s_time_t expires);
+/* Set the expiry time and activate a timer. */
+void set_timer(struct timer *timer, s_time_t expires);
 
 /*
  * Deactivate a timer This function has no effect if the timer is not currently
  * active.
- * The timer must *previously* have been initialised by init_timer(), or its
- * structure initialised to all zeroes.
  */
-extern void stop_timer(struct timer *timer);
+void stop_timer(struct timer *timer);
 
-/*
- * Migrate a timer to a different CPU. The timer may be currently active.
- * The timer must *previously* have been initialised by init_timer(), or its
- * structure initialised to all zeroes.
- */
-extern void migrate_timer(struct timer *timer, unsigned int new_cpu);
+/* Migrate a timer to a different CPU. The timer may be currently active. */
+void migrate_timer(struct timer *timer, unsigned int new_cpu);
 
 /*
  * Deactivate a timer and prevent it from being re-set (future calls to
  * set_timer will silently fail). When this function returns it is guaranteed
  * that the timer callback handler is not running on any CPU.
- * The timer must *previously* have been initialised by init_timer(), or its
- * structure initialised to all zeroes.
  */
-extern void kill_timer(struct timer *timer);
+void kill_timer(struct timer *timer);
 
-/*
- * Bootstrap initialisation. Must be called before any other timer function.
- */
-extern void timer_init(void);
+/* Bootstrap initialisation. Must be called before any other timer function. */
+void timer_init(void);
 
 /*
  * Next timer deadline for each CPU.
@@ -115,10 +90,10 @@ DECLARE_PER_CPU(s_time_t, timer_deadline
 DECLARE_PER_CPU(s_time_t, timer_deadline_end);
 
 /* Arch-defined function to reprogram timer hardware for new deadline. */
-extern int reprogram_timer(s_time_t timeout);
+int reprogram_timer(s_time_t timeout);
 
-/* calculate the aligned first tick time for a given periodic timer */ 
-extern s_time_t align_timer(s_time_t firsttick, uint64_t period);
+/* Calculate the aligned first tick time for a given periodic timer. */
+s_time_t align_timer(s_time_t firsttick, uint64_t period);
 
 #endif /* _TIMER_H_ */
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
