To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] cpuidle: suspend/resume scheduler tick timer during cpu idle state entry/exit
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 07 Apr 2009 23:01:00 -0700
Delivery-date: Tue, 07 Apr 2009 23:03:29 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1238496716 -3600
# Node ID f6a2bf60d49cfd05bb36ecafb3598139ca142804
# Parent  ee3c5a08f80e862511253b1f06dd0e5a69f5ef4a
cpuidle: suspend/resume scheduler tick timer during cpu idle state entry/exit

cpuidle can collaborate with the scheduler to reduce unnecessary timer
interrupts. For example, the credit scheduler's accounting timer does
not need to be active while a CPU is idle, so it can be stopped on
cpuidle entry and resumed on cpuidle exit. This patch implements this
by adding two ops to the scheduler interface, tick_suspend/tick_resume,
and implementing them for the credit scheduler.

Signed-off-by: Yu Ke <ke.yu@xxxxxxxxx>
Signed-off-by: Tian Kevin <kevin.tian@xxxxxxxxx>
---
 xen/arch/x86/acpi/cpu_idle.c |   14 ++++++++++++
 xen/common/sched_credit.c    |   49 ++++++++++++++++++++++++++++++-------------
 xen/common/schedule.c        |   10 ++++++++
 xen/include/xen/sched-if.h   |    3 ++
 xen/include/xen/sched.h      |    2 +
 5 files changed, 64 insertions(+), 14 deletions(-)
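
For readers skimming the archive, here is a minimal standalone sketch (not
part of the changeset) of the optional-hook dispatch pattern the description
refers to: the scheduler exposes tick_suspend/tick_resume function pointers,
and generic wrappers invoke them only when the active scheduler provides
them. Names such as toy_scheduler and the printf bodies are purely
illustrative; this is not Xen's actual SCHED_OP machinery.

/*
 * Standalone illustration only; compile with any C compiler.
 * The toy_scheduler struct stands in for Xen's struct scheduler,
 * and the wrappers mirror sched_tick_suspend()/sched_tick_resume().
 */
#include <stdio.h>

struct toy_scheduler {
    const char *name;
    void (*tick_suspend)(void);   /* may be NULL if the scheduler has no tick */
    void (*tick_resume)(void);
};

static void credit_tick_suspend(void) { printf("credit: stop per-CPU ticker\n"); }
static void credit_tick_resume(void)  { printf("credit: restart per-CPU ticker\n"); }

static struct toy_scheduler active_sched = {
    .name         = "toy-credit",
    .tick_suspend = credit_tick_suspend,
    .tick_resume  = credit_tick_resume,
};

/* Generic entry points: call the hook only if the scheduler implements it. */
static void sched_tick_suspend(void)
{
    if ( active_sched.tick_suspend )
        active_sched.tick_suspend();
}

static void sched_tick_resume(void)
{
    if ( active_sched.tick_resume )
        active_sched.tick_resume();
}

int main(void)
{
    /* An idle driver brackets its sleep with the two hooks. */
    sched_tick_suspend();
    printf("cpu enters idle state\n");
    printf("cpu wakes up\n");
    sched_tick_resume();
    return 0;
}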

diff -r ee3c5a08f80e -r f6a2bf60d49c xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c      Tue Mar 31 11:49:56 2009 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c      Tue Mar 31 11:51:56 2009 +0100
@@ -195,6 +195,15 @@ static void acpi_processor_idle(void)
     int sleep_ticks = 0;
     u32 t1, t2 = 0;
 
+    sched_tick_suspend();
+    /*
+     * sched_tick_suspend() may raise TIMER_SOFTIRQ via __stop_timer(),
+     * which would break the later assumption that no softirq is pending,
+     * so call do_softirq() here
+     */
+    if ( softirq_pending(smp_processor_id()) )
+        do_softirq();
+
     /*
      * Interrupts must be disabled during bus mastering calculations and
      * for C2/C3 transitions.
@@ -204,6 +213,7 @@ static void acpi_processor_idle(void)
     if ( softirq_pending(smp_processor_id()) )
     {
         local_irq_enable();
+        sched_tick_resume();
         return;
     }
 
@@ -223,6 +233,7 @@ static void acpi_processor_idle(void)
             pm_idle_save();
         else
             acpi_safe_halt();
+        sched_tick_resume();
         return;
     }
 
@@ -329,6 +340,7 @@ static void acpi_processor_idle(void)
 
     default:
         local_irq_enable();
+        sched_tick_resume();
         return;
     }
 
@@ -338,6 +350,8 @@ static void acpi_processor_idle(void)
         power->last_residency = acpi_pm_tick_to_ns(sleep_ticks) / 1000UL;
         cx->time += sleep_ticks;
     }
+
+    sched_tick_resume();
 
     if ( cpuidle_current_governor->reflect )
         cpuidle_current_governor->reflect(power);
diff -r ee3c5a08f80e -r f6a2bf60d49c xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Tue Mar 31 11:49:56 2009 +0100
+++ b/xen/common/sched_credit.c Tue Mar 31 11:51:56 2009 +0100
@@ -154,6 +154,7 @@ struct csched_private {
     spinlock_t lock;
     struct list_head active_sdom;
     uint32_t ncpus;
+    struct timer  master_ticker;
     unsigned int master;
     cpumask_t idlers;
     uint32_t weight;
@@ -757,7 +758,7 @@ csched_runq_sort(unsigned int cpu)
 }
 
 static void
-csched_acct(void)
+csched_acct(void* dummy)
 {
     unsigned long flags;
     struct list_head *iter_vcpu, *next_vcpu;
@@ -792,7 +793,7 @@ csched_acct(void)
         csched_priv.credit_balance = 0;
         spin_unlock_irqrestore(&csched_priv.lock, flags);
         CSCHED_STAT_CRANK(acct_no_work);
-        return;
+        goto out;
     }
 
     CSCHED_STAT_CRANK(acct_run);
@@ -950,6 +951,10 @@ csched_acct(void)
 
     /* Inform each CPU that its runq needs to be sorted */
     csched_priv.runq_sort++;
+
+out:
+    set_timer( &csched_priv.master_ticker, NOW() +
+            MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
 }
 
 static void
@@ -965,18 +970,6 @@ csched_tick(void *_cpu)
      */
     if ( !is_idle_vcpu(current) )
         csched_vcpu_acct(cpu);
-
-    /*
-     * Host-wide accounting duty
-     *
-     * Note: Currently, this is always done by the master boot CPU. Eventually,
-     * we could distribute or at the very least cycle the duty.
-     */
-    if ( (csched_priv.master == cpu) &&
-         (spc->tick % CSCHED_TICKS_PER_ACCT) == 0 )
-    {
-        csched_acct();
-    }
 
     /*
      * Check if runq needs to be sorted
@@ -1310,10 +1303,35 @@ static __init int csched_start_tickers(v
         set_timer(&spc->ticker, NOW() + MILLISECS(CSCHED_MSECS_PER_TICK));
     }
 
+    init_timer( &csched_priv.master_ticker, csched_acct, NULL,
+                    csched_priv.master);
+
+    set_timer( &csched_priv.master_ticker, NOW() +
+            MILLISECS(CSCHED_MSECS_PER_TICK) * CSCHED_TICKS_PER_ACCT );
+
     return 0;
 }
 __initcall(csched_start_tickers);
 
+static void csched_tick_suspend(void)
+{
+    struct csched_pcpu *spc;
+
+    spc = CSCHED_PCPU(smp_processor_id());
+
+    stop_timer(&spc->ticker);
+}
+
+static void csched_tick_resume(void)
+{
+    struct csched_pcpu *spc;
+    uint64_t now = NOW();
+
+    spc = CSCHED_PCPU(smp_processor_id());
+
+    set_timer(&spc->ticker, now + MILLISECS(CSCHED_MSECS_PER_TICK)
+            - now % MILLISECS(CSCHED_MSECS_PER_TICK) );
+}
 
 struct scheduler sched_credit_def = {
     .name           = "SMP Credit Scheduler",
@@ -1337,4 +1355,7 @@ struct scheduler sched_credit_def = {
     .dump_cpu_state = csched_dump_pcpu,
     .dump_settings  = csched_dump,
     .init           = csched_init,
+
+    .tick_suspend   = csched_tick_suspend,
+    .tick_resume    = csched_tick_resume,
 };
diff -r ee3c5a08f80e -r f6a2bf60d49c xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Mar 31 11:49:56 2009 +0100
+++ b/xen/common/schedule.c     Tue Mar 31 11:51:56 2009 +0100
@@ -964,6 +964,16 @@ void dump_runq(unsigned char key)
     local_irq_restore(flags);
 }
 
+void sched_tick_suspend(void)
+{
+    SCHED_OP(tick_suspend);
+}
+
+void sched_tick_resume(void)
+{
+    SCHED_OP(tick_resume);
+}
+
 #ifdef CONFIG_COMPAT
 #include "compat/schedule.c"
 #endif
diff -r ee3c5a08f80e -r f6a2bf60d49c xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h        Tue Mar 31 11:49:56 2009 +0100
+++ b/xen/include/xen/sched-if.h        Tue Mar 31 11:51:56 2009 +0100
@@ -77,6 +77,9 @@ struct scheduler {
                                     struct xen_domctl_scheduler_op *);
     void         (*dump_settings)  (void);
     void         (*dump_cpu_state) (int);
+
+    void         (*tick_suspend)    (void);
+    void         (*tick_resume)     (void);
 };
 
 #endif /* __XEN_SCHED_IF_H__ */
diff -r ee3c5a08f80e -r f6a2bf60d49c xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Tue Mar 31 11:49:56 2009 +0100
+++ b/xen/include/xen/sched.h   Tue Mar 31 11:51:56 2009 +0100
@@ -428,6 +428,8 @@ void sched_destroy_domain(struct domain 
 void sched_destroy_domain(struct domain *d);
 long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
 int  sched_id(void);
+void sched_tick_suspend(void);
+void sched_tick_resume(void);
 void vcpu_wake(struct vcpu *d);
 void vcpu_sleep_nosync(struct vcpu *d);
 void vcpu_sleep_sync(struct vcpu *d);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
