# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1221041916 -3600
# Node ID 706844309f361e0382c4f0a457b050d612357ed4
# Parent cfbe4df8d47c4fb1644eb5dfea5fa664bfaaf7c6
CPUIDLE: Port Linux menu governor to replace the initial ladder governor
The ladder governor suffers from long promotion/demotion delays when
running in tickless mode, because it must accumulate usage counts before
switching state. The menu governor instead chooses the next state
directly via break-event prediction, taking into account the next timer
event, the last residency time, etc., so it responds much faster.
Signed-off-by: Gang Wei <gang.wei@xxxxxxxxx>
---
xen/arch/x86/acpi/Makefile | 2
xen/arch/x86/acpi/cpu_idle.c | 411 +++++++--------------------------------
xen/arch/x86/acpi/cpuidle_menu.c | 132 ++++++++++++
xen/include/xen/cpuidle.h | 82 +++++++
4 files changed, 289 insertions(+), 338 deletions(-)
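For orientation, the reworked idle path boils down to the sketch below
(simplified pseudo-C, not part of the patch; interrupt handling, the bus
master check and the actual C-state entry are elided):

    /* Governor-driven idle loop as introduced by this patch (illustrative). */
    next_state = cpuidle_current_governor->select(power);    /* menu_select() */
    cx = &power->states[next_state];
    power->last_state = cx;

    /* ... enter cx and measure how long the CPU actually slept ... */

    power->last_residency = PM_TIMER_TICKS_TO_US(sleep_ticks);
    if ( cpuidle_current_governor->reflect )
        cpuidle_current_governor->reflect(power);             /* menu_reflect() */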
diff -r cfbe4df8d47c -r 706844309f36 xen/arch/x86/acpi/Makefile
--- a/xen/arch/x86/acpi/Makefile Wed Sep 10 11:17:13 2008 +0100
+++ b/xen/arch/x86/acpi/Makefile Wed Sep 10 11:18:36 2008 +0100
@@ -1,5 +1,5 @@ subdir-y += cpufreq
subdir-y += cpufreq
obj-y += boot.o
-obj-y += power.o suspend.o wakeup_prot.o cpu_idle.o
+obj-y += power.o suspend.o wakeup_prot.o cpu_idle.o cpuidle_menu.o
obj-y += pmstat.o
diff -r cfbe4df8d47c -r 706844309f36 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c Wed Sep 10 11:17:13 2008 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c Wed Sep 10 11:18:36 2008 +0100
@@ -39,6 +39,7 @@
#include <xen/smp.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
+#include <xen/cpuidle.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/hpet.h>
@@ -49,12 +50,9 @@
#define DEBUG_PM_CX
#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
+#define PM_TIMER_TICKS_TO_US(t) ((t * 1000) / (PM_TIMER_FREQUENCY / 1000))
#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */
-
-#define ACPI_PROCESSOR_MAX_POWER 8
-#define ACPI_PROCESSOR_MAX_C2_LATENCY 100
-#define ACPI_PROCESSOR_MAX_C3_LATENCY 1000
static void (*lapic_timer_off)(void);
static void (*lapic_timer_on)(void);
@@ -65,66 +63,6 @@ static void (*pm_idle_save) (void) __rea
static void (*pm_idle_save) (void) __read_mostly;
unsigned int max_cstate __read_mostly = 2;
integer_param("max_cstate", max_cstate);
-/*
- * bm_history -- bit-mask with a bit per jiffy of bus-master activity
- * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
- * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
- * 100 HZ: 0x0000000F: 4 jiffies = 40ms
- * reduce history for more aggressive entry into C3
- */
-unsigned int bm_history __read_mostly =
- (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
-integer_param("bm_history", bm_history);
-
-struct acpi_processor_cx;
-
-struct acpi_processor_cx_policy
-{
- u32 count;
- struct acpi_processor_cx *state;
- struct
- {
- u32 time;
- u32 ticks;
- u32 count;
- u32 bm;
- } threshold;
-};
-
-struct acpi_processor_cx
-{
- u8 valid;
- u8 type;
- u32 address;
- u8 space_id;
- u32 latency;
- u32 latency_ticks;
- u32 power;
- u32 usage;
- u64 time;
- struct acpi_processor_cx_policy promotion;
- struct acpi_processor_cx_policy demotion;
-};
-
-struct acpi_processor_flags
-{
- u8 bm_control:1;
- u8 bm_check:1;
- u8 has_cst:1;
- u8 power_setup_done:1;
- u8 bm_rld_set:1;
-};
-
-struct acpi_processor_power
-{
- struct acpi_processor_flags flags;
- struct acpi_processor_cx *state;
- s_time_t bm_check_timestamp;
- u32 default_state;
- u32 bm_activity;
- u32 count;
- struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
-};
static struct acpi_processor_power processor_powers[NR_CPUS];
@@ -133,26 +71,21 @@ static void print_acpi_power(uint32_t cp
uint32_t i;
printk("==cpu%d==\n", cpu);
- printk("active state:\t\tC%d\n", (power->state)?power->state->type:-1);
+ printk("active state:\t\tC%d\n",
+ (power->last_state) ? power->last_state->type : -1);
printk("max_cstate:\t\tC%d\n", max_cstate);
- printk("bus master activity:\t%08x\n", power->bm_activity);
printk("states:\n");
for ( i = 1; i < power->count; i++ )
{
- printk((power->states[i].type == power->state->type) ? " *" : " ");
+ if ( power->last_state &&
+ power->states[i].type == power->last_state->type )
+ printk(" *");
+ else
+ printk(" ");
printk("C%d:\t\t", i);
printk("type[C%d] ", power->states[i].type);
- if ( power->states[i].promotion.state )
- printk("promotion[C%d] ", power->states[i].promotion.state->type);
- else
- printk("promotion[--] ");
- if ( power->states[i].demotion.state )
- printk("demotion[C%d] ", power->states[i].demotion.state->type);
- else
- printk("demotion[--] ");
- printk("latency[%03d]\n ", power->states[i].latency);
- printk("\t\t\t");
+ printk("latency[%03d] ", power->states[i].latency);
printk("usage[%08d] ", power->states[i].usage);
printk("duration[%"PRId64"]\n", power->states[i].time);
}
@@ -180,48 +113,6 @@ static inline u32 ticks_elapsed(u32 t1,
return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
else
return ((0xFFFFFFFF - t1) + t2);
-}
-
-static void acpi_processor_power_activate(struct acpi_processor_power *power,
- struct acpi_processor_cx *new)
-{
- struct acpi_processor_cx *old;
-
- if ( !power || !new )
- return;
-
- old = power->state;
-
- if ( old )
- old->promotion.count = 0;
- new->demotion.count = 0;
-
- /* Cleanup from old state. */
- if ( old )
- {
- switch ( old->type )
- {
- case ACPI_STATE_C3:
- /* Disable bus master reload */
- if ( new->type != ACPI_STATE_C3 && power->flags.bm_check )
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
- break;
- }
- }
-
- /* Prepare to use new state. */
- switch ( new->type )
- {
- case ACPI_STATE_C3:
- /* Enable bus master reload */
- if ( old->type != ACPI_STATE_C3 && power->flags.bm_check )
- acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
- break;
- }
-
- power->state = new;
-
- return;
}
static void acpi_safe_halt(void)
@@ -263,6 +154,40 @@ static void acpi_idle_do_entry(struct ac
}
}
+static inline void acpi_idle_update_bm_rld(struct acpi_processor_power *power,
+ struct acpi_processor_cx *target)
+{
+ if ( !power->flags.bm_check )
+ return;
+
+ if ( power->flags.bm_rld_set && target->type != ACPI_STATE_C3 )
+ {
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+ power->flags.bm_rld_set = 0;
+ }
+
+ if ( !power->flags.bm_rld_set && target->type == ACPI_STATE_C3 )
+ {
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+ power->flags.bm_rld_set = 1;
+ }
+}
+
+static int acpi_idle_bm_check(void)
+{
+ u32 bm_status = 0;
+
+ acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
+ if ( bm_status )
+ acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
+ /*
+ * TBD: PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
+ * the true state of bus mastering activity; forcing us to
+ * manually check the BMIDEA bit of each IDE channel.
+ */
+ return bm_status;
+}
+
static struct {
spinlock_t lock;
unsigned int count;
@@ -272,7 +197,7 @@ static void acpi_processor_idle(void)
{
struct acpi_processor_power *power = NULL;
struct acpi_processor_cx *cx = NULL;
- struct acpi_processor_cx *next_state = NULL;
+ int next_state;
int sleep_ticks = 0;
u32 t1, t2 = 0;
@@ -290,7 +215,16 @@ static void acpi_processor_idle(void)
return;
}
- cx = power->state;
+ next_state = cpuidle_current_governor->select(power);
+ if ( next_state > 0 )
+ {
+ cx = &power->states[next_state];
+ if ( power->flags.bm_check && acpi_idle_bm_check()
+ && cx->type == ACPI_STATE_C3 )
+ cx = power->safe_state;
+ if ( cx->type > max_cstate )
+ cx = &power->states[max_cstate];
+ }
if ( !cx )
{
if ( pm_idle_save )
@@ -306,69 +240,14 @@ static void acpi_processor_idle(void)
return;
}
- /*
- * Check BM Activity
- * -----------------
- * Check for bus mastering activity (if required), record, and check
- * for demotion.
- */
- if ( power->flags.bm_check )
- {
- u32 bm_status = 0;
- unsigned long diff = (NOW() - power->bm_check_timestamp) >> 23;
-
- if ( diff > 31 )
- diff = 31;
-
- power->bm_activity <<= diff;
-
- acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
- if ( bm_status )
- {
- power->bm_activity |= 0x1;
- acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
- }
- /*
- * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
- * the true state of bus mastering activity; forcing us to
- * manually check the BMIDEA bit of each IDE channel.
- */
- /*else if ( errata.piix4.bmisx )
- {
- if ( (inb_p(errata.piix4.bmisx + 0x02) & 0x01)
- || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01) )
- pr->power.bm_activity |= 0x1;
- }*/
-
- power->bm_check_timestamp = NOW();
-
- /*
- * If bus mastering is or was active this jiffy, demote
- * to avoid a faulty transition. Note that the processor
- * won't enter a low-power state during this call (to this
- * function) but should upon the next.
- *
- * TBD: A better policy might be to fallback to the demotion
- * state (use it for this quantum only) istead of
- * demoting -- and rely on duration as our sole demotion
- * qualification. This may, however, introduce DMA
- * issues (e.g. floppy DMA transfer overrun/underrun).
- */
- if ( (power->bm_activity & 0x1) && cx->demotion.threshold.bm )
- {
- local_irq_enable();
- next_state = cx->demotion.state;
- goto end;
- }
- }
+ power->last_state = cx;
/*
* Sleep:
* ------
* Invoke the current Cx state to put the processor to sleep.
*/
- if ( cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3 )
- smp_mb__after_clear_bit();
+ acpi_idle_update_bm_rld(power, cx);
switch ( cx->type )
{
@@ -480,162 +359,13 @@ static void acpi_processor_idle(void)
cx->usage++;
if ( sleep_ticks > 0 )
+ {
+ power->last_residency = PM_TIMER_TICKS_TO_US(sleep_ticks);
cx->time += sleep_ticks;
-
- next_state = power->state;
-
- /*
- * Promotion?
- * ----------
- * Track the number of longs (time asleep is greater than threshold)
- * and promote when the count threshold is reached. Note that bus
- * mastering activity may prevent promotions.
- * Do not promote above max_cstate.
- */
- if ( cx->promotion.state &&
- ((cx->promotion.state - power->states) <= max_cstate) )
- {
- if ( sleep_ticks > cx->promotion.threshold.ticks )
- {
- cx->promotion.count++;
- cx->demotion.count = 0;
- if ( cx->promotion.count >= cx->promotion.threshold.count )
- {
- if ( power->flags.bm_check )
- {
- if ( !(power->bm_activity & cx->promotion.threshold.bm) )
- {
- next_state = cx->promotion.state;
- goto end;
- }
- }
- else
- {
- next_state = cx->promotion.state;
- goto end;
- }
- }
- }
- }
-
- /*
- * Demotion?
- * ---------
- * Track the number of shorts (time asleep is less than time threshold)
- * and demote when the usage threshold is reached.
- */
- if ( cx->demotion.state )
- {
- if ( sleep_ticks < cx->demotion.threshold.ticks )
- {
- cx->demotion.count++;
- cx->promotion.count = 0;
- if ( cx->demotion.count >= cx->demotion.threshold.count )
- {
- next_state = cx->demotion.state;
- goto end;
- }
- }
- }
-
-end:
- /*
- * Demote if current state exceeds max_cstate
- */
- if ( (power->state - power->states) > max_cstate )
- {
- if ( cx->demotion.state )
- next_state = cx->demotion.state;
- }
-
- /*
- * New Cx State?
- * -------------
- * If we're going to start using a new Cx state we must clean up
- * from the previous and prepare to use the new.
- */
- if ( next_state != power->state )
- acpi_processor_power_activate(power, next_state);
-}
-
-static int acpi_processor_set_power_policy(struct acpi_processor_power *power)
-{
- unsigned int i;
- unsigned int state_is_set = 0;
- struct acpi_processor_cx *lower = NULL;
- struct acpi_processor_cx *higher = NULL;
- struct acpi_processor_cx *cx;
-
- if ( !power )
- return -EINVAL;
-
- /*
- * This function sets the default Cx state policy (OS idle handler).
- * Our scheme is to promote quickly to C2 but more conservatively
- * to C3. We're favoring C2 for its characteristics of low latency
- * (quick response), good power savings, and ability to allow bus
- * mastering activity. Note that the Cx state policy is completely
- * customizable and can be altered dynamically.
- */
-
- /* startup state */
- for ( i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++ )
- {
- cx = &power->states[i];
- if ( !cx->valid )
- continue;
-
- if ( !state_is_set )
- power->state = cx;
- state_is_set++;
- break;
- }
-
- if ( !state_is_set )
- return -ENODEV;
-
- /* demotion */
- for ( i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++ )
- {
- cx = &power->states[i];
- if ( !cx->valid )
- continue;
-
- if ( lower )
- {
- cx->demotion.state = lower;
- cx->demotion.threshold.ticks = cx->latency_ticks;
- cx->demotion.threshold.count = 1;
- if ( cx->type == ACPI_STATE_C3 )
- cx->demotion.threshold.bm = bm_history;
- }
-
- lower = cx;
- }
-
- /* promotion */
- for ( i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i-- )
- {
- cx = &power->states[i];
- if ( !cx->valid )
- continue;
-
- if ( higher )
- {
- cx->promotion.state = higher;
- cx->promotion.threshold.ticks = cx->latency_ticks;
- if ( cx->type >= ACPI_STATE_C2 )
- cx->promotion.threshold.count = 4;
- else
- cx->promotion.threshold.count = 10;
- if ( higher->type == ACPI_STATE_C3 )
- cx->promotion.threshold.bm = bm_history;
- }
-
- higher = cx;
- }
-
- return 0;
+ }
+
+ if ( cpuidle_current_governor->reflect )
+ cpuidle_current_governor->reflect(power);
}
static int init_cx_pminfo(struct acpi_processor_power *acpi_power)
@@ -824,6 +554,8 @@ static int check_cx(struct acpi_processo
return 0;
}
+static unsigned int latency_factor = 2;
+
static void set_cx(
struct acpi_processor_power *acpi_power,
xen_processor_cx_t *xen_cx)
@@ -845,6 +577,9 @@ static void set_cx(
cx->power = xen_cx->power;
cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+ cx->target_residency = cx->latency * latency_factor;
+ if ( cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 )
+ acpi_power->safe_state = cx;
}
int get_cpu_id(u8 acpi_id)
@@ -939,6 +674,7 @@ long set_cx_pminfo(uint32_t cpu, struct
init_cx_pminfo(acpi_power);
+ acpi_power->cpu = cpu_id;
acpi_power->flags.bm_check = power->flags.bm_check;
acpi_power->flags.bm_control = power->flags.bm_control;
acpi_power->flags.has_cst = power->flags.has_cst;
@@ -953,10 +689,11 @@ long set_cx_pminfo(uint32_t cpu, struct
set_cx(acpi_power, &xen_cx);
}
+ if ( cpuidle_current_governor->enable &&
+ cpuidle_current_governor->enable(acpi_power) )
+ return -EFAULT;
+
/* FIXME: C-state dependency is not supported by far */
-
- /* initialize default policy */
- acpi_processor_set_power_policy(acpi_power);
print_acpi_power(cpu_id, acpi_power);
@@ -981,7 +718,7 @@ int pmstat_get_cx_stat(uint32_t cpuid, s
uint64_t usage;
int i;
- stat->last = (power->state) ? power->state->type : 0;
+ stat->last = (power->last_state) ? power->last_state->type : 0;
stat->nr = processor_powers[cpuid].count;
stat->idle_time = v->runstate.time[RUNSTATE_running];
if ( v->is_running )
diff -r cfbe4df8d47c -r 706844309f36 xen/arch/x86/acpi/cpuidle_menu.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/acpi/cpuidle_menu.c Wed Sep 10 11:18:36 2008 +0100
@@ -0,0 +1,132 @@
+/*
+ * cpuidle_menu - menu governor for cpu idle, main idea comes from Linux
+ * drivers/cpuidle/governors/menu.c
+ *
+ * Copyright (C) 2006-2007 Adam Belay <abelay@xxxxxxxxxx>
+ * Copyright (C) 2007, 2008 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <xen/config.h>
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/types.h>
+#include <xen/acpi.h>
+#include <xen/timer.h>
+#include <xen/cpuidle.h>
+
+#define BREAK_FUZZ 4 /* 4 us */
+#define USEC_PER_SEC 1000000
+
+struct menu_device
+{
+ int last_state_idx;
+ unsigned int expected_us;
+ unsigned int predicted_us;
+ unsigned int last_measured_us;
+ unsigned int elapsed_us;
+};
+
+static DEFINE_PER_CPU(struct menu_device, menu_devices);
+
+static s_time_t get_sleep_length_ns(void)
+{
+ return per_cpu(timer_deadline, smp_processor_id()) - NOW();
+}
+
+static int menu_select(struct acpi_processor_power *power)
+{
+ struct menu_device *data = &__get_cpu_var(menu_devices);
+ int i;
+
+ /* determine the expected residency time */
+ data->expected_us = (u32) get_sleep_length_ns() / 1000;
+
+ /* find the deepest idle state that satisfies our constraints */
+ for ( i = 1; i < power->count; i++ )
+ {
+ struct acpi_processor_cx *s = &power->states[i];
+
+ if ( s->target_residency > data->expected_us + s->latency )
+ break;
+ if ( s->target_residency > data->predicted_us )
+ break;
+ /* TBD: we need to check the QoS requirement in future */
+ }
+
+ data->last_state_idx = i - 1;
+ return i - 1;
+}
+
+static void menu_reflect(struct acpi_processor_power *power)
+{
+ struct menu_device *data = &__get_cpu_var(menu_devices);
+ struct acpi_processor_cx *target = &power->states[data->last_state_idx];
+ unsigned int last_residency;
+ unsigned int measured_us;
+
+ /*
+ * Ugh, this idle state doesn't support residency measurements, so we
+ * are basically lost in the dark. As a compromise, assume we slept
+ * for one full standard timer tick. However, be aware that this
+ * could potentially result in a suboptimal state transition.
+ */
+ if ( target->type == ACPI_STATE_C1 )
+ last_residency = USEC_PER_SEC / HZ;
+ else
+ last_residency = power->last_residency;
+
+ measured_us = last_residency + data->elapsed_us;
+
+ /* if wrapping, set to max uint (-1) */
+ measured_us = data->elapsed_us <= measured_us ? measured_us : -1;
+
+ /* Predict time remaining until next break event */
+ data->predicted_us = max(measured_us, data->last_measured_us);
+
+ /* Distinguish between expected & non-expected events */
+ if ( last_residency + BREAK_FUZZ
+ < data->expected_us + target->latency )
+ {
+ data->last_measured_us = measured_us;
+ data->elapsed_us = 0;
+ }
+ else
+ data->elapsed_us = measured_us;
+}
+
+static int menu_enable_device(struct acpi_processor_power *power)
+{
+ struct menu_device *data = &per_cpu(menu_devices, power->cpu);
+
+ memset(data, 0, sizeof(struct menu_device));
+
+ return 0;
+}
+
+static struct cpuidle_governor menu_governor =
+{
+ .name = "menu",
+ .rating = 20,
+ .enable = menu_enable_device,
+ .select = menu_select,
+ .reflect = menu_reflect,
+};
+
+struct cpuidle_governor *cpuidle_current_governor = &menu_governor;
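To illustrate the selection heuristic with hypothetical numbers: if the next
timer event is 800us away (expected_us = 800) but recent history only
predicts 150us of residency (predicted_us = 150), a C3 state with
target_residency = 200us fails the second check (200 > 150) and the loop
stops one state earlier; a state is only eligible while its target_residency
stays within both the timer horizon (expected_us + latency) and the
predicted residency.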
diff -r cfbe4df8d47c -r 706844309f36 xen/include/xen/cpuidle.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/cpuidle.h Wed Sep 10 11:18:36 2008 +0100
@@ -0,0 +1,82 @@
+/*
+ * cpuidle.h - xen idle state module derived from Linux
+ *
+ * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@xxxxxxxxx>
+ * Shaohua Li <shaohua.li@xxxxxxxxx>
+ * Adam Belay <abelay@xxxxxxxxxx>
+ * Copyright (C) 2008 Intel Corporation
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#ifndef _XEN_CPUIDLE_H
+#define _XEN_CPUIDLE_H
+
+#define ACPI_PROCESSOR_MAX_POWER 8
+#define CPUIDLE_NAME_LEN 16
+
+struct acpi_processor_cx
+{
+ u8 valid;
+ u8 type;
+ u32 address;
+ u8 space_id;
+ u32 latency;
+ u32 latency_ticks;
+ u32 power;
+ u32 usage;
+ u64 time;
+ u32 target_residency;
+};
+
+struct acpi_processor_flags
+{
+ u8 bm_control:1;
+ u8 bm_check:1;
+ u8 has_cst:1;
+ u8 power_setup_done:1;
+ u8 bm_rld_set:1;
+};
+
+struct acpi_processor_power
+{
+ unsigned int cpu;
+ struct acpi_processor_flags flags;
+ struct acpi_processor_cx *last_state;
+ struct acpi_processor_cx *safe_state;
+ u32 last_residency;
+ void *gdata; /* governor specific data */
+ u32 count;
+ struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER];
+};
+
+struct cpuidle_governor
+{
+ char name[CPUIDLE_NAME_LEN];
+ unsigned int rating;
+
+ int (*enable) (struct acpi_processor_power *dev);
+ void (*disable) (struct acpi_processor_power *dev);
+
+ int (*select) (struct acpi_processor_power *dev);
+ void (*reflect) (struct acpi_processor_power *dev);
+};
+
+extern struct cpuidle_governor *cpuidle_current_governor;
+
+#endif /* _XEN_CPUIDLE_H */
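Any other policy can be plugged in through the same hooks by pointing
cpuidle_current_governor at it; a hypothetical minimal governor (illustrative
only, these names are not in the patch) might look like:

    /* Hypothetical example: always pick the shallowest real C-state. */
    static int trivial_select(struct acpi_processor_power *power)
    {
        return (power->count > 1) ? 1 : 0;   /* 0 => no C-state available */
    }

    static struct cpuidle_governor trivial_governor =
    {
        .name   = "trivial",
        .rating = 1,
        .select = trivial_select,
    };

The .enable and .reflect hooks may be left NULL, since the idle code only
invokes them when present.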