To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: Set up cpufreq infrastructure, driver and tools
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 15 May 2008 04:40:21 -0700
Delivery-date: Thu, 15 May 2008 04:40:48 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1210769711 -3600
# Node ID 50fb7620d05ad869f354619e9fdf72e316c0d2b5
# Parent  66ddfc4d69631a069d8914bff12bd54fe97a4e9f
x86: Set up cpufreq infrastructure, driver and tools

Initialize the basic Px data structures and set up the cpufreq infrastructure:
* initialize the cpufreq data structures, including the frequency table and policy;
* add a basic driver and tools to measure the current frequency or drive to a Px target;
* set up _PSD domains so that dependent CPUs coordinate their P-state changes (a standalone sketch follows the diffstat below);

Signed-off-by: Liu Jinsong <jinsong.liu@xxxxxxxxx>
---
 xen/arch/x86/acpi/cpufreq/Makefile           |    2 
 xen/arch/x86/acpi/cpufreq/cpufreq.c          |  557 ++++++++++++++++++++++++++-
 xen/arch/x86/acpi/cpufreq/cpufreq_ondemand.c |    6 
 xen/arch/x86/acpi/cpufreq/utility.c          |  167 ++++++++
 xen/include/acpi/cpufreq/cpufreq.h           |  129 ++++++
 5 files changed, 858 insertions(+), 3 deletions(-)
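
For illustration only (not part of the patch): the _PSD coordination step in
acpi_cpufreq_init() below amounts to grouping CPUs by their ACPI _PSD domain
id and giving every CPU in a domain the same shared CPU map. A minimal
standalone sketch of that grouping, with plain bitmasks standing in for Xen's
cpumask_t and a made-up domain layout:

    #include <stdio.h>

    #define NR_CPUS 4

    int main(void)
    {
        /* Hypothetical _PSD domain id reported for each CPU. */
        unsigned int domain[NR_CPUS] = { 0, 0, 1, 1 };
        unsigned long dom_map[NR_CPUS] = { 0 };     /* per-domain CPU mask */
        unsigned long shared_map[NR_CPUS] = { 0 };  /* per-CPU shared map  */
        unsigned int i;

        /* First pass: collect the CPUs belonging to each domain. */
        for (i = 0; i < NR_CPUS; i++)
            dom_map[domain[i]] |= 1UL << i;

        /* Second pass: every CPU inherits the mask of its domain. */
        for (i = 0; i < NR_CPUS; i++)
            shared_map[i] = dom_map[domain[i]];

        for (i = 0; i < NR_CPUS; i++)
            printf("cpu%u shared_cpu_map = 0x%lx\n", i, shared_map[i]);
        return 0;
    }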

diff -r 66ddfc4d6963 -r 50fb7620d05a xen/arch/x86/acpi/cpufreq/Makefile
--- a/xen/arch/x86/acpi/cpufreq/Makefile        Wed May 14 13:50:46 2008 +0100
+++ b/xen/arch/x86/acpi/cpufreq/Makefile        Wed May 14 13:55:11 2008 +0100
@@ -1,1 +1,3 @@ obj-y += cpufreq.o
 obj-y += cpufreq.o
+obj-y += utility.o
+obj-y += cpufreq_ondemand.o
diff -r 66ddfc4d6963 -r 50fb7620d05a xen/arch/x86/acpi/cpufreq/cpufreq.c
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c       Wed May 14 13:50:46 2008 +0100
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c       Wed May 14 13:55:11 2008 +0100
@@ -42,11 +42,562 @@
 #include <asm/percpu.h>
 #include <asm/cpufeature.h>
 #include <acpi/acpi.h>
-#include <acpi/cpufreq/processor_perf.h>
+#include <acpi/cpufreq/cpufreq.h>
 
 struct processor_pminfo processor_pminfo[NR_CPUS];
+struct cpufreq_policy xen_px_policy[NR_CPUS];
+
+enum {
+    UNDEFINED_CAPABLE = 0,
+    SYSTEM_INTEL_MSR_CAPABLE,
+    SYSTEM_IO_CAPABLE,
+};
+
+#define INTEL_MSR_RANGE         (0xffff)
+#define CPUID_6_ECX_APERFMPERF_CAPABILITY       (0x1)
+
+struct acpi_cpufreq_data {
+    struct processor_performance *acpi_data;
+    struct cpufreq_frequency_table *freq_table;
+    unsigned int max_freq;
+    unsigned int resume;
+    unsigned int cpu_feature;
+};
+
+static struct acpi_cpufreq_data *drv_data[NR_CPUS];
+
+static struct cpufreq_driver acpi_cpufreq_driver;
+
+static int check_est_cpu(unsigned int cpuid)
+{
+    struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
+
+    if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+        !cpu_has(cpu, X86_FEATURE_EST))
+        return 0;
+
+    return 1;
+}
+
+static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
+{
+    struct processor_performance *perf;
+    int i;
+
+    perf = data->acpi_data;
+
+    for (i=0; i<perf->state_count; i++) {
+        if (value == perf->states[i].status)
+            return data->freq_table[i].frequency;
+    }
+    return 0;
+}
+
+static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
+{
+    int i;
+    struct processor_performance *perf;
+
+    msr &= INTEL_MSR_RANGE;
+    perf = data->acpi_data;
+
+    for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+        if (msr == perf->states[data->freq_table[i].index].status)
+            return data->freq_table[i].frequency;
+    }
+    return data->freq_table[0].frequency;
+}
+
+static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
+{
+    switch (data->cpu_feature) {
+    case SYSTEM_INTEL_MSR_CAPABLE:
+        return extract_msr(val, data);
+    case SYSTEM_IO_CAPABLE:
+        return extract_io(val, data);
+    default:
+        return 0;
+    }
+}
+
+struct msr_addr {
+    u32 reg;
+};
+
+struct io_addr {
+    u16 port;
+    u8 bit_width;
+};
+
+typedef union {
+    struct msr_addr msr;
+    struct io_addr io;
+} drv_addr_union;
+
+struct drv_cmd {
+    unsigned int type;
+    cpumask_t mask;
+    drv_addr_union addr;
+    u32 val;
+};
+
+static void do_drv_read(struct drv_cmd *cmd)
+{
+    u32 h;
+
+    switch (cmd->type) {
+    case SYSTEM_INTEL_MSR_CAPABLE:
+        rdmsr(cmd->addr.msr.reg, cmd->val, h);
+        break;
+    case SYSTEM_IO_CAPABLE:
+        acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
+            &cmd->val, (u32)cmd->addr.io.bit_width);
+        break;
+    default:
+        break;
+    }
+}
+
+static void do_drv_write(void *drvcmd)
+{
+    struct drv_cmd *cmd;
+    u32 lo, hi;
+
+    cmd = (struct drv_cmd *)drvcmd;
+
+    switch (cmd->type) {
+    case SYSTEM_INTEL_MSR_CAPABLE:
+        rdmsr(cmd->addr.msr.reg, lo, hi);
+        lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
+        wrmsr(cmd->addr.msr.reg, lo, hi);
+        break;
+    case SYSTEM_IO_CAPABLE:
+        acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
+            cmd->val, (u32)cmd->addr.io.bit_width);
+        break;
+    default:
+        break;
+    }
+}
+
+static void drv_read(struct drv_cmd *cmd)
+{
+    cmd->val = 0;
+
+    do_drv_read(cmd);
+}
+
+static void drv_write(struct drv_cmd *cmd)
+{
+    on_selected_cpus( cmd->mask, do_drv_write, (void *)cmd, 0, 0);
+}
+
+static u32 get_cur_val(cpumask_t mask)
+{
+    struct processor_performance *perf;
+    struct drv_cmd cmd;
+
+    if (unlikely(cpus_empty(mask)))
+        return 0;
+
+    switch (drv_data[first_cpu(mask)]->cpu_feature) {
+    case SYSTEM_INTEL_MSR_CAPABLE:
+        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
+        cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+        break;
+    case SYSTEM_IO_CAPABLE:
+        cmd.type = SYSTEM_IO_CAPABLE;
+        perf = drv_data[first_cpu(mask)]->acpi_data;
+        cmd.addr.io.port = perf->control_register.address;
+        cmd.addr.io.bit_width = perf->control_register.bit_width;
+        break;
+    default:
+        return 0;
+    }
+
+    cmd.mask = mask;
+
+    drv_read(&cmd);
+    return cmd.val;
+}
+
+/*
+ * Return the measured active (C0) frequency on this CPU since last call
+ * to this function.
+ * Input: cpu number
+ * Return: Average CPU frequency in terms of max frequency (zero on error)
+ *
+ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
+ * over a period of time, while CPU is in C0 state.
+ * IA32_MPERF counts at the rate of max advertised frequency
+ * IA32_APERF counts at the rate of actual CPU frequency
+ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
+ * no meaning should be associated with absolute values of these MSRs.
+ */
+/* FIXME: handle query on non-current cpu later */
+static unsigned int get_measured_perf(unsigned int cpu)
+{
+    union {
+        struct {
+            uint32_t lo;
+            uint32_t hi;
+        } split;
+        uint64_t whole;
+    } aperf_cur, mperf_cur;
+
+    unsigned int perf_percent;
+    unsigned int retval;
+
+    rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
+    rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
+
+    wrmsr(MSR_IA32_APERF, 0,0);
+    wrmsr(MSR_IA32_MPERF, 0,0);
+
+    if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
+        int shift_count = 7;
+        aperf_cur.whole >>= shift_count;
+        mperf_cur.whole >>= shift_count;
+    }
+
+    if (aperf_cur.whole && mperf_cur.whole)
+        perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
+    else
+        perf_percent = 0;
+
+
+    retval = drv_data[cpu]->max_freq * perf_percent / 100;
+    return retval;
+}
+
+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+{
+    struct acpi_cpufreq_data *data = drv_data[cpu];
+    unsigned int freq;
+
+    if (unlikely(data == NULL ||
+        data->acpi_data == NULL || data->freq_table == NULL)) {
+        return 0;
+    }
+
+    freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
+    return freq;
+}
+
+static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
+                                struct acpi_cpufreq_data *data)
+{
+    unsigned int cur_freq;
+    unsigned int i;
+
+    for (i=0; i<100; i++) {
+        cur_freq = extract_freq(get_cur_val(mask), data);
+        if (cur_freq == freq)
+            return 1;
+        udelay(10);
+    }
+    return 0;
+}
+
+static int acpi_cpufreq_target(struct cpufreq_policy *policy,
+                               unsigned int target_freq, unsigned int relation)
+{
+    struct acpi_cpufreq_data *data = drv_data[policy->cpu];
+    struct processor_performance *perf;
+    struct cpufreq_freqs freqs;
+    cpumask_t online_policy_cpus;
+    struct drv_cmd cmd;
+    unsigned int next_state = 0; /* Index into freq_table */
+    unsigned int next_perf_state = 0; /* Index into perf table */
+    int result = 0;
+
+    if (unlikely(data == NULL ||
+        data->acpi_data == NULL || data->freq_table == NULL)) {
+        return -ENODEV;
+    }
+
+    perf = data->acpi_data;
+    result = cpufreq_frequency_table_target(policy,
+                                            data->freq_table,
+                                            target_freq,
+                                            relation, &next_state);
+    if (unlikely(result))
+        return -ENODEV;
+
+    online_policy_cpus = policy->cpus;
+
+    next_perf_state = data->freq_table[next_state].index;
+    if (perf->state == next_perf_state) {
+        if (unlikely(data->resume)) {
+            printk("xen_pminfo: @acpi_cpufreq_target, "
+                "Called after resume, resetting to P%d\n", 
+                next_perf_state);
+            data->resume = 0;
+        }
+        else
+            return 0;
+    }
+
+    switch (data->cpu_feature) {
+    case SYSTEM_INTEL_MSR_CAPABLE:
+        cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
+        cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
+        cmd.val = (u32) perf->states[next_perf_state].control;
+        break;
+    case SYSTEM_IO_CAPABLE:
+        cmd.type = SYSTEM_IO_CAPABLE;
+        cmd.addr.io.port = perf->control_register.address;
+        cmd.addr.io.bit_width = perf->control_register.bit_width;
+        cmd.val = (u32) perf->states[next_perf_state].control;
+        break;
+    default:
+        return -ENODEV;
+    }
+
+    cpus_clear(cmd.mask);
+
+    if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
+        cmd.mask = online_policy_cpus;
+    else
+        cpu_set(policy->cpu, cmd.mask);
+
+    freqs.old = perf->states[perf->state].core_frequency * 1000;
+    freqs.new = data->freq_table[next_state].frequency;
+
+    drv_write(&cmd);
+
+    if (!check_freqs(cmd.mask, freqs.new, data))
+        return -EAGAIN;
+
+    perf->state = next_perf_state;
+    policy->cur = freqs.new;
+
+    return result;
+}
+
+static unsigned long
+acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
+{
+    struct processor_performance *perf = data->acpi_data;
+
+    if (cpu_khz) {
+        /* search the closest match to cpu_khz */
+        unsigned int i;
+        unsigned long freq;
+        unsigned long freqn = perf->states[0].core_frequency * 1000;
+
+        for (i=0; i<(perf->state_count-1); i++) {
+            freq = freqn;
+            freqn = perf->states[i+1].core_frequency * 1000;
+            if ((2 * cpu_khz) > (freqn + freq)) {
+                perf->state = i;
+                return freq;
+            }
+        }
+        perf->state = perf->state_count-1;
+        return freqn;
+    } else {
+        /* assume CPU is at P0... */
+        perf->state = 0;
+        return perf->states[0].core_frequency * 1000;
+    }
+}
+
+static int 
+acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+    unsigned int i;
+    unsigned int valid_states = 0;
+    unsigned int cpu = policy->cpu;
+    struct acpi_cpufreq_data *data;
+    unsigned int result = 0;
+    struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+    struct processor_performance *perf;
+
+    data = xmalloc(struct acpi_cpufreq_data);
+    if (!data)
+        return -ENOMEM;
+    memset(data, 0, sizeof(struct acpi_cpufreq_data));
+
+    drv_data[cpu] = data;
+
+    data->acpi_data = &processor_pminfo[cpu].perf;
+
+    perf = data->acpi_data;
+    policy->shared_type = perf->shared_type;
+
+    /*
+     * Will let policy->cpus know about dependency only when software
+     * coordination is required.
+     */
+    if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
+        policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+        policy->cpus = perf->shared_cpu_map;
+    } else {
+        policy->cpus = cpumask_of_cpu(cpu);    
+    }
+
+    /* capability check */
+    if (perf->state_count <= 1) {
+        printk("No P-States\n");
+        result = -ENODEV;
+        goto err_unreg;
+    }
+
+    if (perf->control_register.space_id != perf->status_register.space_id) {
+        result = -ENODEV;
+        goto err_unreg;
+    }
+
+    switch (perf->control_register.space_id) {
+    case ACPI_ADR_SPACE_SYSTEM_IO:
+        printk("xen_pminfo: @acpi_cpufreq_cpu_init,"
+            "SYSTEM IO addr space\n");
+        data->cpu_feature = SYSTEM_IO_CAPABLE;
+        break;
+    case ACPI_ADR_SPACE_FIXED_HARDWARE:
+        printk("xen_pminfo: @acpi_cpufreq_cpu_init," 
+            "HARDWARE addr space\n");
+        if (!check_est_cpu(cpu)) {
+            result = -ENODEV;
+            goto err_unreg;
+        }
+        data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+        break;
+    default:
+        result = -ENODEV;
+        goto err_unreg;
+    }
+
+    data->freq_table = xmalloc_array(struct cpufreq_frequency_table, 
+                                    (perf->state_count+1));
+    if (!data->freq_table) {
+        result = -ENOMEM;
+        goto err_unreg;
+    }
+
+    /* detect transition latency */
+    policy->cpuinfo.transition_latency = 0;
+    for (i=0; i<perf->state_count; i++) {
+        if ((perf->states[i].transition_latency * 1000) >
+            policy->cpuinfo.transition_latency)
+            policy->cpuinfo.transition_latency =
+                perf->states[i].transition_latency * 1000;
+    }
+
+    data->max_freq = perf->states[0].core_frequency * 1000;
+    /* table init */
+    for (i=0; i<perf->state_count; i++) {
+        if (i>0 && perf->states[i].core_frequency >=
+            data->freq_table[valid_states-1].frequency / 1000)
+            continue;
+
+        data->freq_table[valid_states].index = i;
+        data->freq_table[valid_states].frequency =
+            perf->states[i].core_frequency * 1000;
+        valid_states++;
+    }
+    data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+    perf->state = 0;
+
+    result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+    if (result)
+        goto err_freqfree;
+
+    switch (perf->control_register.space_id) {
+    case ACPI_ADR_SPACE_SYSTEM_IO:
+        /* Current speed is unknown and not detectable by IO port */
+        policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
+        break;
+    case ACPI_ADR_SPACE_FIXED_HARDWARE:
+        acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
+        policy->cur = get_cur_freq_on_cpu(cpu);
+        break;
+    default:
+        break;
+    }
+
+    /* Check for APERF/MPERF support in hardware */
+    if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
+        unsigned int ecx;
+        ecx = cpuid_ecx(6);
+        if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
+            acpi_cpufreq_driver.getavg = get_measured_perf;
+    }
+
+    /*
+     * the first call to ->target() should result in us actually
+     * writing something to the appropriate registers.
+     */
+    data->resume = 1;
+
+    return result;
+
+err_freqfree:
+    xfree(data->freq_table);
+err_unreg:
+    xfree(data);
+    drv_data[cpu] = NULL;
+
+    return result;
+}
+
+static struct cpufreq_driver acpi_cpufreq_driver = {
+    .target = acpi_cpufreq_target,
+    .init   = acpi_cpufreq_cpu_init,
+};
 
 int acpi_cpufreq_init(void)
 {
-    return 0;
-}
+    unsigned int i, ret = 0;
+    unsigned int dom, max_dom = 0;
+    cpumask_t *pt, dom_mask;
+
+    cpus_clear(dom_mask);
+
+    for_each_online_cpu(i) {
+        cpu_set(processor_pminfo[i].perf.domain_info.domain, dom_mask);
+        if (max_dom < processor_pminfo[i].perf.domain_info.domain)
+            max_dom = processor_pminfo[i].perf.domain_info.domain;
+    }
+    max_dom++;
+
+    pt = xmalloc_array(cpumask_t, max_dom);
+    if (!pt)
+        return -ENOMEM;
+    memset(pt, 0, max_dom * sizeof(cpumask_t));
+
+    /* get cpumask of each psd domain */
+    for_each_online_cpu(i)
+        cpu_set(i, pt[processor_pminfo[i].perf.domain_info.domain]);
+
+    for_each_online_cpu(i)
+        processor_pminfo[i].perf.shared_cpu_map = 
+            pt[processor_pminfo[i].perf.domain_info.domain];
+
+    cpufreq_driver = &acpi_cpufreq_driver;
+
+    /* setup cpufreq infrastructure */
+    for_each_online_cpu(i) {
+        xen_px_policy[i].cpu = i;
+
+        ret = acpi_cpufreq_cpu_init(&xen_px_policy[i]);
+        if (ret)
+            goto cpufreq_init_out;
+    }
+
+    /* setup ondemand cpufreq */
+    for (dom=0; dom<max_dom; dom++) {
+        if (!cpu_isset(dom, dom_mask))
+            continue;
+        i = first_cpu(pt[dom]);
+        ret = cpufreq_governor_dbs(&xen_px_policy[i], CPUFREQ_GOV_START);
+        if (ret)
+            goto cpufreq_init_out;
+    }
+
+cpufreq_init_out:
+    xfree(pt);
+   
+    return ret;
+}
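
For illustration only (not part of the patch): the average-frequency
measurement in get_measured_perf() above reduces to the APERF/MPERF ratio
scaled by the maximum frequency, with a shift to avoid overflowing the
aperf * 100 multiplication. A standalone sketch of that arithmetic with
made-up counter readings and a hypothetical max_freq (the real code reads
and then clears the two MSRs):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Pretend these were just read from IA32_APERF / IA32_MPERF. */
        uint64_t aperf = 1200000000ULL;  /* counts at actual CPU frequency     */
        uint64_t mperf = 2000000000ULL;  /* counts at max advertised frequency */
        unsigned int max_freq = 2400000; /* kHz, hypothetical P0 frequency     */
        unsigned int perf_percent, avg_freq;

        /* Guard against aperf * 100 overflowing, as the patch does:
         * shift both counters down so the ratio is preserved. */
        if (aperf > UINT64_MAX / 100) {
            aperf >>= 7;
            mperf >>= 7;
        }

        perf_percent = (aperf && mperf) ? (unsigned int)(aperf * 100 / mperf) : 0;
        avg_freq = max_freq * perf_percent / 100;

        printf("average frequency: %u kHz (%u%% of max)\n", avg_freq, perf_percent);
        return 0;
    }
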
diff -r 66ddfc4d6963 -r 50fb7620d05a xen/arch/x86/acpi/cpufreq/cpufreq_ondemand.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq_ondemand.c      Wed May 14 13:55:11 2008 +0100
@@ -0,0 +1,6 @@
+#include <acpi/cpufreq/cpufreq.h>
+
+int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
+{
+    return 0;
+}
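
For illustration only (not part of the patch): cpufreq_governor_dbs() is only
a stub in this changeset. An ondemand-style ("demand based switching")
governor typically samples the load over a short window and then asks the
driver for a new target: jump to the maximum frequency when busy, otherwise
scale down proportionally. A rough standalone sketch of that decision, with a
hypothetical threshold and a stand-in for __cpufreq_driver_target():

    #include <stdio.h>

    #define UP_THRESHOLD 80          /* % load above which we go to max      */
    #define RELATION_L    0          /* lowest frequency at or above target  */

    static unsigned int max_freq = 2400000;  /* kHz, hypothetical */

    /* Stand-in for __cpufreq_driver_target(policy, freq, relation). */
    static void set_target(unsigned int freq, unsigned int relation)
    {
        printf("request %u kHz (relation %u)\n", freq, relation);
    }

    static void dbs_check(unsigned int load_percent, unsigned int cur_freq)
    {
        if (load_percent > UP_THRESHOLD) {
            /* Busy: go straight to the highest frequency. */
            set_target(max_freq, RELATION_L);
        } else {
            /* Idle enough: pick a frequency proportional to the load,
             * keeping the headroom the threshold implies. */
            set_target(cur_freq * load_percent / UP_THRESHOLD, RELATION_L);
        }
    }

    int main(void)
    {
        dbs_check(95, 1600000);  /* heavy load -> max frequency */
        dbs_check(20, 2400000);  /* light load -> scale down    */
        return 0;
    }
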
diff -r 66ddfc4d6963 -r 50fb7620d05a xen/arch/x86/acpi/cpufreq/utility.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/acpi/cpufreq/utility.c       Wed May 14 13:55:11 2008 +0100
@@ -0,0 +1,167 @@
+/*
+ *  utility.c - misc functions for cpufreq driver and Px statistic
+ *
+ *  Copyright (C) 2001 Russell King
+ *            (C) 2002 - 2003 Dominik Brodowski <linux@xxxxxxxx>
+ *
+ *  Oct 2005 - Ashok Raj <ashok.raj@xxxxxxxxx>
+ *    Added handling for CPU hotplug
+ *  Feb 2006 - Jacob Shin <jacob.shin@xxxxxxx>
+ *    Fix handling for CPU hotplug -- affected CPUs
+ *  Feb 2008 - Liu Jinsong <jinsong.liu@xxxxxxxxx>
+ *    1. Merge cpufreq.c and freq_table.c of linux 2.6.23
+ *    and porting to the Xen hypervisor
+ *    2. Some Px statistic interface functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <xen/errno.h>
+#include <xen/cpumask.h>
+#include <xen/types.h>
+#include <xen/spinlock.h>
+#include <xen/percpu.h>
+#include <xen/types.h>
+#include <xen/sched.h>
+#include <xen/timer.h>
+#include <asm/config.h>
+#include <acpi/cpufreq/cpufreq.h>
+#include <public/sysctl.h>
+
+struct cpufreq_driver *cpufreq_driver;
+
+/*********************************************************************
+ *                   FREQUENCY TABLE HELPERS                         *
+ *********************************************************************/
+
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+                                    struct cpufreq_frequency_table *table)
+{
+    unsigned int min_freq = ~0;
+    unsigned int max_freq = 0;
+    unsigned int i;
+
+    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
+        unsigned int freq = table[i].frequency;
+        if (freq == CPUFREQ_ENTRY_INVALID)
+            continue;
+        if (freq < min_freq)
+            min_freq = freq;
+        if (freq > max_freq)
+            max_freq = freq;
+    }
+
+    policy->min = policy->cpuinfo.min_freq = min_freq;
+    policy->max = policy->cpuinfo.max_freq = max_freq;
+
+    if (policy->min == ~0)
+        return -EINVAL;
+    else
+        return 0;
+}
+
+int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+                                   struct cpufreq_frequency_table *table,
+                                   unsigned int target_freq,
+                                   unsigned int relation,
+                                   unsigned int *index)
+{
+    struct cpufreq_frequency_table optimal = {
+        .index = ~0,
+        .frequency = 0,
+    };
+    struct cpufreq_frequency_table suboptimal = {
+        .index = ~0,
+        .frequency = 0,
+    };
+    unsigned int i;
+
+    switch (relation) {
+    case CPUFREQ_RELATION_H:
+        suboptimal.frequency = ~0;
+        break;
+    case CPUFREQ_RELATION_L:
+        optimal.frequency = ~0;
+        break;
+    }
+
+    if (!cpu_online(policy->cpu))
+        return -EINVAL;
+
+    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
+        unsigned int freq = table[i].frequency;
+        if (freq == CPUFREQ_ENTRY_INVALID)
+            continue;
+        if ((freq < policy->min) || (freq > policy->max))
+            continue;
+        switch(relation) {
+        case CPUFREQ_RELATION_H:
+            if (freq <= target_freq) {
+                if (freq >= optimal.frequency) {
+                    optimal.frequency = freq;
+                    optimal.index = i;
+                }
+            } else {
+                if (freq <= suboptimal.frequency) {
+                    suboptimal.frequency = freq;
+                    suboptimal.index = i;
+                }
+            }
+            break;
+        case CPUFREQ_RELATION_L:
+            if (freq >= target_freq) {
+                if (freq <= optimal.frequency) {
+                    optimal.frequency = freq;
+                    optimal.index = i;
+                }
+            } else {
+                if (freq >= suboptimal.frequency) {
+                    suboptimal.frequency = freq;
+                    suboptimal.index = i;
+                }
+            }
+            break;
+        }
+    }
+    if (optimal.index > i) {
+        if (suboptimal.index > i)
+            return -EINVAL;
+        *index = suboptimal.index;
+    } else
+        *index = optimal.index;
+
+    return 0;
+}
+
+
+/*********************************************************************
+ *               GOVERNORS                                           *
+ *********************************************************************/
+
+int __cpufreq_driver_target(struct cpufreq_policy *policy,
+                            unsigned int target_freq,
+                            unsigned int relation)
+{
+    int retval = -EINVAL;
+
+    if (cpu_online(policy->cpu) && cpufreq_driver->target)
+        retval = cpufreq_driver->target(policy, target_freq, relation);
+
+    return retval;
+}
+
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
+{
+    int ret = 0;
+
+    if (!policy)
+        return -EINVAL;
+
+    if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
+        ret = cpufreq_driver->getavg(policy->cpu);
+
+    return ret;
+}
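
For illustration only (not part of the patch): the relation argument decides
which side of the requested frequency cpufreq_frequency_table_target() above
prefers. CPUFREQ_RELATION_H picks the highest table entry at or below the
target, CPUFREQ_RELATION_L the lowest at or above it. A simplified standalone
walk-through with a hypothetical three-entry table (no invalid entries and no
policy min/max clipping):

    #include <stdio.h>

    #define RELATION_L 0   /* lowest frequency at or above target  */
    #define RELATION_H 1   /* highest frequency at or below target */

    static unsigned int table[] = { 2400000, 1800000, 1200000 };  /* kHz */
    #define NSTATES (sizeof(table) / sizeof(table[0]))

    static unsigned int pick(unsigned int target, unsigned int relation)
    {
        unsigned int i, best = (relation == RELATION_H) ? 0 : ~0U;

        for (i = 0; i < NSTATES; i++) {
            unsigned int freq = table[i];
            if (relation == RELATION_H) {
                if (freq <= target && freq > best)
                    best = freq;
            } else {
                if (freq >= target && freq < best)
                    best = freq;
            }
        }
        return best;
    }

    int main(void)
    {
        /* A 2 GHz request: RELATION_H -> 1800000 kHz, RELATION_L -> 2400000 kHz. */
        printf("H: %u kHz, L: %u kHz\n",
               pick(2000000, RELATION_H), pick(2000000, RELATION_L));
        return 0;
    }
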
diff -r 66ddfc4d6963 -r 50fb7620d05a xen/include/acpi/cpufreq/cpufreq.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/acpi/cpufreq/cpufreq.h        Wed May 14 13:55:11 2008 +0100
@@ -0,0 +1,129 @@
+/*
+ *  xen/include/acpi/cpufreq/cpufreq.h
+ *
+ *  Copyright (C) 2001 Russell King
+ *            (C) 2002 - 2003 Dominik Brodowski <linux@xxxxxxxx>
+ *
+ * $Id: cpufreq.h,v 1.36 2003/01/20 17:31:48 db Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <xen/types.h>
+#include <xen/list.h>
+#include <xen/cpumask.h>
+
+#include "processor_perf.h"
+
+#define CPUFREQ_NAME_LEN 16
+
+struct cpufreq_cpuinfo {
+    unsigned int        max_freq;
+    unsigned int        min_freq;
+    unsigned int        transition_latency; /* in 10^(-9) s = nanoseconds */
+};
+
+struct cpufreq_policy {
+    cpumask_t           cpus;          /* affected CPUs */
+    unsigned int        shared_type;   /* ANY or ALL affected CPUs
+                                          should set cpufreq */
+    unsigned int        cpu;           /* cpu nr of registered CPU */
+    struct cpufreq_cpuinfo    cpuinfo; /* see above */
+
+    unsigned int        min;    /* in kHz */
+    unsigned int        max;    /* in kHz */
+    unsigned int        cur;    /* in kHz, only needed if cpufreq
+                                 * governors are used */
+};
+
+#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
+#define CPUFREQ_SHARED_TYPE_HW   (1) /* HW does needed coordination */
+#define CPUFREQ_SHARED_TYPE_ALL  (2) /* All dependent CPUs should set freq */
+#define CPUFREQ_SHARED_TYPE_ANY  (3) /* Freq can be set from any dependent CPU */
+
+/******************** cpufreq transition notifiers *******************/
+
+struct cpufreq_freqs {
+    unsigned int cpu;    /* cpu nr */
+    unsigned int old;
+    unsigned int new;
+    u8 flags;            /* flags of cpufreq_driver, see below. */
+};
+
+
+/*********************************************************************
+ *                          CPUFREQ GOVERNORS                        *
+ *********************************************************************/
+
+#define CPUFREQ_GOV_START  1
+#define CPUFREQ_GOV_STOP   2
+#define CPUFREQ_GOV_LIMITS 3
+
+/* pass a target to the cpufreq driver */
+extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
+                                   unsigned int target_freq,
+                                   unsigned int relation);
+extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy);
+
+
+/*********************************************************************
+ *                      CPUFREQ DRIVER INTERFACE                     *
+ *********************************************************************/
+
+#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
+#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
+
+struct cpufreq_driver {
+    int    (*init)(struct cpufreq_policy *policy);
+    int    (*verify)(struct cpufreq_policy *policy);
+    int    (*target)(struct cpufreq_policy *policy,
+                     unsigned int target_freq,
+                     unsigned int relation);
+    unsigned int    (*get)(unsigned int cpu);
+    unsigned int    (*getavg)(unsigned int cpu);
+    int    (*exit)(struct cpufreq_policy *policy);
+};
+
+extern struct cpufreq_driver *cpufreq_driver;
+
+void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state);
+
+/*********************************************************************
+ *                     FREQUENCY TABLE HELPERS                       *
+ *********************************************************************/
+
+#define CPUFREQ_ENTRY_INVALID ~0
+#define CPUFREQ_TABLE_END     ~1
+
+struct cpufreq_frequency_table {
+    unsigned int    index;     /* any */
+    unsigned int    frequency; /* kHz - doesn't need to be in ascending
+                                * order */
+};
+
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+                   struct cpufreq_frequency_table *table);
+
+int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+                   struct cpufreq_frequency_table *table,
+                   unsigned int target_freq,
+                   unsigned int relation,
+                   unsigned int *index);
+
+
+/*********************************************************************
+ *                     UNIFIED DEBUG HELPERS                         *
+ *********************************************************************/
+
+struct cpu_dbs_info_s {
+    uint64_t prev_cpu_idle;
+    uint64_t prev_cpu_wall;
+    struct cpufreq_policy *cur_policy;
+    struct cpufreq_frequency_table *freq_table;
+    int cpu;
+    unsigned int enable:1;
+};
+
+int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
