WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [linux-2.6.18-xen] merge with linux-2.6.18-xen.hg

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [linux-2.6.18-xen] merge with linux-2.6.18-xen.hg
From: "Xen patchbot-linux-2.6.18-xen" <patchbot-linux-2.6.18-xen@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 16 Sep 2008 15:40:11 -0700
Delivery-date: Tue, 16 Sep 2008 15:40:52 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
# Date 1221567954 -32400
# Node ID 7d032c5bb346a7254f364f9692437d49504259ce
# Parent  6fcc6c1e87f612b16cff3ef967e0afd0abdd8f9b
# Parent  8ca4d2b16eb3c3ee4ec670c392a2e129163754ee
merge with linux-2.6.18-xen.hg
---
 arch/i386/kernel/acpi/processor_extcntl_xen.c   |   96 +-
 arch/i386/kernel/io_apic-xen.c                  |    3 
 arch/x86_64/kernel/io_apic-xen.c                |    3 
 buildconfigs/linux-defconfig_xen0_x86_32        |    2 
 buildconfigs/linux-defconfig_xen0_x86_64        |    2 
 buildconfigs/linux-defconfig_xen_x86_32         |    2 
 buildconfigs/linux-defconfig_xen_x86_64         |    2 
 drivers/acpi/processor_core.c                   |    7 
 drivers/acpi/processor_extcntl.c                |   17 
 drivers/acpi/processor_throttling.c             | 1049 ++++++++++++++++++++++--
 drivers/pci/msi-xen.c                           |   28 
 drivers/xen/blktap/common.h                     |    1 
 drivers/xen/blktap/interface.c                  |    7 
 drivers/xen/blktap/xenbus.c                     |    2 
 drivers/xen/core/evtchn.c                       |  157 ++-
 drivers/xen/pciback/conf_space_capability_msi.c |   30 
 drivers/xen/xenoprof/xenoprofile.c              |   27 
 include/acpi/processor.h                        |   59 +
 include/asm-i386/mach-xen/irq_vectors.h         |   14 
 include/asm-ia64/irq.h                          |    6 
 include/asm-powerpc/xen/asm/hypervisor.h        |    7 
 include/asm-x86_64/mach-xen/irq_vectors.h       |    6 
 include/xen/evtchn.h                            |    7 
 include/xen/interface/memory.h                  |   17 
 include/xen/interface/platform.h                |    4 
 25 files changed, 1340 insertions(+), 215 deletions(-)

diff -r 6fcc6c1e87f6 -r 7d032c5bb346 
arch/i386/kernel/acpi/processor_extcntl_xen.c
--- a/arch/i386/kernel/acpi/processor_extcntl_xen.c     Fri Sep 12 11:28:00 
2008 +0900
+++ b/arch/i386/kernel/acpi/processor_extcntl_xen.c     Tue Sep 16 21:25:54 
2008 +0900
@@ -31,8 +31,6 @@
 #include <linux/cpufreq.h>
 #include <acpi/processor.h>
 #include <asm/hypercall.h>
-
-static int xen_processor_pmbits;
 
 static int xen_cx_notifier(struct acpi_processor *pr, int action)
 {
@@ -143,7 +141,7 @@ static void convert_psd_pack(struct xen_
 
 static int xen_px_notifier(struct acpi_processor *pr, int action)
 {
-       int ret;
+       int ret = -EINVAL;
        xen_platform_op_t op = {
                .cmd                    = XENPF_set_processor_pminfo,
                .interface_version      = XENPF_INTERFACE_VERSION,
@@ -155,48 +153,66 @@ static int xen_px_notifier(struct acpi_p
        struct acpi_processor_performance *px;
        struct acpi_psd_package *pdomain;
 
-       /* leave dynamic ppc handle in the future */
-       if (action == PROCESSOR_PM_CHANGE)
-               return 0;
+       if (!pr)
+               return -EINVAL;
 
        perf = &op.u.set_pminfo.perf;
        px = pr->performance;
 
-       perf->flags = XEN_PX_PPC | 
-                     XEN_PX_PCT | 
-                     XEN_PX_PSS | 
-                     XEN_PX_PSD;
-
-       /* ppc */
-       perf->ppc = pr->performance_platform_limit;
-
-       /* pct */
-       convert_pct_reg(&perf->control_register, &px->control_register);
-       convert_pct_reg(&perf->status_register, &px->status_register);
-
-       /* pss */
-       perf->state_count = px->state_count;
-       states = kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL);
-       if (!states)
-               return -ENOMEM;
-       convert_pss_states(states, px->states, px->state_count);
-       set_xen_guest_handle(perf->states, states);
-
-       /* psd */
-       pdomain = &px->domain_info;
-       convert_psd_pack(&perf->domain_info, pdomain);
-       if (perf->domain_info.num_processors) {
+       switch(action) {
+       case PROCESSOR_PM_CHANGE:
+               /* ppc dynamic handle */
+               perf->flags = XEN_PX_PPC;
+               perf->platform_limit = pr->performance_platform_limit;
+
+               ret = HYPERVISOR_platform_op(&op);
+               break;
+
+       case PROCESSOR_PM_INIT:
+               /* px normal init */
+               perf->flags = XEN_PX_PPC | 
+                             XEN_PX_PCT | 
+                             XEN_PX_PSS | 
+                             XEN_PX_PSD;
+
+               /* ppc */
+               perf->platform_limit = pr->performance_platform_limit;
+
+               /* pct */
+               convert_pct_reg(&perf->control_register, &px->control_register);
+               convert_pct_reg(&perf->status_register, &px->status_register);
+
+               /* pss */
+               perf->state_count = px->state_count;
+               states = 
kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL);
+               if (!states)
+                       return -ENOMEM;
+               convert_pss_states(states, px->states, px->state_count);
+               set_xen_guest_handle(perf->states, states);
+
+               /* psd */
+               pdomain = &px->domain_info;
+               convert_psd_pack(&perf->domain_info, pdomain);
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        perf->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        perf->shared_type = CPUFREQ_SHARED_TYPE_ANY;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        perf->shared_type = CPUFREQ_SHARED_TYPE_HW;
-       } else
-               perf->shared_type = CPUFREQ_SHARED_TYPE_NONE;
-
-       ret = HYPERVISOR_platform_op(&op);
-       kfree(states);
+               else {
+                       ret = -ENODEV;
+                       kfree(states);
+                       break;
+               }
+
+               ret = HYPERVISOR_platform_op(&op);
+               kfree(states);
+               break;
+
+       default:
+               break;
+       }
+
        return ret;
 }
 
@@ -215,13 +231,13 @@ static struct processor_extcntl_ops xen_
 
 void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
 {
-       xen_processor_pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
-
-       if (xen_processor_pmbits & XEN_PROCESSOR_PM_CX)
+       unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
+
+       if (pmbits & XEN_PROCESSOR_PM_CX)
                xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier;
-       if (xen_processor_pmbits & XEN_PROCESSOR_PM_PX)
+       if (pmbits & XEN_PROCESSOR_PM_PX)
                xen_extcntl_ops.pm_ops[PM_TYPE_PERF] = xen_px_notifier;
-       if (xen_processor_pmbits & XEN_PROCESSOR_PM_TX)
+       if (pmbits & XEN_PROCESSOR_PM_TX)
                xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier;
 
        *ops = &xen_extcntl_ops;
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 arch/i386/kernel/io_apic-xen.c
--- a/arch/i386/kernel/io_apic-xen.c    Fri Sep 12 11:28:00 2008 +0900
+++ b/arch/i386/kernel/io_apic-xen.c    Tue Sep 16 21:25:54 2008 +0900
@@ -47,6 +47,7 @@
 
 #include <xen/interface/xen.h>
 #include <xen/interface/physdev.h>
+#include <xen/evtchn.h>
 
 /* Fake i8259 */
 #define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
@@ -1260,7 +1261,7 @@ static void ioapic_register_intr(int irq
        set_intr_gate(vector, interrupt[idx]);
 }
 #else
-#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
+#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
 #endif
 
 static void __init setup_IO_APIC_irqs(void)
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 arch/x86_64/kernel/io_apic-xen.c
--- a/arch/x86_64/kernel/io_apic-xen.c  Fri Sep 12 11:28:00 2008 +0900
+++ b/arch/x86_64/kernel/io_apic-xen.c  Tue Sep 16 21:25:54 2008 +0900
@@ -95,6 +95,7 @@ int vector_irq[NR_VECTORS] __read_mostly
 
 #include <xen/interface/xen.h>
 #include <xen/interface/physdev.h>
+#include <xen/evtchn.h>
 
 /* Fake i8259 */
 #define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
@@ -940,7 +941,7 @@ static void ioapic_register_intr(int irq
        set_intr_gate(vector, interrupt[idx]);
 }
 #else
-#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
+#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
 #endif /* !CONFIG_XEN */
 
 static void __init setup_IO_APIC_irqs(void)
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 buildconfigs/linux-defconfig_xen0_x86_32
--- a/buildconfigs/linux-defconfig_xen0_x86_32  Fri Sep 12 11:28:00 2008 +0900
+++ b/buildconfigs/linux-defconfig_xen0_x86_32  Tue Sep 16 21:25:54 2008 +0900
@@ -248,7 +248,7 @@ CONFIG_XEN_PCIDEV_FRONTEND=y
 CONFIG_XEN_PCIDEV_FRONTEND=y
 # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
 # CONFIG_PCIEPORTBUS is not set
-# CONFIG_PCI_MSI is not set
+CONFIG_PCI_MSI=y
 # CONFIG_PCI_DEBUG is not set
 CONFIG_ISA_DMA_API=y
 # CONFIG_SCx200 is not set
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 buildconfigs/linux-defconfig_xen0_x86_64
--- a/buildconfigs/linux-defconfig_xen0_x86_64  Fri Sep 12 11:28:00 2008 +0900
+++ b/buildconfigs/linux-defconfig_xen0_x86_64  Tue Sep 16 21:25:54 2008 +0900
@@ -204,7 +204,7 @@ CONFIG_XEN_PCIDEV_FRONTEND=y
 CONFIG_XEN_PCIDEV_FRONTEND=y
 # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
 # CONFIG_PCIEPORTBUS is not set
-# CONFIG_PCI_MSI is not set
+CONFIG_PCI_MSI=y
 # CONFIG_PCI_DEBUG is not set
 
 #
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 buildconfigs/linux-defconfig_xen_x86_32
--- a/buildconfigs/linux-defconfig_xen_x86_32   Fri Sep 12 11:28:00 2008 +0900
+++ b/buildconfigs/linux-defconfig_xen_x86_32   Tue Sep 16 21:25:54 2008 +0900
@@ -254,7 +254,7 @@ CONFIG_XEN_PCIDEV_FRONTEND=y
 CONFIG_XEN_PCIDEV_FRONTEND=y
 # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
 # CONFIG_PCIEPORTBUS is not set
-# CONFIG_PCI_MSI is not set
+CONFIG_PCI_MSI=y
 # CONFIG_PCI_DEBUG is not set
 CONFIG_ISA_DMA_API=y
 CONFIG_SCx200=m
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 buildconfigs/linux-defconfig_xen_x86_64
--- a/buildconfigs/linux-defconfig_xen_x86_64   Fri Sep 12 11:28:00 2008 +0900
+++ b/buildconfigs/linux-defconfig_xen_x86_64   Tue Sep 16 21:25:54 2008 +0900
@@ -209,7 +209,7 @@ CONFIG_XEN_PCIDEV_FRONTEND=y
 CONFIG_XEN_PCIDEV_FRONTEND=y
 # CONFIG_XEN_PCIDEV_FE_DEBUG is not set
 # CONFIG_PCIEPORTBUS is not set
-# CONFIG_PCI_MSI is not set
+CONFIG_PCI_MSI=y
 # CONFIG_PCI_DEBUG is not set
 
 #
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 drivers/acpi/processor_core.c
--- a/drivers/acpi/processor_core.c     Fri Sep 12 11:28:00 2008 +0900
+++ b/drivers/acpi/processor_core.c     Tue Sep 16 21:25:54 2008 +0900
@@ -67,6 +67,7 @@
 #define ACPI_PROCESSOR_FILE_LIMIT      "limit"
 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
 #define ACPI_PROCESSOR_NOTIFY_POWER    0x81
+#define ACPI_PROCESSOR_NOTIFY_THROTTLING     0x82
 
 #define ACPI_PROCESSOR_LIMIT_USER      0
 #define ACPI_PROCESSOR_LIMIT_THERMAL   1
@@ -618,6 +619,10 @@ static void acpi_processor_notify(acpi_h
                acpi_processor_cst_has_changed(pr);
                acpi_bus_generate_event(device, event, 0);
                break;
+       case ACPI_PROCESSOR_NOTIFY_THROTTLING:
+               acpi_processor_tstate_has_changed(pr);
+               acpi_bus_generate_event(device, event, 0);
+               break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Unsupported event [0x%x]\n", event));
@@ -965,6 +970,8 @@ static int __init acpi_processor_init(vo
 
        acpi_processor_ppc_init();
 
+       acpi_processor_throttling_init();
+
        return 0;
 }
 
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 drivers/acpi/processor_extcntl.c
--- a/drivers/acpi/processor_extcntl.c  Fri Sep 12 11:28:00 2008 +0900
+++ b/drivers/acpi/processor_extcntl.c  Tue Sep 16 21:25:54 2008 +0900
@@ -203,13 +203,22 @@ static int processor_extcntl_get_perform
         * processor objects to external logic. In this case, it's preferred
         * to use ACPI ID instead.
         */
-       pr->performance->domain_info.num_processors = 0;
+       pdomain = &pr->performance->domain_info;
+       pdomain->num_processors = 0;
        ret = acpi_processor_get_psd(pr);
-       if (ret < 0)
-               goto err_out;
+       if (ret < 0) {
+               /*
+                * _PSD is optional - assume no coordination if absent (or
+                * broken), matching native kernels' behavior.
+                */
+               pdomain->num_entries = ACPI_PSD_REV0_ENTRIES;
+               pdomain->revision = ACPI_PSD_REV0_REVISION;
+               pdomain->domain = pr->acpi_id;
+               pdomain->coord_type = DOMAIN_COORD_TYPE_SW_ALL;
+               pdomain->num_processors = 1;
+       }
 
        /* Some sanity check */
-       pdomain = &pr->performance->domain_info;
        if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
            (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) ||
            ((pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL) &&
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 drivers/acpi/processor_throttling.c
--- a/drivers/acpi/processor_throttling.c       Fri Sep 12 11:28:00 2008 +0900
+++ b/drivers/acpi/processor_throttling.c       Tue Sep 16 21:25:54 2008 +0900
@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/cpufreq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -41,21 +42,559 @@
 
 #define ACPI_PROCESSOR_COMPONENT        0x01000000
 #define ACPI_PROCESSOR_CLASS            "processor"
-#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_throttling");
+
+struct throttling_tstate {
+       unsigned int cpu;               /* cpu nr */
+       int target_state;               /* target T-state */
+};
+
+#define THROTTLING_PRECHANGE       (1)
+#define THROTTLING_POSTCHANGE      (2)
+
+static int acpi_processor_get_throttling(struct acpi_processor *pr);
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+
+static int acpi_processor_update_tsd_coord(void)
+{
+       int count, count_target;
+       int retval = 0;
+       unsigned int i, j;
+       cpumask_t covered_cpus;
+       struct acpi_processor *pr, *match_pr;
+       struct acpi_tsd_package *pdomain, *match_pdomain;
+       struct acpi_processor_throttling *pthrottling, *match_pthrottling;
+
+       /*
+        * Now that we have _TSD data from all CPUs, lets setup T-state
+        * coordination between all CPUs.
+        */
+       for_each_possible_cpu(i) {
+               pr = processors[i];
+               if (!pr)
+                       continue;
+
+               /* Basic validity check for domain info */
+               pthrottling = &(pr->throttling);
+
+               /*
+                * If the _TSD package for one CPU is invalid, the
+                * coordination among all CPUs is considered invalid.
+                * Maybe it is ugly.
+                */
+               if (!pthrottling->tsd_valid_flag) {
+                       retval = -EINVAL;
+                       break;
+               }
+       }
+       if (retval)
+               goto err_ret;
+
+       cpus_clear(covered_cpus);
+       for_each_possible_cpu(i) {
+               pr = processors[i];
+               if (!pr)
+                       continue;
+
+               if (cpu_isset(i, covered_cpus))
+                       continue;
+               pthrottling = &pr->throttling;
+
+               pdomain = &(pthrottling->domain_info);
+               cpu_set(i, pthrottling->shared_cpu_map);
+               cpu_set(i, covered_cpus);
+               /*
+                * If the number of processor in the TSD domain is 1, it is
+                * unnecessary to parse the coordination for this CPU.
+                */
+               if (pdomain->num_processors <= 1)
+                       continue;
+
+               /* Validate the Domain info */
+               count_target = pdomain->num_processors;
+               count = 1;
+
+               for_each_possible_cpu(j) {
+                       if (i == j)
+                               continue;
+
+                       match_pr = processors[j];
+                       if (!match_pr)
+                               continue;
+
+                       match_pthrottling = &(match_pr->throttling);
+                       match_pdomain = &(match_pthrottling->domain_info);
+                       if (match_pdomain->domain != pdomain->domain)
+                               continue;
+
+                       /* Here i and j are in the same domain.
+                        * If two TSD packages have the same domain, they
+                        * should have the same num_processors and
+                        * coordination type. Otherwise it will be regarded
+                        * as illegal.
+                        */
+                       if (match_pdomain->num_processors != count_target) {
+                               retval = -EINVAL;
+                               goto err_ret;
+                       }
+
+                       if (pdomain->coord_type != match_pdomain->coord_type) {
+                               retval = -EINVAL;
+                               goto err_ret;
+                       }
+
+                       cpu_set(j, covered_cpus);
+                       cpu_set(j, pthrottling->shared_cpu_map);
+                       count++;
+               }
+               for_each_possible_cpu(j) {
+                       if (i == j)
+                               continue;
+
+                       match_pr = processors[j];
+                       if (!match_pr)
+                               continue;
+
+                       match_pthrottling = &(match_pr->throttling);
+                       match_pdomain = &(match_pthrottling->domain_info);
+                       if (match_pdomain->domain != pdomain->domain)
+                               continue;
+
+                       /*
+                        * If some CPUS have the same domain, they
+                        * will have the same shared_cpu_map.
+                        */
+                       match_pthrottling->shared_cpu_map =
+                               pthrottling->shared_cpu_map;
+               }
+       }
+
+err_ret:
+       for_each_possible_cpu(i) {
+               pr = processors[i];
+               if (!pr)
+                       continue;
+
+               /*
+                * Assume no coordination on any error parsing domain info.
+                * The coordination type will be forced as SW_ALL.
+                */
+               if (retval) {
+                       pthrottling = &(pr->throttling);
+                       cpus_clear(pthrottling->shared_cpu_map);
+                       cpu_set(i, pthrottling->shared_cpu_map);
+                       pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+               }
+       }
+
+       return retval;
+}
+
+/*
+ * Update the T-state coordination after the _TSD
+ * data for all cpus is obtained.
+ */
+void acpi_processor_throttling_init(void)
+{
+       if (acpi_processor_update_tsd_coord())
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                       "Assume no T-state coordination\n"));
+
+       return;
+}
+
+static int acpi_processor_throttling_notifier(unsigned long event, void *data)
+{
+       struct throttling_tstate *p_tstate = data;
+       struct acpi_processor *pr;
+       unsigned int cpu ;
+       int target_state;
+       struct acpi_processor_limit *p_limit;
+       struct acpi_processor_throttling *p_throttling;
+
+       cpu = p_tstate->cpu;
+       pr = processors[cpu];
+       if (!pr) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
+               return 0;
+       }
+       if (!pr->flags.throttling) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
+                               "unsupported on CPU %d\n", cpu));
+               return 0;
+       }
+       target_state = p_tstate->target_state;
+       p_throttling = &(pr->throttling);
+       switch (event) {
+       case THROTTLING_PRECHANGE:
+               /*
+                * Prechange event is used to choose one proper t-state,
+                * which meets the limits of thermal, user and _TPC.
+                */
+               p_limit = &pr->limit;
+               if (p_limit->thermal.tx > target_state)
+                       target_state = p_limit->thermal.tx;
+               if (p_limit->user.tx > target_state)
+                       target_state = p_limit->user.tx;
+               if (pr->throttling_platform_limit > target_state)
+                       target_state = pr->throttling_platform_limit;
+               if (target_state >= p_throttling->state_count) {
+                       printk(KERN_WARNING
+                               "Exceed the limit of T-state \n");
+                       target_state = p_throttling->state_count - 1;
+               }
+               p_tstate->target_state = target_state;
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
+                               "target T-state of CPU %d is T%d\n",
+                               cpu, target_state));
+               break;
+       case THROTTLING_POSTCHANGE:
+               /*
+                * Postchange event is only used to update the
+                * T-state flag of acpi_processor_throttling.
+                */
+               p_throttling->state = target_state;
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
+                               "CPU %d is switched to T%d\n",
+                               cpu, target_state));
+               break;
+       default:
+               printk(KERN_WARNING
+                       "Unsupported Throttling notifier event\n");
+               break;
+       }
+
+       return 0;
+}
+
+/*
+ * _TPC - Throttling Present Capabilities
+ */
+static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
+{
+       acpi_status status = 0;
+       unsigned long tpc = 0;
+
+       if (!pr)
+               return -EINVAL;
+       status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
+       if (ACPI_FAILURE(status)) {
+               if (status != AE_NOT_FOUND) {
+                       ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
+               }
+               return -ENODEV;
+       }
+       pr->throttling_platform_limit = (int)tpc;
+       return 0;
+}
+
+int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
+{
+       int result = 0;
+       int throttling_limit;
+       int current_state;
+       struct acpi_processor_limit *limit;
+       int target_state;
+
+       result = acpi_processor_get_platform_limit(pr);
+       if (result) {
+               /* Throttling Limit is unsupported */
+               return result;
+       }
+
+       throttling_limit = pr->throttling_platform_limit;
+       if (throttling_limit >= pr->throttling.state_count) {
+               /* Incorrect throttling limit */
+               return -EINVAL;
+       }
+
+       current_state = pr->throttling.state;
+       if (current_state > throttling_limit) {
+               /*
+                * The current state can meet the requirement of
+                * _TPC limit. But it is reasonable that OSPM changes
+                * t-states from high to low for better performance.
+                * Of course the limit condition of thermal
+                * and user should be considered.
+                */
+               limit = &pr->limit;
+               target_state = throttling_limit;
+               if (limit->thermal.tx > target_state)
+                       target_state = limit->thermal.tx;
+               if (limit->user.tx > target_state)
+                       target_state = limit->user.tx;
+       } else if (current_state == throttling_limit) {
+               /*
+                * Unnecessary to change the throttling state
+                */
+               return 0;
+       } else {
+               /*
+                * If the current state is lower than the limit of _TPC, it
+                * will be forced to switch to the throttling state defined
+                * by throttling_platfor_limit.
+                * Because the previous state meets with the limit condition
+                * of thermal and user, it is unnecessary to check it again.
+                */
+               target_state = throttling_limit;
+       }
+       return acpi_processor_set_throttling(pr, target_state);
+}
+
+/*
+ * _PTC - Processor Throttling Control (and status) register location
+ */
+static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
+{
+       int result = 0;
+       acpi_status status = 0;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *ptc = NULL;
+       union acpi_object obj = { 0 };
+       struct acpi_processor_throttling *throttling;
+
+       status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
+       if (ACPI_FAILURE(status)) {
+               if (status != AE_NOT_FOUND) {
+                       ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
+               }
+               return -ENODEV;
+       }
+
+       ptc = (union acpi_object *)buffer.pointer;
+       if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
+           || (ptc->package.count != 2)) {
+               printk(KERN_ERR PREFIX "Invalid _PTC data\n");
+               result = -EFAULT;
+               goto end;
+       }
+
+       /*
+        * control_register
+        */
+
+       obj = ptc->package.elements[0];
+
+       if ((obj.type != ACPI_TYPE_BUFFER)
+           || (obj.buffer.length < sizeof(struct acpi_ptc_register))
+           || (obj.buffer.pointer == NULL)) {
+               printk(KERN_ERR PREFIX
+                      "Invalid _PTC data (control_register)\n");
+               result = -EFAULT;
+               goto end;
+       }
+       memcpy(&pr->throttling.control_register, obj.buffer.pointer,
+              sizeof(struct acpi_ptc_register));
+
+       /*
+        * status_register
+        */
+
+       obj = ptc->package.elements[1];
+
+       if ((obj.type != ACPI_TYPE_BUFFER)
+           || (obj.buffer.length < sizeof(struct acpi_ptc_register))
+           || (obj.buffer.pointer == NULL)) {
+               printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
+               result = -EFAULT;
+               goto end;
+       }
+
+       memcpy(&pr->throttling.status_register, obj.buffer.pointer,
+              sizeof(struct acpi_ptc_register));
+
+       throttling = &pr->throttling;
+
+       if ((throttling->control_register.bit_width +
+               throttling->control_register.bit_offset) > 32) {
+               printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
+               result = -EFAULT;
+               goto end;
+       }
+
+       if ((throttling->status_register.bit_width +
+               throttling->status_register.bit_offset) > 32) {
+               printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
+               result = -EFAULT;
+               goto end;
+       }
+
+      end:
+       kfree(buffer.pointer);
+
+       return result;
+}
+
+/*
+ * _TSS - Throttling Supported States
+ */
+static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
+{
+       int result = 0;
+       acpi_status status = AE_OK;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
+       struct acpi_buffer state = { 0, NULL };
+       union acpi_object *tss = NULL;
+       int i;
+
+       status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
+       if (ACPI_FAILURE(status)) {
+               if (status != AE_NOT_FOUND) {
+                       ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
+               }
+               return -ENODEV;
+       }
+
+       tss = buffer.pointer;
+       if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
+               printk(KERN_ERR PREFIX "Invalid _TSS data\n");
+               result = -EFAULT;
+               goto end;
+       }
+
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
+                         tss->package.count));
+
+       pr->throttling.state_count = tss->package.count;
+       pr->throttling.states_tss =
+           kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
+                   GFP_KERNEL);
+       if (!pr->throttling.states_tss) {
+               result = -ENOMEM;
+               goto end;
+       }
+
+       for (i = 0; i < pr->throttling.state_count; i++) {
+
+               struct acpi_processor_tx_tss *tx =
+                   (struct acpi_processor_tx_tss *)&(pr->throttling.
+                                                     states_tss[i]);
+
+               state.length = sizeof(struct acpi_processor_tx_tss);
+               state.pointer = tx;
+
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
+
+               status = acpi_extract_package(&(tss->package.elements[i]),
+                                             &format, &state);
+               if (ACPI_FAILURE(status)) {
+                       ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
+                       result = -EFAULT;
+                       kfree(pr->throttling.states_tss);
+                       goto end;
+               }
+
+               if (!tx->freqpercentage) {
+                       printk(KERN_ERR PREFIX
+                              "Invalid _TSS data: freq is zero\n");
+                       result = -EFAULT;
+                       kfree(pr->throttling.states_tss);
+                       goto end;
+               }
+       }
+
+      end:
+       kfree(buffer.pointer);
+
+       return result;
+}
+
+/*
+ * _TSD - T-State Dependencies
+ */
+static int acpi_processor_get_tsd(struct acpi_processor *pr)
+{
+       int result = 0;
+       acpi_status status = AE_OK;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
+       struct acpi_buffer state = { 0, NULL };
+       union acpi_object *tsd = NULL;
+       struct acpi_tsd_package *pdomain;
+       struct acpi_processor_throttling *pthrottling;
+
+       pthrottling = &pr->throttling;
+       pthrottling->tsd_valid_flag = 0;
+
+       status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
+       if (ACPI_FAILURE(status)) {
+               if (status != AE_NOT_FOUND) {
+                       ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
+               }
+               return -ENODEV;
+       }
+
+       tsd = buffer.pointer;
+       if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+       if (tsd->package.count != 1) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+       pdomain = &(pr->throttling.domain_info);
+
+       state.length = sizeof(struct acpi_tsd_package);
+       state.pointer = pdomain;
+
+       status = acpi_extract_package(&(tsd->package.elements[0]),
+                                     &format, &state);
+       if (ACPI_FAILURE(status)) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _TSD data\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+       if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:num_entries\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+       if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _TSD:revision\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+       pthrottling = &pr->throttling;
+       pthrottling->tsd_valid_flag = 1;
+       pthrottling->shared_type = pdomain->coord_type;
+       cpu_set(pr->id, pthrottling->shared_cpu_map);
+       /*
+        * If the coordination type is not defined in the ACPI spec,
+        * the tsd_valid_flag will be cleared and the coordination type
+        * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
+        */
+       if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
+               pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
+               pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
+               pthrottling->tsd_valid_flag = 0;
+               pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+       }
+
+      end:
+       kfree(buffer.pointer);
+       return result;
+}
 
 /* --------------------------------------------------------------------------
                               Throttling Control
    -------------------------------------------------------------------------- 
*/
-static int acpi_processor_get_throttling(struct acpi_processor *pr)
+static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 {
        int state = 0;
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;
 
-
        if (!pr)
                return -EINVAL;
 
@@ -95,13 +634,259 @@ static int acpi_processor_get_throttling
        return 0;
 }
 
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+#ifdef CONFIG_X86
+static int acpi_throttling_rdmsr(struct acpi_processor *pr,
+                                       acpi_integer * value)
+{
+       struct cpuinfo_x86 *c;
+       u64 msr_high, msr_low;
+       unsigned int cpu;
+       u64 msr = 0;
+       int ret = -1;
+
+       cpu = pr->id;
+       c = &cpu_data[cpu];
+
+       if ((c->x86_vendor != X86_VENDOR_INTEL) ||
+               !cpu_has(c, X86_FEATURE_ACPI)) {
+               printk(KERN_ERR PREFIX
+                       "HARDWARE addr space,NOT supported yet\n");
+       } else {
+               msr_low = 0;
+               msr_high = 0;
+               rdmsr_safe(MSR_IA32_THERM_CONTROL,
+                       (u32 *)&msr_low , (u32 *) &msr_high);
+               msr = (msr_high << 32) | msr_low;
+               *value = (acpi_integer) msr;
+               ret = 0;
+       }
+       return ret;
+}
+
+static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
+{
+       struct cpuinfo_x86 *c;
+       unsigned int cpu;
+       int ret = -1;
+       u64 msr;
+
+       cpu = pr->id;
+       c = &cpu_data[cpu];
+
+       if ((c->x86_vendor != X86_VENDOR_INTEL) ||
+               !cpu_has(c, X86_FEATURE_ACPI)) {
+               printk(KERN_ERR PREFIX
+                       "HARDWARE addr space,NOT supported yet\n");
+       } else {
+               msr = value;
+               wrmsr_safe(MSR_IA32_THERM_CONTROL,
+                          (u32)msr, (u32)(msr >> 32));
+               ret = 0;
+       }
+       return ret;
+}
+#else
+static int acpi_throttling_rdmsr(struct acpi_processor *pr,
+                               acpi_integer * value)
+{
+       printk(KERN_ERR PREFIX
+               "HARDWARE addr space,NOT supported yet\n");
+       return -1;
+}
+
+static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
+{
+       printk(KERN_ERR PREFIX
+               "HARDWARE addr space,NOT supported yet\n");
+       return -1;
+}
+#endif
+
+static int acpi_read_throttling_status(struct acpi_processor *pr,
+                                       acpi_integer *value)
+{
+       u32 bit_width, bit_offset;
+       u64 ptc_value;
+       u64 ptc_mask;
+       struct acpi_processor_throttling *throttling;
+       int ret = -1;
+
+       throttling = &pr->throttling;
+       switch (throttling->status_register.space_id) {
+       case ACPI_ADR_SPACE_SYSTEM_IO:
+               ptc_value = 0;
+               bit_width = throttling->status_register.bit_width;
+               bit_offset = throttling->status_register.bit_offset;
+
+               acpi_os_read_port((acpi_io_address) throttling->status_register.
+                                 address, (u32 *) &ptc_value,
+                                 (u32) (bit_width + bit_offset));
+               ptc_mask = (1 << bit_width) - 1;
+               *value = (acpi_integer) ((ptc_value >> bit_offset) & ptc_mask);
+               ret = 0;
+               break;
+       case ACPI_ADR_SPACE_FIXED_HARDWARE:
+               ret = acpi_throttling_rdmsr(pr, value);
+               break;
+       default:
+               printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+                      (u32) (throttling->status_register.space_id));
+       }
+       return ret;
+}
+
+static int acpi_write_throttling_state(struct acpi_processor *pr,
+                               acpi_integer value)
+{
+       u32 bit_width, bit_offset;
+       u64 ptc_value;
+       u64 ptc_mask;
+       struct acpi_processor_throttling *throttling;
+       int ret = -1;
+
+       throttling = &pr->throttling;
+       switch (throttling->control_register.space_id) {
+       case ACPI_ADR_SPACE_SYSTEM_IO:
+               bit_width = throttling->control_register.bit_width;
+               bit_offset = throttling->control_register.bit_offset;
+               ptc_mask = (1 << bit_width) - 1;
+               ptc_value = value & ptc_mask;
+
+               acpi_os_write_port((acpi_io_address) throttling->
+                                       control_register.address,
+                                       (u32) (ptc_value << bit_offset),
+                                       (u32) (bit_width + bit_offset));
+               ret = 0;
+               break;
+       case ACPI_ADR_SPACE_FIXED_HARDWARE:
+               ret = acpi_throttling_wrmsr(pr, value);
+               break;
+       default:
+               printk(KERN_ERR PREFIX "Unknown addr space %d\n",
+                      (u32) (throttling->control_register.space_id));
+       }
+       return ret;
+}
+
+static int acpi_get_throttling_state(struct acpi_processor *pr,
+                               acpi_integer value)
+{
+       int i;
+
+       for (i = 0; i < pr->throttling.state_count; i++) {
+               struct acpi_processor_tx_tss *tx =
+                   (struct acpi_processor_tx_tss *)&(pr->throttling.
+                                                     states_tss[i]);
+               if (tx->control == value)
+                       break;
+       }
+       if (i > pr->throttling.state_count)
+               i = -1;
+       return i;
+}
+
+static int acpi_get_throttling_value(struct acpi_processor *pr,
+                       int state, acpi_integer *value)
+{
+       int ret = -1;
+
+       if (state >= 0 && state <= pr->throttling.state_count) {
+               struct acpi_processor_tx_tss *tx =
+                   (struct acpi_processor_tx_tss *)&(pr->throttling.
+                                                     states_tss[state]);
+               *value = tx->control;
+               ret = 0;
+       }
+       return ret;
+}
+
+static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
+{
+       int state = 0;
+       int ret;
+       acpi_integer value;
+
+       if (!pr)
+               return -EINVAL;
+
+       if (!pr->flags.throttling)
+               return -ENODEV;
+
+       pr->throttling.state = 0;
+
+       value = 0;
+       ret = acpi_read_throttling_status(pr, &value);
+       if (ret >= 0) {
+               state = acpi_get_throttling_state(pr, value);
+               pr->throttling.state = state;
+       }
+
+       return 0;
+}
+
+static int acpi_processor_get_throttling(struct acpi_processor *pr)
+{
+       cpumask_t saved_mask;
+       int ret;
+
+       if (!pr)
+               return -EINVAL;
+
+       if (!pr->flags.throttling)
+               return -ENODEV;
+       /*
+        * Migrate task to the cpu pointed by pr.
+        */
+       saved_mask = current->cpus_allowed;
+       set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+       ret = pr->throttling.acpi_processor_get_throttling(pr);
+       /* restore the previous state */
+       set_cpus_allowed(current, saved_mask);
+
+       return ret;
+}
+
+static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
+{
+       int i, step;
+
+       if (!pr->throttling.address) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
+               return -EINVAL;
+       } else if (!pr->throttling.duty_width) {
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
+               return -EINVAL;
+       }
+       /* TBD: Support duty_cycle values that span bit 4. */
+       else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
+               printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
+               return -EINVAL;
+       }
+
+       pr->throttling.state_count = 1 << acpi_fadt.duty_width;
+
+       /*
+        * Compute state values. Note that throttling displays a linear power
+        * performance relationship (at 50% performance the CPU will consume
+        * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
+        */
+
+       step = (1000 / pr->throttling.state_count);
+
+       for (i = 0; i < pr->throttling.state_count; i++) {
+               pr->throttling.states[i].performance = 1000 - step * i;
+               pr->throttling.states[i].power = 1000 - step * i;
+       }
+       return 0;
+}
+
+static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
+                                             int state)
 {
        u32 value = 0;
        u32 duty_mask = 0;
        u32 duty_value = 0;
 
-
        if (!pr)
                return -EINVAL;
 
@@ -114,6 +899,8 @@ int acpi_processor_set_throttling(struct
        if (state == pr->throttling.state)
                return 0;
 
+       if (state < pr->throttling_platform_limit)
+               return -EPERM;
        /*
         * Calculate the duty_value and duty_mask.
         */
@@ -166,12 +953,135 @@ int acpi_processor_set_throttling(struct
        return 0;
 }
 
+static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
+                                            int state)
+{
+       int ret;
+       acpi_integer value;
+
+       if (!pr)
+               return -EINVAL;
+
+       if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+               return -EINVAL;
+
+       if (!pr->flags.throttling)
+               return -ENODEV;
+
+       if (state == pr->throttling.state)
+               return 0;
+
+       if (state < pr->throttling_platform_limit)
+               return -EPERM;
+
+       value = 0;
+       ret = acpi_get_throttling_value(pr, state, &value);
+       if (ret >= 0) {
+               acpi_write_throttling_state(pr, value);
+               pr->throttling.state = state;
+       }
+
+       return 0;
+}
+
+int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+{
+       cpumask_t saved_mask;
+       int ret = 0;
+       unsigned int i;
+       struct acpi_processor *match_pr;
+       struct acpi_processor_throttling *p_throttling;
+       struct throttling_tstate t_state;
+       cpumask_t online_throttling_cpus;
+
+       if (!pr)
+               return -EINVAL;
+
+       if (!pr->flags.throttling)
+               return -ENODEV;
+
+       if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+               return -EINVAL;
+
+       saved_mask = current->cpus_allowed;
+       t_state.target_state = state;
+       p_throttling = &(pr->throttling);
+       cpus_and(online_throttling_cpus, cpu_online_map,
+                       p_throttling->shared_cpu_map);
+       /*
+        * The throttling notifier will be called for every
+        * affected cpu in order to get one proper T-state.
+        * The notifier event is THROTTLING_PRECHANGE.
+        */
+       for_each_cpu_mask(i, online_throttling_cpus) {
+               t_state.cpu = i;
+               acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
+                                                       &t_state);
+       }
+       /*
+        * The function of acpi_processor_set_throttling will be called
+        * to switch T-state. If the coordination type is SW_ALL or HW_ALL,
+        * it is necessary to call it for every affected cpu. Otherwise
+        * it can be called only for the cpu pointed by pr.
+        */
+       if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
+               set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+               ret = p_throttling->acpi_processor_set_throttling(pr,
+                                               t_state.target_state);
+       } else {
+               /*
+                * When the T-state coordination is SW_ALL or HW_ALL,
+                * it is necessary to set the T-state for every affected
+                * cpu.
+                */
+               for_each_cpu_mask(i, online_throttling_cpus) {
+                       match_pr = processors[i];
+                       /*
+                        * If the pointer is invalid, we will report the
+                        * error message and continue.
+                        */
+                       if (!match_pr) {
+                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                       "Invalid Pointer for CPU %d\n", i));
+                               continue;
+                       }
+                       /*
+                        * If the throttling control is unsupported on CPU i,
+                        * we will report the error message and continue.
+                        */
+                       if (!match_pr->flags.throttling) {
+                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                       "Throttling Controll is unsupported "
+                                       "on CPU %d\n", i));
+                               continue;
+                       }
+                       t_state.cpu = i;
+                       set_cpus_allowed(current, cpumask_of_cpu(i));
+                       ret = match_pr->throttling.
+                               acpi_processor_set_throttling(
+                               match_pr, t_state.target_state);
+               }
+       }
+       /*
+        * After the set_throttling is called, the
+        * throttling notifier is called for every
+        * affected cpu to update the T-states.
+        * The notifier event is THROTTLING_POSTCHANGE
+        */
+       for_each_cpu_mask(i, online_throttling_cpus) {
+               t_state.cpu = i;
+               acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
+                                                       &t_state);
+       }
+       /* restore the previous state */
+       set_cpus_allowed(current, saved_mask);
+       return ret;
+}
+
 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 {
        int result = 0;
-       int step = 0;
-       int i = 0;
-
+       struct acpi_processor_throttling *pthrottling;
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "pblk_address[0x%08x] duty_offset[%d] 
duty_width[%d]\n",
@@ -182,19 +1092,36 @@ int acpi_processor_get_throttling_info(s
        if (!pr)
                return -EINVAL;
 
-       /* TBD: Support ACPI 2.0 objects */
-
-       if (!pr->throttling.address) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
-               return 0;
-       } else if (!pr->throttling.duty_width) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
-               return 0;
-       }
-       /* TBD: Support duty_cycle values that span bit 4. */
-       else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
-               printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
-               return 0;
+       /*
+        * Evaluate _PTC, _TSS and _TPC
+        * They must all be present or none of them can be used.
+        */
+       if (acpi_processor_get_throttling_control(pr) ||
+               acpi_processor_get_throttling_states(pr) ||
+               acpi_processor_get_platform_limit(pr))
+       {
+               pr->throttling.acpi_processor_get_throttling =
+                   &acpi_processor_get_throttling_fadt;
+               pr->throttling.acpi_processor_set_throttling =
+                   &acpi_processor_set_throttling_fadt;
+               if (acpi_processor_get_fadt_info(pr))
+                       return 0;
+       } else {
+               pr->throttling.acpi_processor_get_throttling =
+                   &acpi_processor_get_throttling_ptc;
+               pr->throttling.acpi_processor_set_throttling =
+                   &acpi_processor_set_throttling_ptc;
+       }
+
+       /*
+        * If TSD package for one CPU can't be parsed successfully, it means
+        * that this CPU will have no coordination with other CPUs.
+        */
+       if (acpi_processor_get_tsd(pr)) {
+               pthrottling = &pr->throttling;
+               pthrottling->tsd_valid_flag = 0;
+               cpu_set(pr->id, pthrottling->shared_cpu_map);
+               pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
        }
 
        /*
@@ -206,21 +1133,6 @@ int acpi_processor_get_throttling_info(s
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Throttling not supported on PIIX4 A- or 
B-step\n"));
                return 0;
-       }
-
-       pr->throttling.state_count = 1 << acpi_fadt.duty_width;
-
-       /*
-        * Compute state values. Note that throttling displays a linear power/
-        * performance relationship (at 50% performance the CPU will consume
-        * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
-        */
-
-       step = (1000 / pr->throttling.state_count);
-
-       for (i = 0; i < pr->throttling.state_count; i++) {
-               pr->throttling.states[i].performance = step * i;
-               pr->throttling.states[i].power = step * i;
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
@@ -259,10 +1171,9 @@ static int acpi_processor_throttling_seq
 static int acpi_processor_throttling_seq_show(struct seq_file *seq,
                                              void *offset)
 {
-       struct acpi_processor *pr = (struct acpi_processor *)seq->private;
+       struct acpi_processor *pr = seq->private;
        int i = 0;
        int result = 0;
-
 
        if (!pr)
                goto end;
@@ -281,15 +1192,27 @@ static int acpi_processor_throttling_seq
        }
 
        seq_printf(seq, "state count:             %d\n"
-                  "active state:            T%d\n",
-                  pr->throttling.state_count, pr->throttling.state);
+                  "active state:            T%d\n"
+                  "state available: T%d to T%d\n",
+                  pr->throttling.state_count, pr->throttling.state,
+                  pr->throttling_platform_limit,
+                  pr->throttling.state_count - 1);
 
        seq_puts(seq, "states:\n");
-       for (i = 0; i < pr->throttling.state_count; i++)
-               seq_printf(seq, "   %cT%d:                  %02d%%\n",
-                          (i == pr->throttling.state ? '*' : ' '), i,
-                          (pr->throttling.states[i].performance ? pr->
-                           throttling.states[i].performance / 10 : 0));
+       if (pr->throttling.acpi_processor_get_throttling ==
+                       acpi_processor_get_throttling_fadt) {
+               for (i = 0; i < pr->throttling.state_count; i++)
+                       seq_printf(seq, "   %cT%d:                  %02d%%\n",
+                                  (i == pr->throttling.state ? '*' : ' '), i,
+                                  (pr->throttling.states[i].performance ? pr->
+                                   throttling.states[i].performance / 10 : 0));
+       } else {
+               for (i = 0; i < pr->throttling.state_count; i++)
+                       seq_printf(seq, "   %cT%d:                  %02d%%\n",
+                                  (i == pr->throttling.state ? '*' : ' '), i,
+                                  (int)pr->throttling.states_tss[i].
+                                  freqpercentage);
+       }
 
       end:
        return 0;
@@ -302,15 +1225,17 @@ static int acpi_processor_throttling_ope
                           PDE(inode)->data);
 }
 
-static ssize_t acpi_processor_write_throttling(struct file * file,
+static ssize_t acpi_processor_write_throttling(struct file *file,
                                               const char __user * buffer,
                                               size_t count, loff_t * data)
 {
        int result = 0;
-       struct seq_file *m = (struct seq_file *)file->private_data;
-       struct acpi_processor *pr = (struct acpi_processor *)m->private;
-       char state_string[12] = { '\0' };
-
+       struct seq_file *m = file->private_data;
+       struct acpi_processor *pr = m->private;
+       char state_string[5] = "";
+       char *charp = NULL;
+       size_t state_val = 0;
+       char tmpbuf[5] = "";
 
        if (!pr || (count > sizeof(state_string) - 1))
                return -EINVAL;
@@ -319,10 +1244,23 @@ static ssize_t acpi_processor_write_thro
                return -EFAULT;
 
        state_string[count] = '\0';
-
-       result = acpi_processor_set_throttling(pr,
-                                              simple_strtoul(state_string,
-                                                             NULL, 0));
+       if ((count > 0) && (state_string[count-1] == '\n'))
+               state_string[count-1] = '\0';
+
+       charp = state_string;
+       if ((state_string[0] == 't') || (state_string[0] == 'T'))
+               charp++;
+
+       state_val = simple_strtoul(charp, NULL, 0);
+       if (state_val >= pr->throttling.state_count)
+               return -EINVAL;
+
+       snprintf(tmpbuf, 5, "%zu", state_val);
+
+       if (strcmp(tmpbuf, charp) != 0)
+               return -EINVAL;
+
+       result = acpi_processor_set_throttling(pr, state_val);
        if (result)
                return result;
 
@@ -330,6 +1268,7 @@ static ssize_t acpi_processor_write_thro
 }
 
 struct file_operations acpi_processor_throttling_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_processor_throttling_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_throttling,
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 drivers/pci/msi-xen.c
--- a/drivers/pci/msi-xen.c     Fri Sep 12 11:28:00 2008 +0900
+++ b/drivers/pci/msi-xen.c     Tue Sep 16 21:25:54 2008 +0900
@@ -15,6 +15,8 @@
 #include <linux/pci.h>
 #include <linux/proc_fs.h>
 
+#include <xen/evtchn.h>
+
 #include <asm/errno.h>
 #include <asm/io.h>
 #include <asm/smp.h>
@@ -156,13 +158,15 @@ static int msi_unmap_pirq(struct pci_dev
        int rc;
 
        unmap.domid = msi_get_dev_owner(dev);
-       unmap.pirq = pirq;
+       unmap.pirq = evtchn_get_xen_pirq(pirq);
 
        if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap)))
                printk(KERN_WARNING "unmap irq %x failed\n", pirq);
 
        if (rc < 0)
                return rc;
+
+       evtchn_map_pirq(pirq, 0);
        return 0;
 }
 
@@ -197,7 +201,7 @@ static int msi_map_pirq_to_vector(struct
        map_irq.domid = domid;
        map_irq.type = MAP_PIRQ_TYPE_MSI;
        map_irq.index = -1;
-       map_irq.pirq = pirq;
+       map_irq.pirq = pirq < 0 ? -1 : evtchn_get_xen_pirq(pirq);
        map_irq.bus = dev->bus->number;
        map_irq.devfn = dev->devfn;
        map_irq.entry_nr = entry_nr;
@@ -208,8 +212,12 @@ static int msi_map_pirq_to_vector(struct
 
        if (rc < 0)
                return rc;
-
-       return map_irq.pirq;
+       /* This happens when MSI support is not enabled in Xen. */
+       if (rc == 0 && map_irq.pirq < 0)
+               return -ENOSYS;
+
+       BUG_ON(map_irq.pirq <= 0);
+       return evtchn_map_pirq(pirq, map_irq.pirq);
 }
 
 static int msi_map_vector(struct pci_dev *dev, int entry_nr, u64 table_base)
@@ -364,9 +372,15 @@ void pci_restore_msix_state(struct pci_d
 
        spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
        list_for_each_entry_safe(pirq_entry, tmp,
-                                &msi_dev_entry->pirq_list_head, list)
-               msi_map_pirq_to_vector(dev, pirq_entry->pirq,
-                                      pirq_entry->entry_nr, table_base);
+                                &msi_dev_entry->pirq_list_head, list) {
+               int rc = msi_map_pirq_to_vector(dev, pirq_entry->pirq,
+                                               pirq_entry->entry_nr, 
table_base);
+               if (rc < 0)
+                       printk(KERN_WARNING
+                              "%s: re-mapping irq #%d (pirq%d) failed: %d\n",
+                              pci_name(dev), pirq_entry->entry_nr,
+                              pirq_entry->pirq, rc);
+       }
        spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
 
        enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 drivers/xen/blktap/common.h
--- a/drivers/xen/blktap/common.h       Fri Sep 12 11:28:00 2008 +0900
+++ b/drivers/xen/blktap/common.h       Tue Sep 16 21:25:54 2008 +0900
@@ -89,6 +89,7 @@ typedef struct blkif_st {
 
 blkif_t *tap_alloc_blkif(domid_t domid);
 void tap_blkif_free(blkif_t *blkif);
+void tap_blkif_kmem_cache_free(blkif_t *blkif);
 int tap_blkif_map(blkif_t *blkif, unsigned long shared_page, 
                  unsigned int evtchn);
 void tap_blkif_unmap(blkif_t *blkif);
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 drivers/xen/blktap/interface.c
--- a/drivers/xen/blktap/interface.c    Fri Sep 12 11:28:00 2008 +0900
+++ b/drivers/xen/blktap/interface.c    Tue Sep 16 21:25:54 2008 +0900
@@ -162,8 +162,15 @@ void tap_blkif_free(blkif_t *blkif)
 {
        atomic_dec(&blkif->refcnt);
        wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
+       atomic_inc(&blkif->refcnt);
 
        tap_blkif_unmap(blkif);
+}
+
+void tap_blkif_kmem_cache_free(blkif_t *blkif)
+{
+       if (!atomic_dec_and_test(&blkif->refcnt))
+               BUG();
        kmem_cache_free(blkif_cachep, blkif);
 }
 
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 drivers/xen/blktap/xenbus.c
--- a/drivers/xen/blktap/xenbus.c       Fri Sep 12 11:28:00 2008 +0900
+++ b/drivers/xen/blktap/xenbus.c       Tue Sep 16 21:25:54 2008 +0900
@@ -182,6 +182,7 @@ static int blktap_remove(struct xenbus_d
                        kthread_stop(be->blkif->xenblkd);
                signal_tapdisk(be->blkif->dev_num);
                tap_blkif_free(be->blkif);
+               tap_blkif_kmem_cache_free(be->blkif);
                be->blkif = NULL;
        }
        kfree(be);
@@ -364,6 +365,7 @@ static void tap_frontend_changed(struct 
                        kthread_stop(be->blkif->xenblkd);
                        be->blkif->xenblkd = NULL;
                }
+               tap_blkif_free(be->blkif);
                xenbus_switch_state(dev, XenbusStateClosing);
                break;
 
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 drivers/xen/core/evtchn.c
--- a/drivers/xen/core/evtchn.c Fri Sep 12 11:28:00 2008 +0900
+++ b/drivers/xen/core/evtchn.c Tue Sep 16 21:25:54 2008 +0900
@@ -66,13 +66,27 @@ enum {
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
-       IRQT_CALLER_PORT
+       IRQT_CALLER_PORT,
+       _IRQT_COUNT
 };
+
+#define _IRQT_BITS 4
+#define _EVTCHN_BITS 12
+#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)
 
 /* Constructor for packed IRQ information. */
 static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
 {
-       return ((type << 24) | (index << 16) | evtchn);
+       BUILD_BUG_ON(_IRQT_COUNT > (1U << _IRQT_BITS));
+
+       BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
+       BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
+       BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
+       BUG_ON(index >> _INDEX_BITS);
+
+       BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));
+
+       return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
 }
 
 /* Convenient shorthand for packed representation of an unbound IRQ. */
@@ -84,17 +98,17 @@ static inline u32 mk_irq_info(u32 type, 
 
 static inline unsigned int evtchn_from_irq(int irq)
 {
-       return (u16)(irq_info[irq]);
+       return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
 }
 
 static inline unsigned int index_from_irq(int irq)
 {
-       return (u8)(irq_info[irq] >> 16);
+       return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
 }
 
 static inline unsigned int type_from_irq(int irq)
 {
-       return (u8)(irq_info[irq] >> 24);
+       return irq_info[irq] >> (32 - _IRQT_BITS);
 }
 
 /* IRQ <-> VIRQ mapping. */
@@ -305,13 +319,11 @@ static int find_unbound_irq(void)
 static int find_unbound_irq(void)
 {
        static int warned;
-       int dynirq, irq;
-
-       for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
-               irq = dynirq_to_irq(dynirq);
+       int irq;
+
+       for (irq = DYNIRQ_BASE; irq < (DYNIRQ_BASE + NR_DYNIRQS); irq++)
                if (irq_bindcount[irq] == 0)
                        return irq;
-       }
 
        if (!warned) {
                warned = 1;
@@ -742,22 +754,78 @@ static struct hw_interrupt_type dynirq_t
        .retrigger = resend_irq_on_evtchn,
 };
 
-static inline void pirq_unmask_notify(int pirq)
-{
-       struct physdev_eoi eoi = { .irq = pirq };
-       if (unlikely(test_bit(pirq, pirq_needs_eoi)))
+void evtchn_register_pirq(int irq)
+{
+       irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0);
+}
+
+#if defined(CONFIG_X86_IO_APIC)
+#define identity_mapped_irq(irq) (!IO_APIC_IRQ((irq) - PIRQ_BASE))
+#elif defined(CONFIG_X86)
+#define identity_mapped_irq(irq) (((irq) - PIRQ_BASE) < 16)
+#else
+#define identity_mapped_irq(irq) (1)
+#endif
+
+int evtchn_map_pirq(int irq, int xen_pirq)
+{
+       if (irq < 0) {
+               static DEFINE_SPINLOCK(irq_alloc_lock);
+
+               irq = PIRQ_BASE + NR_PIRQS - 1;
+               spin_lock(&irq_alloc_lock);
+               do {
+                       if (identity_mapped_irq(irq))
+                               continue;
+                       if (!index_from_irq(irq)) {
+                               BUG_ON(type_from_irq(irq) != IRQT_UNBOUND);
+                               irq_info[irq] = mk_irq_info(IRQT_PIRQ,
+                                                           xen_pirq, 0);
+                               break;
+                       }
+               } while (--irq >= PIRQ_BASE);
+               spin_unlock(&irq_alloc_lock);
+               if (irq < PIRQ_BASE)
+                       return -ENOSPC;
+       } else if (!xen_pirq) {
+               if (unlikely(type_from_irq(irq) != IRQT_PIRQ))
+                       return -EINVAL;
+               irq_info[irq] = IRQ_UNBOUND;
+               return 0;
+       } else if (type_from_irq(irq) != IRQT_PIRQ
+                  || index_from_irq(irq) != xen_pirq) {
+               printk(KERN_ERR "IRQ#%d is already mapped to %d:%u - "
+                               "cannot map to PIRQ#%u\n",
+                      irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
+               return -EINVAL;
+       }
+       return index_from_irq(irq) ? irq : -EINVAL;
+}
+
+int evtchn_get_xen_pirq(int irq)
+{
+       if (identity_mapped_irq(irq))
+               return irq;
+       BUG_ON(type_from_irq(irq) != IRQT_PIRQ);
+       return index_from_irq(irq);
+}
+
+static inline void pirq_unmask_notify(int irq)
+{
+       struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
+       if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
                VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
 }
 
-static inline void pirq_query_unmask(int pirq)
+static inline void pirq_query_unmask(int irq)
 {
        struct physdev_irq_status_query irq_status;
-       irq_status.irq = pirq;
+       irq_status.irq = evtchn_get_xen_pirq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;
-       clear_bit(pirq, pirq_needs_eoi);
+       clear_bit(irq - PIRQ_BASE, pirq_needs_eoi);
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
-               set_bit(pirq, pirq_needs_eoi);
+               set_bit(irq - PIRQ_BASE, pirq_needs_eoi);
 }
 
 /*
@@ -774,7 +842,7 @@ static unsigned int startup_pirq(unsigne
        if (VALID_EVTCHN(evtchn))
                goto out;
 
-       bind_pirq.pirq  = irq;
+       bind_pirq.pirq = evtchn_get_xen_pirq(irq);
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
@@ -785,15 +853,15 @@ static unsigned int startup_pirq(unsigne
        }
        evtchn = bind_pirq.port;
 
-       pirq_query_unmask(irq_to_pirq(irq));
+       pirq_query_unmask(irq);
 
        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
-       irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
+       irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
 
  out:
        unmask_evtchn(evtchn);
-       pirq_unmask_notify(irq_to_pirq(irq));
+       pirq_unmask_notify(irq);
 
        return 0;
 }
@@ -814,7 +882,7 @@ static void shutdown_pirq(unsigned int i
 
        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
-       irq_info[irq] = IRQ_UNBOUND;
+       irq_info[irq] = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0);
 }
 
 static void enable_pirq(unsigned int irq)
@@ -847,7 +915,7 @@ static void end_pirq(unsigned int irq)
                shutdown_pirq(irq);
        } else if (VALID_EVTCHN(evtchn)) {
                unmask_evtchn(evtchn);
-               pirq_unmask_notify(irq_to_pirq(irq));
+               pirq_unmask_notify(irq);
        }
 }
 
@@ -994,7 +1062,7 @@ static void restore_cpu_ipis(unsigned in
 
 void irq_resume(void)
 {
-       unsigned int cpu, pirq, irq, evtchn;
+       unsigned int cpu, irq, evtchn;
 
        init_evtchn_cpu_bindings();
 
@@ -1003,12 +1071,12 @@ void irq_resume(void)
                mask_evtchn(evtchn);
 
        /* Check that no PIRQs are still bound. */
-       for (pirq = 0; pirq < NR_PIRQS; pirq++)
-               BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
+       for (irq = PIRQ_BASE; irq < (PIRQ_BASE + NR_PIRQS); irq++)
+               BUG_ON(irq_info[irq] != IRQ_UNBOUND);
 
        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++)
-               irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
+               irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;
 
@@ -1034,28 +1102,29 @@ void __init xen_init_IRQ(void)
                irq_info[i] = IRQ_UNBOUND;
 
        /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-       for (i = 0; i < NR_DYNIRQS; i++) {
-               irq_bindcount[dynirq_to_irq(i)] = 0;
-
-               irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
-               irq_desc[dynirq_to_irq(i)].action = NULL;
-               irq_desc[dynirq_to_irq(i)].depth = 1;
-               irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
+       for (i = DYNIRQ_BASE; i < (DYNIRQ_BASE + NR_DYNIRQS); i++) {
+               irq_bindcount[i] = 0;
+
+               irq_desc[i].status = IRQ_DISABLED;
+               irq_desc[i].action = NULL;
+               irq_desc[i].depth = 1;
+               irq_desc[i].chip = &dynirq_type;
        }
 
        /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
-       for (i = 0; i < NR_PIRQS; i++) {
-               irq_bindcount[pirq_to_irq(i)] = 1;
+       for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_PIRQS); i++) {
+               irq_bindcount[i] = 1;
 
 #ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
-               if ((i == RTC_IRQ) && !is_initial_xendomain())
+               if (identity_mapped_irq(i) && ((i - PIRQ_BASE) == RTC_IRQ)
+                   && !is_initial_xendomain())
                        continue;
 #endif
 
-               irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
-               irq_desc[pirq_to_irq(i)].action = NULL;
-               irq_desc[pirq_to_irq(i)].depth = 1;
-               irq_desc[pirq_to_irq(i)].chip = &pirq_type;
-       }
-}
+               irq_desc[i].status = IRQ_DISABLED;
+               irq_desc[i].action = NULL;
+               irq_desc[i].depth = 1;
+               irq_desc[i].chip = &pirq_type;
+       }
+}
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 
drivers/xen/pciback/conf_space_capability_msi.c
--- a/drivers/xen/pciback/conf_space_capability_msi.c   Fri Sep 12 11:28:00 
2008 +0900
+++ b/drivers/xen/pciback/conf_space_capability_msi.c   Tue Sep 16 21:25:54 
2008 +0900
@@ -2,6 +2,7 @@
  * PCI Backend -- Configuration overlay for MSI capability
  */
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include "conf_space.h"
 #include "conf_space_capability.h"
 #include <xen/interface/io/pciif.h>
@@ -37,22 +38,31 @@ int pciback_enable_msix(struct pciback_d
 int pciback_enable_msix(struct pciback_device *pdev,
                struct pci_dev *dev, struct xen_pci_op *op)
 {
-       int result;
+       int i, result;
+       struct msix_entry *entries;
 
        if (op->value > SH_INFO_MAX_VEC)
                return -EINVAL;
-       else {
-               struct msix_entry entries[op->value];
-               int i;
 
-               for (i = 0; i < op->value; i++) {
-                       entries[i].entry = op->msix_entries[i].entry;
-                       entries[i].vector = op->msix_entries[i].vector;
-               }
+       entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
+       if (entries == NULL)
+               return -ENOMEM;
 
-               result = pci_enable_msix(dev, entries, op->value);
-               op->value = result;
+       for (i = 0; i < op->value; i++) {
+               entries[i].entry = op->msix_entries[i].entry;
+               entries[i].vector = op->msix_entries[i].vector;
        }
+
+       result = pci_enable_msix(dev, entries, op->value);
+
+       for (i = 0; i < op->value; i++) {
+               op->msix_entries[i].entry = entries[i].entry;
+               op->msix_entries[i].vector = entries[i].vector;
+       }
+
+       kfree(entries);
+
+       op->value = result;
 
        return result;
 }
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 drivers/xen/xenoprof/xenoprofile.c
--- a/drivers/xen/xenoprof/xenoprofile.c        Fri Sep 12 11:28:00 2008 +0900
+++ b/drivers/xen/xenoprof/xenoprofile.c        Tue Sep 16 21:25:54 2008 +0900
@@ -35,14 +35,14 @@
 #define MAX_XENOPROF_SAMPLES 16
 
 /* sample buffers shared with Xen */
-xenoprof_buf_t * xenoprof_buf[MAX_VIRT_CPUS];
+static xenoprof_buf_t *xenoprof_buf[MAX_VIRT_CPUS];
 /* Shared buffer area */
-struct xenoprof_shared_buffer shared_buffer;
+static struct xenoprof_shared_buffer shared_buffer;
 
 /* Passive sample buffers shared with Xen */
-xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
+static xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
 /* Passive shared buffer area */
-struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
+static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
 
 static int xenoprof_start(void);
 static void xenoprof_stop(void);
@@ -54,11 +54,11 @@ extern unsigned long backtrace_depth;
 extern unsigned long backtrace_depth;
 
 /* Number of buffers in shared area (one per VCPU) */
-int nbuf;
+static int nbuf;
 /* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
-int ovf_irq[NR_CPUS];
+static int ovf_irq[NR_CPUS];
 /* cpu model type string - copied from Xen on XENOPROF_init command */
-char cpu_type[XENOPROF_CPU_TYPE_SIZE];
+static char cpu_type[XENOPROF_CPU_TYPE_SIZE];
 
 #ifdef CONFIG_PM
 
@@ -111,11 +111,11 @@ static void exit_driverfs(void)
 #define exit_driverfs() do { } while (0)
 #endif /* CONFIG_PM */
 
-unsigned long long oprofile_samples = 0;
-unsigned long long p_oprofile_samples = 0;
-
-unsigned int pdomains;
-struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
+static unsigned long long oprofile_samples;
+static unsigned long long p_oprofile_samples;
+
+static unsigned int pdomains;
+static struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
 
 /* Check whether the given entry is an escape code */
 static int xenoprof_is_escape(xenoprof_buf_t * buf, int tail)
@@ -483,8 +483,7 @@ static void xenoprof_dummy_backtrace(str
 }
 
 
-
-struct oprofile_operations xenoprof_ops = {
+static struct oprofile_operations xenoprof_ops = {
 #ifdef HAVE_XENOPROF_CREATE_FILES
        .create_files   = xenoprof_create_files,
 #endif
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 include/acpi/processor.h
--- a/include/acpi/processor.h  Fri Sep 12 11:28:00 2008 +0900
+++ b/include/acpi/processor.h  Tue Sep 16 21:25:54 2008 +0900
@@ -18,8 +18,11 @@
 
 #define ACPI_PDC_REVISION_ID           0x1
 
-#define ACPI_PSD_REV0_REVISION         0 /* Support for _PSD as in ACPI 3.0 */
+#define ACPI_PSD_REV0_REVISION         0       /* Support for _PSD as in ACPI 
3.0 */
 #define ACPI_PSD_REV0_ENTRIES          5
+
+#define ACPI_TSD_REV0_REVISION         0       /* Support for _TSD as in ACPI 
3.0 */
+#define ACPI_TSD_REV0_ENTRIES          5
 
 #ifdef CONFIG_XEN
 #define NR_ACPI_CPUS                   (NR_CPUS < 256 ? 256 : NR_CPUS)
@@ -142,24 +145,62 @@ struct acpi_processor_performance {
 
 /* Throttling Control */
 
+struct acpi_tsd_package {
+       acpi_integer num_entries;
+       acpi_integer revision;
+       acpi_integer domain;
+       acpi_integer coord_type;
+       acpi_integer num_processors;
+} __attribute__ ((packed));
+
+struct acpi_ptc_register {
+       u8 descriptor;
+       u16 length;
+       u8 space_id;
+       u8 bit_width;
+       u8 bit_offset;
+       u8 reserved;
+       u64 address;
+} __attribute__ ((packed));
+
+struct acpi_processor_tx_tss {
+       acpi_integer freqpercentage;    /* */
+       acpi_integer power;     /* milliWatts */
+       acpi_integer transition_latency;        /* microseconds */
+       acpi_integer control;   /* control value */
+       acpi_integer status;    /* success indicator */
+};
 struct acpi_processor_tx {
        u16 power;
        u16 performance;
 };
 
+struct acpi_processor;
 struct acpi_processor_throttling {
-       int state;
+       unsigned int state;
+       unsigned int platform_limit;
+       struct acpi_pct_register control_register;
+       struct acpi_pct_register status_register;
+       unsigned int state_count;
+       struct acpi_processor_tx_tss *states_tss;
+       struct acpi_tsd_package domain_info;
+       cpumask_t shared_cpu_map;
+       int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
+       int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
+                                             int state);
+
        u32 address;
        u8 duty_offset;
        u8 duty_width;
-       int state_count;
+       u8 tsd_valid_flag;
+       unsigned int shared_type;
        struct acpi_processor_tx states[ACPI_PROCESSOR_MAX_THROTTLING];
 };
 
 /* Limit Interface */
 
 struct acpi_processor_lx {
-       int px;                 /* performace state */
+       int px;                 /* performance state */
        int tx;                 /* throttle level */
 };
 
@@ -186,6 +227,9 @@ struct acpi_processor {
        u32 id;
        u32 pblk;
        int performance_platform_limit;
+       int throttling_platform_limit;
+       /* 0 - states 0..n-th state available */
+
        struct acpi_processor_flags flags;
        struct acpi_processor_power power;
        struct acpi_processor_performance *performance;
@@ -273,10 +317,11 @@ static inline int acpi_processor_ppc_has
 #endif                         /* CONFIG_CPU_FREQ */
 
 /* in processor_throttling.c */
+int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
 extern struct file_operations acpi_processor_throttling_fops;
-
+extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
 int acpi_processor_power_init(struct acpi_processor *pr,
                              struct acpi_device *device);
@@ -326,7 +371,7 @@ struct processor_extcntl_ops {
        /* Transfer processor PM events to external control logic */
        int (*pm_ops[PM_TYPE_MAX])(struct acpi_processor *pr, int event);
        /* Notify physical processor status to external control logic */
-       int (*hotplug)(struct acpi_processor *pr, int event);
+       int (*hotplug)(struct acpi_processor *pr, int type);
 };
 extern const struct processor_extcntl_ops *processor_extcntl_ops;
 
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 include/asm-i386/mach-xen/irq_vectors.h
--- a/include/asm-i386/mach-xen/irq_vectors.h   Fri Sep 12 11:28:00 2008 +0900
+++ b/include/asm-i386/mach-xen/irq_vectors.h   Tue Sep 16 21:25:54 2008 +0900
@@ -108,7 +108,13 @@
  */
 
 #define PIRQ_BASE              0
-#define NR_PIRQS               256
+#if !defined(MAX_IO_APICS)
+# define NR_PIRQS              (NR_VECTORS + 32 * NR_CPUS)
+#elif NR_CPUS < MAX_IO_APICS
+# define NR_PIRQS              (NR_VECTORS + 32 * NR_CPUS)
+#else
+# define NR_PIRQS              (NR_VECTORS + 32 * MAX_IO_APICS)
+#endif
 
 #define DYNIRQ_BASE            (PIRQ_BASE + NR_PIRQS)
 #define NR_DYNIRQS             256
@@ -116,10 +122,4 @@
 #define NR_IRQS                        (NR_PIRQS + NR_DYNIRQS)
 #define NR_IRQ_VECTORS         NR_IRQS
 
-#define pirq_to_irq(_x)                ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x)                ((_x) - PIRQ_BASE)
-
-#define dynirq_to_irq(_x)      ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x)      ((_x) - DYNIRQ_BASE)
-
 #endif /* _ASM_IRQ_VECTORS_H */
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 include/asm-ia64/irq.h
--- a/include/asm-ia64/irq.h    Fri Sep 12 11:28:00 2008 +0900
+++ b/include/asm-ia64/irq.h    Tue Sep 16 21:25:54 2008 +0900
@@ -34,12 +34,6 @@
 #define NR_IRQS                        (NR_PIRQS + NR_DYNIRQS)
 #define NR_IRQ_VECTORS         NR_IRQS
 
-#define pirq_to_irq(_x)                ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x)                ((_x) - PIRQ_BASE)
-
-#define dynirq_to_irq(_x)      ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x)      ((_x) - DYNIRQ_BASE)
-
 #define RESCHEDULE_VECTOR      0
 #define IPI_VECTOR             1
 #define CMCP_VECTOR            2
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 include/asm-powerpc/xen/asm/hypervisor.h
--- a/include/asm-powerpc/xen/asm/hypervisor.h  Fri Sep 12 11:28:00 2008 +0900
+++ b/include/asm-powerpc/xen/asm/hypervisor.h  Tue Sep 16 21:25:54 2008 +0900
@@ -154,13 +154,6 @@ int direct_remap_pfn_range(struct vm_are
 
 #define NR_IRQ_VECTORS         NR_IRQS
 
-#define pirq_to_irq(_x)                ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x)                ((_x) - PIRQ_BASE)
-
-#define dynirq_to_irq(_x)      ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x)      ((_x) - DYNIRQ_BASE)
-
-
 /* END:  all of these need a new home */
 
 #if defined(CONFIG_X86_64)
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 include/asm-x86_64/mach-xen/irq_vectors.h
--- a/include/asm-x86_64/mach-xen/irq_vectors.h Fri Sep 12 11:28:00 2008 +0900
+++ b/include/asm-x86_64/mach-xen/irq_vectors.h Tue Sep 16 21:25:54 2008 +0900
@@ -114,10 +114,4 @@
 #define NR_IRQS                        (NR_PIRQS + NR_DYNIRQS)
 #define NR_IRQ_VECTORS         NR_IRQS
 
-#define pirq_to_irq(_x)                ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x)                ((_x) - PIRQ_BASE)
-
-#define dynirq_to_irq(_x)      ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x)      ((_x) - DYNIRQ_BASE)
-
 #endif /* _ASM_IRQ_VECTORS_H */
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 include/xen/evtchn.h
--- a/include/xen/evtchn.h      Fri Sep 12 11:28:00 2008 +0900
+++ b/include/xen/evtchn.h      Tue Sep 16 21:25:54 2008 +0900
@@ -101,6 +101,13 @@ asmlinkage void evtchn_do_upcall(struct 
 /* Entry point for notifications into the userland character device. */
 void evtchn_device_upcall(int port);
 
+/* Mark a PIRQ as unavailable for dynamic allocation. */
+void evtchn_register_pirq(int irq);
+/* Map a Xen-supplied PIRQ to a dynamically allocated one. */
+int evtchn_map_pirq(int irq, int xen_pirq);
+/* Look up a Xen-supplied PIRQ for a dynamically allocated one. */
+int evtchn_get_xen_pirq(int irq);
+
 void mask_evtchn(int port);
 void disable_all_local_evtchn(void);
 void unmask_evtchn(int port);
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 include/xen/interface/memory.h
--- a/include/xen/interface/memory.h    Fri Sep 12 11:28:00 2008 +0900
+++ b/include/xen/interface/memory.h    Tue Sep 16 21:25:54 2008 +0900
@@ -204,6 +204,7 @@ struct xen_add_to_physmap {
     /* Source mapping space. */
 #define XENMAPSPACE_shared_info 0 /* shared info page */
 #define XENMAPSPACE_grant_table 1 /* grant table page */
+#define XENMAPSPACE_mfn         2 /* usual MFN */
     unsigned int space;
 
     /* Index into source mapping space. */
@@ -214,6 +215,22 @@ struct xen_add_to_physmap {
 };
 typedef struct xen_add_to_physmap xen_add_to_physmap_t;
 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
+
+/*
+ * Unmaps the page appearing at a particular GPFN from the specified guest's
+ * pseudophysical address space.
+ * arg == addr of xen_remove_from_physmap_t.
+ */
+#define XENMEM_remove_from_physmap      15
+struct xen_remove_from_physmap {
+    /* Which domain to change the mapping for. */
+    domid_t domid;
+
+    /* GPFN of the current mapping of the page. */
+    xen_pfn_t     gpfn;
+};
+typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
+DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
 
 /*
  * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
diff -r 6fcc6c1e87f6 -r 7d032c5bb346 include/xen/interface/platform.h
--- a/include/xen/interface/platform.h  Fri Sep 12 11:28:00 2008 +0900
+++ b/include/xen/interface/platform.h  Tue Sep 16 21:25:54 2008 +0900
@@ -97,7 +97,7 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_read_memty
 #define XENPF_microcode_update    35
 struct xenpf_microcode_update {
     /* IN variables. */
-    XEN_GUEST_HANDLE(void) data;      /* Pointer to microcode data */
+    XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */
     uint32_t length;                  /* Length of microcode data. */
 };
 typedef struct xenpf_microcode_update xenpf_microcode_update_t;
@@ -289,7 +289,7 @@ struct xen_psd_package {
 
 struct xen_processor_performance {
     uint32_t flags;     /* flag for Px sub info type */
-    uint32_t ppc;       /* Platform limitation on freq usage */
+    uint32_t platform_limit;  /* Platform limitation on freq usage */
     struct xen_pct_register control_register;
     struct xen_pct_register status_register;
     uint32_t state_count;     /* total available performance states */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [linux-2.6.18-xen] merge with linux-2.6.18-xen.hg, Xen patchbot-linux-2.6.18-xen <=