[Xen-changelog] [xen-unstable] x86: Implement cpu hotplug notifiers. Use them.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: Implement cpu hotplug notifiers. Use them.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 19 May 2010 05:15:27 -0700
Delivery-date: Wed, 19 May 2010 05:16:01 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1273846968 -3600
# Node ID df955a89b53c0bb4614476eb655538393e48c2a0
# Parent  578375084a9ee1063f4bc5a8e64e59677804cdb9
x86: Implement cpu hotplug notifiers. Use them.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/ia64/xen/xen.lds.S         |    2 
 xen/arch/ia64/xen/xensetup.c        |   17 +----
 xen/arch/x86/cpu/mcheck/mce_intel.c |   34 ++++++++++-
 xen/arch/x86/domain.c               |    1 
 xen/arch/x86/hvm/hvm.c              |   25 ++++++++
 xen/arch/x86/platform_hypercall.c   |    1 
 xen/arch/x86/setup.c                |   21 ------
 xen/arch/x86/smpboot.c              |  109 ++++++++++++++++++++----------------
 xen/arch/x86/xen.lds.S              |    2 
 xen/common/cpu.c                    |   34 +++++++++++
 xen/common/cpupool.c                |   82 +++++++++++++++++----------
 xen/common/gdbstub.c                |   13 ++--
 xen/common/kernel.c                 |   16 +++++
 xen/common/notifier.c               |    3 
 xen/common/rcupdate.c               |   33 ++++++++--
 xen/common/tasklet.c                |   41 ++++++++++---
 xen/drivers/cpufreq/cpufreq.c       |   36 +++++++++++
 xen/include/asm-x86/hvm/hvm.h       |    6 -
 xen/include/asm-x86/processor.h     |    2 
 xen/include/asm-x86/smp.h           |    3 
 xen/include/xen/cpu.h               |   28 +++++++++
 xen/include/xen/gdbstub.h           |    6 -
 xen/include/xen/init.h              |   13 ++--
 xen/include/xen/notifier.h          |   31 ++--------
 xen/include/xen/rcupdate.h          |    1 
 xen/include/xen/sched.h             |    3 
 xen/include/xen/tasklet.h           |    1 
 27 files changed, 383 insertions(+), 181 deletions(-)
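
Not part of the changeset itself: below is a minimal sketch of how a subsystem hooks into the cpu hotplug notifier interface introduced here, modelled on the mce_intel.c, cpupool.c and cpufreq.c hunks that follow. The example_* names are hypothetical; everything else (register_cpu_notifier, presmp_initcall, the CPU_* actions, notifier_from_errno) comes from this patch.

    #include <xen/cpu.h>   /* register_cpu_notifier(), CPU_* actions, notifier types */
    #include <xen/init.h>  /* presmp_initcall() */

    /* Hypothetical per-CPU setup/teardown helpers, for illustration only. */
    static int example_prepare_cpu(unsigned int cpu) { return 0; }
    static void example_cleanup_cpu(unsigned int cpu) { }

    static int example_cpu_callback(
        struct notifier_block *nfb, unsigned long action, void *hcpu)
    {
        unsigned int cpu = (unsigned long)hcpu;
        int rc = 0;

        switch ( action )
        {
        case CPU_UP_PREPARE:
            /* Allocate/initialise state for the incoming CPU; may fail. */
            rc = example_prepare_cpu(cpu);
            break;
        case CPU_DEAD:
            /* Clean up or migrate state once the CPU is fully offline. */
            example_cleanup_cpu(cpu);
            break;
        default:
            break;
        }

        /* Only CPU_{UP,DOWN}_PREPARE may veto; other actions return NOTIFY_DONE. */
        return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
    }

    static struct notifier_block example_cpu_nfb = {
        .notifier_call = example_cpu_callback
    };

    static int __init example_presmp_init(void)
    {
        register_cpu_notifier(&example_cpu_nfb);
        return 0;
    }
    presmp_initcall(example_presmp_init);

As in the real hunks, registration happens from a presmp initcall, so the notifier is already in place when __start_xen() runs do_presmp_initcalls() and then brings up the secondary CPUs via cpu_up().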

diff -r 578375084a9e -r df955a89b53c xen/arch/ia64/xen/xen.lds.S
--- a/xen/arch/ia64/xen/xen.lds.S       Fri May 14 11:39:15 2010 +0100
+++ b/xen/arch/ia64/xen/xen.lds.S       Fri May 14 15:22:48 2010 +0100
@@ -139,6 +139,8 @@ SECTIONS
   .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
        {
          __initcall_start = .;
+         *(.initcallpresmp.init)
+         __presmp_initcall_end = .;
          *(.initcall1.init)
          *(.initcall2.init)
          *(.initcall3.init)
diff -r 578375084a9e -r df955a89b53c xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Fri May 14 11:39:15 2010 +0100
+++ b/xen/arch/ia64/xen/xensetup.c      Fri May 14 15:22:48 2010 +0100
@@ -89,13 +89,6 @@ xen_count_pages(u64 start, u64 end, void
     return 0;
 }
 
-static void __init do_initcalls(void)
-{
-    initcall_t *call;
-    for ( call = &__initcall_start; call < &__initcall_end; call++ )
-        (*call)();
-}
-
 /*
  * IPF loader only supports one command line currently, for
  * both xen and guest kernel. This function provides pre-parse
@@ -612,15 +605,15 @@ skip_move:
     /*  Enable IRQ to receive IPI (needed for ITC sync).  */
     local_irq_enable();
 
+    do_presmp_initcalls();
+
 printk("num_online_cpus=%d, max_cpus=%d\n",num_online_cpus(),max_cpus);
     for_each_present_cpu ( i )
     {
         if ( num_online_cpus() >= max_cpus )
             break;
-        if ( !cpu_online(i) ) {
-            rcu_online_cpu(i);
-            __cpu_up(i);
-       }
+        if ( !cpu_online(i) )
+            cpu_up(i);
     }
 
     local_irq_disable();
@@ -628,8 +621,6 @@ printk("num_online_cpus=%d, max_cpus=%d\
     printk("Brought up %ld CPUs\n", (long)num_online_cpus());
     smp_cpus_done(max_cpus);
 #endif
-
-    initialise_gdb(); /* could be moved earlier */
 
     iommu_setup();    /* setup iommu if available */
 
diff -r 578375084a9e -r df955a89b53c xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Fri May 14 11:39:15 2010 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Fri May 14 15:22:48 2010 +0100
@@ -6,6 +6,7 @@
 #include <xen/delay.h>
 #include <xen/smp.h>
 #include <xen/mm.h>
+#include <xen/cpu.h>
 #include <asm/processor.h> 
 #include <public/sysctl.h>
 #include <asm/system.h>
@@ -788,7 +789,7 @@ static void __cpu_mcheck_distribute_cmci
     cmci_discover();
 }
 
-void cpu_mcheck_distribute_cmci(void)
+static void cpu_mcheck_distribute_cmci(void)
 {
     if (cmci_support && !mce_disabled)
         on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0);
@@ -816,7 +817,7 @@ static void clear_cmci(void)
     }
 }
 
-void cpu_mcheck_disable(void)
+static void cpu_mcheck_disable(void)
 {
     clear_in_cr4(X86_CR4_MCE);
 
@@ -1007,4 +1008,31 @@ int intel_mce_rdmsr(uint32_t msr, uint64
     return ret;
 }
 
-
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    switch ( action )
+    {
+    case CPU_DYING:
+        cpu_mcheck_disable();
+        break;
+    case CPU_DEAD:
+        cpu_mcheck_distribute_cmci();
+        break;
+    default:
+        break;
+    }
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
+static int __init intel_mce_initcall(void)
+{
+    register_cpu_notifier(&cpu_nfb);
+    return 0;
+}
+presmp_initcall(intel_mce_initcall);
diff -r 578375084a9e -r df955a89b53c xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri May 14 11:39:15 2010 +0100
+++ b/xen/arch/x86/domain.c     Fri May 14 15:22:48 2010 +0100
@@ -32,6 +32,7 @@
 #include <xen/acpi.h>
 #include <xen/pci.h>
 #include <xen/paging.h>
+#include <xen/cpu.h>
 #include <public/sysctl.h>
 #include <asm/regs.h>
 #include <asm/mc146818rtc.h>
diff -r 578375084a9e -r df955a89b53c xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri May 14 11:39:15 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri May 14 15:22:48 2010 +0100
@@ -33,6 +33,7 @@
 #include <xen/guest_access.h>
 #include <xen/event.h>
 #include <xen/paging.h>
+#include <xen/cpu.h>
 #include <asm/shadow.h>
 #include <asm/hap.h>
 #include <asm/current.h>
@@ -70,6 +71,28 @@ unsigned long __attribute__ ((__section_
 unsigned long __attribute__ ((__section__ (".bss.page_aligned")))
     hvm_io_bitmap[3*PAGE_SIZE/BYTES_PER_LONG];
 
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+    int rc = 0;
+
+    switch ( action )
+    {
+    case CPU_UP_PREPARE:
+        rc = hvm_funcs.cpu_prepare(cpu);
+        break;
+    default:
+        break;
+    }
+
+    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
 void hvm_enable(struct hvm_function_table *fns)
 {
     extern int hvm_port80_allowed;
@@ -91,6 +114,8 @@ void hvm_enable(struct hvm_function_tabl
 
     if ( hvm_funcs.hap_supported )
         printk("HVM: Hardware Assisted Paging detected.\n");
+
+    register_cpu_notifier(&cpu_nfb);
 }
 
 /*
diff -r 578375084a9e -r df955a89b53c xen/arch/x86/platform_hypercall.c
--- a/xen/arch/x86/platform_hypercall.c Fri May 14 11:39:15 2010 +0100
+++ b/xen/arch/x86/platform_hypercall.c Fri May 14 15:22:48 2010 +0100
@@ -19,6 +19,7 @@
 #include <xen/iocap.h>
 #include <xen/guest_access.h>
 #include <xen/acpi.h>
+#include <xen/cpu.h>
 #include <asm/current.h>
 #include <public/platform.h>
 #include <acpi/cpufreq/processor_perf.h>
diff -r 578375084a9e -r df955a89b53c xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Fri May 14 11:39:15 2010 +0100
+++ b/xen/arch/x86/setup.c      Fri May 14 15:22:48 2010 +0100
@@ -154,13 +154,6 @@ static void __init parse_acpi_param(char
     {
         acpi_noirq_set();
     }
-}
-
-static void __init do_initcalls(void)
-{
-    initcall_t *call;
-    for ( call = &__initcall_start; call < &__initcall_end; call++ )
-        (*call)();
 }
 
 #define EARLY_FAIL(f, a...) do {                \
@@ -1070,15 +1063,14 @@ void __init __start_xen(unsigned long mb
 
     console_init_postirq();
 
+    do_presmp_initcalls();
+
     for_each_present_cpu ( i )
     {
         if ( num_online_cpus() >= max_cpus )
             break;
         if ( !cpu_online(i) )
-        {
-            rcu_online_cpu(i);
-            __cpu_up(i);
-        }
+            cpu_up(i);
 
         /* Set up cpu_to_node[]. */
         srat_detect_node(i);
@@ -1088,8 +1080,6 @@ void __init __start_xen(unsigned long mb
 
     printk("Brought up %ld CPUs\n", (long)num_online_cpus());
     smp_cpus_done(max_cpus);
-
-    initialise_gdb(); /* could be moved earlier */
 
     do_initcalls();
 
@@ -1098,11 +1088,6 @@ void __init __start_xen(unsigned long mb
     
     if ( !tboot_protect_mem_regions() )
         panic("Could not protect TXT memory regions\n");
-
-    /* Create initial cpupool 0. */
-    cpupool0 = cpupool_create(0, NULL);
-    if ( (cpupool0 == NULL) || cpupool0_cpu_assign(cpupool0) )
-        panic("Error creating cpupool 0\n");
 
     /* Create initial domain 0. */
     dom0 = domain_create(0, DOMCRF_s3_integrity, DOM0_SSIDREF);
diff -r 578375084a9e -r df955a89b53c xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Fri May 14 11:39:15 2010 +0100
+++ b/xen/arch/x86/smpboot.c    Fri May 14 15:22:48 2010 +0100
@@ -47,6 +47,7 @@
 #include <xen/serial.h>
 #include <xen/numa.h>
 #include <xen/event.h>
+#include <xen/cpu.h>
 #include <asm/current.h>
 #include <asm/mc146818rtc.h>
 #include <asm/desc.h>
@@ -106,7 +107,6 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 void *stack_base[NR_CPUS];
-DEFINE_SPINLOCK(cpu_add_remove_lock);
 
 /*
  * The bootstrap kernel entry code has set these up. Save them for
@@ -1272,17 +1272,6 @@ int __cpu_disable(void)
 {
        int cpu = smp_processor_id();
 
-       /*
-        * Perhaps use cpufreq to drop frequency, but that could go
-        * into generic code.
-        *
-        * We won't take down the boot processor on i386 due to some
-        * interrupts only being able to be serviced by the BSP.
-        * Especially so if we're not using an IOAPIC   -zwane
-        */
-       if (cpu == 0)
-               return -EBUSY;
-
        local_irq_disable();
        clear_local_APIC();
        /* Allow any queued timer interrupts to get serviced */
@@ -1292,8 +1281,6 @@ int __cpu_disable(void)
 
        time_suspend();
 
-       cpu_mcheck_disable();
-
        remove_siblinginfo(cpu);
 
        /* It's now safe to remove this processor from the online map */
@@ -1313,10 +1300,8 @@ void __cpu_die(unsigned int cpu)
 
        for (;;) {
                /* They ack this in play_dead by setting CPU_DEAD */
-               if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-                       printk ("CPU %u is now offline\n", cpu);
+               if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
-               }
                mdelay(100);
                mb();
                process_pending_softirqs();
@@ -1327,7 +1312,19 @@ void __cpu_die(unsigned int cpu)
 
 static int take_cpu_down(void *unused)
 {
-       return __cpu_disable();
+       void *hcpu = (void *)(long)smp_processor_id();
+       int rc;
+
+       spin_lock(&cpu_add_remove_lock);
+
+       if (cpu_notifier_call_chain(CPU_DYING, hcpu) != NOTIFY_DONE)
+               BUG();
+
+       rc = __cpu_disable();
+
+       spin_unlock(&cpu_add_remove_lock);
+
+       return rc;
 }
 
 /*
@@ -1339,7 +1336,8 @@ static cpumask_t cpu_offlining;
 
 int cpu_down(unsigned int cpu)
 {
-       int err = 0;
+       int err, notifier_rc, nr_calls;
+       void *hcpu = (void *)(long)cpu;
 
        spin_lock(&cpu_add_remove_lock);
 
@@ -1350,32 +1348,42 @@ int cpu_down(unsigned int cpu)
 
        cpu_set(cpu, cpu_offlining);
 
-       err = cpupool_cpu_remove(cpu);
-       if (err)
+       printk("Prepare to bring CPU%d down...\n", cpu);
+
+       notifier_rc = __cpu_notifier_call_chain(
+               CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
+       if (notifier_rc != NOTIFY_DONE) {
+               err = notifier_to_errno(notifier_rc);
+               nr_calls--;
+               notifier_rc = __cpu_notifier_call_chain(
+                       CPU_DOWN_FAILED, hcpu, nr_calls, NULL);
+               BUG_ON(notifier_rc != NOTIFY_DONE);
                goto out;
-
-       printk("Prepare to bring CPU%d down...\n", cpu);
-
-       cpufreq_del_cpu(cpu);
+       }
 
        spin_unlock(&cpu_add_remove_lock);
        err = stop_machine_run(take_cpu_down, NULL, cpu);
        spin_lock(&cpu_add_remove_lock);
 
        if (err < 0) {
-               cpupool_cpu_add(cpu);
+               notifier_rc = cpu_notifier_call_chain(CPU_DOWN_FAILED, hcpu);
+               BUG_ON(notifier_rc != NOTIFY_DONE);
                goto out;
        }
 
        __cpu_die(cpu);
        BUG_ON(cpu_online(cpu));
 
-       migrate_tasklets_from_cpu(cpu);
-       cpu_mcheck_distribute_cmci();
+       notifier_rc = cpu_notifier_call_chain(CPU_DEAD, hcpu);
+       BUG_ON(notifier_rc != NOTIFY_DONE);
 
 out:
-       if (!err)
+       if (!err) {
+               printk("CPU %u is now offline\n", cpu);
                send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
+       } else {
+               printk("Failed to take down CPU %u (error %d)\n", cpu, err);
+       }
        cpu_clear(cpu, cpu_offlining);
        spin_unlock(&cpu_add_remove_lock);
        return err;
@@ -1391,8 +1399,6 @@ int cpu_up(unsigned int cpu)
                err = -EINVAL;
                goto out;
        }
-
-       rcu_online_cpu(cpu);
 
        err = __cpu_up(cpu);
        if (err < 0)
@@ -1525,11 +1531,16 @@ int cpu_add(uint32_t apic_id, uint32_t a
 
 int __devinit __cpu_up(unsigned int cpu)
 {
-       int ret;
-
-       ret = hvm_cpu_prepare(cpu);
-       if (ret)
-               return ret;
+       int notifier_rc, ret = 0, nr_calls;
+       void *hcpu = (void *)(long)cpu;
+
+       notifier_rc = __cpu_notifier_call_chain(
+               CPU_UP_PREPARE, hcpu, -1, &nr_calls);
+       if (notifier_rc != NOTIFY_DONE) {
+               ret = notifier_to_errno(notifier_rc);
+               nr_calls--;
+               goto fail;
+       }
 
        /*
         * We do warm boot only on cpus that had booted earlier
@@ -1542,18 +1553,18 @@ int __devinit __cpu_up(unsigned int cpu)
                smpboot_restore_warm_reset_vector();
        }
 
-       if (ret)
-               return -EIO;
+       if (ret) {
+               ret = -EIO;
+               goto fail;
+       }
 
        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map)) {
                printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
-               local_irq_enable();
-               return -EIO;
-       }
-
-       local_irq_enable();
-       /*per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;*/
+               ret = -EIO;
+               goto fail;
+       }
+
        /* Unleash the CPU! */
        cpu_set(cpu, smp_commenced_mask);
        while (!cpu_isset(cpu, cpu_online_map)) {
@@ -1561,9 +1572,15 @@ int __devinit __cpu_up(unsigned int cpu)
                process_pending_softirqs();
        }
 
-       cpupool_cpu_add(cpu);
-       cpufreq_add_cpu(cpu);
+       notifier_rc = cpu_notifier_call_chain(CPU_ONLINE, hcpu);
+       BUG_ON(notifier_rc != NOTIFY_DONE);
        return 0;
+
+ fail:
+       notifier_rc = __cpu_notifier_call_chain(
+               CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+       BUG_ON(notifier_rc != NOTIFY_DONE);
+       return ret;
 }
 
 
diff -r 578375084a9e -r df955a89b53c xen/arch/x86/xen.lds.S
--- a/xen/arch/x86/xen.lds.S    Fri May 14 11:39:15 2010 +0100
+++ b/xen/arch/x86/xen.lds.S    Fri May 14 15:22:48 2010 +0100
@@ -89,6 +89,8 @@ SECTIONS
   } :text
   .initcall.init : {
        __initcall_start = .;
+       *(.initcallpresmp.init)
+       __presmp_initcall_end = .;
        *(.initcall1.init)
        __initcall_end = .;
   } :text
diff -r 578375084a9e -r df955a89b53c xen/common/cpu.c
--- a/xen/common/cpu.c  Fri May 14 11:39:15 2010 +0100
+++ b/xen/common/cpu.c  Fri May 14 15:22:48 2010 +0100
@@ -1,5 +1,6 @@
 #include <xen/config.h>
 #include <xen/cpumask.h>
+#include <xen/cpu.h>
 
 /*
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
@@ -24,3 +25,36 @@ const unsigned long cpu_bit_bitmap[BITS_
     MASK_DECLARE_8(48), MASK_DECLARE_8(56),
 #endif
 };
+
+DEFINE_SPINLOCK(cpu_add_remove_lock);
+
+static RAW_NOTIFIER_HEAD(cpu_chain);
+
+int register_cpu_notifier(struct notifier_block *nb)
+{
+    int ret;
+    spin_lock(&cpu_add_remove_lock);
+    ret = raw_notifier_chain_register(&cpu_chain, nb);
+    spin_unlock(&cpu_add_remove_lock);
+    return ret;
+}
+
+void unregister_cpu_notifier(struct notifier_block *nb)
+{
+    spin_lock(&cpu_add_remove_lock);
+    raw_notifier_chain_unregister(&cpu_chain, nb);
+    spin_unlock(&cpu_add_remove_lock);
+}
+
+int cpu_notifier_call_chain(unsigned long val, void *v)
+{
+    BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
+    return raw_notifier_call_chain(&cpu_chain, val, v);
+}
+
+int __cpu_notifier_call_chain(
+    unsigned long val, void *v, int nr_to_call, int *nr_calls)
+{
+    BUG_ON(!spin_is_locked(&cpu_add_remove_lock));
+    return __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call, nr_calls);
+}
diff -r 578375084a9e -r df955a89b53c xen/common/cpupool.c
--- a/xen/common/cpupool.c      Fri May 14 11:39:15 2010 +0100
+++ b/xen/common/cpupool.c      Fri May 14 15:22:48 2010 +0100
@@ -17,6 +17,7 @@
 #include <xen/percpu.h>
 #include <xen/sched.h>
 #include <xen/sched-if.h>
+#include <xen/cpu.h>
 
 #define for_each_cpupool(ptr)    \
     for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
@@ -178,10 +179,8 @@ static int cpupool_assign_cpu_locked(str
  */
 int cpupool_assign_ncpu(struct cpupool *c, int ncpu)
 {
-    int i;
-    int n;
-
-    n = 0;
+    int i, n = 0;
+
     spin_lock(&cpupool_lock);
     for_each_cpu_mask(i, cpupool_free_cpus)
     {
@@ -294,21 +293,6 @@ out:
 }
 
 /*
- * assign cpus to the default cpupool
- * default are all cpus, less cpus may be specified as boot parameter
- * possible failures:
- * - no cpu assigned
- */
-int __init cpupool0_cpu_assign(struct cpupool *c)
-{
-    if ( (cpupool0_max_cpus == 0) || (cpupool0_max_cpus > num_online_cpus()) )
-        cpupool0_max_cpus = num_online_cpus();
-    if ( !cpupool_assign_ncpu(cpupool0, cpupool0_max_cpus) )
-        return 1;
-    return 0;
-}
-
-/*
  * add a new domain to a cpupool
  * possible failures:
  * - pool does not exist
@@ -363,16 +347,14 @@ void cpupool_rm_domain(struct domain *d)
  * called to add a new cpu to pool admin
  * we add a hotplugged cpu to the cpupool0 to be able to add it to dom0
  */
-void cpupool_cpu_add(unsigned int cpu)
-{
-    if ( cpupool0 == NULL )
-        return;
+static void cpupool_cpu_add(unsigned int cpu)
+{
     spin_lock(&cpupool_lock);
     cpu_clear(cpu, cpupool_locked_cpus);
     cpu_set(cpu, cpupool_free_cpus);
-    cpupool_assign_cpu_locked(cpupool0, cpu);
-    spin_unlock(&cpupool_lock);
-    return;
+    if ( cpupool0 != NULL )
+        cpupool_assign_cpu_locked(cpupool0, cpu);
+    spin_unlock(&cpupool_lock);
 }
 
 /*
@@ -380,7 +362,7 @@ void cpupool_cpu_add(unsigned int cpu)
  * the cpu to be removed is locked to avoid removing it from dom0
  * returns failure if not in pool0
  */
-int cpupool_cpu_remove(unsigned int cpu)
+static int cpupool_cpu_remove(unsigned int cpu)
 {
     int ret = 0;
        
@@ -588,10 +570,52 @@ void dump_runq(unsigned char key)
     spin_unlock(&cpupool_lock);
 }
 
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+    int rc = 0;
+
+    switch ( action )
+    {
+    case CPU_DOWN_FAILED:
+    case CPU_ONLINE:
+        cpupool_cpu_add(cpu);
+        break;
+    case CPU_DOWN_PREPARE:
+        rc = cpupool_cpu_remove(cpu);
+        break;
+    default:
+        break;
+    }
+
+    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
+static int __init cpupool_presmp_init(void)
+{
+    void *cpu = (void *)(long)smp_processor_id();
+    cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+    register_cpu_notifier(&cpu_nfb);
+    return 0;
+}
+presmp_initcall(cpupool_presmp_init);
+
 static int __init cpupool_init(void)
 {
-    cpupool_free_cpus = cpu_online_map;
-    cpupool_list = NULL;
+    cpupool0 = cpupool_create(0, NULL);
+    BUG_ON(cpupool0 == NULL);
+
+    if ( (cpupool0_max_cpus == 0) || (cpupool0_max_cpus > num_online_cpus()) )
+        cpupool0_max_cpus = num_online_cpus();
+
+    if ( !cpupool_assign_ncpu(cpupool0, cpupool0_max_cpus) )
+        BUG();
+
     return 0;
 }
 __initcall(cpupool_init);
diff -r 578375084a9e -r df955a89b53c xen/common/gdbstub.c
--- a/xen/common/gdbstub.c      Fri May 14 11:39:15 2010 +0100
+++ b/xen/common/gdbstub.c      Fri May 14 15:22:48 2010 +0100
@@ -44,6 +44,7 @@
 #include <xen/console.h>
 #include <xen/errno.h>
 #include <xen/delay.h>
+#include <xen/init.h>
 #include <asm/byteorder.h>
 
 /* Printk isn't particularly safe just after we've trapped to the
@@ -639,23 +640,25 @@ __trap_to_gdb(struct cpu_user_regs *regs
     return rc;
 }
 
-void __init
-initialise_gdb(void)
+static int __init initialise_gdb(void)
 {
     if ( *opt_gdb == '\0' )
-        return;
+        return 0;
 
     gdb_ctx->serhnd = serial_parse_handle(opt_gdb);
     if ( gdb_ctx->serhnd == -1 )
     {
         printk("Bad gdb= option '%s'\n", opt_gdb);
-        return;
+        return 0;
     }
 
     serial_start_sync(gdb_ctx->serhnd);
 
     printk("GDB stub initialised.\n");
-}
+
+    return 0;
+}
+presmp_initcall(initialise_gdb);
 
 static void gdb_pause_this_cpu(void *unused)
 {
diff -r 578375084a9e -r df955a89b53c xen/common/kernel.c
--- a/xen/common/kernel.c       Fri May 14 11:39:15 2010 +0100
+++ b/xen/common/kernel.c       Fri May 14 15:22:48 2010 +0100
@@ -145,6 +145,22 @@ void add_taint(unsigned flag)
 void add_taint(unsigned flag)
 {
     tainted |= flag;
+}
+
+extern initcall_t __initcall_start, __presmp_initcall_end, __initcall_end;
+
+void __init do_presmp_initcalls(void)
+{
+    initcall_t *call;
+    for ( call = &__initcall_start; call < &__presmp_initcall_end; call++ )
+        (*call)();
+}
+
+void __init do_initcalls(void)
+{
+    initcall_t *call;
+    for ( call = &__presmp_initcall_end; call < &__initcall_end; call++ )
+        (*call)();
 }
 
 # define DO(fn) long do_##fn
diff -r 578375084a9e -r df955a89b53c xen/common/notifier.c
--- a/xen/common/notifier.c     Fri May 14 11:39:15 2010 +0100
+++ b/xen/common/notifier.c     Fri May 14 15:22:48 2010 +0100
@@ -64,6 +64,9 @@ static int notifier_call_chain(
 {
     int ret = NOTIFY_DONE;
     struct notifier_block *nb, *next_nb;
+
+    if ( nr_calls )
+        *nr_calls = 0;
 
     nb = rcu_dereference(*nl);
 
diff -r 578375084a9e -r df955a89b53c xen/common/rcupdate.c
--- a/xen/common/rcupdate.c     Fri May 14 11:39:15 2010 +0100
+++ b/xen/common/rcupdate.c     Fri May 14 15:22:48 2010 +0100
@@ -43,6 +43,7 @@
 #include <xen/bitops.h>
 #include <xen/percpu.h>
 #include <xen/softirq.h>
+#include <xen/cpu.h>
 
 /* Definition for rcupdate control block. */
 struct rcu_ctrlblk rcu_ctrlblk = {
@@ -334,15 +335,33 @@ static void rcu_init_percpu_data(int cpu
     rdp->blimit = blimit;
 }
 
-void __devinit rcu_online_cpu(int cpu)
-{
-    struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-
-    rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
-}
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+
+    switch ( action )
+    {
+    case CPU_UP_PREPARE: {
+        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+        break;
+    }
+    default:
+        break;
+    }
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
 
 void __init rcu_init(void)
 {
-    rcu_online_cpu(smp_processor_id());
+    void *cpu = (void *)(long)smp_processor_id();
+    cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+    register_cpu_notifier(&cpu_nfb);
     open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
diff -r 578375084a9e -r df955a89b53c xen/common/tasklet.c
--- a/xen/common/tasklet.c      Fri May 14 11:39:15 2010 +0100
+++ b/xen/common/tasklet.c      Fri May 14 15:22:48 2010 +0100
@@ -16,9 +16,10 @@
 #include <xen/sched.h>
 #include <xen/softirq.h>
 #include <xen/tasklet.h>
+#include <xen/cpu.h>
 
 /* Some subsystems call into us before we are initialised. We ignore them. */
-static bool_t tasklets_initialised;
+static cpumask_t tasklets_initialised;
 
 DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);
 
@@ -43,7 +44,7 @@ void tasklet_schedule_on_cpu(struct task
 
     spin_lock_irqsave(&tasklet_lock, flags);
 
-    if ( tasklets_initialised && !t->is_dead )
+    if ( cpu_isset(cpu, tasklets_initialised) && !t->is_dead )
     {
         t->scheduled_on = cpu;
         if ( !t->is_running )
@@ -135,7 +136,7 @@ void tasklet_kill(struct tasklet *t)
     spin_unlock_irqrestore(&tasklet_lock, flags);
 }
 
-void migrate_tasklets_from_cpu(unsigned int cpu)
+static void migrate_tasklets_from_cpu(unsigned int cpu)
 {
     struct list_head *list = &per_cpu(tasklet_list, cpu);
     unsigned long flags;
@@ -165,14 +166,36 @@ void tasklet_init(
     t->data = data;
 }
 
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+
+    switch ( action )
+    {
+    case CPU_UP_PREPARE:
+        if ( !cpu_test_and_set(cpu, tasklets_initialised) )
+            INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
+        break;
+    case CPU_DEAD:
+        migrate_tasklets_from_cpu(cpu);
+        break;
+    default:
+        break;
+    }
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
 void __init tasklet_subsys_init(void)
 {
-    unsigned int cpu;
-
-    for_each_possible_cpu ( cpu )
-        INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
-
-    tasklets_initialised = 1;
+    void *hcpu = (void *)(long)smp_processor_id();
+    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
+    register_cpu_notifier(&cpu_nfb);
 }
 
 /*
diff -r 578375084a9e -r df955a89b53c xen/drivers/cpufreq/cpufreq.c
--- a/xen/drivers/cpufreq/cpufreq.c     Fri May 14 11:39:15 2010 +0100
+++ b/xen/drivers/cpufreq/cpufreq.c     Fri May 14 15:22:48 2010 +0100
@@ -38,6 +38,7 @@
 #include <xen/xmalloc.h>
 #include <xen/guest_access.h>
 #include <xen/domain.h>
+#include <xen/cpu.h>
 #include <asm/bug.h>
 #include <asm/io.h>
 #include <asm/config.h>
@@ -582,3 +583,38 @@ void __init cpufreq_cmdline_parse(char *
         str = end;
     } while (str);
 }
+
+static int cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+
+    switch ( action )
+    {
+    case CPU_DOWN_FAILED:
+    case CPU_ONLINE:
+        (void)cpufreq_add_cpu(cpu);
+        break;
+    case CPU_DOWN_PREPARE:
+        (void)cpufreq_del_cpu(cpu);
+        break;
+    default:
+        break;
+    }
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback
+};
+
+static int __init cpufreq_presmp_init(void)
+{
+    void *cpu = (void *)(long)smp_processor_id();
+    cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
+    register_cpu_notifier(&cpu_nfb);
+    return 0;
+}
+presmp_initcall(cpufreq_presmp_init);
+
diff -r 578375084a9e -r df955a89b53c xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri May 14 11:39:15 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Fri May 14 15:22:48 2010 +0100
@@ -295,12 +295,6 @@ uint8_t hvm_combine_hw_exceptions(uint8_
 
 void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);
 
-static inline int
-hvm_cpu_prepare(unsigned int cpu)
-{
-    return (hvm_funcs.cpu_prepare ? hvm_funcs.cpu_prepare(cpu) : 0);
-}
-
 static inline int hvm_cpu_up(void)
 {
     return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 1);
diff -r 578375084a9e -r df955a89b53c xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Fri May 14 11:39:15 2010 +0100
+++ b/xen/include/asm-x86/processor.h   Fri May 14 15:22:48 2010 +0100
@@ -551,8 +551,6 @@ extern void mtrr_bp_init(void);
 
 void mcheck_init(struct cpuinfo_x86 *c);
 asmlinkage void do_machine_check(struct cpu_user_regs *regs);
-void cpu_mcheck_distribute_cmci(void);
-void cpu_mcheck_disable(void);
 
 int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
           uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
diff -r 578375084a9e -r df955a89b53c xen/include/asm-x86/smp.h
--- a/xen/include/asm-x86/smp.h Fri May 14 11:39:15 2010 +0100
+++ b/xen/include/asm-x86/smp.h Fri May 14 15:22:48 2010 +0100
@@ -53,10 +53,7 @@ extern u32 cpu_2_logical_apicid[];
 #define cpu_physical_id(cpu)   x86_cpu_to_apicid[cpu]
 
 /* State of each CPU. */
-#define CPU_ONLINE     0x0002  /* CPU is up */
-#define CPU_DEAD       0x0004  /* CPU is dead */
 DECLARE_PER_CPU(int, cpu_state);
-extern spinlock_t(cpu_add_remove_lock);
 
 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
 extern int cpu_down(unsigned int cpu);
diff -r 578375084a9e -r df955a89b53c xen/include/xen/cpu.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/xen/cpu.h     Fri May 14 15:22:48 2010 +0100
@@ -0,0 +1,28 @@
+#ifndef __XEN_CPU_H__
+#define __XEN_CPU_H__
+
+#include <xen/types.h>
+#include <xen/spinlock.h>
+#include <xen/notifier.h>
+
+extern spinlock_t cpu_add_remove_lock;
+
+int register_cpu_notifier(struct notifier_block *nb);
+void unregister_cpu_notifier(struct notifier_block *nb);
+int cpu_notifier_call_chain(unsigned long val, void *v);
+int __cpu_notifier_call_chain(
+    unsigned long val, void *v, int nr_to_call, int *nr_calls);
+
+/*
+ * Notification actions: note that only CPU_{UP,DOWN}_PREPARE may fail ---
+ * all other handlers *must* return NOTIFY_DONE.
+ */
+#define CPU_UP_PREPARE   0x0002 /* CPU is coming up */
+#define CPU_UP_CANCELED  0x0003 /* CPU is no longer coming up */
+#define CPU_ONLINE       0x0004 /* CPU is up */
+#define CPU_DOWN_PREPARE 0x0005 /* CPU is going down */
+#define CPU_DOWN_FAILED  0x0006 /* CPU is no longer going down */
+#define CPU_DYING        0x0007 /* CPU is nearly dead (in stop_machine ctxt) */
+#define CPU_DEAD         0x0008 /* CPU is dead */
+
+#endif /* __XEN_CPU_H__ */
diff -r 578375084a9e -r df955a89b53c xen/include/xen/gdbstub.h
--- a/xen/include/xen/gdbstub.h Fri May 14 11:39:15 2010 +0100
+++ b/xen/include/xen/gdbstub.h Fri May 14 15:22:48 2010 +0100
@@ -88,12 +88,6 @@ void gdb_arch_exit(struct cpu_user_regs 
 #define SIGALRM         14
 #define SIGTERM         15
 
-void initialise_gdb(void);
-
-#else
-
-#define initialise_gdb() ((void)0)
-
 #endif
 
 #endif /* __XEN_GDBSTUB_H__ */
diff -r 578375084a9e -r df955a89b53c xen/include/xen/init.h
--- a/xen/include/xen/init.h    Fri May 14 11:39:15 2010 +0100
+++ b/xen/include/xen/init.h    Fri May 14 15:22:48 2010 +0100
@@ -18,8 +18,8 @@
     __attribute_used__ __attribute__ ((__section__ (".exit.data")))
 #define __initsetup  \
     __attribute_used__ __attribute__ ((__section__ (".init.setup")))
-#define __init_call  \
-    __attribute_used__ __attribute__ ((__section__ (".initcall1.init")))
+#define __init_call(lvl)  \
+    __attribute_used__ __attribute__ ((__section__ (".initcall" lvl ".init")))
 #define __exit_call  \
     __attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
 
@@ -66,12 +66,15 @@ typedef int (*initcall_t)(void);
 typedef int (*initcall_t)(void);
 typedef void (*exitcall_t)(void);
 
-extern initcall_t __initcall_start, __initcall_end;
-
+#define presmp_initcall(fn) \
+    static initcall_t __initcall_##fn __init_call("presmp") = fn
 #define __initcall(fn) \
-    static initcall_t __initcall_##fn __init_call = fn
+    static initcall_t __initcall_##fn __init_call("1") = fn
 #define __exitcall(fn) \
     static exitcall_t __exitcall_##fn __exit_call = fn
+
+void do_presmp_initcalls(void);
+void do_initcalls(void);
 
 /*
  * Used for kernel command line parameter setup
diff -r 578375084a9e -r df955a89b53c xen/include/xen/notifier.h
--- a/xen/include/xen/notifier.h        Fri May 14 11:39:15 2010 +0100
+++ b/xen/include/xen/notifier.h        Fri May 14 15:22:48 2010 +0100
@@ -52,38 +52,21 @@ int __raw_notifier_call_chain(
     struct raw_notifier_head *nh, unsigned long val, void *v,
     int nr_to_call, int *nr_calls);
 
-#define NOTIFY_DONE  0x0000  /* Don't care */
-#define NOTIFY_OK  0x0001  /* Suits me */
-#define NOTIFY_STOP_MASK 0x8000  /* Don't call further */
-#define NOTIFY_BAD  (NOTIFY_STOP_MASK|0x0002)
-/* Bad/Veto action */
-/*
- * Clean way to return from the notifier and stop further calls.
- */
-#define NOTIFY_STOP  (NOTIFY_OK|NOTIFY_STOP_MASK)
+#define NOTIFY_DONE      0x0000
+#define NOTIFY_STOP_MASK 0x8000
+#define NOTIFY_STOP      (NOTIFY_STOP_MASK|NOTIFY_DONE)
+#define NOTIFY_BAD       (NOTIFY_STOP_MASK|EINVAL)
 
-/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */
+/* Encapsulate (negative) errno value. */
 static inline int notifier_from_errno(int err)
 {
-    return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
+    return NOTIFY_STOP_MASK | -err;
 }
 
 /* Restore (negative) errno value from notify return value. */
 static inline int notifier_to_errno(int ret)
 {
-    ret &= ~NOTIFY_STOP_MASK;
-    return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
+    return -(ret & ~NOTIFY_STOP_MASK);
 }
 
-#define CPU_ONLINE  0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE  0x0003 /* CPU (unsigned)v coming up */
-#define CPU_UP_CANCELED  0x0004 /* CPU (unsigned)v NOT coming up */
-#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
-#define CPU_DOWN_FAILED  0x0006 /* CPU (unsigned)v NOT going down */
-#define CPU_DEAD  0x0007 /* CPU (unsigned)v dead */
-#define CPU_DYING  0x0008 /* CPU (unsigned)v not running any task,
-                           * not handling interrupts, soon dead */
-#define CPU_POST_DEAD  0x0009 /* CPU (unsigned)v dead, cpu_hotplug
-                               * lock is dropped */
-
 #endif /* __XEN_NOTIFIER_H__ */
diff -r 578375084a9e -r df955a89b53c xen/include/xen/rcupdate.h
--- a/xen/include/xen/rcupdate.h        Fri May 14 11:39:15 2010 +0100
+++ b/xen/include/xen/rcupdate.h        Fri May 14 15:22:48 2010 +0100
@@ -190,7 +190,6 @@ typedef struct _rcu_read_lock rcu_read_l
 #define rcu_assign_pointer(p, v) ({ smp_wmb(); (p) = (v); })
 
 void rcu_init(void);
-void __devinit rcu_online_cpu(int cpu);
 void rcu_check_callbacks(int cpu);
 
 /* Exported interfaces */
diff -r 578375084a9e -r df955a89b53c xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Fri May 14 11:39:15 2010 +0100
+++ b/xen/include/xen/sched.h   Fri May 14 15:22:48 2010 +0100
@@ -622,10 +622,7 @@ extern enum cpufreq_controller {
 
 struct cpupool *cpupool_create(int poolid, char *sched);
 int cpupool_destroy(struct cpupool *c);
-int cpupool0_cpu_assign(struct cpupool *c);
 int cpupool_assign_ncpu(struct cpupool *c, int ncpu);
-void cpupool_cpu_add(unsigned int cpu);
-int cpupool_cpu_remove(unsigned int cpu);
 int cpupool_add_domain(struct domain *d, int poolid);
 void cpupool_rm_domain(struct domain *d);
 int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
diff -r 578375084a9e -r df955a89b53c xen/include/xen/tasklet.h
--- a/xen/include/xen/tasklet.h Fri May 14 11:39:15 2010 +0100
+++ b/xen/include/xen/tasklet.h Fri May 14 15:22:48 2010 +0100
@@ -35,7 +35,6 @@ void tasklet_schedule(struct tasklet *t)
 void tasklet_schedule(struct tasklet *t);
 void do_tasklet(void);
 void tasklet_kill(struct tasklet *t);
-void migrate_tasklets_from_cpu(unsigned int cpu);
 void tasklet_init(
     struct tasklet *t, void (*func)(unsigned long), unsigned long data);
 void tasklet_subsys_init(void);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
