WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

[Xen-changelog] [xen-unstable] Remove unused 'retry' parameter from on_selected_cpus() etc.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Remove unused 'retry' parameter from on_selected_cpus() etc.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 27 May 2009 04:30:50 -0700
Delivery-date: Wed, 27 May 2009 04:33:26 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1243419387 -3600
# Node ID 7dfc0a20fa598287e61b64d93ff6671e2c649e9a
# Parent  822ea2bf0c54ae8dfb85a6482baa0bdd312e9ef1
Remove unused 'retry' parameter from on_selected_cpus() etc.

Remove the unused "retry" parameter of on_selected_cpus(),
on_each_cpu(), smp_call_function(), and smp_call_function_single().
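
For reference, the declarations that remain (taken from the smp.h hunks
below) are:

    int smp_call_function(void (*func)(void *info), void *info, int wait);
    int on_selected_cpus(const cpumask_t *selected,
                         void (*func)(void *info), void *info, int wait);
    int smp_call_function_single(int cpuid, void (*func)(void *info),
                                 void *info, int wait);

Call sites simply drop the old penultimate 'retry' argument, e.g.
on_each_cpu(mce_amd_checkregs, data, 1, 1) becomes
on_each_cpu(mce_amd_checkregs, data, 1).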

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
 xen/arch/ia64/linux-xen/mca.c               |    4 ++--
 xen/arch/ia64/linux-xen/perfmon.c           |   10 +++++-----
 xen/arch/ia64/linux-xen/smp.c               |   11 +++++------
 xen/arch/ia64/linux-xen/smpboot.c           |    2 +-
 xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c |    2 +-
 xen/arch/ia64/vmx/vmmu.c                    |    3 +--
 xen/arch/ia64/vmx/vtlb.c                    |    2 +-
 xen/arch/ia64/xen/cpufreq/cpufreq.c         |    6 ++----
 xen/arch/ia64/xen/flushtlb.c                |    2 +-
 xen/arch/ia64/xen/fw_emul.c                 |   11 +++++------
 xen/arch/ia64/xen/vhpt.c                    |   14 +++++++-------
 xen/arch/x86/acpi/cpufreq/cpufreq.c         |    6 +++---
 xen/arch/x86/acpi/cpufreq/powernow.c        |    2 +-
 xen/arch/x86/cpu/amd.c                      |    2 +-
 xen/arch/x86/cpu/mcheck/amd_nonfatal.c      |    2 +-
 xen/arch/x86/cpu/mcheck/mce.c               |    7 +++----
 xen/arch/x86/cpu/mcheck/mce_intel.c         |    2 +-
 xen/arch/x86/cpu/mcheck/non-fatal.c         |    2 +-
 xen/arch/x86/cpu/mtrr/main.c                |    4 ++--
 xen/arch/x86/hvm/hvm.c                      |    2 +-
 xen/arch/x86/hvm/svm/svm.c                  |    2 +-
 xen/arch/x86/hvm/vmx/vmcs.c                 |    4 ++--
 xen/arch/x86/hvm/vmx/vmx.c                  |    4 ++--
 xen/arch/x86/irq.c                          |    6 +++---
 xen/arch/x86/machine_kexec.c                |    2 +-
 xen/arch/x86/oprofile/nmi_int.c             |   10 +++++-----
 xen/arch/x86/shutdown.c                     |    4 ++--
 xen/arch/x86/smp.c                          |    6 ++----
 xen/arch/x86/time.c                         |    2 +-
 xen/arch/x86/x86_32/traps.c                 |    2 +-
 xen/common/gdbstub.c                        |    2 +-
 xen/common/keyhandler.c                     |    4 ++--
 xen/include/asm-ia64/linux-xen/asm/smp.h    |    4 ++--
 xen/include/xen/smp.h                       |    5 +----
 34 files changed, 71 insertions(+), 82 deletions(-)

diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/linux-xen/mca.c
--- a/xen/arch/ia64/linux-xen/mca.c     Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/mca.c     Wed May 27 11:16:27 2009 +0100
@@ -956,7 +956,7 @@ static void
 static void
 ia64_mca_cmc_vector_disable_keventd(void *unused)
 {
-       on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+       on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }
 
 /*
@@ -968,7 +968,7 @@ static void
 static void
 ia64_mca_cmc_vector_enable_keventd(void *unused)
 {
-       on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+       on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 #endif /* !XEN */
 
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/linux-xen/perfmon.c
--- a/xen/arch/ia64/linux-xen/perfmon.c Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/perfmon.c Wed May 27 11:16:27 2009 +0100
@@ -1895,7 +1895,7 @@ pfm_syswide_cleanup_other_cpu(pfm_contex
        int ret;
 
        DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-       ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+       ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
        DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
@@ -6895,7 +6895,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_h
        }
 
        /* save the current system wide pmu states */
-       ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+       ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
        if (ret) {
                DPRINT(("on_each_cpu() failed: %d\n", ret));
                goto cleanup_reserve;
@@ -6940,7 +6940,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_ha
 
        pfm_alt_intr_handler = NULL;
 
-       ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+       ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
        if (ret) {
                DPRINT(("on_each_cpu() failed: %d\n", ret));
        }
@@ -7499,7 +7499,7 @@ xenpfm_context_load(XEN_GUEST_HANDLE(pfa
 
        BUG_ON(in_irq());
        spin_lock(&xenpfm_context_lock);
-       smp_call_function(&xenpfm_context_load_cpu, &arg, 1, 1);
+       smp_call_function(&xenpfm_context_load_cpu, &arg, 1);
        xenpfm_context_load_cpu(&arg);
        spin_unlock(&xenpfm_context_lock);
        for_each_online_cpu(cpu) {
@@ -7553,7 +7553,7 @@ xenpfm_context_unload(void)
                return error;
        }
 
-       smp_call_function(&xenpfm_context_unload_cpu, &arg, 1, 1);
+       smp_call_function(&xenpfm_context_unload_cpu, &arg, 1);
        xenpfm_context_unload_cpu(&arg);
        spin_unlock(&xenpfm_context_lock);
        for_each_online_cpu(cpu) {
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/linux-xen/smp.c
--- a/xen/arch/ia64/linux-xen/smp.c     Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/smp.c     Wed May 27 11:16:27 2009 +0100
@@ -274,7 +274,7 @@ void
 void
 smp_flush_tlb_all (void)
 {
-       on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+       on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
@@ -297,7 +297,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
         * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
         * rather trivial.
         */
-       on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+       on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 #endif
 
@@ -314,7 +314,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
  */
 
 int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
+smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
                          int wait)
 {
        struct call_data_struct data;
@@ -372,7 +372,6 @@ EXPORT_SYMBOL(smp_call_function_single);
  *  [SUMMARY]  Run a function on all other CPUs.
  *  <func>     The function to run. This must be fast and non-blocking.
  *  <info>     An arbitrary pointer to pass to the function.
- *  <nonatomic>        currently unused.
  *  <wait>     If true, wait (atomically) until function has completed on other CPUs.
  *  [RETURNS]   0 on success, else a negative status code.
  *
@@ -383,7 +382,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  * hardware interrupt handler or from a bottom half handler.
  */
 int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+smp_call_function (void (*func) (void *info), void *info, int wait)
 {
        struct call_data_struct data;
        int cpus = num_online_cpus()-1;
@@ -438,7 +437,7 @@ EXPORT_SYMBOL(smp_call_function);
 #ifdef XEN
 int
 on_selected_cpus(const cpumask_t *selected, void (*func) (void *info),
-                 void *info, int retry, int wait)
+                 void *info, int wait)
 {
        struct call_data_struct data;
        unsigned int cpu, nr_cpus = cpus_weight(*selected);
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/smpboot.c Wed May 27 11:16:27 2009 +0100
@@ -307,7 +307,7 @@ ia64_sync_itc (unsigned int master)
 
        go[MASTER] = 1;
 
-       if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+       if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
                printk(KERN_ERR "sync_itc: failed to get attention of CPU 
%u!\n", master);
                return;
        }
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
--- a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c       Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c       Wed May 27 11:16:27 2009 +0100
@@ -240,7 +240,7 @@ sn2_global_tlb_purge(unsigned long start
                flush_data.end = end;
                flush_data.nbits = nbits;
                on_selected_cpus(&selected_cpus, sn_flush_ptcga_cpu,
-                                &flush_data, 1, 1);
+                                &flush_data, 1);
        }
        spin_unlock(&sn2_ptcg_lock2);
 }
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/vmx/vmmu.c
--- a/xen/arch/ia64/vmx/vmmu.c  Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/vmx/vmmu.c  Wed May 27 11:16:27 2009 +0100
@@ -448,8 +448,7 @@ IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu, u6
             if (cpu != current->processor) {
                 spin_barrier(&per_cpu(schedule_data, cpu).schedule_lock);
                 /* Flush VHPT on remote processors. */
-                smp_call_function_single(cpu, &ptc_ga_remote_func,
-                                         &args, 0, 1);
+                smp_call_function_single(cpu, &ptc_ga_remote_func, &args, 1);
             } else {
                 ptc_ga_remote_func(&args);
             }
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vtlb.c  Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/vmx/vtlb.c  Wed May 27 11:16:27 2009 +0100
@@ -643,7 +643,7 @@ void vmx_vcpu_flush_vtlb_all(VCPU *v)
     if (v->processor == smp_processor_id())
         __thash_purge_all(v);
     else
-        smp_call_function_single(v->processor, __thash_purge_all, v, 1, 1);
+        smp_call_function_single(v->processor, __thash_purge_all, v, 1);
     vcpu_unpause(v);
 }
 
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/xen/cpufreq/cpufreq.c
--- a/xen/arch/ia64/xen/cpufreq/cpufreq.c       Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/xen/cpufreq/cpufreq.c       Wed May 27 11:16:27 2009 +0100
@@ -95,8 +95,7 @@ acpi_cpufreq_get (unsigned int cpu)
        if (cpu == smp_processor_id())
                processor_get_freq((void*)&freq);
        else
-               smp_call_function_single(cpu, processor_get_freq,
-                                        (void *)&freq, 0, 1);
+               smp_call_function_single(cpu, processor_get_freq, &freq, 1);
 
        return freq;
 }
@@ -143,8 +142,7 @@ processor_set_freq (struct acpi_cpufreq_
        if (cpu == smp_processor_id())
                processor_set_pstate((void *)&value);
        else
-               smp_call_function_single(cpu, processor_set_pstate,
-                               (void *)&value, 0, 1);
+               smp_call_function_single(cpu, processor_set_pstate, &value, 1);
 
        if (value) {
                printk(KERN_WARNING "Transition failed\n");
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/xen/flushtlb.c
--- a/xen/arch/ia64/xen/flushtlb.c      Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/xen/flushtlb.c      Wed May 27 11:16:27 2009 +0100
@@ -70,7 +70,7 @@ new_tlbflush_clock_period(void)
 new_tlbflush_clock_period(void)
 {
     /* flush all vhpt of physical cpu and mTLB */
-    on_each_cpu(tlbflush_clock_local_flush, NULL, 1, 1);
+    on_each_cpu(tlbflush_clock_local_flush, NULL, 1);
 
     /*
      * if global TLB shootdown is finished, increment tlbflush_time
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/xen/fw_emul.c
--- a/xen/arch/ia64/xen/fw_emul.c       Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/xen/fw_emul.c       Wed May 27 11:16:27 2009 +0100
@@ -281,7 +281,7 @@ sal_emulator (long index, unsigned long 
                                IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
                                ret = smp_call_function_single(e->cpuid,
                                                               get_state_info_on,
-                                                              &arg, 0, 1);
+                                                              &arg, 1);
                                if (ret < 0) {
                                        printk("SAL_GET_STATE_INFO "
                                               "smp_call_function_single error:"
@@ -344,7 +344,7 @@ sal_emulator (long index, unsigned long 
                                int ret;
                                IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: 
remote\n");
                                ret = smp_call_function_single(e->cpuid,
-                                       clear_state_info_on, &arg, 0, 1);
+                                       clear_state_info_on, &arg, 1);
                                if (ret < 0) {
                                        printk("sal_emulator: "
                                               "SAL_CLEAR_STATE_INFO "
@@ -845,8 +845,7 @@ xen_pal_emulator(unsigned long index, u6
                                .progress = 0,
                                .status = 0
                        };
-                       smp_call_function(remote_pal_cache_flush,
-                                         (void *)&args, 1, 1);
+                       smp_call_function(remote_pal_cache_flush, &args, 1);
                        if (args.status != 0)
                                panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
                                             "remote status %lx", args.status);
@@ -945,7 +944,7 @@ xen_pal_emulator(unsigned long index, u6
                        /* must be performed on all remote processors 
                           in the coherence domain. */
                        smp_call_function(remote_pal_prefetch_visibility,
-                                         (void *)in1, 1, 1);
+                                         (void *)in1, 1);
                        status = 1; /* no more necessary on remote processor */
                }
                break;
@@ -953,7 +952,7 @@ xen_pal_emulator(unsigned long index, u6
                status = ia64_pal_mc_drain();
                /* FIXME: All vcpus likely call PAL_MC_DRAIN.
                   That causes the congestion. */
-               smp_call_function(remote_pal_mc_drain, NULL, 1, 1);
+               smp_call_function(remote_pal_mc_drain, NULL, 1);
                break;
            case PAL_BRAND_INFO:
                if (in1 == 0) {
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/ia64/xen/vhpt.c  Wed May 27 11:16:27 2009 +0100
@@ -307,7 +307,7 @@ void domain_flush_vtlb_all(struct domain
                        // takes care of mTLB flush.
                        smp_call_function_single(v->processor,
                                                 __vcpu_flush_vtlb_all,
-                                                v, 1, 1);
+                                                v, 1);
        }
        perfc_incr(domain_flush_vtlb_all);
 }
@@ -513,9 +513,9 @@ void domain_flush_tlb_vhpt(struct domain
 {
        /* Very heavy...  */
        if (HAS_PERVCPU_VHPT(d) || is_hvm_domain(d))
-               on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+               on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
        else
-               on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
+               on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
        cpus_clear (d->domain_dirty_cpumask);
 }
 
@@ -532,7 +532,7 @@ void flush_tlb_for_log_dirty(struct doma
                        thash_purge_all(v);
                }
                smp_call_function((void (*)(void *))local_flush_tlb_all, 
-                                       NULL, 1, 1);
+                                       NULL, 1);
        } else if (HAS_PERVCPU_VHPT(d)) {
                for_each_vcpu (d, v) {
                        if (!v->is_initialised)
@@ -541,9 +541,9 @@ void flush_tlb_for_log_dirty(struct doma
                        vcpu_purge_tr_entry(&PSCBX(v,itlb));
                        vcpu_vhpt_flush(v);
                }
-               on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+               on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
        } else {
-               on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
+               on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
        }
        cpus_clear (d->domain_dirty_cpumask);
 }
@@ -562,7 +562,7 @@ void flush_tlb_mask(const cpumask_t *mas
     for_each_cpu_mask (cpu, *mask)
         if (cpu != smp_processor_id())
             smp_call_function_single
-                (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
+                (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1);
 }
 
 #ifdef PERF_COUNTERS
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/acpi/cpufreq/cpufreq.c
--- a/xen/arch/x86/acpi/cpufreq/cpufreq.c       Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c       Wed May 27 11:16:27 2009 +0100
@@ -186,7 +186,7 @@ static void drv_read(struct drv_cmd *cmd
     if (likely(cpu_isset(smp_processor_id(), cmd->mask)))
         do_drv_read((void *)cmd);
     else
-        on_selected_cpus(&cmd->mask, do_drv_read, (void *)cmd, 0, 1);
+        on_selected_cpus(&cmd->mask, do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
@@ -195,7 +195,7 @@ static void drv_write(struct drv_cmd *cm
         cpu_isset(smp_processor_id(), cmd->mask))
         do_drv_write((void *)cmd);
     else
-        on_selected_cpus(&cmd->mask, do_drv_write, (void *)cmd, 0, 0);
+        on_selected_cpus(&cmd->mask, do_drv_write, cmd, 0);
 }
 
 static u32 get_cur_val(cpumask_t mask)
@@ -303,7 +303,7 @@ static unsigned int get_measured_perf(un
         read_measured_perf_ctrs((void *)&readin);
     } else {
         on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs,
-                        (void *)&readin, 0, 1);
+                        &readin, 1);
     }
 
     cur.aperf.whole = readin.aperf.whole - saved->aperf.whole;
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/acpi/cpufreq/powernow.c
--- a/xen/arch/x86/acpi/cpufreq/powernow.c      Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/acpi/cpufreq/powernow.c      Wed May 27 11:16:27 2009 +0100
@@ -121,7 +121,7 @@ static int powernow_cpufreq_target(struc
 
     cmd.val = next_perf_state;
 
-    on_selected_cpus(&cmd.mask, transition_pstate, (void *) &cmd, 0, 0);
+    on_selected_cpus(&cmd.mask, transition_pstate, &cmd, 0);
 
     perf->state = next_perf_state;
     policy->cur = freqs.new;
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c    Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/amd.c    Wed May 27 11:16:27 2009 +0100
@@ -246,7 +246,7 @@ static void check_disable_c1e(unsigned i
 {
        /* C1E is sometimes enabled during entry to ACPI mode. */
        if ((port == acpi_smi_cmd) && (value == acpi_enable_value))
-               on_each_cpu(disable_c1e, NULL, 1, 1);
+               on_each_cpu(disable_c1e, NULL, 1);
 }
 
 static void __devinit init_amd(struct cpuinfo_x86 *c)
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/cpu/mcheck/amd_nonfatal.c
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c    Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c    Wed May 27 11:16:27 2009 +0100
@@ -133,7 +133,7 @@ void mce_amd_checkregs(void *info)
  */
 static void mce_amd_work_fn(void *data)
 {
-       on_each_cpu(mce_amd_checkregs, data, 1, 1);
+       on_each_cpu(mce_amd_checkregs, data, 1);
 
        if (adjust > 0) {
                if (!guest_enabled_event(dom0->vcpu[0], VIRQ_MCA) ) {
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/cpu/mcheck/mce.c
--- a/xen/arch/x86/cpu/mcheck/mce.c     Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce.c     Wed May 27 11:16:27 2009 +0100
@@ -1162,8 +1162,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
                        if (log_cpus == NULL)
                                return x86_mcerr("do_mca cpuinfo", -ENOMEM);
 
-                       if (on_each_cpu(do_mc_get_cpu_info, log_cpus,
-                           1, 1) != 0) {
+                       if (on_each_cpu(do_mc_get_cpu_info, log_cpus, 1)) {
                                xfree(log_cpus);
                                return x86_mcerr("do_mca cpuinfo", -EIO);
                        }
@@ -1206,7 +1205,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
                add_taint(TAINT_ERROR_INJECT);
 
                on_selected_cpus(cpumask_of(target), x86_mc_msrinject,
-                                mc_msrinject, 1, 1);
+                                mc_msrinject, 1);
 
                break;
 
@@ -1226,7 +1225,7 @@ long do_mca(XEN_GUEST_HANDLE(xen_mc_t) u
                add_taint(TAINT_ERROR_INJECT);
 
                on_selected_cpus(cpumask_of(target), x86_mc_mceinject,
-                                mc_mceinject, 1, 1);
+                                mc_mceinject, 1);
                break;
 
        default:
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Wed May 27 11:16:27 2009 +0100
@@ -632,7 +632,7 @@ void cpu_mcheck_distribute_cmci(void)
 void cpu_mcheck_distribute_cmci(void)
 {
     if (cmci_support && !mce_disabled)
-        on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0, 0);
+        on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0);
 }
 
 static void clear_cmci(void)
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/cpu/mcheck/non-fatal.c
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c       Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c       Wed May 27 11:16:27 2009 +0100
@@ -69,7 +69,7 @@ static void mce_checkregs (void *info)
 
 static void mce_work_fn(void *data)
 { 
-       on_each_cpu(mce_checkregs, NULL, 1, 1);
+       on_each_cpu(mce_checkregs, NULL, 1);
 
        if (variable_period) {
                if (adjust)
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c      Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/cpu/mtrr/main.c      Wed May 27 11:16:27 2009 +0100
@@ -229,7 +229,7 @@ static void set_mtrr(unsigned int reg, u
        atomic_set(&data.gate,0);
 
        /*  Start the ball rolling on other CPUs  */
-       if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+       if (smp_call_function(ipi_handler, &data, 0) != 0)
                panic("mtrr: timed out waiting for other CPUs\n");
 
        local_irq_save(flags);
@@ -688,7 +688,7 @@ void mtrr_save_state(void)
        if (cpu == 0)
                mtrr_save_fixed_ranges(NULL);
        else
-               on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1, 1);
+               on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1);
        put_cpu();
 }
 
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Wed May 27 11:16:27 2009 +0100
@@ -971,7 +971,7 @@ int hvm_set_cr0(unsigned long value)
             if ( !v->domain->arch.hvm_domain.is_in_uc_mode )
             {
                 /* Flush physical caches. */
-                on_each_cpu(local_flush_cache, NULL, 1, 1);
+                on_each_cpu(local_flush_cache, NULL, 1);
                 hvm_set_uc_mode(v, 1);
             }
             spin_unlock(&v->domain->arch.hvm_domain.uc_lock);
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed May 27 11:16:27 2009 +0100
@@ -1254,7 +1254,7 @@ static void svm_wbinvd_intercept(void)
 static void svm_wbinvd_intercept(void)
 {
     if ( has_arch_pdevs(current->domain) )
-        on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+        on_each_cpu(wbinvd_ipi, NULL, 1);
 }
 
 static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Wed May 27 11:16:27 2009 +0100
@@ -264,7 +264,7 @@ static void vmx_clear_vmcs(struct vcpu *
     int cpu = v->arch.hvm_vmx.active_cpu;
 
     if ( cpu != -1 )
-        on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1, 1);
+        on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1);
 }
 
 static void vmx_load_vmcs(struct vcpu *v)
@@ -900,7 +900,7 @@ void vmx_do_resume(struct vcpu *v)
         {
             int cpu = v->arch.hvm_vmx.active_cpu;
             if ( cpu != -1 )
-                on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1, 1);
+                on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1);
         }
 
         vmx_clear_vmcs(v);
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed May 27 11:16:27 2009 +0100
@@ -1220,7 +1220,7 @@ void ept_sync_domain(struct domain *d)
     if ( d->arch.hvm_domain.hap_enabled && d->vcpu[0] )
     {
         ASSERT(local_irq_is_enabled());
-        on_each_cpu(__ept_sync_domain, d, 1, 1);
+        on_each_cpu(__ept_sync_domain, d, 1);
     }
 }
 
@@ -2131,7 +2131,7 @@ static void vmx_wbinvd_intercept(void)
         return;
 
     if ( cpu_has_wbinvd_exiting )
-        on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+        on_each_cpu(wbinvd_ipi, NULL, 1);
     else
         wbinvd();
 }
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/irq.c        Wed May 27 11:16:27 2009 +0100
@@ -522,7 +522,7 @@ static void __pirq_guest_eoi(struct doma
     }
 
     if ( !cpus_empty(cpu_eoi_map) )
-        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
 }
 
 int pirq_guest_eoi(struct domain *d, int irq)
@@ -761,7 +761,7 @@ static irq_guest_action_t *__pirq_guest_
         {
             cpu_eoi_map = action->cpu_eoi_map;
             spin_unlock_irq(&desc->lock);
-            on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+            on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
             spin_lock_irq(&desc->lock);
         }
         break;
@@ -799,7 +799,7 @@ static irq_guest_action_t *__pirq_guest_
     {
         BUG_ON(action->ack_type != ACKTYPE_EOI);
         spin_unlock_irq(&desc->lock);
-        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 1);
+        on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1);
         spin_lock_irq(&desc->lock);
     }
 
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/machine_kexec.c
--- a/xen/arch/x86/machine_kexec.c      Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/machine_kexec.c      Wed May 27 11:16:27 2009 +0100
@@ -100,7 +100,7 @@ void machine_reboot_kexec(xen_kexec_imag
     if ( reboot_cpu_id != smp_processor_id() )
     {
         on_selected_cpus(cpumask_of(reboot_cpu_id), __machine_reboot_kexec,
-                         image, 1, 0);
+                         image, 0);
         for (;;)
                 ; /* nothing */
     }
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c   Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c   Wed May 27 11:16:27 2009 +0100
@@ -186,7 +186,7 @@ static void nmi_cpu_setup(void * dummy)
 
 int nmi_setup_events(void)
 {
-       on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_setup, NULL, 1);
        return 0;
 }
 
@@ -207,7 +207,7 @@ int nmi_reserve_counters(void)
        /* We need to serialize save and setup for HT because the subset
         * of msrs are distinct for save and setup operations
         */
-       on_each_cpu(nmi_save_registers, NULL, 0, 1);
+       on_each_cpu(nmi_save_registers, NULL, 1);
        return 0;
 }
 
@@ -256,7 +256,7 @@ static void nmi_cpu_shutdown(void * dumm
  
 void nmi_release_counters(void)
 {
-       on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        release_lapic_nmi();
        free_msrs();
 }
@@ -274,7 +274,7 @@ static void nmi_cpu_start(void * dummy)
 
 int nmi_start(void)
 {
-       on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_start, NULL, 1);
        return 0;
 }
  
@@ -306,7 +306,7 @@ static void nmi_cpu_stop(void * dummy)
  
 void nmi_stop(void)
 {
-       on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_stop, NULL, 1);
 }
 
 
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/shutdown.c
--- a/xen/arch/x86/shutdown.c   Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/shutdown.c   Wed May 27 11:16:27 2009 +0100
@@ -91,7 +91,7 @@ void machine_halt(void)
     watchdog_disable();
     console_start_sync();
     local_irq_enable();
-    smp_call_function(__machine_halt, NULL, 1, 0);
+    smp_call_function(__machine_halt, NULL, 0);
     __machine_halt(NULL);
 }
 
@@ -311,7 +311,7 @@ void machine_restart(unsigned int delay_
     {
         /* Send IPI to the boot CPU (logical cpu 0). */
         on_selected_cpus(cpumask_of(0), __machine_restart,
-                         &delay_millisecs, 1, 0);
+                         &delay_millisecs, 0);
         for ( ; ; )
             halt();
     }
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/smp.c        Wed May 27 11:16:27 2009 +0100
@@ -239,19 +239,17 @@ int smp_call_function(
 int smp_call_function(
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait)
 {
     cpumask_t allbutself = cpu_online_map;
     cpu_clear(smp_processor_id(), allbutself);
-    return on_selected_cpus(&allbutself, func, info, retry, wait);
+    return on_selected_cpus(&allbutself, func, info, wait);
 }
 
 int on_selected_cpus(
     const cpumask_t *selected,
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait)
 {
     struct call_data_struct data;
@@ -322,7 +320,7 @@ void smp_send_stop(void)
 {
     int timeout = 10;
 
-    smp_call_function(stop_this_cpu, NULL, 1, 0);
+    smp_call_function(stop_this_cpu, NULL, 0);
 
     /* Wait 10ms for all other CPUs to go offline. */
     while ( (num_online_cpus() > 1) && (timeout-- > 0) )
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/time.c       Wed May 27 11:16:27 2009 +0100
@@ -1193,7 +1193,7 @@ static void time_calibration(void *unuse
                      opt_consistent_tscs
                      ? time_calibration_tsc_rendezvous
                      : time_calibration_std_rendezvous,
-                     &r, 0, 1);
+                     &r, 1);
 }
 
 void init_percpu_time(void)
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Wed May 27 11:15:08 2009 +0100
+++ b/xen/arch/x86/x86_32/traps.c       Wed May 27 11:16:27 2009 +0100
@@ -403,7 +403,7 @@ static long register_guest_callback(stru
     case CALLBACKTYPE_sysenter_deprecated:
         if ( !cpu_has_sep )
             ret = -EINVAL;
-        else if ( on_each_cpu(do_update_sysenter, &reg->address, 1, 1) != 0 )
+        else if ( on_each_cpu(do_update_sysenter, &reg->address, 1) != 0 )
             ret = -EIO;
         break;
 
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/common/gdbstub.c
--- a/xen/common/gdbstub.c      Wed May 27 11:15:08 2009 +0100
+++ b/xen/common/gdbstub.c      Wed May 27 11:16:27 2009 +0100
@@ -672,7 +672,7 @@ static void gdb_smp_pause(void)
 
     atomic_set(&gdb_smp_paused_count, 0);
 
-    smp_call_function(gdb_pause_this_cpu, NULL, /* dont wait! */0, 0);
+    smp_call_function(gdb_pause_this_cpu, NULL, /* dont wait! */0);
 
     /* Wait 100ms for all other CPUs to enter pause loop */
     while ( (atomic_read(&gdb_smp_paused_count) < (num_online_cpus() - 1)) 
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/common/keyhandler.c
--- a/xen/common/keyhandler.c   Wed May 27 11:15:08 2009 +0100
+++ b/xen/common/keyhandler.c   Wed May 27 11:16:27 2009 +0100
@@ -119,7 +119,7 @@ static void dump_registers(unsigned char
         if ( cpu == smp_processor_id() )
             continue;
         printk("\n*** Dumping CPU%d host state: ***\n", cpu);
-        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1, 1);
+        on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
     }
 
     printk("\n");
@@ -263,7 +263,7 @@ static void read_clocks(unsigned char ke
 
     spin_lock(&lock);
 
-    smp_call_function(read_clocks_slave, NULL, 0, 0);
+    smp_call_function(read_clocks_slave, NULL, 0);
 
     local_irq_disable();
     read_clocks_cpumask = cpu_online_map;
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/include/asm-ia64/linux-xen/asm/smp.h
--- a/xen/include/asm-ia64/linux-xen/asm/smp.h  Wed May 27 11:15:08 2009 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/smp.h  Wed May 27 11:16:27 2009 +0100
@@ -127,8 +127,8 @@ extern void __init init_smp_config (void
 extern void __init init_smp_config (void);
 extern void smp_do_timer (struct pt_regs *regs);
 
-extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
-                                    int retry, int wait);
+extern int smp_call_function_single (int cpuid, void (*func) (void *info),
+                                    void *info, int wait);
 extern void smp_send_reschedule (int cpu);
 #ifdef XEN
 extern void lock_ipi_calllock(unsigned long *flags);
diff -r 822ea2bf0c54 -r 7dfc0a20fa59 xen/include/xen/smp.h
--- a/xen/include/xen/smp.h     Wed May 27 11:15:08 2009 +0100
+++ b/xen/include/xen/smp.h     Wed May 27 11:16:27 2009 +0100
@@ -34,7 +34,6 @@ extern int smp_call_function(
 extern int smp_call_function(
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait);
 
 /* 
@@ -44,7 +43,6 @@ extern int on_selected_cpus(
     const cpumask_t *selected,
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait);
 
 /*
@@ -59,10 +57,9 @@ static inline int on_each_cpu(
 static inline int on_each_cpu(
     void (*func) (void *info),
     void *info,
-    int retry,
     int wait)
 {
-    return on_selected_cpus(&cpu_online_map, func, info, retry, wait);
+    return on_selected_cpus(&cpu_online_map, func, info, wait);
 }
 
 #define smp_processor_id() raw_smp_processor_id()

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
