[Xen-devel] [PATCH 4/6] eliminate cpu_clear()

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 4/6] eliminate cpu_clear()
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Mon, 07 Nov 2011 09:58:33 +0000
Convert the remaining users of the cpu_clear() wrapper to cpumask_clear_cpu()
(which takes a cpumask_t * rather than a cpumask_t), or to the
set_cpu_online()/set_cpu_present() accessors where the operand is
cpu_online_map or cpu_present_map, and drop the wrapper itself.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
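
For reference, the conversion is mechanical; below is a minimal,
self-contained sketch of the old and new calling conventions (the
one-word cpumask_t and the demo values are illustrative only - Xen's
real cpumask_t is an array of unsigned longs sized by NR_CPUS):

    #include <stdio.h>

    /* Simplified stand-in for Xen's cpumask_t. */
    typedef struct { unsigned long bits[1]; } cpumask_t;

    static void cpumask_clear_cpu(int cpu, cpumask_t *dstp)
    {
        dstp->bits[0] &= ~(1UL << cpu);
    }

    /* The wrapper this patch removes: it hides the address-of. */
    #define cpu_clear(cpu, dst) cpumask_clear_cpu(cpu, &(dst))

    int main(void)
    {
        cpumask_t m = { { 0xffUL } };

        cpu_clear(3, m);              /* old form: mask passed "by value" */
        cpumask_clear_cpu(4, &m);     /* new form: pointer is explicit */
        printf("%#lx\n", m.bits[0]);  /* prints 0xe7 (bits 3 and 4 cleared) */
        return 0;
    }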

--- a/xen/arch/ia64/linux-xen/acpi.c
+++ b/xen/arch/ia64/linux-xen/acpi.c
@@ -976,7 +976,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 int acpi_unmap_lsapic(int cpu)
 {
        ia64_cpu_to_sapicid[cpu] = -1;
-       cpu_clear(cpu, cpu_present_map);
+       set_cpu_present(cpu, 0);
 
 #ifdef CONFIG_ACPI_NUMA
        /* NUMA specific cleanup's */
--- a/xen/arch/ia64/linux-xen/iosapic.c
+++ b/xen/arch/ia64/linux-xen/iosapic.c
@@ -706,7 +706,7 @@ get_target_cpu (unsigned int gsi, int ve
 
                for_each_cpu_mask(numa_cpu, cpu_mask) {
                        if (!cpu_online(numa_cpu))
-                               cpu_clear(numa_cpu, cpu_mask);
+                               cpumask_clear_cpu(numa_cpu, &cpu_mask);
                }
 
                num_cpus = cpumask_weight(&cpu_mask);
--- a/xen/arch/ia64/linux-xen/smp.c
+++ b/xen/arch/ia64/linux-xen/smp.c
@@ -134,7 +134,7 @@ stop_this_cpu (void)
        /*
         * Remove this CPU:
         */
-       cpu_clear(smp_processor_id(), cpu_online_map);
+       set_cpu_online(smp_processor_id(), 0);
        max_xtp();
        local_irq_disable();
        cpu_halt();
--- a/xen/arch/ia64/linux-xen/smpboot.c
+++ b/xen/arch/ia64/linux-xen/smpboot.c
@@ -557,7 +557,7 @@ do_rest:
        if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
                printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
                ia64_cpu_to_sapicid[cpu] = -1;
-               cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
+               set_cpu_online(cpu, 0);  /* was set in smp_callin() */
                return -EINVAL;
        }
        return 0;
@@ -727,12 +727,12 @@ void __cpu_disable(void)
        int cpu = smp_processor_id();
 
        remove_siblinginfo(cpu);
-       cpu_clear(cpu, cpu_online_map);
+       set_cpu_online(cpu, 0);
 #ifndef XEN
        fixup_irqs();
 #endif
        local_flush_tlb_all();
-       cpu_clear(cpu, cpu_callin_map);
+       cpumask_clear_cpu(cpu, &cpu_callin_map);
 }
 #else /* !CONFIG_HOTPLUG_CPU */
 void __cpu_disable(void)
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -668,5 +668,5 @@ void __cpuinit cpu_init(void)
 
 void cpu_uninit(unsigned int cpu)
 {
-       cpu_clear(cpu, cpu_initialized);
+       cpumask_clear_cpu(cpu, &cpu_initialized);
 }
--- a/xen/arch/x86/mpparse.c
+++ b/xen/arch/x86/mpparse.c
@@ -802,7 +802,7 @@ void mp_unregister_lapic(uint32_t apic_i
        physid_clear(apic_id, phys_cpu_present_map);
 
        x86_cpu_to_apicid[cpu] = BAD_APICID;
-       cpu_clear(cpu, cpu_present_map);
+       set_cpu_present(cpu, 0);
 }
 
 #ifdef CONFIG_X86_IO_APIC
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -226,7 +226,7 @@ fastcall void smp_invalidate_interrupt(v
     if ( !__sync_local_execstate() ||
          (flush_flags & (FLUSH_TLB_GLOBAL | FLUSH_CACHE)) )
         flush_area_local(flush_va, flush_flags);
-    cpu_clear(smp_processor_id(), flush_cpumask);
+    cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
     irq_exit();
 }
 
@@ -353,7 +353,7 @@ void __stop_this_cpu(void)
 static void stop_this_cpu(void *dummy)
 {
     __stop_this_cpu();
-    cpu_clear(smp_processor_id(), cpu_online_map);
+    set_cpu_online(smp_processor_id(), 0);
     for ( ; ; )
         halt();
 }
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -151,7 +151,7 @@ static void synchronize_tsc_master(unsig
     }
 
     atomic_set(&tsc_count, 0);
-    cpu_clear(slave, tsc_sync_cpu_mask);
+    cpumask_clear_cpu(slave, &tsc_sync_cpu_mask);
 }
 
 static void synchronize_tsc_slave(unsigned int slave)
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -232,7 +232,7 @@ static int cpupool_assign_cpu_locked(str
         return ret;
     }
 
-    cpu_clear(cpu, cpupool_free_cpus);
+    cpumask_clear_cpu(cpu, &cpupool_free_cpus);
     if (cpupool_moving_cpu == cpu)
     {
         cpupool_moving_cpu = -1;
@@ -259,7 +259,7 @@ static long cpupool_unassign_cpu_helper(
         ret = schedule_cpu_switch(cpu, NULL);
         if ( ret )
         {
-            cpu_clear(cpu, cpupool_free_cpus);
+            cpumask_clear_cpu(cpu, &cpupool_free_cpus);
             goto out;
         }
         per_cpu(cpupool, cpu) = NULL;
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1638,7 +1638,7 @@ csched_schedule(
 
     /* Clear "tickled" bit now that we've been scheduled */
     if ( cpumask_test_cpu(cpu, &rqd->tickled) )
-        cpu_clear(cpu, rqd->tickled);
+        cpumask_clear_cpu(cpu, &rqd->tickled);
 
     /* Update credits */
     burn_credits(rqd, scurr, now);
@@ -1709,7 +1709,7 @@ csched_schedule(
 
         /* Clear the idle mask if necessary */
         if ( cpumask_test_cpu(cpu, &rqd->idle) )
-            cpu_clear(cpu, rqd->idle);
+            cpumask_clear_cpu(cpu, &rqd->idle);
 
         snext->start_time = now;
 
@@ -1873,7 +1873,7 @@ static void deactivate_runqueue(struct c
     
     rqd->id = -1;
 
-    cpu_clear(rqi, prv->active_queues);
+    cpumask_clear_cpu(rqi, &prv->active_queues);
 }
 
 static void init_pcpu(const struct scheduler *ops, int cpu)
@@ -1977,8 +1977,8 @@ csched_free_pdata(const struct scheduler
 
     printk("Removing cpu %d from runqueue %d\n", cpu, rqi);
 
-    cpu_clear(cpu, rqd->idle);
-    cpu_clear(cpu, rqd->active);
+    cpumask_clear_cpu(cpu, &rqd->idle);
+    cpumask_clear_cpu(cpu, &rqd->active);
 
     if ( cpumask_empty(&rqd->active) )
     {
@@ -1988,7 +1988,7 @@ csched_free_pdata(const struct scheduler
 
     spin_unlock(&rqd->lock);
 
-    cpu_clear(cpu, prv->initialized);
+    cpumask_clear_cpu(cpu, &prv->initialized);
 
     spin_unlock_irqrestore(&prv->lock, flags);
 
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -54,7 +54,7 @@ do {                                    
     unsigned int cpu;                                                   \
     for_each_cpu_mask ( cpu, mask )                                     \
         if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) ) \
-            cpu_clear(cpu, mask);                                       \
+            cpumask_clear_cpu(cpu, &(mask));                            \
 } while ( 0 )
 
 void new_tlbflush_clock_period(void);
--- a/xen/include/asm-x86/numa.h
+++ b/xen/include/asm-x86/numa.h
@@ -43,7 +43,7 @@ extern void init_cpu_to_node(void);
 
 static inline void clear_node_cpumask(int cpu)
 {
-       cpu_clear(cpu, node_to_cpumask[cpu_to_node(cpu)]);
+       cpumask_clear_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
 }
 
 /* Simple perfect hash to map pdx to node numbers */
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -102,7 +102,6 @@ static inline void cpumask_set_cpu(int c
        set_bit(cpumask_check(cpu), dstp->bits);
 }
 
-#define cpu_clear(cpu, dst) cpumask_clear_cpu(cpu, &(dst))
 static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
 {
        clear_bit(cpumask_check(cpu), dstp->bits);
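
Note that the sites manipulating the global cpu_online_map /
cpu_present_map are converted to the set_cpu_online() /
set_cpu_present() accessors rather than to raw cpumask_clear_cpu()
calls. A rough sketch of the shape such accessors typically take (an
assumption for illustration; the actual Xen definitions are not part
of this patch and may differ in detail):

    /* Illustrative only: route all updates of the global maps through
     * the cpumask_* primitives, keeping direct map manipulation out of
     * the callers.  Reuses a simplified one-word cpumask_t. */
    typedef struct { unsigned long bits[1]; } cpumask_t;

    static cpumask_t cpu_online_map, cpu_present_map;

    static void cpumask_set_cpu(int cpu, cpumask_t *dstp)
    {
        dstp->bits[0] |= 1UL << cpu;
    }

    static void cpumask_clear_cpu(int cpu, cpumask_t *dstp)
    {
        dstp->bits[0] &= ~(1UL << cpu);
    }

    /* Assumed shape of the accessors used in the hunks above. */
    static void set_cpu_online(unsigned int cpu, int online)
    {
        if (online)
            cpumask_set_cpu(cpu, &cpu_online_map);
        else
            cpumask_clear_cpu(cpu, &cpu_online_map);
    }

    static void set_cpu_present(unsigned int cpu, int present)
    {
        if (present)
            cpumask_set_cpu(cpu, &cpu_present_map);
        else
            cpumask_clear_cpu(cpu, &cpu_present_map);
    }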


Attachment: eliminate-cpu_clear.patch
