# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1271238269 -3600
# Node ID 5057604eeefcb6479cc97c092a2399a115fae879
# Parent c02cc832cb2d88c383d33c1ba50c381fae703308
Per-cpu tasklet lists.

Replace the single global tasklet list with one list per CPU. Tasklets
are queued on a specific CPU via the new tasklet_schedule_on_cpu();
tasklet_schedule() becomes a wrapper that targets the local CPU. The
is_scheduled flag is replaced by a scheduled_on CPU index (-1 when not
queued), and cpu_down() calls the new migrate_tasklets_from_cpu() to
move any pending tasklets off a CPU being offlined. The lists remain
protected by the existing global tasklet_lock.
Signed-off-by: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/smpboot.c    |    1 +
 xen/common/softirq.c      |   95 ++++++++++++++++++++++++++++++++--------------
 xen/include/xen/softirq.h |    6 +-
 3 files changed, 73 insertions(+), 29 deletions(-)
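For orientation, here is a minimal caller-side sketch of the new interface,
assuming only the declarations this patch adds to xen/include/xen/softirq.h;
the tasklet name and handlers (my_tasklet, my_deferred_work, kick_work_*) are
illustrative and not part of the changeset:

#include <xen/softirq.h>

/* Runs in TASKLET_SOFTIRQ context on whichever CPU dequeues the tasklet. */
static void my_deferred_work(unsigned long data)
{
    /* ... deferred work ... */
}

/* Statically initialised; scheduled_on starts at -1, i.e. not queued. */
static DECLARE_TASKLET(my_tasklet, my_deferred_work, 0);

static void kick_work_on(unsigned int cpu)
{
    /* Queue on the given CPU's per-cpu list; TASKLET_SOFTIRQ is raised there. */
    tasklet_schedule_on_cpu(&my_tasklet, cpu);
}

static void kick_work_local(void)
{
    /* Shorthand for scheduling on the local CPU. */
    tasklet_schedule(&my_tasklet);
}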
diff -r c02cc832cb2d -r 5057604eeefc xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c Tue Apr 13 18:19:33 2010 +0100
+++ b/xen/arch/x86/smpboot.c Wed Apr 14 10:44:29 2010 +0100
@@ -1374,6 +1374,7 @@ int cpu_down(unsigned int cpu)
BUG_ON(cpu_online(cpu));
+ migrate_tasklets_from_cpu(cpu);
cpu_mcheck_distribute_cmci();
out:
diff -r c02cc832cb2d -r 5057604eeefc xen/common/softirq.c
--- a/xen/common/softirq.c Tue Apr 13 18:19:33 2010 +0100
+++ b/xen/common/softirq.c Wed Apr 14 10:44:29 2010 +0100
@@ -78,7 +78,8 @@ void cpumask_raise_softirq(cpumask_t mas
void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
- if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
+ if ( !test_and_set_bit(nr, &softirq_pending(cpu))
+ && (cpu != smp_processor_id()) )
smp_send_event_check_cpu(cpu);
}
@@ -87,46 +88,54 @@ void raise_softirq(unsigned int nr)
set_bit(nr, &softirq_pending(smp_processor_id()));
}
-static LIST_HEAD(tasklet_list);
+static bool_t tasklets_initialised;
+static DEFINE_PER_CPU(struct list_head, tasklet_list);
static DEFINE_SPINLOCK(tasklet_lock);
+void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tasklet_lock, flags);
+
+ if ( tasklets_initialised && !t->is_dead )
+ {
+ t->scheduled_on = cpu;
+ if ( !t->is_running )
+ {
+ list_del(&t->list);
+ list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
+ cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ }
+ }
+
+ spin_unlock_irqrestore(&tasklet_lock, flags);
+}
+
void tasklet_schedule(struct tasklet *t)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tasklet_lock, flags);
-
- if ( !t->is_dead )
- {
- if ( !t->is_scheduled && !t->is_running )
- {
- BUG_ON(!list_empty(&t->list));
- list_add_tail(&t->list, &tasklet_list);
- }
- t->is_scheduled = 1;
- raise_softirq(TASKLET_SOFTIRQ);
- }
-
- spin_unlock_irqrestore(&tasklet_lock, flags);
+ tasklet_schedule_on_cpu(t, smp_processor_id());
}
static void tasklet_action(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct list_head *list = &per_cpu(tasklet_list, cpu);
struct tasklet *t;
spin_lock_irq(&tasklet_lock);
- if ( list_empty(&tasklet_list) )
+ if ( list_empty(list) )
{
spin_unlock_irq(&tasklet_lock);
return;
}
- t = list_entry(tasklet_list.next, struct tasklet, list);
+ t = list_entry(list->next, struct tasklet, list);
list_del_init(&t->list);
- BUG_ON(t->is_dead || t->is_running || !t->is_scheduled);
- t->is_scheduled = 0;
+ BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu));
+ t->scheduled_on = -1;
t->is_running = 1;
spin_unlock_irq(&tasklet_lock);
@@ -135,17 +144,19 @@ static void tasklet_action(void)
t->is_running = 0;
- if ( t->is_scheduled )
+ if ( t->scheduled_on >= 0 )
{
BUG_ON(t->is_dead || !list_empty(&t->list));
- list_add_tail(&t->list, &tasklet_list);
+ list_add_tail(&t->list, &per_cpu(tasklet_list, t->scheduled_on));
+ if ( t->scheduled_on != cpu )
+ cpu_raise_softirq(t->scheduled_on, TASKLET_SOFTIRQ);
}
/*
* If there is more work to do then reschedule. We don't grab more work
* immediately as we want to allow other softirq work to happen first.
*/
- if ( !list_empty(&tasklet_list) )
+ if ( !list_empty(list) )
raise_softirq(TASKLET_SOFTIRQ);
spin_unlock_irq(&tasklet_lock);
@@ -159,10 +170,10 @@ void tasklet_kill(struct tasklet *t)
if ( !list_empty(&t->list) )
{
- BUG_ON(t->is_dead || t->is_running || !t->is_scheduled);
+ BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
list_del_init(&t->list);
}
- t->is_scheduled = 0;
+ t->scheduled_on = -1;
t->is_dead = 1;
while ( t->is_running )
@@ -175,18 +186,48 @@ void tasklet_kill(struct tasklet *t)
spin_unlock_irqrestore(&tasklet_lock, flags);
}
+void migrate_tasklets_from_cpu(unsigned int cpu)
+{
+ struct list_head *list = &per_cpu(tasklet_list, cpu);
+ unsigned long flags;
+ struct tasklet *t;
+
+ spin_lock_irqsave(&tasklet_lock, flags);
+
+ while ( !list_empty(list) )
+ {
+ t = list_entry(list->next, struct tasklet, list);
+ BUG_ON(t->scheduled_on != cpu);
+ t->scheduled_on = smp_processor_id();
+ list_del(&t->list);
+ list_add_tail(&t->list, &this_cpu(tasklet_list));
+ }
+
+ raise_softirq(TASKLET_SOFTIRQ);
+
+ spin_unlock_irqrestore(&tasklet_lock, flags);
+}
+
void tasklet_init(
struct tasklet *t, void (*func)(unsigned long), unsigned long data)
{
memset(t, 0, sizeof(*t));
INIT_LIST_HEAD(&t->list);
+ t->scheduled_on = -1;
t->func = func;
t->data = data;
}
void __init softirq_init(void)
{
+ unsigned int cpu;
+
+ for_each_possible_cpu ( cpu )
+ INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
+
open_softirq(TASKLET_SOFTIRQ, tasklet_action);
+
+ tasklets_initialised = 1;
}
/*
diff -r c02cc832cb2d -r 5057604eeefc xen/include/xen/softirq.h
--- a/xen/include/xen/softirq.h Tue Apr 13 18:19:33 2010 +0100
+++ b/xen/include/xen/softirq.h Wed Apr 14 10:44:29 2010 +0100
@@ -47,7 +47,7 @@ struct tasklet
struct tasklet
{
struct list_head list;
- bool_t is_scheduled;
+ int scheduled_on;
bool_t is_running;
bool_t is_dead;
void (*func)(unsigned long);
@@ -55,10 +55,12 @@ struct tasklet
};
#define DECLARE_TASKLET(name, func, data) \
- struct tasklet name = { LIST_HEAD_INIT(name.list), 0, 0, 0, func, data }
+ struct tasklet name = { LIST_HEAD_INIT(name.list), -1, 0, 0, func, data }
+void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu);
void tasklet_schedule(struct tasklet *t);
void tasklet_kill(struct tasklet *t);
+void migrate_tasklets_from_cpu(unsigned int cpu);
void tasklet_init(
struct tasklet *t, void (*func)(unsigned long), unsigned long data);
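As a companion to the scheduling sketch above, a hedged sketch of the teardown
side, again assuming only the interfaces declared in this patch; the example_*
names are illustrative:

#include <xen/softirq.h>

static struct tasklet example_tasklet;

static void example_fn(unsigned long data)
{
    /* Deferred work body. */
}

static void example_driver_init(void)
{
    /* Run-time initialisation: scheduled_on starts at -1 (not queued). */
    tasklet_init(&example_tasklet, example_fn, 0);
}

static void example_driver_exit(void)
{
    /* Dequeue if pending, then wait for any in-flight run to finish. */
    tasklet_kill(&example_tasklet);
}

static void example_cpu_offline(unsigned int dying_cpu)
{
    /*
     * Mirrors the cpu_down() hunk above: once dying_cpu is no longer
     * online, move its pending tasklets to the current CPU's list and
     * let the locally raised TASKLET_SOFTIRQ run them.
     */
    migrate_tasklets_from_cpu(dying_cpu);
}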