# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1308239791 -3600
# Node ID 3ff057cbb16b6a19f1710900e1ac0993cb8bf5d3
# Parent fb5f0febeddc5ede042115595cb1448433235d26
tasklets: Allow tasklets to be created that run in softirq context.

Where this is safe, it can reduce latency and CPU overhead compared
with scheduling the idle VCPU to perform the same tasklet work.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---
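Usage sketch (illustrative only, not part of the patch): only
DECLARE_SOFTIRQ_TASKLET, softirq_tasklet_init() and tasklet_schedule()
come from the tree; the handler and all other names are hypothetical.

    #include <xen/tasklet.h>

    /* The handler runs in TASKLET_SOFTIRQ context, on at most one CPU
     * at a time, rather than in the idle VCPU's context. */
    static void my_handler(unsigned long data)
    {
        /* ... latency-sensitive work ... */
    }

    static DECLARE_SOFTIRQ_TASKLET(my_tasklet, my_handler, 0);

    /* Typically called from interrupt context: enqueueing onto an
     * empty per-CPU list raises TASKLET_SOFTIRQ on the local CPU. */
    static void kick_work(void)
    {
        tasklet_schedule(&my_tasklet);
    }

Dynamically set-up tasklets use softirq_tasklet_init() in place of
tasklet_init(); DECLARE_TASKLET and tasklet_init() keep the existing
idle-VCPU behaviour.
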
diff -r fb5f0febeddc -r 3ff057cbb16b xen/common/tasklet.c
--- a/xen/common/tasklet.c Thu Jun 16 16:17:35 2011 +0100
+++ b/xen/common/tasklet.c Thu Jun 16 16:56:31 2011 +0100
@@ -1,8 +1,10 @@
/******************************************************************************
* tasklet.c
*
- * Tasklets are dynamically-allocatable tasks run in VCPU context
- * (specifically, the idle VCPU's context) on at most one CPU at a time.
+ * Tasklets are dynamically-allocatable tasks run in either VCPU context
+ * (specifically, the idle VCPU's context) or in softirq context, on at most
+ * one CPU at a time. Softirq versus VCPU context execution is specified
+ * during per-tasklet initialisation.
*
* Copyright (c) 2010, Citrix Systems, Inc.
* Copyright (c) 1992, Linus Torvalds
@@ -24,6 +26,7 @@
DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);
 
static DEFINE_PER_CPU(struct list_head, tasklet_list);
+static DEFINE_PER_CPU(struct list_head, softirq_tasklet_list);
 
/* Protects all lists and tasklet structures. */
static DEFINE_SPINLOCK(tasklet_lock);
@@ -31,11 +34,22 @@
static void tasklet_enqueue(struct tasklet *t)
{
unsigned int cpu = t->scheduled_on;
- unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
 
- list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
- if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
- cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+ if ( t->is_softirq )
+ {
+ struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);
+ bool_t was_empty = list_empty(list);
+ list_add_tail(&t->list, list);
+ if ( was_empty )
+ cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ }
+ else
+ {
+ unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
+ list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
+ if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
+ cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
+ }
}
 
void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
@@ -62,25 +76,13 @@
tasklet_schedule_on_cpu(t, smp_processor_id());
}
 
-void do_tasklet(void)
+static void do_tasklet_work(unsigned int cpu, struct list_head *list)
{
- unsigned int cpu = smp_processor_id();
- unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
- struct list_head *list = &per_cpu(tasklet_list, cpu);
struct tasklet *t;
 
- /*
- * Work must be enqueued *and* scheduled. Otherwise there is no work to
- * do, and/or scheduler needs to run to update idle vcpu priority.
- */
- if ( likely(*work_to_do != (TASKLET_enqueued|TASKLET_scheduled)) )
+ if ( unlikely(list_empty(list) || cpu_is_offline(cpu)) )
return;
 
- spin_lock_irq(&tasklet_lock);
-
- if ( unlikely(list_empty(list) || cpu_is_offline(cpu)) )
- goto out;
-
t = list_entry(list->next, struct tasklet, list);
list_del_init(&t->list);
 
@@ -100,8 +102,26 @@
BUG_ON(t->is_dead || !list_empty(&t->list));
tasklet_enqueue(t);
}
+}
 
- out:
+/* VCPU context work */
+void do_tasklet(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
+ struct list_head *list = &per_cpu(tasklet_list, cpu);
+
+ /*
+ * Work must be enqueued *and* scheduled. Otherwise there is no work to
+ * do, and/or scheduler needs to run to update idle vcpu priority.
+ */
+ if ( likely(*work_to_do != (TASKLET_enqueued|TASKLET_scheduled)) )
+ return;
+
+ spin_lock_irq(&tasklet_lock);
+
+ do_tasklet_work(cpu, list);
+
if ( list_empty(list) )
{
clear_bit(_TASKLET_enqueued, work_to_do);
@@ -111,6 +131,22 @@
spin_unlock_irq(&tasklet_lock);
}
 
+/* Softirq context work */
+static void tasklet_softirq_action(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);
+
+ spin_lock_irq(&tasklet_lock);
+
+ do_tasklet_work(cpu, list);
+
+ if ( !list_empty(list) && !cpu_is_offline(cpu) )
+ raise_softirq(TASKLET_SOFTIRQ);
+
+ spin_unlock_irq(&tasklet_lock);
+}
+
void tasklet_kill(struct tasklet *t)
{
unsigned long flags;
@@ -136,9 +172,8 @@
spin_unlock_irqrestore(&tasklet_lock, flags);
}
 
-static void migrate_tasklets_from_cpu(unsigned int cpu)
+static void migrate_tasklets_from_cpu(unsigned int cpu, struct list_head *list)
{
- struct list_head *list = &per_cpu(tasklet_list, cpu);
unsigned long flags;
struct tasklet *t;
 
@@ -166,6 +201,13 @@
t->data = data;
}
 
+void softirq_tasklet_init(
+ struct tasklet *t, void (*func)(unsigned long), unsigned long data)
+{
+ tasklet_init(t, func, data);
+ t->is_softirq = 1;
+}
+
static int cpu_callback(
struct notifier_block *nfb, unsigned long action, void *hcpu)
{
@@ -175,10 +217,12 @@
{
case CPU_UP_PREPARE:
INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
+ INIT_LIST_HEAD(&per_cpu(softirq_tasklet_list, cpu));
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
- migrate_tasklets_from_cpu(cpu);
+ migrate_tasklets_from_cpu(cpu, &per_cpu(tasklet_list, cpu));
+ migrate_tasklets_from_cpu(cpu, &per_cpu(softirq_tasklet_list, cpu));
break;
default:
break;
@@ -197,6 +241,7 @@
void *hcpu = (void *)(long)smp_processor_id();
cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
register_cpu_notifier(&cpu_nfb);
+ open_softirq(TASKLET_SOFTIRQ, tasklet_softirq_action);
tasklets_initialised = 1;
}
 
diff -r fb5f0febeddc -r 3ff057cbb16b xen/include/xen/softirq.h
--- a/xen/include/xen/softirq.h Thu Jun 16 16:17:35 2011 +0100
+++ b/xen/include/xen/softirq.h Thu Jun 16 16:56:31 2011 +0100
@@ -7,6 +7,7 @@
SCHEDULE_SOFTIRQ,
NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ,
RCU_SOFTIRQ,
+ TASKLET_SOFTIRQ,
NR_COMMON_SOFTIRQS
};
 
diff -r fb5f0febeddc -r 3ff057cbb16b xen/include/xen/tasklet.h
--- a/xen/include/xen/tasklet.h Thu Jun 16 16:17:35 2011 +0100
+++ b/xen/include/xen/tasklet.h Thu Jun 16 16:56:31 2011 +0100
@@ -1,8 +1,10 @@
/******************************************************************************
* tasklet.h
*
- * Tasklets are dynamically-allocatable tasks run in VCPU context
- * (specifically, the idle VCPU's context) on at most one CPU at a time.
+ * Tasklets are dynamically-allocatable tasks run in either VCPU context
+ * (specifically, the idle VCPU's context) or in softirq context, on at most
+ * one CPU at a time. Softirq versus VCPU context execution is specified
+ * during per-tasklet initialisation.
*/
 
#ifndef __XEN_TASKLET_H__
@@ -16,14 +18,20 @@
{
struct list_head list;
int scheduled_on;
+ bool_t is_softirq;
bool_t is_running;
bool_t is_dead;
void (*func)(unsigned long);
unsigned long data;
};
 
-#define DECLARE_TASKLET(name, func, data) \
- struct tasklet name = { LIST_HEAD_INIT(name.list), -1, 0, 0, func, data }
+#define _DECLARE_TASKLET(name, func, data, softirq) \
+ struct tasklet name = { \
+ LIST_HEAD_INIT(name.list), -1, softirq, 0, 0, func, data }
+#define DECLARE_TASKLET(name, func, data) \
+ _DECLARE_TASKLET(name, func, data, 0)
+#define DECLARE_SOFTIRQ_TASKLET(name, func, data) \
+ _DECLARE_TASKLET(name, func, data, 1)
 
/* Indicates status of tasklet work on each CPU. */
DECLARE_PER_CPU(unsigned long, tasklet_work_to_do);
@@ -38,6 +46,8 @@
void tasklet_kill(struct tasklet *t);
void tasklet_init(
struct tasklet *t, void (*func)(unsigned long), unsigned long data);
+void softirq_tasklet_init(
+ struct tasklet *t, void (*func)(unsigned long), unsigned long data);
void tasklet_subsys_init(void);
 
#endif /* __XEN_TASKLET_H__ */