ChangeSet 1.1664.1.1, 2005/06/03 17:42:10+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
Event-channel CPU affinity. Currently all event channels still bind to
VCPU#0 at start of day, and have their binding automatically changed
when bound to a VIRQ or IPI source. XenLinux maintains a per-cpu
evtchn mask denoting which event channels are bound to each cpu.
Todo: Allow guests to change the binding of non-IPI and non-VIRQ evtchns.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
 linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c |   47 ++-
 xen/arch/x86/irq.c                               |   14
 xen/common/domain.c                              |    6
 xen/common/event_channel.c                       |  335 ++++++++++-------------
 xen/include/public/event_channel.h               |   34 +-
 xen/include/xen/event.h                          |    7
 xen/include/xen/sched.h                          |   34 +-
 7 files changed, 250 insertions(+), 227 deletions(-)
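For orientation before the patch body: the core of the guest-side change is that each CPU owns a bitmask of the event channels bound to it, and rebinding a channel simply moves one bit between masks. The following is a minimal standalone sketch of that bookkeeping with toy sizes and a hypothetical main() harness; only the names cpu_evtchn, cpu_evtchn_mask and bind_evtchn_to_cpu mirror the patch, everything else is illustrative.

    #include <stdio.h>
    #include <string.h>

    /* Toy sizes for illustration; the patch uses NR_EVENT_CHANNELS/NR_CPUS. */
    #define NR_CPUS    4
    #define NR_EVTCHNS 64

    static unsigned char cpu_evtchn[NR_EVTCHNS];              /* evtchn -> cpu */
    static unsigned int  cpu_evtchn_mask[NR_CPUS][NR_EVTCHNS/32];

    static void set_bit32(unsigned int *m, int n)   { m[n/32] |=  (1u << (n%32)); }
    static void clear_bit32(unsigned int *m, int n) { m[n/32] &= ~(1u << (n%32)); }

    /* Mirrors the patch's bind_evtchn_to_cpu(): clear the channel's bit in
     * its old cpu's mask, set it in the new cpu's mask, record the owner. */
    static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
    {
        clear_bit32(cpu_evtchn_mask[cpu_evtchn[chn]], chn);
        set_bit32(cpu_evtchn_mask[cpu], chn);
        cpu_evtchn[chn] = cpu;
    }

    int main(void)
    {
        /* Start of day: every channel notifies CPU#0, as in the patch. */
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));

        bind_evtchn_to_cpu(5, 2);   /* e.g. a VIRQ bound on CPU#2 */
        printf("chn 5 -> cpu %u\n", cpu_evtchn[5]);
        return 0;
    }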
diff -Nru a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c 2005-06-03 13:01:57 -04:00
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c 2005-06-03 13:01:57 -04:00
@@ -74,6 +74,33 @@
/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
+#ifdef CONFIG_SMP
+
+static u8 cpu_evtchn[NR_EVENT_CHANNELS];
+static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
+
+#define active_evtchns(cpu,sh,idx) \
+ ((sh)->evtchn_pending[idx] & \
+ cpu_evtchn_mask[cpu][idx] & \
+ ~(sh)->evtchn_mask[idx])
+
+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+{
+ clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
+ set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
+ cpu_evtchn[chn] = cpu;
+}
+
+#else
+
+#define active_evtchns(cpu,sh,idx) \
+ ((sh)->evtchn_pending[idx] & \
+ ~(sh)->evtchn_mask[idx])
+
+#define bind_evtchn_to_cpu(chn,cpu) ((void)0)
+
+#endif
+
/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
@@ -109,9 +136,9 @@
{
u32 l1, l2;
unsigned int l1i, l2i, port;
- int irq;
+ int irq, cpu = smp_processor_id();
shared_info_t *s = HYPERVISOR_shared_info;
- vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
+ vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];
vcpu_info->evtchn_upcall_pending = 0;
@@ -122,7 +149,7 @@
l1i = __ffs(l1);
l1 &= ~(1 << l1i);
- while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
+ while ( (l2 = active_evtchns(cpu, s, l1i)) != 0 )
{
l2i = __ffs(l2);
l2 &= ~(1 << l2i);
@@ -171,6 +198,8 @@
irq_to_evtchn[irq] = evtchn;
per_cpu(virq_to_irq, cpu)[virq] = irq;
+
+ bind_evtchn_to_cpu(evtchn, cpu);
}
irq_bindcount[irq]++;
@@ -225,8 +254,13 @@
irq_to_evtchn[irq] = evtchn;
per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
- } else
+
+ bind_evtchn_to_cpu(evtchn, cpu);
+ }
+ else
+ {
irq = evtchn_to_irq[evtchn];
+ }
irq_bindcount[irq]++;
@@ -545,6 +579,11 @@
irq_ctx_init(0);
spin_lock_init(&irq_mapping_update_lock);
+
+#ifdef CONFIG_SMP
+ /* By default all event channels notify CPU#0. */
+ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+#endif
for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
/* No VIRQ -> IRQ mappings. */
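With the hunks above, the upcall loop filters pending events through the invoking CPU's mask rather than scanning every channel. Below is a hedged standalone model of that two-level scan (selector word, per-word scan, and the active_evtchns() filter); the harness, the ffs32 helper, and the direct clearing of the pending bit are illustrative assumptions, not the patch's code, which consumes events via the IRQ handler path.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical harness: a 32-bit selector word (l1) guards 32 pending
     * words. active = pending & bound-to-this-cpu & ~masked. */
    static uint32_t evtchn_pending[32], evtchn_mask[32];
    static uint32_t cpu_evtchn_mask[4][32];

    #define active_evtchns(cpu, idx) \
        (evtchn_pending[idx] & cpu_evtchn_mask[cpu][idx] & ~evtchn_mask[idx])

    static int ffs32(uint32_t x) { return __builtin_ctz(x); }   /* x != 0 */

    static void demux(int cpu, uint32_t l1)
    {
        while (l1 != 0) {
            int l1i = ffs32(l1);
            l1 &= ~(1u << l1i);
            uint32_t l2;
            while ((l2 = active_evtchns(cpu, l1i)) != 0) {
                int l2i = ffs32(l2);
                evtchn_pending[l1i] &= ~(1u << l2i);   /* consume the event */
                printf("deliver port %d on cpu %d\n", l1i*32 + l2i, cpu);
            }
        }
    }

    int main(void)
    {
        cpu_evtchn_mask[0][1] = 1u << 3;   /* port 35 bound to cpu 0 */
        evtchn_pending[1]     = 1u << 3;   /* port 35 pending */
        demux(0, 1u << 1);                 /* selector bit for word 1 */
        return 0;
    }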
diff -Nru a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c 2005-06-03 13:01:57 -04:00
+++ b/xen/arch/x86/irq.c 2005-06-03 13:01:57 -04:00
@@ -184,22 +184,22 @@
u8 nr_guests;
u8 in_flight;
u8 shareable;
- struct vcpu *guest[IRQ_MAX_GUESTS];
+ struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
static void __do_IRQ_guest(int irq)
{
irq_desc_t *desc = &irq_desc[irq];
irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
- struct vcpu *v;
+ struct domain *d;
int i;
for ( i = 0; i < action->nr_guests; i++ )
{
- v = action->guest[i];
- if ( !test_and_set_bit(irq, &v->domain->pirq_mask) )
+ d = action->guest[i];
+ if ( !test_and_set_bit(irq, &d->pirq_mask) )
action->in_flight++;
- send_guest_pirq(v, irq);
+ send_guest_pirq(d, irq);
}
}
@@ -294,7 +294,7 @@
goto out;
}
- action->guest[action->nr_guests++] = v;
+ action->guest[action->nr_guests++] = v->domain;
out:
spin_unlock_irqrestore(&desc->lock, flags);
@@ -328,7 +328,7 @@
else
{
i = 0;
- while ( action->guest[i] && action->guest[i]->domain != d )
+ while ( action->guest[i] && (action->guest[i] != d) )
i++;
memmove(&action->guest[i], &action->guest[i+1], IRQ_MAX_GUESTS-i-1);
action->nr_guests--;
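Since guest[] now holds domains rather than vcpus, a pirq is accounted in-flight at most once per domain via the pirq_mask bitmap. A small hypothetical model of that test_and_set_bit accounting follows (a single-threaded stand-in for the atomic op; names are illustrative):

    #include <stdio.h>

    struct domain { unsigned long pirq_mask; };

    static int test_and_set_bit_ul(unsigned long *w, int n)
    {
        unsigned long old = *w;
        *w |= 1ul << n;
        return (int)((old >> n) & 1);
    }

    int main(void)
    {
        struct domain d = { 0 };
        int in_flight = 0, irq = 9;

        for (int i = 0; i < 2; i++)                 /* two back-to-back IRQs */
            if (!test_and_set_bit_ul(&d.pirq_mask, irq))
                in_flight++;                        /* counted only once */

        printf("in_flight = %d\n", in_flight);      /* prints 1 */
        return 0;
    }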
diff -Nru a/xen/common/domain.c b/xen/common/domain.c
--- a/xen/common/domain.c 2005-06-03 13:01:57 -04:00
+++ b/xen/common/domain.c 2005-06-03 13:01:57 -04:00
@@ -54,9 +54,9 @@
set_bit(_DOMF_idle_domain, &d->domain_flags);
if ( !is_idle_task(d) &&
- ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
+ ((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) )
{
- destroy_event_channels(d);
+ evtchn_destroy(d);
free_domain_struct(d);
return NULL;
}
@@ -251,7 +251,7 @@
*pd = d->next_in_hashbucket;
write_unlock(&domlist_lock);
- destroy_event_channels(d);
+ evtchn_destroy(d);
grant_table_destroy(d);
free_perdomain_pt(d);
diff -Nru a/xen/common/event_channel.c b/xen/common/event_channel.c
--- a/xen/common/event_channel.c 2005-06-03 13:01:57 -04:00
+++ b/xen/common/event_channel.c 2005-06-03 13:01:57 -04:00
@@ -27,50 +27,31 @@
#include <public/xen.h>
#include <public/event_channel.h>
-#define INIT_EVENT_CHANNELS 16
-#define MAX_EVENT_CHANNELS 1024
-#define EVENT_CHANNELS_SPREAD 32
+#define bucket_from_port(d,p) \
+ ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
+#define port_is_valid(d,p) \
+ (((p) >= 0) && ((p) < MAX_EVTCHNS) && \
+ (bucket_from_port(d,p) != NULL))
+#define evtchn_from_port(d,p) \
+ (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])
-
-static int get_free_port(struct vcpu *v)
+static int get_free_port(struct domain *d)
{
- struct domain *d = v->domain;
- int max, port;
- event_channel_t *chn;
-
- max = d->max_event_channel;
- chn = d->event_channel;
-
- for ( port = v->vcpu_id * EVENT_CHANNELS_SPREAD; port < max; port++ )
- if ( chn[port].state == ECS_FREE )
- break;
-
- if ( port >= max )
- {
- if ( max == MAX_EVENT_CHANNELS )
- return -ENOSPC;
-
- if ( port == 0 )
- max = INIT_EVENT_CHANNELS;
- else
- max = port + EVENT_CHANNELS_SPREAD;
-
- chn = xmalloc_array(event_channel_t, max);
- if ( unlikely(chn == NULL) )
- return -ENOMEM;
-
- memset(chn, 0, max * sizeof(event_channel_t));
-
- if ( d->event_channel != NULL )
- {
- memcpy(chn, d->event_channel, d->max_event_channel *
- sizeof(event_channel_t));
- xfree(d->event_channel);
- }
+ struct evtchn *chn;
+ int port;
- d->event_channel = chn;
- d->max_event_channel = max;
- }
+ for ( port = 0; port_is_valid(d, port); port++ )
+ if ( evtchn_from_port(d, port)->state == ECS_FREE )
+ return port;
+
+ if ( port == MAX_EVTCHNS )
+ return -ENOSPC;
+
+ chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
+ if ( unlikely(chn == NULL) )
+ return -ENOMEM;
+ memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
+ bucket_from_port(d, port) = chn;
return port;
}
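The rewritten get_free_port() above replaces the old grow-and-copy array with fixed-size buckets allocated on demand: when the scan falls off the end of the valid ports, `port` already indexes the first slot of an unallocated bucket. A toy single-domain model of the idea, using calloc in place of xmalloc_array+memset and plain error codes:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy constants; EVTCHNS_PER_BUCKET must be a power of 2 for the mask. */
    #define EVTCHNS_PER_BUCKET 16
    #define MAX_EVTCHNS        1024
    #define NR_BUCKETS         (MAX_EVTCHNS / EVTCHNS_PER_BUCKET)

    struct evtchn { int state; };            /* 0 == ECS_FREE */
    static struct evtchn *bucket[NR_BUCKETS];

    #define bucket_from_port(p)  (bucket[(p) / EVTCHNS_PER_BUCKET])
    #define port_is_valid(p)     ((p) >= 0 && (p) < MAX_EVTCHNS && \
                                  bucket_from_port(p) != NULL)
    #define evtchn_from_port(p)  (&bucket_from_port(p)[(p) & (EVTCHNS_PER_BUCKET-1)])

    static int get_free_port(void)
    {
        int port;
        for (port = 0; port_is_valid(port); port++)
            if (evtchn_from_port(port)->state == 0)
                return port;
        if (port == MAX_EVTCHNS)
            return -1;                       /* -ENOSPC in the patch */
        bucket_from_port(port) = calloc(EVTCHNS_PER_BUCKET, sizeof(struct evtchn));
        return bucket_from_port(port) ? port : -2;   /* -ENOMEM */
    }

    int main(void)
    {
        int p = get_free_port();             /* allocates bucket 0, returns 0 */
        evtchn_from_port(p)->state = 1;      /* mark in use */
        printf("first port = %d, next = %d\n", p, get_free_port());
        return 0;
    }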
@@ -78,18 +59,20 @@
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
+ struct evtchn *chn;
struct domain *d = current->domain;
int port;