[Xen-changelog] [xen-unstable] [LINUX] Fix IRQ SMP affinity logic for event channels.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [LINUX] Fix IRQ SMP affinity logic for event channels.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Sun, 11 Jun 2006 20:20:23 +0000
Delivery-date: Sun, 11 Jun 2006 13:22:18 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID be05097d5d69925a72405201140df8da4c1cfa5c
# Parent  ddc25d4ebf6023dba9c49597f3b39fe3f5dc9f0d
[LINUX] Fix IRQ SMP affinity logic for event channels.
The logic now mimics native x86 behaviour: a request to change
affinity via /proc is held until the next interrupt on that
event channel. So /proc/irq/n/smp_affinity may not change
immediately!
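The deferral follows the same pattern as the native pending-IRQ
code: the /proc write path only records the requested mask, and the
interrupt path applies it the next time the IRQ fires. A minimal
sketch of that pattern (the names pending_affinity, affinity_pending
and apply_pending_affinity are illustrative assumptions, not the
identifiers this patch uses):

    /* Sketch only: illustrative names, not the patch's code. */
    static cpumask_t pending_affinity[NR_IRQS];
    static int affinity_pending[NR_IRQS];

    /* /proc/irq/n/smp_affinity write path: record the request. */
    static void set_pending_affinity(unsigned int irq, cpumask_t mask)
    {
            pending_affinity[irq] = mask;
            affinity_pending[irq] = 1;      /* applied later, not now */
    }

    /* Interrupt path: apply the request on the next interrupt. */
    static void apply_pending_affinity(unsigned int irq)
    {
            if (!affinity_pending[irq])
                    return;
            affinity_pending[irq] = 0;
            /* An event channel binds to exactly one cpu, so only
               the first cpu of the requested mask is honoured. */
            rebind_irq_to_cpu(irq, first_cpu(pending_affinity[irq]));
    }
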
Other notes:
 1. CPU-specific interrupts (e.g. resched0, timer0, callfunc0, ...)
    silently ignore requests to change their affinity.
 2. Reading smp_affinity always returns a cpumask containing a
    single cpu, because an event channel can only be bound to one
    cpu at a time (see the user-space example below). Neither Xen
    nor XenLinux implements IRQ balancing; that would require a
    user-space balancing daemon.
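
To observe the deferred behaviour from user space, a small demo
along these lines can be used (hypothetical: the IRQ number and mask
value are assumptions, and the read-back may still show the old mask
until the next interrupt arrives on that event channel):

    #include <stdio.h>

    int main(void)
    {
            /* IRQ 16 is an assumed example; pick a real evtchn IRQ. */
            const char *path = "/proc/irq/16/smp_affinity";
            char mask[64];
            FILE *f;

            f = fopen(path, "w");
            if (!f) { perror(path); return 1; }
            fputs("2\n", f);        /* request CPU#1 (bitmask 0x2) */
            fclose(f);

            f = fopen(path, "r");
            if (!f) { perror(path); return 1; }
            if (fgets(mask, sizeof(mask), f))
                    printf("smp_affinity now: %s", mask);
            fclose(f);
            return 0;
    }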

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/drivers/xen/core/evtchn.c |   47 ++++++++-----------------
 1 files changed, 16 insertions(+), 31 deletions(-)

diff -r ddc25d4ebf60 -r be05097d5d69 linux-2.6-xen-sparse/drivers/xen/core/evtchn.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c    Sat Jun 10 11:07:11 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c    Sun Jun 11 09:54:35 2006 +0100
@@ -54,7 +54,8 @@ static DEFINE_SPINLOCK(irq_mapping_updat
 static DEFINE_SPINLOCK(irq_mapping_update_lock);
 
 /* IRQ <-> event-channel mappings. */
-static int evtchn_to_irq[NR_EVENT_CHANNELS] = {[0 ...  NR_EVENT_CHANNELS-1] = -1};
+static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
+       [0 ...  NR_EVENT_CHANNELS-1] = -1 };
 
 /* Packed IRQ information: binding type, sub-type index, and event channel. */
 static u32 irq_info[NR_IRQS];
@@ -120,6 +121,11 @@ static inline unsigned long active_evtch
 
 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
+       int irq = evtchn_to_irq[chn];
+
+       BUG_ON(irq == -1);
+       set_native_irq_info(irq, cpumask_of_cpu(cpu));
+
        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
@@ -127,7 +133,12 @@ static void bind_evtchn_to_cpu(unsigned 
 
 static void init_evtchn_cpu_bindings(void)
 {
+       int i;
+
        /* By default all event channels notify CPU#0. */
+       for (i = 0; i < NR_IRQS; i++)
+               set_native_irq_info(i, cpumask_of_cpu(0));
+
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
 }
@@ -430,25 +441,14 @@ void unbind_from_irqhandler(unsigned int
 }
 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
 
-#ifdef CONFIG_SMP
-static void do_nothing_function(void *ign)
-{
-}
-#endif
-
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
        struct evtchn_bind_vcpu bind_vcpu;
-       int evtchn;
-
-       spin_lock(&irq_mapping_update_lock);
-
-       evtchn = evtchn_from_irq(irq);
-       if (!VALID_EVTCHN(evtchn)) {
-               spin_unlock(&irq_mapping_update_lock);
+       int evtchn = evtchn_from_irq(irq);
+
+       if (!VALID_EVTCHN(evtchn))
                return;
-       }
 
        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
@@ -461,21 +461,6 @@ static void rebind_irq_to_cpu(unsigned i
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
-
-       spin_unlock(&irq_mapping_update_lock);
-
-       /*
-        * Now send the new target processor a NOP IPI. When this returns, it
-        * will check for any pending interrupts, and so service any that got 
-        * delivered to the wrong processor by mistake.
-        * 
-        * XXX: The only time this is called with interrupts disabled is from
-        * the hotplug/hotunplug path. In that case, all cpus are stopped with 
-        * interrupts disabled, and the missed interrupts will be picked up
-        * when they start again. This is kind of a hack.
-        */
-       if (!irqs_disabled())
-               smp_call_function(do_nothing_function, NULL, 0, 0);
 }
 
 
@@ -597,8 +582,8 @@ static unsigned int startup_pirq(unsigne
 
        pirq_query_unmask(irq_to_pirq(irq));
 
+       evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
-       evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
 
  out:
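
For context, rebind_irq_to_cpu() is reached from the per-IRQ
set_affinity hook, which reduces the requested cpumask to the single
cpu an event channel can be bound to. A sketch of that hook (not
part of the quoted hunks, so treat the exact form as an assumption):

    /* set_affinity hook: event channels bind to one cpu, so only
       the first cpu of the requested mask is used. */
    static void set_affinity_irq(unsigned irq, cpumask_t dest)
    {
            unsigned tcpu = first_cpu(dest);
            rebind_irq_to_cpu(irq, tcpu);
    }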

