WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-3.0-testing] [LINUX] Fix IRQ SMP affinity logic for event channels.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-3.0-testing] [LINUX] Fix IRQ SMP affinity logic for event channels.
From: Xen patchbot-3.0-testing <patchbot-3.0-testing@xxxxxxxxxxxxxxxxxxx>
Date: Sun, 11 Jun 2006 12:15:41 +0000
Delivery-date: Sun, 11 Jun 2006 05:17:26 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID df221e310e2b4f162167147aa3d7656af3969417
# Parent  d4312b52c3f88ff4dc4d872f9f0f1c7b556be0e2
[LINUX] Fix IRQ SMP affinity logic for event channels.
The logic now mimics native x86 behaviour: a request to change
affinity via /proc is held until the next interrupt on that
event channel. So /proc/irq/n/smp_affinity may not change
immediately!
Other notes:
 1. CPU-specific interrupts silently ignore requests to change
    affinity. For example, resched0, timer0, callfunc0, ...
 2. Reading smp_affinity always returns a cpumask containing
    a single cpu. An event channel can only be bound to a single
    cpu at a time. Neither Xen nor XenLinux implement IRQ
    balancing: requires a user-space balancing daemon.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
xen-unstable changeset:   10317:be05097d5d69925a72405201140df8da4c1cfa5c
xen-unstable date:        Sun Jun 11 09:54:35 2006 +0100
---
 linux-2.6-xen-sparse/drivers/xen/core/evtchn.c |   44 +++++++------------------
 1 files changed, 14 insertions(+), 30 deletions(-)

diff -r d4312b52c3f8 -r df221e310e2b 
linux-2.6-xen-sparse/drivers/xen/core/evtchn.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c    Fri Jun 09 17:06:09 
2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/core/evtchn.c    Sun Jun 11 10:10:59 
2006 +0100
@@ -120,6 +120,11 @@ static inline unsigned long active_evtch
 
 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 {
+       int irq = evtchn_to_irq[chn];
+
+       BUG_ON(irq == -1);
+       set_native_irq_info(irq, cpumask_of_cpu(cpu));
+
        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
@@ -127,7 +132,12 @@ static void bind_evtchn_to_cpu(unsigned 
 
 static void init_evtchn_cpu_bindings(void)
 {
+       int i;
+
        /* By default all event channels notify CPU#0. */
+       for (i = 0; i < NR_IRQS; i++)
+               set_native_irq_info(i, cpumask_of_cpu(0));
+
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
 }
@@ -419,25 +429,14 @@ void unbind_from_irqhandler(unsigned int
 }
 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
 
-#ifdef CONFIG_SMP
-static void do_nothing_function(void *ign)
-{
-}
-#endif
-
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
        evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
-       int evtchn;
-
-       spin_lock(&irq_mapping_update_lock);
-
-       evtchn = evtchn_from_irq(irq);
-       if (!VALID_EVTCHN(evtchn)) {
-               spin_unlock(&irq_mapping_update_lock);
+       int evtchn = evtchn_from_irq(irq);
+
+       if (!VALID_EVTCHN(evtchn))
                return;
-       }
 
        /* Send future instances of this interrupt to other vcpu. */
        op.u.bind_vcpu.port = evtchn;
@@ -450,21 +449,6 @@ static void rebind_irq_to_cpu(unsigned i
         */
        if (HYPERVISOR_event_channel_op(&op) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
-
-       spin_unlock(&irq_mapping_update_lock);
-
-       /*
-        * Now send the new target processor a NOP IPI. When this returns, it
-        * will check for any pending interrupts, and so service any that got 
-        * delivered to the wrong processor by mistake.
-        * 
-        * XXX: The only time this is called with interrupts disabled is from
-        * the hotplug/hotunplug path. In that case, all cpus are stopped with 
-        * interrupts disabled, and the missed interrupts will be picked up
-        * when they start again. This is kind of a hack.
-        */
-       if (!irqs_disabled())
-               smp_call_function(do_nothing_function, NULL, 0, 0);
 }
 
 
@@ -589,8 +573,8 @@ static unsigned int startup_pirq(unsigne
 
        pirq_query_unmask(irq_to_pirq(irq));
 
+       evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
-       evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
 
  out:

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-3.0-testing] [LINUX] Fix IRQ SMP affinity logic for event channels., Xen patchbot-3.0-testing <=