[Xen-changelog] [xen-unstable] evtchn: Avoid spurious event-channel notifications across unbind/bind.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] evtchn: Avoid spurious event-channel notifications across unbind/bind.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 01 Aug 2008 02:20:11 -0700
Delivery-date: Fri, 01 Aug 2008 02:20:15 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1217499210 -3600
# Node ID 482c16b55c28cd8dfc7b6dc93d0987f0b7bed42d
# Parent  82edb8c9ae178e32aaffc21c821767633e030b6f
evtchn: Avoid spurious event-channel notifications across unbind/bind.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
Signed-off-by: Huacai Chen <huacai.chen@xxxxxxxxx>
---
 xen/arch/ia64/xen/irq.c    |    3 --
 xen/arch/x86/irq.c         |    3 --
 xen/common/domain.c        |    2 +
 xen/common/event_channel.c |   57 ++++++++++++++++++++++++++-------------------
 xen/include/xen/irq.h      |    2 -
 xen/include/xen/sched.h    |    2 +
 xen/include/xen/spinlock.h |    4 +--
 7 files changed, 43 insertions(+), 30 deletions(-)
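
The race being closed: send_guest_vcpu_virq()/send_guest_global_virq() used to
read v->virq_to_evtchn[virq] and set the event pending with no lock held, so a
notification could land on a port that __evtchn_close() had already freed (and
that a later bind may have reused). The change takes an IRQ-safe per-VCPU
virq_lock around the lookup-and-deliver step, has the closer wait on that lock
with spin_barrier() after clearing the binding, and clears the port's pending
bit so a re-bound channel does not start life with a stale event. Below is a
minimal userspace sketch of that pattern only, with pthreads standing in for
Xen's spinlock primitives and purely illustrative names (binding[], pending[],
send_virq(), close_virq()); it is not code from this changeset:

    /* Sketch of the unbind/send synchronisation pattern (not Xen code). */
    #include <pthread.h>
    #include <stdio.h>

    #define NR_VIRQS 8
    #define NR_PORTS 64

    static pthread_mutex_t virq_lock = PTHREAD_MUTEX_INITIALIZER;
    static int binding[NR_VIRQS];   /* 0 == unbound, else event port */
    static int pending[NR_PORTS];   /* per-port pending flags        */

    /* Sender side: analogous to send_guest_vcpu_virq(). */
    static void send_virq(int virq)
    {
        pthread_mutex_lock(&virq_lock);   /* Xen: spin_lock_irqsave()  */
        int port = binding[virq];
        if (port != 0)                    /* re-check under the lock   */
            pending[port] = 1;            /* Xen: evtchn_set_pending() */
        pthread_mutex_unlock(&virq_lock);
    }

    /* Closer side: analogous to the ECS_VIRQ case in __evtchn_close(). */
    static void close_virq(int virq, int port)
    {
        binding[virq] = 0;                /* tear down the binding     */
        /*
         * Xen calls spin_barrier(&v->virq_lock) here: wait until no
         * sender still holds the lock, so no late delivery can hit the
         * port once it is freed and possibly re-bound.
         */
        pthread_mutex_lock(&virq_lock);
        pthread_mutex_unlock(&virq_lock);
        pending[port] = 0;                /* Xen: clear_bit(port, ...) */
    }

    int main(void)
    {
        binding[1] = 3;
        send_virq(1);
        close_virq(1, 3);
        printf("pending[3] = %d\n", pending[3]);
        return 0;
    }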

diff -r 82edb8c9ae17 -r 482c16b55c28 xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c   Thu Jul 31 09:51:06 2008 +0100
+++ b/xen/arch/ia64/xen/irq.c   Thu Jul 31 11:13:30 2008 +0100
@@ -459,7 +459,7 @@ int pirq_guest_bind(struct vcpu *v, int 
     return rc;
 }
 
-int pirq_guest_unbind(struct domain *d, int irq)
+void pirq_guest_unbind(struct domain *d, int irq)
 {
     irq_desc_t         *desc = &irq_desc[irq];
     irq_guest_action_t *action;
@@ -493,7 +493,6 @@ int pirq_guest_unbind(struct domain *d, 
     }
 
     spin_unlock_irqrestore(&desc->lock, flags);    
-    return 0;
 }
 
 void
diff -r 82edb8c9ae17 -r 482c16b55c28 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Thu Jul 31 09:51:06 2008 +0100
+++ b/xen/arch/x86/irq.c        Thu Jul 31 11:13:30 2008 +0100
@@ -573,7 +573,7 @@ int pirq_guest_bind(struct vcpu *v, int 
     return rc;
 }
 
-int pirq_guest_unbind(struct domain *d, int irq)
+void pirq_guest_unbind(struct domain *d, int irq)
 {
     unsigned int        vector;
     irq_desc_t         *desc;
@@ -660,7 +660,6 @@ int pirq_guest_unbind(struct domain *d, 
 
  out:
     spin_unlock_irqrestore(&desc->lock, flags);    
-    return 0;
 }
 
 extern void dump_ioapic_irq_info(void);
diff -r 82edb8c9ae17 -r 482c16b55c28 xen/common/domain.c
--- a/xen/common/domain.c       Thu Jul 31 09:51:06 2008 +0100
+++ b/xen/common/domain.c       Thu Jul 31 11:13:30 2008 +0100
@@ -137,6 +137,8 @@ struct vcpu *alloc_vcpu(
     v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
     v->runstate.state_entry_time = NOW();
 
+    spin_lock_init(&v->virq_lock);
+
     if ( !is_idle_domain(d) )
     {
         set_bit(_VPF_down, &v->pause_flags);
diff -r 82edb8c9ae17 -r 482c16b55c28 xen/common/event_channel.c
--- a/xen/common/event_channel.c        Thu Jul 31 09:51:06 2008 +0100
+++ b/xen/common/event_channel.c        Thu Jul 31 11:13:30 2008 +0100
@@ -386,14 +386,18 @@ static long __evtchn_close(struct domain
         break;
 
     case ECS_PIRQ:
-        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
-            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
+        pirq_guest_unbind(d1, chn1->u.pirq);
+        d1->pirq_to_evtchn[chn1->u.pirq] = 0;
         break;
 
     case ECS_VIRQ:
         for_each_vcpu ( d1, v )
-            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
-                v->virq_to_evtchn[chn1->u.virq] = 0;
+        {
+            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
+                continue;
+            v->virq_to_evtchn[chn1->u.virq] = 0;
+            spin_barrier(&v->virq_lock);
+        }
         break;
 
     case ECS_IPI:
@@ -447,6 +451,9 @@ static long __evtchn_close(struct domain
         BUG();
     }
 
+    /* Clear pending event to avoid unexpected behavior on re-bind. */
+    clear_bit(port1, &shared_info(d1, evtchn_pending));
+
     /* Reset binding to vcpu0 when the channel is freed. */
     chn1->state          = ECS_FREE;
     chn1->notify_vcpu_id = 0;
@@ -573,37 +580,33 @@ static int evtchn_set_pending(struct vcp
     return 0;
 }
 
+int guest_enabled_event(struct vcpu *v, int virq)
+{
+    return ((v != NULL) && (v->virq_to_evtchn[virq] != 0));
+}
 
 void send_guest_vcpu_virq(struct vcpu *v, int virq)
 {
+    unsigned long flags;
     int port;
 
     ASSERT(!virq_is_global(virq));
+
+    spin_lock_irqsave(&v->virq_lock, flags);
 
     port = v->virq_to_evtchn[virq];
     if ( unlikely(port == 0) )
-        return;
+        goto out;
 
     evtchn_set_pending(v, port);
-}
-
-int guest_enabled_event(struct vcpu *v, int virq)
-{
-    int port;
-
-    if ( unlikely(v == NULL) )
-        return 0;
-
-    port = v->virq_to_evtchn[virq];
-    if ( port == 0 )
-        return 0;
-
-    /* virq is in use */
-    return 1;
+
+ out:
+    spin_unlock_irqrestore(&v->virq_lock, flags);
 }
 
 void send_guest_global_virq(struct domain *d, int virq)
 {
+    unsigned long flags;
     int port;
     struct vcpu *v;
     struct evtchn *chn;
@@ -617,20 +620,28 @@ void send_guest_global_virq(struct domai
     if ( unlikely(v == NULL) )
         return;
 
+    spin_lock_irqsave(&v->virq_lock, flags);
+
     port = v->virq_to_evtchn[virq];
     if ( unlikely(port == 0) )
-        return;
+        goto out;
 
     chn = evtchn_from_port(d, port);
     evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
-}
-
+
+ out:
+    spin_unlock_irqrestore(&v->virq_lock, flags);
+}
 
 int send_guest_pirq(struct domain *d, int pirq)
 {
     int port = d->pirq_to_evtchn[pirq];
     struct evtchn *chn;
 
+    /*
+     * It should not be possible to race with __evtchn_close():
+     * The caller of this function must synchronise with pirq_guest_unbind().
+     */
     ASSERT(port != 0);
 
     chn = evtchn_from_port(d, port);
diff -r 82edb8c9ae17 -r 482c16b55c28 xen/include/xen/irq.h
--- a/xen/include/xen/irq.h     Thu Jul 31 09:51:06 2008 +0100
+++ b/xen/include/xen/irq.h     Thu Jul 31 11:13:30 2008 +0100
@@ -78,7 +78,7 @@ extern int pirq_guest_eoi(struct domain 
 extern int pirq_guest_eoi(struct domain *d, int irq);
 extern int pirq_guest_unmask(struct domain *d);
 extern int pirq_guest_bind(struct vcpu *v, int irq, int will_share);
-extern int pirq_guest_unbind(struct domain *d, int irq);
+extern void pirq_guest_unbind(struct domain *d, int irq);
 
 static inline void set_native_irq_info(int irq, cpumask_t mask)
 {
diff -r 82edb8c9ae17 -r 482c16b55c28 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Thu Jul 31 09:51:06 2008 +0100
+++ b/xen/include/xen/sched.h   Thu Jul 31 11:13:30 2008 +0100
@@ -137,7 +137,9 @@ struct vcpu
     unsigned long    pause_flags;
     atomic_t         pause_count;
 
+    /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
     u16              virq_to_evtchn[NR_VIRQS];
+    spinlock_t       virq_lock;
 
     /* Bitmask of CPUs on which this VCPU may run. */
     cpumask_t        cpu_affinity;
diff -r 82edb8c9ae17 -r 482c16b55c28 xen/include/xen/spinlock.h
--- a/xen/include/xen/spinlock.h        Thu Jul 31 09:51:06 2008 +0100
+++ b/xen/include/xen/spinlock.h        Thu Jul 31 11:13:30 2008 +0100
@@ -85,8 +85,8 @@ typedef struct { int gcc_is_buggy; } rwl
 /* Ensure a lock is quiescent between two critical operations. */
 static inline void spin_barrier(spinlock_t *lock)
 {
-    spin_lock(lock);
-    spin_unlock(lock);
+    do { mb(); } while ( spin_is_locked(lock) );
+    mb();
 }
 
 #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
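
The spinlock.h hunk also relaxes spin_barrier(): rather than acquiring and
releasing the lock, the waiter now spins until it observes the lock free, with
full memory barriers around the wait, so a closer draining in-flight senders
never competes with them for the lock itself. The standalone sketch below
mirrors those semantics with C11 atomics; spinlock_t, spin_lock(),
spin_unlock() and spin_is_locked() here are simplified stand-ins, not the Xen
definitions:

    /* Sketch of the relaxed spin_barrier() semantics (not Xen code). */
    #include <stdatomic.h>

    typedef struct { atomic_int locked; } spinlock_t;

    static void spin_lock(spinlock_t *l)
    {
        while (atomic_exchange_explicit(&l->locked, 1, memory_order_acquire))
            ;                             /* spin until acquired */
    }

    static void spin_unlock(spinlock_t *l)
    {
        atomic_store_explicit(&l->locked, 0, memory_order_release);
    }

    static int spin_is_locked(spinlock_t *l)
    {
        return atomic_load_explicit(&l->locked, memory_order_relaxed);
    }

    /* Wait for the lock to become quiescent without ever taking it;
     * the fences stand in for Xen's mb(). */
    static void spin_barrier(spinlock_t *l)
    {
        do {
            atomic_thread_fence(memory_order_seq_cst);
        } while (spin_is_locked(l));
        atomic_thread_fence(memory_order_seq_cst);
    }

    int main(void)
    {
        spinlock_t l = { 0 };
        spin_lock(&l);
        spin_unlock(&l);
        spin_barrier(&l);                 /* returns at once: lock is free */
        return 0;
    }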

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
