Re: [Xen-devel] [PATCH] convert vcpu's virq_lock to rwlock

To: Jan Beulich <JBeulich@xxxxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: Re: [Xen-devel] [PATCH] convert vcpu's virq_lock to rwlock
From: Keir Fraser <keir@xxxxxxx>
Date: Sat, 26 Mar 2011 09:29:00 +0000
On 25/03/2011 16:53, "Jan Beulich" <JBeulich@xxxxxxxxxx> wrote:

> This lock is only intended to prevent racing with closing an event
> channel - evtchn_set_pending() doesn't require external serialization.
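
For reference, the interleaving that the lock makes safe looks like this
(a sketch of mine, using the names from the patch below):

    sender (send_guest_*_virq)         closer (__evtchn_close)
    --------------------------         -----------------------
    read_lock(&v->virq_lock);
    port = v->virq_to_evtchn[virq];
                                       v->virq_to_evtchn[virq] = 0;
                                       write_barrier_irq(&v->virq_lock);
                                       ... spins while any reader still
                                           holds the lock ...
    evtchn_set_pending(v, port);
    read_unlock(&v->virq_lock);
                                       ... barrier returns; the channel
                                           can now be torn down safely ...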

It's still a small critical section; I doubt the serialisation matters
compared with the cost of an extra LOCKed operation on read_unlock().
Especially since the lock is per-vcpu anyway, and the only at-all common
VIRQ is VIRQ_TIMER. The probability of unnecessary serialisation being a
bottleneck here is basically zero.
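
(For concreteness: the unlock side of a reader-counted rwlock is itself an
atomic RMW. A minimal sketch -- not our actual asm implementation:)

    /* Minimal sketch of a reader-counted rwlock's unlock path (not
     * Xen's actual asm): dropping the reader count must be atomic,
     * which on x86 means a LOCK-prefixed read-modify-write. */
    static inline void sketch_read_unlock(volatile int *readers)
    {
        __sync_fetch_and_sub(readers, 1); /* a LOCKed subtract on x86 */
    }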

We can get rid of the local_irq_save/restore operations, though, at the
cost of changing the lock debug checks in spinlock.c: the send_*_virq
functions can be called both with IRQs disabled and with them enabled, so
as-is the patch could make us bug out on those checks. But, overall, is it
worth it? I doubt the EFLAGS.IF fiddling compares unfavourably with an
extra LOCKed operation, cost-wise.
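
(The checks in question: check_lock() latches, on first use, whether a
lock is taken with IRQs disabled, and BUGs if a later acquisition
disagrees. Roughly -- simplified from spinlock.c:)

    /* Simplified sketch of spinlock.c's check_lock(): debug->irq_safe
     * starts out as -1 ("unknown"); the first acquisition latches the
     * IRQ state, and any later acquisition in the opposite state hits
     * the BUG_ON(). Calling send_*_virq() with IRQs sometimes enabled
     * and sometimes disabled would trip exactly this. */
    static void check_lock(struct lock_debug *debug)
    {
        int irq_safe = !local_irq_is_enabled();

        if ( unlikely(debug->irq_safe != irq_safe) )
        {
            int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
            BUG_ON(seen == !irq_safe);
        }
    }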

So, I'm not even slightly convinced. I'll leave it as is.

 -- Keir

> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
> 
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -147,7 +147,7 @@ struct vcpu *alloc_vcpu(
>      v->domain = d;
>      v->vcpu_id = vcpu_id;
>  
> -    spin_lock_init(&v->virq_lock);
> +    rwlock_init(&v->virq_lock);
>  
>      tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);
>  
> --- a/xen/common/event_channel.c
> +++ b/xen/common/event_channel.c
> @@ -417,7 +417,7 @@ static long __evtchn_close(struct domain
>              if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
>                  continue;
>              v->virq_to_evtchn[chn1->u.virq] = 0;
> -            spin_barrier_irq(&v->virq_lock);
> +            write_barrier_irq(&v->virq_lock);
>          }
>          break;
>  
> @@ -613,12 +613,11 @@ int guest_enabled_event(struct vcpu *v,
>  
>  void send_guest_vcpu_virq(struct vcpu *v, int virq)
>  {
> -    unsigned long flags;
>      int port;
>  
>      ASSERT(!virq_is_global(virq));
>  
> -    spin_lock_irqsave(&v->virq_lock, flags);
> +    read_lock(&v->virq_lock);
>  
>      port = v->virq_to_evtchn[virq];
>      if ( unlikely(port == 0) )
> @@ -627,12 +626,11 @@ void send_guest_vcpu_virq(struct vcpu *v
>      evtchn_set_pending(v, port);
>  
>   out:
> -    spin_unlock_irqrestore(&v->virq_lock, flags);
> +    read_unlock(&v->virq_lock);
>  }
>  
>  void send_guest_global_virq(struct domain *d, int virq)
>  {
> -    unsigned long flags;
>      int port;
>      struct vcpu *v;
>      struct evtchn *chn;
> @@ -646,7 +644,7 @@ void send_guest_global_virq(struct domai
>      if ( unlikely(v == NULL) )
>          return;
>  
> -    spin_lock_irqsave(&v->virq_lock, flags);
> +    read_lock(&v->virq_lock);
>  
>      port = v->virq_to_evtchn[virq];
>      if ( unlikely(port == 0) )
> @@ -656,7 +654,7 @@ void send_guest_global_virq(struct domai
>      evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
>  
>   out:
> -    spin_unlock_irqrestore(&v->virq_lock, flags);
> +    read_unlock(&v->virq_lock);
>  }
>  
>  int send_guest_pirq(struct domain *d, int pirq)
> --- a/xen/common/spinlock.c
> +++ b/xen/common/spinlock.c
> @@ -344,6 +344,20 @@ int _rw_is_write_locked(rwlock_t *lock)
>      return _raw_rw_is_write_locked(&lock->raw);
>  }
>  
> +void _write_barrier(rwlock_t *lock)
> +{
> +    check_lock(&lock->debug);
> +    do { mb(); } while ( _raw_rw_is_locked(&lock->raw) );
> +}
> +
> +void _write_barrier_irq(rwlock_t *lock)
> +{
> +    unsigned long flags;
> +    local_irq_save(flags);
> +    _write_barrier(lock);
> +    local_irq_restore(flags);
> +}
> +
>  #ifdef LOCK_PROFILE
>  
>  struct lock_profile_anc {
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -157,9 +157,12 @@ struct vcpu
>      unsigned long    pause_flags;
>      atomic_t         pause_count;
>  
> -    /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
> +    /*
> +     * Writer-side-IRQ-safe virq_lock protects against delivering VIRQ
> +     * to stale evtchn.
> +     */
>      u16              virq_to_evtchn[NR_VIRQS];
> -    spinlock_t       virq_lock;
> +    rwlock_t         virq_lock;
>  
>      /* Bitmask of CPUs on which this VCPU may run. */
>      cpumask_t        cpu_affinity;
> --- a/xen/include/xen/spinlock.h
> +++ b/xen/include/xen/spinlock.h
> @@ -170,6 +170,9 @@ void _write_unlock_irqrestore(rwlock_t *
>  int _rw_is_locked(rwlock_t *lock);
>  int _rw_is_write_locked(rwlock_t *lock);
>  
> +void _write_barrier(rwlock_t *lock);
> +void _write_barrier_irq(rwlock_t *lock);
> +
>  #define spin_lock(l)                  _spin_lock(l)
>  #define spin_lock_irq(l)              _spin_lock_irq(l)
>  #define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))
> @@ -223,4 +226,7 @@ int _rw_is_write_locked(rwlock_t *lock);
>  #define rw_is_locked(l)               _rw_is_locked(l)
>  #define rw_is_write_locked(l)         _rw_is_write_locked(l)
>  
> +#define write_barrier(l)              _write_barrier(l)
> +#define write_barrier_irq(l)          _write_barrier_irq(l)
> +
>  #endif /* __SPINLOCK_H__ */