On Mon, Jan 17, 2011 at 08:52:22PM +0530, Srivatsa Vaddagiri wrote:
> I think this is still racy ..
>
> Unlocker                        Locker
>
> test slowpath
>   -> false
>
>                                 set slowpath flag
>                                 test for lock pickup
>                                   -> fail
>                                 block
>
> unlock
>
> The unlock needs to happen before testing the slowpath flag, no? I have made
> that change for my KVM guest and it seems to be working well. Will clean up
> and post my patches shortly.
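
To spell out why the ordering matters: the locker's slow path is (1) set the
slowpath flag, (2) re-test for lock pickup, (3) block. Roughly (a sketch only;
set_slowpath_flag() and block_until_kicked() are hypothetical stand-ins for
what the pv lock_spinning callback really does):

	/* Illustrative sketch of the locker's slow path, not the real code */
	static void lock_slowpath(arch_spinlock_t *lock, __ticket_t want)
	{
		set_slowpath_flag(lock);	/* 1: advertise we will sleep */

		/* 2: last chance - did the unlocker already release? */
		if (ACCESS_ONCE(lock->tickets.head) == want)
			return;			/* lock picked up, don't block */

		block_until_kicked(lock, want);	/* 3: sleep until kicked */
	}

If the unlocker tests the flag before step 1 and releases only after step 2,
the locker blocks and nobody ever kicks it: a lost wakeup. Releasing before
testing closes the window: either step 2 sees the lock free, or the unlocker's
test sees the flag set in step 1 and kicks.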
The patch below fixes the race described above. You can fold it into your patch
13/14 if you agree this is the right direction.
Signed-off-by: Srivatsa Vaddagiri <vatsa@xxxxxxxxxxxxxxxxxx>
---
arch/x86/include/asm/spinlock.h | 7 +++----
arch/x86/kernel/paravirt-spinlocks.c | 22 +++++-----------------
2 files changed, 8 insertions(+), 21 deletions(-)
Index: linux-2.6.37/arch/x86/include/asm/spinlock.h
===================================================================
--- linux-2.6.37.orig/arch/x86/include/asm/spinlock.h
+++ linux-2.6.37/arch/x86/include/asm/spinlock.h
@@ -55,7 +55,7 @@ static __always_inline void __ticket_unl
 /* Only defined when CONFIG_PARAVIRT_SPINLOCKS defined, but may as
  * well leave the prototype always visible.  */
-extern void __ticket_unlock_release_slowpath(struct arch_spinlock *lock);
+extern void __ticket_unlock_slowpath(struct arch_spinlock *lock);
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
@@ -166,10 +166,9 @@ static __always_inline int arch_spin_try
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	barrier();		/* prevent reordering out of locked region */
+	__ticket_unlock_release(lock);
 	if (unlikely(__ticket_in_slowpath(lock)))
-		__ticket_unlock_release_slowpath(lock);
-	else
-		__ticket_unlock_release(lock);
+		__ticket_unlock_slowpath(lock);
 	barrier();		/* prevent reordering into locked region */
 }
 
Index: linux-2.6.37/arch/x86/kernel/paravirt-spinlocks.c
===================================================================
--- linux-2.6.37.orig/arch/x86/kernel/paravirt-spinlocks.c
+++ linux-2.6.37/arch/x86/kernel/paravirt-spinlocks.c
@@ -22,33 +22,21 @@ EXPORT_SYMBOL(pv_lock_ops);
  * bits.  However, we need to be careful about this because someone
  * may just be entering as we leave, and enter the slowpath.
  */
-void __ticket_unlock_release_slowpath(struct arch_spinlock *lock)
+void __ticket_unlock_slowpath(struct arch_spinlock *lock)
 {
 	struct arch_spinlock old, new;
 
 	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
 
 	old = ACCESS_ONCE(*lock);
-
 	new = old;
-	new.tickets.head += TICKET_LOCK_INC;
 
 	/* Clear the slowpath flag */
 	new.tickets.tail &= ~TICKET_SLOWPATH_FLAG;
+	if (new.tickets.head == new.tickets.tail)
+		cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
 
-	/*
-	 * If there's currently people waiting or someone snuck in
-	 * since we read the lock above, then do a normal unlock and
-	 * kick.  If we managed to unlock with no queued waiters, then
-	 * we can clear the slowpath flag.
-	 */
-	if (new.tickets.head != new.tickets.tail ||
-	    cmpxchg(&lock->head_tail,
-		    old.head_tail, new.head_tail) != old.head_tail) {
-		/* still people waiting */
-		__ticket_unlock_release(lock);
-	}
-
+	/* Wake up an appropriate waiter */
 	__ticket_unlock_kick(lock, new.tickets.head);
 }
-EXPORT_SYMBOL(__ticket_unlock_release_slowpath);
+EXPORT_SYMBOL(__ticket_unlock_slowpath);
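
For the record, a worked trace of the new __ticket_unlock_slowpath() (assuming
the pv-ticketlock constants from this series, TICKET_LOCK_INC == 2 and
TICKET_SLOWPATH_FLAG == 1 kept in tail; the exact values are my assumption):

	Owner holds ticket 4, one blocked waiter holds ticket 6, and the
	waiter has set the slowpath flag:  head = 4, tail = 8 | 1 = 9

	Owner unlocks:
	  __ticket_unlock_release():   head = 6
	  __ticket_unlock_slowpath():  new.tail = 9 & ~1 = 8; head (6) !=
		tail (8), so someone is still queued: leave the flag set
		and skip the cmpxchg
	  __ticket_unlock_kick(lock, 6) wakes the ticket-6 waiter

	The waiter unlocks with nobody behind it:
	  __ticket_unlock_release():   head = 8
	  __ticket_unlock_slowpath():  new.tail = 9 & ~1 = 8 == head, so
		cmpxchg {head 8, tail 9} -> {head 8, tail 8} clears the
		flag; if a new locker bumped tail in the meantime, the
		cmpxchg fails harmlessly and the flag is cleared on some
		later unlock instead
	  __ticket_unlock_kick(lock, 8) finds no waiter and is a no-op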