
To: "H. Peter Anvin" <hpa@xxxxxxxxx>, Ingo Molnar <mingo@xxxxxxx>
Subject: [Xen-devel] Re: [GIT PULL] More cleanups for atomic memory operations/spinlocks
From: Jeremy Fitzhardinge <jeremy@xxxxxxxx>
Date: Fri, 11 Nov 2011 12:03:57 -0800
Cc: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>, the arch/x86 maintainers <x86@xxxxxxxxxx>, Linux Kernel Mailing List <linux-kernel@xxxxxxxxxxxxxxx>, Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Delivery-date: Fri, 11 Nov 2011 12:09:59 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <4EB9D48F.5010201@xxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <4EB9D48F.5010201@xxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:7.0.1) Gecko/20110930 Thunderbird/7.0.1
Ping?

On 11/08/2011 05:17 PM, Jeremy Fitzhardinge wrote:
> I forgot to push these for the just-closed merge window, but they're
> fine for the next one.  Could you find them a home in tip.git?
>
> Thanks,
>     J
>
> The following changes since commit 1ea6b8f48918282bdca0b32a34095504ee65bab5:
>
>   Linux 3.2-rc1 (2011-11-07 16:16:02 -0800)
>
> are available in the git repository at:
>   git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen.git upstream/ticketlock-cleanup
>
> Jeremy Fitzhardinge (2):
>       x86/cmpxchg: add a locked add() helper
>       x86: consolidate xchg and xadd macros
>
>  arch/x86/include/asm/cmpxchg.h  |  140 +++++++++++++++++++-------------------
>  arch/x86/include/asm/spinlock.h |   15 +----
>  2 files changed, 71 insertions(+), 84 deletions(-)
>
> diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
> index 5d3acdf..5488e10 100644
> --- a/arch/x86/include/asm/cmpxchg.h
> +++ b/arch/x86/include/asm/cmpxchg.h
> @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
>       __compiletime_error("Bad argument size for cmpxchg");
>  extern void __xadd_wrong_size(void)
>       __compiletime_error("Bad argument size for xadd");
> +extern void __add_wrong_size(void)
> +     __compiletime_error("Bad argument size for add");
>  
>  /*
>   * Constants for operation sizes. On 32-bit, the 64-bit size is set to
> @@ -31,60 +33,47 @@ extern void __xadd_wrong_size(void)
>  #define      __X86_CASE_Q    -1              /* sizeof will never return -1 */
>  #endif
>  
> +/* 
> + * An exchange-type operation, which takes a value and a pointer, and
> + * returns the old value.
> + */
> +#define __xchg_op(ptr, arg, op, lock)                                        \
> +     ({                                                              \
> +             __typeof__ (*(ptr)) __ret = (arg);                      \
> +             switch (sizeof(*(ptr))) {                               \
> +             case __X86_CASE_B:                                      \
> +                     asm volatile (lock #op "b %b0, %1\n"            \
> +                                   : "+r" (__ret), "+m" (*(ptr))     \
> +                                   : : "memory", "cc");              \
> +                     break;                                          \
> +             case __X86_CASE_W:                                      \
> +                     asm volatile (lock #op "w %w0, %1\n"            \
> +                                   : "+r" (__ret), "+m" (*(ptr))     \
> +                                   : : "memory", "cc");              \
> +                     break;                                          \
> +             case __X86_CASE_L:                                      \
> +                     asm volatile (lock #op "l %0, %1\n"             \
> +                                   : "+r" (__ret), "+m" (*(ptr))     \
> +                                   : : "memory", "cc");              \
> +                     break;                                          \
> +             case __X86_CASE_Q:                                      \
> +                     asm volatile (lock #op "q %q0, %1\n"            \
> +                                   : "+r" (__ret), "+m" (*(ptr))     \
> +                                   : : "memory", "cc");              \
> +                     break;                                          \
> +             default:                                                \
> +                     __ ## op ## _wrong_size();                      \
> +             }                                                       \
> +             __ret;                                                  \
> +     })
> +
>  /*
>   * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
>   * Since this is generally used to protect other memory information, we
>   * use "asm volatile" and "memory" clobbers to prevent gcc from moving
>   * information around.
>   */
> -#define __xchg(x, ptr, size)                                         \
> -({                                                                   \
> -     __typeof(*(ptr)) __x = (x);                                     \
> -     switch (size) {                                                 \
> -     case __X86_CASE_B:                                              \
> -     {                                                               \
> -             volatile u8 *__ptr = (volatile u8 *)(ptr);              \
> -             asm volatile("xchgb %0,%1"                              \
> -                          : "=q" (__x), "+m" (*__ptr)                \
> -                          : "0" (__x)                                \
> -                          : "memory");                               \
> -             break;                                                  \
> -     }                                                               \
> -     case __X86_CASE_W:                                              \
> -     {                                                               \
> -             volatile u16 *__ptr = (volatile u16 *)(ptr);            \
> -             asm volatile("xchgw %0,%1"                              \
> -                          : "=r" (__x), "+m" (*__ptr)                \
> -                          : "0" (__x)                                \
> -                          : "memory");                               \
> -             break;                                                  \
> -     }                                                               \
> -     case __X86_CASE_L:                                              \
> -     {                                                               \
> -             volatile u32 *__ptr = (volatile u32 *)(ptr);            \
> -             asm volatile("xchgl %0,%1"                              \
> -                          : "=r" (__x), "+m" (*__ptr)                \
> -                          : "0" (__x)                                \
> -                          : "memory");                               \
> -             break;                                                  \
> -     }                                                               \
> -     case __X86_CASE_Q:                                              \
> -     {                                                               \
> -             volatile u64 *__ptr = (volatile u64 *)(ptr);            \
> -             asm volatile("xchgq %0,%1"                              \
> -                          : "=r" (__x), "+m" (*__ptr)                \
> -                          : "0" (__x)                                \
> -                          : "memory");                               \
> -             break;                                                  \
> -     }                                                               \
> -     default:                                                        \
> -             __xchg_wrong_size();                                    \
> -     }                                                               \
> -     __x;                                                            \
> -})
> -
> -#define xchg(ptr, v)                                                 \
> -     __xchg((v), (ptr), sizeof(*ptr))
> +#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
>  
>  /*
>   * Atomic compare and exchange.  Compare OLD with MEM, if identical,
> @@ -165,46 +154,57 @@ extern void __xadd_wrong_size(void)
>       __cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
>  #endif
>  
> -#define __xadd(ptr, inc, lock)                                               \
> +/*
> + * xadd() adds "inc" to "*ptr" and atomically returns the previous
> + * value of "*ptr".
> + *
> + * xadd() is locked when multiple CPUs are online
> + * xadd_sync() is always locked
> + * xadd_local() is never locked
> + */
> +#define __xadd(ptr, inc, lock)       __xchg_op((ptr), (inc), xadd, lock)
> +#define xadd(ptr, inc)               __xadd((ptr), (inc), LOCK_PREFIX)
> +#define xadd_sync(ptr, inc)  __xadd((ptr), (inc), "lock; ")
> +#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
> +
> +#define __add(ptr, inc, lock)                                                \
>       ({                                                              \
>               __typeof__ (*(ptr)) __ret = (inc);                      \
>               switch (sizeof(*(ptr))) {                               \
>               case __X86_CASE_B:                                      \
> -                     asm volatile (lock "xaddb %b0, %1\n"            \
> -                                   : "+r" (__ret), "+m" (*(ptr))     \
> -                                   : : "memory", "cc");              \
> +                     asm volatile (lock "addb %b1, %0\n"             \
> +                                   : "+m" (*(ptr)) : "ri" (inc)      \
> +                                   : "memory", "cc");                \
>                       break;                                          \
>               case __X86_CASE_W:                                      \
> -                     asm volatile (lock "xaddw %w0, %1\n"            \
> -                                   : "+r" (__ret), "+m" (*(ptr))     \
> -                                   : : "memory", "cc");              \
> +                     asm volatile (lock "addw %w1, %0\n"             \
> +                                   : "+m" (*(ptr)) : "ri" (inc)      \
> +                                   : "memory", "cc");                \
>                       break;                                          \
>               case __X86_CASE_L:                                      \
> -                     asm volatile (lock "xaddl %0, %1\n"             \
> -                                   : "+r" (__ret), "+m" (*(ptr))     \
> -                                   : : "memory", "cc");              \
> +                     asm volatile (lock "addl %1, %0\n"              \
> +                                   : "+m" (*(ptr)) : "ri" (inc)      \
> +                                   : "memory", "cc");                \
>                       break;                                          \
>               case __X86_CASE_Q:                                      \
> -                     asm volatile (lock "xaddq %q0, %1\n"            \
> -                                   : "+r" (__ret), "+m" (*(ptr))     \
> -                                   : : "memory", "cc");              \
> +                     asm volatile (lock "addq %1, %0\n"              \
> +                                   : "+m" (*(ptr)) : "ri" (inc)      \
> +                                   : "memory", "cc");                \
>                       break;                                          \
>               default:                                                \
> -                     __xadd_wrong_size();                            \
> +                     __add_wrong_size();                             \
>               }                                                       \
>               __ret;                                                  \
>       })
>  
>  /*
> - * xadd() adds "inc" to "*ptr" and atomically returns the previous
> - * value of "*ptr".
> + * add_*() adds "inc" to "*ptr"
>   *
> - * xadd() is locked when multiple CPUs are online
> - * xadd_sync() is always locked
> - * xadd_local() is never locked
> + * __add() takes a lock prefix
> + * add_smp() is locked when multiple CPUs are online
> + * add_sync() is always locked
>   */
> -#define xadd(ptr, inc)               __xadd((ptr), (inc), LOCK_PREFIX)
> -#define xadd_sync(ptr, inc)  __xadd((ptr), (inc), "lock; ")
> -#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
> +#define add_smp(ptr, inc)    __add((ptr), (inc), LOCK_PREFIX)
> +#define add_sync(ptr, inc)   __add((ptr), (inc), "lock; ")
>  
>  #endif       /* ASM_X86_CMPXCHG_H */
> diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
> index 972c260..a82c2bf 100644
> --- a/arch/x86/include/asm/spinlock.h
> +++ b/arch/x86/include/asm/spinlock.h
> @@ -79,23 +79,10 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
>       return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
>  }
>  
> -#if (NR_CPUS < 256)
>  static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
>  {
> -     asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
> -                  : "+m" (lock->head_tail)
> -                  :
> -                  : "memory", "cc");
> +     __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
>  }
> -#else
> -static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
> -{
> -     asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
> -                  : "+m" (lock->head_tail)
> -                  :
> -                  : "memory", "cc");
> -}
> -#endif
>  
>  static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
>  {
>
>
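For reference, the behaviour these consolidated helpers are meant to provide can be modelled in user space with the portable GCC/C11 __atomic builtins. This is only an illustrative sketch of the semantics, not the kernel macros from the patch:

    #include <stdio.h>

    int main(void)
    {
            unsigned int v = 5;

            /* xchg(): store a new value and return the old one (always implicitly locked) */
            unsigned int old = __atomic_exchange_n(&v, 9u, __ATOMIC_SEQ_CST);
            printf("xchg: old=%u new=%u\n", old, v);        /* old=5  new=9  */

            /* xadd(): add and return the value from before the add */
            old = __atomic_fetch_add(&v, 3u, __ATOMIC_SEQ_CST);
            printf("xadd: old=%u new=%u\n", old, v);        /* old=9  new=12 */

            /* add_smp()/add_sync(): add without needing the old value back,
             * which is why x86 can use "lock add" instead of "lock xadd" */
            __atomic_add_fetch(&v, 1u, __ATOMIC_SEQ_CST);
            printf("add:  new=%u\n", v);                    /* new=13 */

            return 0;
    }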

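The spinlock.h change relies on the fact that releasing a ticket lock only needs to advance the head ("now serving") counter, so a plain locked add on the head field is enough. A minimal user-space analogy with C11 atomics (the head/tail names follow the kernel's tickets layout, but this is not the kernel implementation):

    #include <stdatomic.h>

    struct ticketlock {
            atomic_uint head;       /* ticket currently being served */
            atomic_uint tail;       /* next ticket to hand out */
    };

    static void ticket_lock(struct ticketlock *lk)
    {
            unsigned int me = atomic_fetch_add(&lk->tail, 1);   /* like xadd() */
            while (atomic_load(&lk->head) != me)
                    ;                                           /* spin until served */
    }

    static void ticket_unlock(struct ticketlock *lk)
    {
            atomic_fetch_add(&lk->head, 1);     /* like __add(&lock->tickets.head, 1, ...) */
    }

    int main(void)
    {
            struct ticketlock lk = { 0, 0 };

            ticket_lock(&lk);
            ticket_unlock(&lk);
            return 0;
    }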

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
