| author    | Mathieu Desnoyers <compudj@krystal.dyndns.org>       | 2007-05-08 00:34:38 -0700 |
|-----------|------------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-08 11:15:20 -0700 |
| commit    | 2856f5e31c1413bf6e4f1371e07e17078a5fee5e (patch)     |                           |
| tree      | 587dfe584f0913813d0cf2414a9378618143db15 /include    |                           |
| parent    | 79d365a306c3af53d8a732fec79b76c0b285d816 (diff)      |                           |
atomic.h: atomic_add_unless as inline. Remove system.h atomic.h circular dependency
I agree (with Andi Kleen) that this typeof is not needed and is more error
prone. All of the original atomic.h code that uses cmpxchg() (which includes
atomic_add_unless()) uses #defines instead of inline functions, probably to
circumvent a circular dependency between system.h and atomic.h on powerpc
(which my patch addresses). It therefore makes sense to use inline functions,
which provide type checking.
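
[Editor's note: a minimal sketch of the type-checking argument above, not part of the original patch. The `my_*`/`other_t` names are hypothetical stand-ins for `atomic_t` and `atomic_cmpxchg()`, and the stand-in cmpxchg is not actually atomic; the point is only that the statement-expression macro accepts a pointer of the wrong type, while the inline function is checked against its prototype. Compiles with GCC (statement expressions are a GNU extension, as in the kernel code being discussed).]

```c
#include <stdio.h>

/* Hypothetical stand-ins; NOT atomic, illustration only. */
typedef struct { volatile int counter; } my_atomic_t;
typedef struct { volatile int counter; } other_t;	/* a different type entirely */

#define my_cmpxchg(v, o, n)				\
({							\
	__typeof__((v)->counter) __cur = (v)->counter;	\
	if (__cur == (o))				\
		(v)->counter = (n);			\
	__cur;						\
})

/* Macro form, as in the old atomic.h: typeof infers everything, checks nothing. */
#define add_unless_macro(v, a, u)			\
({							\
	__typeof__((v)->counter) c, old;		\
	c = (v)->counter;				\
	while (c != (u) && (old = my_cmpxchg((v), c, c + (a))) != c) \
		c = old;				\
	c != (u);					\
})

/* Inline form, as introduced by this patch: the argument type is enforced. */
static inline int add_unless_inline(my_atomic_t *v, int a, int u)
{
	int c, old;
	c = v->counter;
	while (c != (u) && (old = my_cmpxchg(v, c, c + (a))) != c)
		c = old;
	return c != (u);
}

int main(void)
{
	other_t wrong = { 5 };
	(void)add_unless_macro(&wrong, 1, 0);	/* compiles silently: no type check */
	/* add_unless_inline(&wrong, 1, 0); */	/* rejected: incompatible pointer type */

	my_atomic_t v = { 3 };
	printf("%d %d\n", add_unless_inline(&v, 1, 0), v.counter);	/* prints "1 4" */
	return 0;
}
```

Building this with `gcc -Wall` produces no diagnostic for the macro call on the wrong type, which is exactly the failure mode the inline conversion removes.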
Digging into the FRV architecture shows that it is affected by the same
circular dependency. The diff below applies this fix on top of the rest of my
atomic.h standardization patches.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
include/asm-alpha/atomic.h     | 59
include/asm-arm/atomic.h       |  1
include/asm-arm26/atomic.h     |  1
include/asm-frv/atomic.h       | 91
include/asm-frv/system.h       | 70
include/asm-generic/atomic.h   | 17
include/asm-i386/atomic.h      | 29
include/asm-ia64/atomic.h      | 59
include/asm-m32r/atomic.h      | 23
include/asm-m68k/atomic.h      | 31
include/asm-m68knommu/atomic.h | 25
include/asm-mips/atomic.h      | 46
include/asm-parisc/atomic.h    | 47
include/asm-powerpc/atomic.h   |  1
include/asm-ppc/system.h       |  1
include/asm-sparc64/atomic.h   | 59
include/asm-x86_64/atomic.h    | 59
include/asm-xtensa/atomic.h    | 23
18 files changed, 360 insertions(+), 282 deletions(-)
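
[Editor's note: before reading the per-architecture hunks, the structure of the fix in one file: `cmpxchg()` lives in the system.h layer, which includes nothing atomic-related, and atomic.h builds its type-checked helpers on top of it, so the two headers no longer need each other. A hypothetical, single-file sketch of that layering; the `sketch_*` names are invented for illustration and the stand-in primitive is not atomic.]

```c
#include <stdio.h>

/* --- "system.h" layer: the raw primitive; depends on no atomic types --- */
#define sketch_cmpxchg(ptr, test, new)			\
({							\
	__typeof__(*(ptr)) __orig = *(ptr);		\
	if (__orig == (test))				\
		*(ptr) = (new);				\
	__orig;	/* NOT atomic: interface illustration only */ \
})

/* --- "atomic.h" layer: built on the system.h primitive, no cycle --- */
typedef struct { volatile int counter; } sketch_atomic_t;

#define sketch_atomic_cmpxchg(v, old, new) \
	sketch_cmpxchg(&((v)->counter), old, new)

static inline int sketch_atomic_add_unless(sketch_atomic_t *v, int a, int u)
{
	int c, old;

	c = v->counter;
	for (;;) {
		if (c == u)		/* hit the forbidden value: give up */
			break;
		old = sketch_atomic_cmpxchg(v, c, c + a);
		if (old == c)		/* no race: the add succeeded */
			break;
		c = old;		/* raced: retry with the fresh value */
	}
	return c != u;
}

int main(void)
{
	sketch_atomic_t v = { 1 };
	printf("%d %d\n", sketch_atomic_add_unless(&v, 1, 0), v.counter);	/* 1 2 */
	v.counter = 0;
	printf("%d %d\n", sketch_atomic_add_unless(&v, 1, 0), v.counter);	/* 0 0 */
	return 0;
}
```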
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 7b4fba88cbe..f5cb7b878af 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -2,6 +2,7 @@
 #define _ALPHA_ATOMIC_H
 
 #include <asm/barrier.h>
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -190,20 +191,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /**
@@ -215,20 +217,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic64_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic64_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index f266c279512..3b59f94b5a3 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -12,6 +12,7 @@
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 97e944fe1cf..d6dd42374cf 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -20,7 +20,6 @@
 #ifndef __ASM_ARM_ATOMIC_H
 #define __ASM_ARM_ATOMIC_H
 
-
 #ifdef CONFIG_SMP
 #error SMP is NOT supported
 #endif
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 066386ac238..d425d8d0ad7 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -16,6 +16,7 @@
 
 #include <linux/types.h>
 #include <asm/spr-regs.h>
+#include <asm/system.h>
 
 #ifdef CONFIG_SMP
 #error not SMP safe
@@ -258,85 +259,23 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
 
 #define tas(ptr) (xchg((ptr), 1))
 
-/*****************************************************************************/
-/*
- * compare and conditionally exchange value with memory
- * - if (*ptr == test) then orig = *ptr; *ptr = test;
- * - if (*ptr != test) then orig = *ptr;
- */
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-
-#define cmpxchg(ptr, test, new) \
-({ \
-        __typeof__(ptr) __xg_ptr = (ptr); \
-        __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
-        __typeof__(*(ptr)) __xg_test = (test); \
-        __typeof__(*(ptr)) __xg_new = (new); \
- \
-        switch (sizeof(__xg_orig)) { \
-        case 4: \
-                asm volatile( \
-                        "0: \n" \
-                        " orcc gr0,gr0,gr0,icc3 \n" \
-                        " ckeq icc3,cc7 \n" \
-                        " ld.p %M0,%1 \n" \
-                        " orcr cc7,cc7,cc3 \n" \
-                        " sub%I4cc %1,%4,%2,icc0 \n" \
-                        " bne icc0,#0,1f \n" \
-                        " cst.p %3,%M0 ,cc3,#1 \n" \
-                        " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
-                        " beq icc3,#0,0b \n" \
-                        "1: \n" \
-                        : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
-                        : "r"(__xg_new), "NPr"(__xg_test) \
-                        : "memory", "cc7", "cc3", "icc3", "icc0" \
-                        ); \
-                break; \
- \
-        default: \
-                __xg_orig = 0; \
-                asm volatile("break"); \
-                break; \
-        } \
- \
-        __xg_orig; \
-})
-
-#else
-
-extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
-
-#define cmpxchg(ptr, test, new) \
-({ \
-        __typeof__(ptr) __xg_ptr = (ptr); \
-        __typeof__(*(ptr)) __xg_orig; \
-        __typeof__(*(ptr)) __xg_test = (test); \
-        __typeof__(*(ptr)) __xg_new = (new); \
- \
-        switch (sizeof(__xg_orig)) { \
-        case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
-        default: \
-                __xg_orig = 0; \
-                asm volatile("break"); \
-                break; \
-        } \
- \
-        __xg_orig; \
-})
-
-#endif
-
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index 1166899317d..be303b3eef4 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -13,7 +13,6 @@
 #define _ASM_SYSTEM_H
 
 #include <linux/linkage.h>
-#include <asm/atomic.h>
 
 struct thread_struct;
 
@@ -197,4 +196,73 @@ extern void free_initmem(void);
 
 #define arch_align_stack(x) (x)
 
+/*****************************************************************************/
+/*
+ * compare and conditionally exchange value with memory
+ * - if (*ptr == test) then orig = *ptr; *ptr = test;
+ * - if (*ptr != test) then orig = *ptr;
+ */
+#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define cmpxchg(ptr, test, new) \
+({ \
+        __typeof__(ptr) __xg_ptr = (ptr); \
+        __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
+        __typeof__(*(ptr)) __xg_test = (test); \
+        __typeof__(*(ptr)) __xg_new = (new); \
+ \
+        switch (sizeof(__xg_orig)) { \
+        case 4: \
+                asm volatile( \
+                        "0: \n" \
+                        " orcc gr0,gr0,gr0,icc3 \n" \
+                        " ckeq icc3,cc7 \n" \
+                        " ld.p %M0,%1 \n" \
+                        " orcr cc7,cc7,cc3 \n" \
+                        " sub%I4cc %1,%4,%2,icc0 \n" \
+                        " bne icc0,#0,1f \n" \
+                        " cst.p %3,%M0 ,cc3,#1 \n" \
+                        " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+                        " beq icc3,#0,0b \n" \
+                        "1: \n" \
+                        : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
+                        : "r"(__xg_new), "NPr"(__xg_test) \
+                        : "memory", "cc7", "cc3", "icc3", "icc0" \
+                        ); \
+                break; \
+ \
+        default: \
+                __xg_orig = 0; \
+                asm volatile("break"); \
+                break; \
+        } \
+ \
+        __xg_orig; \
+})
+
+#else
+
+extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
+
+#define cmpxchg(ptr, test, new) \
+({ \
+        __typeof__(ptr) __xg_ptr = (ptr); \
+        __typeof__(*(ptr)) __xg_orig; \
+        __typeof__(*(ptr)) __xg_test = (test); \
+        __typeof__(*(ptr)) __xg_new = (new); \
+ \
+        switch (sizeof(__xg_orig)) { \
+        case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
+        default: \
+                __xg_orig = 0; \
+                asm volatile("break"); \
+                break; \
+        } \
+ \
+        __xg_orig; \
+})
+
+#endif
+
 #endif /* _ASM_SYSTEM_H */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 5ae6dce1cba..85fd0aa27a8 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -9,7 +9,6 @@
  */
 
 #include <asm/types.h>
-#include <asm/system.h>
 
 /*
  * Suppport for atomic_long_t
@@ -123,8 +122,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l)
 	return (long)atomic64_dec_return(v);
 }
 
-#define atomic_long_add_unless(l, a, u) \
-	atomic64_add_unless((atomic64_t *)(l), (a), (u))
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+	atomic64_t *v = (atomic64_t *)l;
+
+	return (long)atomic64_add_unless(v, a, u);
+}
 
 #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
 
@@ -236,8 +239,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l)
 	return (long)atomic_dec_return(v);
 }
 
-#define atomic_long_add_unless(l, a, u) \
-	atomic_add_unless((atomic_t *)(l), (a), (u))
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+	atomic_t *v = (atomic_t *)l;
+
+	return (long)atomic_add_unless(v, a, u);
+}
 
 #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
 
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 08935113206..ff90c6e3fcb 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -219,20 +219,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index b16ad235c7e..1fc3b83325d 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/system.h>
 
 /*
  * On IA-64, counter must always be volatile to ensure that that the
@@ -95,36 +96,38 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 	(cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__(v->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__(v->counter) c, old; \
-        c = atomic64_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic64_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic_add_return(i,v) \
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index f5a7d7301c7..3a38ffe4a4f 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -253,14 +253,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 static __inline__ void atomic_clear_mask(unsigned long  mask, atomic_t *addr)
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index d5eed64cb83..4915294fea6 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -2,7 +2,7 @@
 #define __ARCH_M68K_ATOMIC__
 
-#include <asm/system.h>	/* local_irq_XXX() */
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -170,20 +170,21 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
 }
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 6c4e4b63e45..d5632a305da 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -1,7 +1,7 @@
 #ifndef __ARCH_M68KNOMMU_ATOMIC__
 #define __ARCH_M68KNOMMU_ATOMIC__
 
-#include <asm/system.h>	/* local_irq_XXX() */
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -131,14 +131,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 6423ffa195a..62daa746a9c 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -18,6 +18,7 @@
 #include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
@@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
@@ -694,14 +701,21 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 66a0edbb51f..e894ee35074 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -6,6 +6,7 @@
 #define _ASM_PARISC_ATOMIC_H_
 
 #include <linux/types.h>
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -174,14 +175,21 @@ static __inline__ int atomic_read(const atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
@@ -283,14 +291,21 @@ atomic64_read(const atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic64_read(v); \
-        while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #endif /* CONFIG_64BIT */
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 438a7fcfba5..c44810b9d32 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -11,6 +11,7 @@ typedef struct { volatile int counter; } atomic_t;
 
 #include <linux/compiler.h>
 #include <asm/synch.h>
 #include <asm/asm-compat.h>
+#include <asm/system.h>
 
 #define ATOMIC_INIT(i)		{ (i) }
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index 738943584c0..56abe5e9e15 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -6,7 +6,6 @@
 
 #include <linux/kernel.h>
 
-#include <asm/atomic.h>
 #include <asm/hw_irq.h>
 
 /*
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index c3feb3af2cf..3fb4e1f7f18 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -9,6 +9,7 @@
 #define __ARCH_SPARC64_ATOMIC__
 
 #include <linux/types.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 typedef struct { volatile __s64 counter; } atomic64_t;
@@ -73,40 +74,42 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        likely(c != (u)); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic64_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic64_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        likely(c != (u)); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 80e4fdbe220..19e0c607b56 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
 #define __ARCH_X86_64_ATOMIC__
 
 #include <asm/alternative.h>
+#include <asm/system.h>
 
 /* atomic_t should be 32 bit signed type */
 
@@ -403,20 +404,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /**
@@ -428,20 +430,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u) \
-({ \
-        __typeof__((v)->counter) c, old; \
-        c = atomic64_read(v); \
-        for (;;) { \
-                if (unlikely(c == (u))) \
-                        break; \
-                old = atomic64_cmpxchg((v), c, c + (a)); \
-                if (likely(old == c)) \
-                        break; \
-                c = old; \
-        } \
-        c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+        long c, old;
+        c = atomic64_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic64_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* These are x86-specific, used by some header files */
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 5c267202106..b3b23540f14 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -234,14 +234,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u) \
-({ \
-        int c, old; \
-        c = atomic_read(v); \
-        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-                c = old; \
-        c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int c, old;
+        c = atomic_read(v);
+        for (;;) {
+                if (unlikely(c == (u)))
+                        break;
+                old = atomic_cmpxchg((v), c, c + (a));
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
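
[Editor's note: a short usage sketch of the semantics these hunks standardize. `atomic_add_unless(v, 1, 0)`, i.e. `atomic_inc_not_zero()`, is the classic "take a reference only while the object still has one" operation. The `sketch_*`/`try_get()` names are hypothetical and the stand-ins are not atomic; this only illustrates the interface contract: a non-zero return means the add happened.]

```c
#include <stdio.h>

typedef struct { volatile int counter; } sketch_atomic_t;

/* Single-threaded stand-in; the real kernel cmpxchg is atomic. */
static inline int sketch_cmpxchg(sketch_atomic_t *v, int old, int new)
{
	int cur = v->counter;
	if (cur == old)
		v->counter = new;
	return cur;
}

/* Same retry loop shape as the patch's inline functions. */
static inline int sketch_add_unless(sketch_atomic_t *v, int a, int u)
{
	int c = v->counter, old;
	while (c != u && (old = sketch_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c != u;
}

#define sketch_inc_not_zero(v) sketch_add_unless((v), 1, 0)

/* A refcounted object: a lookup may only take a reference while the
 * count is still non-zero; once it hits zero the object is being freed. */
struct obj {
	sketch_atomic_t refs;
};

static struct obj *try_get(struct obj *o)
{
	return sketch_inc_not_zero(&o->refs) ? o : NULL;	/* NULL: object dying */
}

int main(void)
{
	struct obj live = { { 2 } }, dying = { { 0 } };

	printf("live:  %s (refs=%d)\n", try_get(&live) ? "got ref" : "miss",
	       live.refs.counter);	/* got ref (refs=3) */
	printf("dying: %s (refs=%d)\n", try_get(&dying) ? "got ref" : "miss",
	       dying.refs.counter);	/* miss (refs=0) */
	return 0;
}
```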