From 0332db5aff3eec73eead6d991782b0dee1376dc0 Mon Sep 17 00:00:00 2001
From: Hirokazu Takata
Date: Mon, 28 Nov 2005 13:43:59 -0800
Subject: [PATCH] m32r: Introduce atomic_cmpxchg and atomic_inc_not_zero operations

Introduce atomic_cmpxchg and atomic_inc_not_zero operations for m32r.

Signed-off-by: Hayato Fujiwara
Signed-off-by: Hirokazu Takata
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-m32r/atomic.h | 21 ++++++++++++++++
 include/asm-m32r/system.h | 64 +++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 83 insertions(+), 2 deletions(-)

diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index bfff69a4993..ef1fb8ea472 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -242,6 +242,27 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  */
 #define atomic_add_negative(i,v)	(atomic_add_return((i), (v)) < 0)
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 {
 	unsigned long flags;
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 73348c3f858..5eee832b73a 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -11,6 +11,7 @@
  */
 
 #include <linux/config.h>
+#include <asm/assembler.h>
 
 #ifdef __KERNEL__
@@ -132,8 +133,6 @@ static inline void local_irq_disable(void)
 	!(flags & 0x40);			\
 })
 
-#endif	/* __KERNEL__ */
-
 #define nop()	__asm__ __volatile__ ("nop" : : )
 
 #define xchg(ptr,x) \
@@ -213,6 +212,67 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr,
 	return (tmp);
 }
 
+#define __HAVE_ARCH_CMPXCHG	1
+
+static __inline__ unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
+{
+	unsigned long flags;
+	unsigned int retval;
+
+	local_irq_save(flags);
+	__asm__ __volatile__ (
+			DCACHE_CLEAR("%0", "r4", "%1")
+			M32R_LOCK" %0, @%1;	\n"
+		"	bne	%0, %2, 1f;	\n"
+			M32R_UNLOCK" %3, @%1;	\n"
+		"	bra	2f;		\n"
+		"	.fillinsn		\n"
+		"1:"
+			M32R_UNLOCK" %2, @%1;	\n"
+		"	.fillinsn		\n"
+		"2:"
+			: "=&r" (retval)
+			: "r" (p), "r" (old), "r" (new)
+			: "cbit", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+			, "r4"
+#endif	/* CONFIG_CHIP_M32700_TS1 */
+		);
+	local_irq_restore(flags);
+
+	return retval;
+}
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg().  */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+#if 0	/* we don't have __cmpxchg_u64 */
+	case 8:
+		return __cmpxchg_u64(ptr, old, new);
+#endif	/* 0 */
+	}
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)						\
+({									\
+	__typeof__(*(ptr)) _o_ = (o);					\
+	__typeof__(*(ptr)) _n_ = (n);					\
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
+			(unsigned long)_n_, sizeof(*(ptr)));		\
+})
+
+#endif	/* __KERNEL__ */
+
 /*
  * Memory barrier.
  *
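Usage note (not part of the patch): the classic consumer of
atomic_inc_not_zero() is a "take a reference only if the object is still
live" path. A minimal sketch follows; the names struct my_obj and
my_obj_tryget are hypothetical, invented for illustration, while atomic_t
and atomic_inc_not_zero() are the primitives provided by the patch above.

	struct my_obj {
		atomic_t refcount;	/* 0 means teardown has begun */
	};

	/*
	 * Take a reference unless the count has already dropped to zero.
	 * Returns non-zero on success.  Because atomic_inc_not_zero()
	 * never moves the counter off zero, a dying object cannot be
	 * resurrected by a racing reader.
	 */
	static int my_obj_tryget(struct my_obj *obj)
	{
		return atomic_inc_not_zero(&obj->refcount);
	}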
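For reference, the compare-and-exchange retry loop that atomic_add_unless()
open-codes above can also be expressed with portable C11 atomics. This is a
sketch of the same algorithm for reading alongside the macro, assuming a
C11-capable compiler; the helper name add_unless is invented here, and this
is not a drop-in replacement for the m32r implementation.

	#include <stdatomic.h>

	/* Add 'a' to '*v' unless '*v' equals 'u'; return 1 if the add
	 * happened, 0 otherwise -- the same contract as the kernel macro. */
	static int add_unless(atomic_int *v, int a, int u)
	{
		int c = atomic_load(v);

		while (c != u) {
			/* On failure (including a spurious failure of the
			 * weak variant), 'c' is reloaded with the current
			 * value and the loop retries. */
			if (atomic_compare_exchange_weak(v, &c, c + a))
				return 1;
		}
		return 0;
	}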