author | Tony Luck <tony.luck@intel.com> | 2006-03-21 08:16:55 -0800
committer | Tony Luck <tony.luck@intel.com> | 2006-03-21 08:16:55 -0800
commit | 33cbd30eaf24ea43af4fe161b6fb34a859b3f2e0 (patch)
tree | ca0086f9764236e06c244054f5c32d268d226ae6 /include
parent | 536ea4e4198eeaa5a73fb734ea675e621609bb7e (diff)
parent | a454c2f3d1fd1cab7073b53c6c14d6d4b61f4e09 (diff)
Pull ia64-mutex-primitives into release branch
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-ia64/mutex.h | 93
1 file changed, 88 insertions(+), 5 deletions(-)
```diff
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
index 458c1f7fbc1..5a3224f6af3 100644
--- a/include/asm-ia64/mutex.h
+++ b/include/asm-ia64/mutex.h
@@ -1,9 +1,92 @@
 /*
- * Pull in the generic implementation for the mutex fastpath.
+ * ia64 implementation of the mutex fastpath.
  *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
+ * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
+ *
+ */
+
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		return fail_fn(count);
+	return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1; it needs
+ * to return 0 otherwise.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	int ret = ia64_fetchadd4_rel(count, 1);
+	if (unlikely(ret < 0))
+		fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock()	1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not set
+ * it to 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
  */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (likely(cmpxchg_acq(count, 1, 0) == 1))
+		return 1;
+	return 0;
+}
 
-#include <asm-generic/mutex-dec.h>
+#endif
```
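The primitives above all operate on the same count protocol as the generic mutex code: 1 means unlocked, 0 means locked, and a value below 0 means locked with possible waiters. The sketch below is a minimal user-space model of that protocol, not kernel code: GCC `__atomic` builtins stand in for `ia64_fetchadd4_acq`, `ia64_fetchadd4_rel` and `cmpxchg_acq`, and the `demo_*` names are hypothetical. It runs single-threaded purely to show the counter transitions; locking twice from one thread would deadlock in a real mutex.

```c
/*
 * Hypothetical model of the mutex fastpath count protocol:
 * count == 1 unlocked, count == 0 locked, count < 0 locked with
 * waiters. GCC __atomic builtins stand in for the ia64 intrinsics
 * used in the patch (fetch-and-add acquire/release, CAS acquire).
 */
#include <stdio.h>

struct demo_mutex { int count; };

static void demo_slowpath(struct demo_mutex *m)
{
	/* The real slowpath queues the task and sleeps; just observe. */
	printf("slowpath: count=%d\n",
	       __atomic_load_n(&m->count, __ATOMIC_RELAXED));
}

static void demo_lock(struct demo_mutex *m)
{
	/* Mirror of __mutex_fastpath_lock: old value 1 means we own it. */
	if (__atomic_fetch_sub(&m->count, 1, __ATOMIC_ACQUIRE) != 1)
		demo_slowpath(m);
}

static void demo_unlock(struct demo_mutex *m)
{
	/* Mirror of __mutex_fastpath_unlock: an old value below 0 means
	 * waiters exist, so the slowpath must wake one up. */
	if (__atomic_fetch_add(&m->count, 1, __ATOMIC_RELEASE) < 0)
		printf("unlock slowpath: wake a waiter\n");
}

static int demo_trylock(struct demo_mutex *m)
{
	int expected = 1;
	/* Mirror of __mutex_fastpath_trylock: a single CAS 1 -> 0, so a
	 * contended count (< 1) is never modified. */
	return __atomic_compare_exchange_n(&m->count, &expected, 0, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
	struct demo_mutex m = { 1 };

	demo_lock(&m);                              /* 1 -> 0: uncontended */
	printf("trylock: %d\n", demo_trylock(&m));  /* fails, count stays 0 */
	demo_lock(&m);                              /* 0 -> -1: slowpath */
	demo_unlock(&m);                            /* -1 -> 0: wake path */
	demo_unlock(&m);                            /* 0 -> 1: released */
	printf("final count=%d\n", m.count);
	return 0;
}
```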
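The removed TODO hints at the motivation for this patch: rather than relying on the generic `atomic_dec_return()`-based fastpath from asm-generic/mutex-dec.h, ia64 can presumably use its native fetchadd forms, which combine the add and the acquire/release ordering in a single instruction, so neither the lock nor the unlock fastpath needs a compare-and-exchange loop. Trylock still uses `cmpxchg_acq`, since it must leave a contended count untouched, which is also why `__mutex_slowpath_needs_to_unlock()` returns 1: a failed lock fastpath leaves the count below 0, and unlock must then take the slowpath to wake waiters.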