From b8b572e1015f81b4e748417be2629dfe51ab99f9 Mon Sep 17 00:00:00 2001
From: Stephen Rothwell
Date: Fri, 1 Aug 2008 15:20:30 +1000
Subject: powerpc: Move include files to arch/powerpc/include/asm from include/asm-powerpc.

This is the result of a

mkdir arch/powerpc/include/asm
git mv include/asm-powerpc/* arch/powerpc/include/asm

Followed by a few documentation/comment fixups and a couple of places
where <asm-powerpc/...> was being used explicitly.  Of the latter only
one was outside the arch code and it is a driver only built for powerpc.

Signed-off-by: Stephen Rothwell
Signed-off-by: Paul Mackerras
---
 arch/powerpc/include/asm/spinlock.h | 295 ++++++++++++++++++++++++++++++++++++
 1 file changed, 295 insertions(+)
 create mode 100644 arch/powerpc/include/asm/spinlock.h

(limited to 'arch/powerpc/include/asm/spinlock.h')

diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
new file mode 100644
index 00000000000..f56a843f470
--- /dev/null
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -0,0 +1,295 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+#ifdef __KERNEL__
+
+/*
+ * Simple spin lock operations.
+ *
+ * Copyright (C) 2001-2004 Paul Mackerras, IBM
+ * Copyright (C) 2001 Anton Blanchard, IBM
+ * Copyright (C) 2002 Dave Engebretsen, IBM
+ *	Rework to support virtual processors
+ *
+ * Type of int is used as a full 64b word is not necessary.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+#include <linux/irqflags.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/hvcall.h>
+#include <asm/iseries/hv_call.h>
+#endif
+#include <asm/asm-compat.h>
+#include <asm/synch.h>
+
+#define __raw_spin_is_locked(x)	((x)->slock != 0)
+
+#ifdef CONFIG_PPC64
+/* use 0x800000yy when locked, where yy == CPU number */
+#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN	1
+#endif
+
+#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
+#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
+#define SYNC_IO		do {						\
+				if (unlikely(get_paca()->io_sync)) {	\
+					mb();				\
+					get_paca()->io_sync = 0;	\
+				}					\
+			} while (0)
+#else
+#define CLEAR_IO_SYNC
+#define SYNC_IO
+#endif
+
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
+{
+	unsigned long tmp, token;
+
+	token = LOCK_TOKEN;
+	__asm__ __volatile__(
+"1:	lwarx		%0,0,%2\n\
+	cmpwi		0,%0,0\n\
+	bne-		2f\n\
+	stwcx.		%1,0,%2\n\
+	bne-		1b\n\
+	isync\n\
+2:"	: "=&r" (tmp)
+	: "r" (token), "r" (&lock->slock)
+	: "cr0", "memory");
+
+	return tmp;
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	CLEAR_IO_SYNC;
+	return __spin_trylock(lock) == 0;
+}
+
+/*
+ * On a system with shared processors (that is, where a physical
+ * processor is multiplexed between several virtual processors),
+ * there is no point spinning on a lock if the holder of the lock
+ * isn't currently scheduled on a physical processor.  Instead
+ * we detect this situation and ask the hypervisor to give the
+ * rest of our timeslice to the lock holder.
+ *
+ * So that we can tell which virtual processor is holding a lock,
+ * we put 0x80000000 | smp_processor_id() in the lock when it is
+ * held.  Conveniently, we have a word in the paca that holds this
+ * value.
+ */
+
+#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+/* We only yield to the hypervisor if we are in shared processor mode */
+#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
+extern void __spin_yield(raw_spinlock_t *lock);
+extern void __rw_yield(raw_rwlock_t *lock);
+#else /* SPLPAR || ISERIES */
+#define __spin_yield(x)	barrier()
+#define __rw_yield(x)	barrier()
+#define SHARED_PROCESSOR	0
+#endif
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	CLEAR_IO_SYNC;
+	while (1) {
+		if (likely(__spin_trylock(lock) == 0))
+			break;
+		do {
+			HMT_low();
+			if (SHARED_PROCESSOR)
+				__spin_yield(lock);
+		} while (unlikely(lock->slock != 0));
+		HMT_medium();
+	}
+}
+
+static inline
+void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+	unsigned long flags_dis;
+
+	CLEAR_IO_SYNC;
+	while (1) {
+		if (likely(__spin_trylock(lock) == 0))
+			break;
+		local_save_flags(flags_dis);
+		local_irq_restore(flags);
+		do {
+			HMT_low();
+			if (SHARED_PROCESSOR)
+				__spin_yield(lock);
+		} while (unlikely(lock->slock != 0));
+		HMT_medium();
+		local_irq_restore(flags_dis);
+	}
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	SYNC_IO;
+	__asm__ __volatile__("# __raw_spin_unlock\n\t"
+				LWSYNC_ON_SMP: : :"memory");
+	lock->slock = 0;
+}
+
+#ifdef CONFIG_PPC64
+extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+#else
+#define __raw_spin_unlock_wait(lock) \
+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#endif
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
+#define __raw_write_can_lock(rw)	(!(rw)->lock)
+
+#ifdef CONFIG_PPC64
+#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
+#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
+#else
+#define __DO_SIGN_EXTEND
+#define WRLOCK_TOKEN		(-1)
+#endif
+
+/*
+ * This returns the old value in the lock + 1,
+ * so we got a read lock if the return value is > 0.
+ */
+static inline long __read_trylock(raw_rwlock_t *rw)
+{
+	long tmp;
+
+	__asm__ __volatile__(
+"1:	lwarx		%0,0,%1\n"
+	__DO_SIGN_EXTEND
+"	addic.		%0,%0,1\n\
+	ble-		2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.		%0,0,%1\n\
+	bne-		1b\n\
+	isync\n\
+2:"	: "=&r" (tmp)
+	: "r" (&rw->lock)
+	: "cr0", "xer", "memory");
+
+	return tmp;
+}
+
+/*
+ * This returns the old value in the lock,
+ * so we got the write lock if the return value is 0.
+ */
+static inline long __write_trylock(raw_rwlock_t *rw)
+{
+	long tmp, token;
+
+	token = WRLOCK_TOKEN;
+	__asm__ __volatile__(
+"1:	lwarx		%0,0,%2\n\
+	cmpwi		0,%0,0\n\
+	bne-		2f\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.		%1,0,%2\n\
+	bne-		1b\n\
+	isync\n\
+2:"	: "=&r" (tmp)
+	: "r" (token), "r" (&rw->lock)
+	: "cr0", "memory");
+
+	return tmp;
+}
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+	while (1) {
+		if (likely(__read_trylock(rw) > 0))
+			break;
+		do {
+			HMT_low();
+			if (SHARED_PROCESSOR)
+				__rw_yield(rw);
+		} while (unlikely(rw->lock < 0));
+		HMT_medium();
+	}
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+	while (1) {
+		if (likely(__write_trylock(rw) == 0))
+			break;
+		do {
+			HMT_low();
+			if (SHARED_PROCESSOR)
+				__rw_yield(rw);
+		} while (unlikely(rw->lock != 0));
+		HMT_medium();
+	}
+}
+
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+	return __read_trylock(rw) > 0;
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+	return __write_trylock(rw) == 0;
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	long tmp;
+
+	__asm__ __volatile__(
+	"# read_unlock\n\t"
+	LWSYNC_ON_SMP
+"1:	lwarx		%0,0,%1\n\
+	addic		%0,%0,-1\n"
+	PPC405_ERR77(0,%1)
+"	stwcx.		%0,0,%1\n\
+	bne-		1b"
+	: "=&r"(tmp)
+	: "r"(&rw->lock)
+	: "cr0", "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	__asm__ __volatile__("# write_unlock\n\t"
+				LWSYNC_ON_SMP: : :"memory");
+	rw->lock = 0;
+}
+
+#define _raw_spin_relax(lock)	__spin_yield(lock)
+#define _raw_read_relax(lock)	__rw_yield(lock)
+#define _raw_write_relax(lock)	__rw_yield(lock)
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SPINLOCK_H */
--
cgit v1.2.3
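
Editor's note (not part of the commit above): the primitives this header defines sit underneath the generic kernel locking API; drivers of that era call spin_lock()/spin_unlock() and friends, which on powerpc resolve down to __raw_spin_lock(), __raw_spin_lock_flags() and __raw_spin_unlock() from this file. The sketch below is illustrative only; the identifiers demo_lock, demo_counter and demo_update are invented for the example, and the mapping through the generic spinlock layer is assumed rather than quoted from the patch.

/*
 * Illustrative sketch only -- not part of the patch.
 * Typical driver-level use of the spinlock primitives defined above.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* spinlock_t wrapping raw_spinlock_t */
static unsigned long demo_counter;

static void demo_update(void)
{
	unsigned long flags;

	/*
	 * Under contention on a shared-processor LPAR, the arch code
	 * spins at low thread priority (HMT_low()) and may donate the
	 * remaining timeslice to the lock holder via __spin_yield().
	 */
	spin_lock_irqsave(&demo_lock, flags);
	demo_counter++;
	spin_unlock_irqrestore(&demo_lock, flags);	/* lwsync on SMP, then slock = 0 */
}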
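
A second illustrative sketch (again not from the commit): the header's read-write lock comment describes letting readers in interrupt context take a plain read lock while any writer takes the irq-safe form, since recursive read locking is permitted but a reader interrupting a writer on the same CPU would otherwise deadlock. The names demo_rwlock, demo_table, demo_lookup and demo_update_table are hypothetical.

/*
 * Illustrative sketch only -- not part of the patch.
 * "Mixed" rwlock usage as described in the header comment.
 */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_rwlock);
static int demo_table[16];

/* Reader, safe to call from an interrupt handler: non-irq-safe read lock. */
static int demo_lookup(int idx)
{
	int val;

	read_lock(&demo_rwlock);	/* held once __read_trylock() returns > 0 */
	val = demo_table[idx & 15];
	read_unlock(&demo_rwlock);
	return val;
}

/* Writer, process context: must use the irq-safe write lock. */
static void demo_update_table(int idx, int val)
{
	unsigned long flags;

	write_lock_irqsave(&demo_rwlock, flags);
	demo_table[idx & 15] = val;
	write_unlock_irqrestore(&demo_rwlock, flags);
}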