-rw-r--r--   arch/powerpc/kernel/prom.c               |   6
-rw-r--r--   arch/powerpc/platforms/iseries/setup.c   |   1
-rw-r--r--   include/asm-powerpc/atomic.h             | 178
3 files changed, 181 insertions, 4 deletions
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6391a4a0709..6a5b468edb4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1080,9 +1080,9 @@ void __init unflatten_device_tree(void)
 static int __init early_init_dt_scan_cpus(unsigned long node,
					   const char *uname, int depth, void *data)
 {
-	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 	u32 *prop;
-	unsigned long size = 0;
+	unsigned long size;
+	char *type = of_get_flat_dt_prop(node, "device_type", &size);
 
 	/* We are scanning "cpu" nodes only */
 	if (type == NULL || strcmp(type, "cpu") != 0)
@@ -1108,7 +1108,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 
 #ifdef CONFIG_ALTIVEC
 	/* Check if we have a VMX and eventually update CPU features */
-	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
+	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
 	if (prop && (*prop) > 0) {
 		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
 		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 1f338341d8f..6a29f301436 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -704,7 +704,6 @@ static void iseries_shared_idle(void)
 
 static void iseries_dedicated_idle(void)
 {
-	long oldval;
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	while (1) {
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index c5b12fd2b46..9c0b372a46e 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -197,5 +197,183 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifdef __powerpc64__
+
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i)	{ (i) }
+
+#define atomic64_read(v)	((v)->counter)
+#define atomic64_set(v,i)	(((v)->counter) = (i))
+
+static __inline__ void atomic64_add(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_add\n\
+	add	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_add_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
+	add	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+
+static __inline__ void atomic64_sub(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# atomic64_sub\n\
+	subf	%0,%2,%0\n\
+	stdcx.	%0,0,%3 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (a), "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
+	subf	%0,%1,%0\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (a), "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_inc\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%2 \n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_inc_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
+	addic	%0,%0,1\n\
+	stdcx.	%0,0,%1 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+/*
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%2		# atomic64_dec\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%2\n\
+	bne-	1b"
+	: "=&r" (t), "=m" (v->counter)
+	: "r" (&v->counter), "m" (v->counter)
+	: "cc");
+}
+
+static __inline__ long atomic64_dec_return(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
+	addic	%0,%0,-1\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long t;
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
+	addic.	%0,%0,-1\n\
+	blt-	2f\n\
+	stdcx.	%0,0,%1\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	"\n\
+2:"	: "=&r" (t)
+	: "r" (&v->counter)
+	: "cc", "memory");
+
+	return t;
+}
+
+#endif /* __powerpc64__ */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
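
For context, the new atomic64_t primitives follow the same ldarx/stdcx. (load-reserve/store-conditional) pattern as the existing 32-bit atomic_t operations: the conditional store fails and the loop retries if another CPU has written the reservation granule since the load, and the value-returning variants wrap the loop in EIEIO_ON_SMP/ISYNC_ON_SMP so they act as memory barriers. Below is a minimal usage sketch, not part of the patch; it assumes ppc64 kernel code, and struct obj, obj_get(), obj_put(), and free_obj() are hypothetical names invented for illustration.

#include <asm/atomic.h>

/* Hypothetical object with a 64-bit reference count (illustration only). */
struct obj {
	atomic64_t refs;
};

static void free_obj(struct obj *o);	/* hypothetical release function */

static void obj_init(struct obj *o)
{
	atomic64_set(&o->refs, 1);	/* plain store, no barrier implied */
}

static void obj_get(struct obj *o)
{
	atomic64_inc(&o->refs);		/* LL/SC retry loop, no barrier */
}

static void obj_put(struct obj *o)
{
	/*
	 * atomic64_dec_and_test() is built on atomic64_dec_return(), which
	 * carries full barrier semantics (EIEIO_ON_SMP before the ldarx,
	 * ISYNC_ON_SMP after the stdcx.), so the free below cannot be
	 * observed by other CPUs before the decrement.
	 */
	if (atomic64_dec_and_test(&o->refs))
		free_obj(o);
}

The void variants (atomic64_add, atomic64_inc, atomic64_dec, ...) deliberately omit the barriers and are cheaper; callers that need ordering should use the value-returning forms or an explicit smp_mb().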