From dd18434ff0b7d9b9ad3d596985fc84b329d2f9a8 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Mon, 28 Apr 2008 14:44:08 +1000
Subject: [POWERPC] Use __always_inline for xchg* and cmpxchg*

This changes the definitions of the xchg and cmpxchg families of
functions in include/asm-powerpc/system.h to be marked __always_inline
rather than __inline__.

The reason for doing this is that we rely on the compiler inlining
them in order to eliminate the references to
__xchg_called_with_bad_pointer and __cmpxchg_called_with_bad_pointer,
which are deliberately left undefined.  Thus this change will enable
us to make the inline keyword be just a hint rather than a directive.

Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/system.h | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

(limited to 'include/asm-powerpc')

diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index fab1674b31b..2b6559a6d11 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -204,7 +204,7 @@ extern int powersave_nap;	/* set if nap mode can be used in idle loop */
  * Changes the memory location '*ptr' to be val and returns
  * the previous value stored there.
  */
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u32(volatile void *p, unsigned long val)
 {
 	unsigned long prev;
@@ -229,7 +229,7 @@ __xchg_u32(volatile void *p, unsigned long val)
  * Changes the memory location '*ptr' to be val and returns
  * the previous value stored there.
  */
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u32_local(volatile void *p, unsigned long val)
 {
 	unsigned long prev;
@@ -247,7 +247,7 @@ __xchg_u32_local(volatile void *p, unsigned long val)
 }
 
 #ifdef CONFIG_PPC64
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u64(volatile void *p, unsigned long val)
 {
 	unsigned long prev;
@@ -266,7 +266,7 @@ __xchg_u64(volatile void *p, unsigned long val)
 	return prev;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u64_local(volatile void *p, unsigned long val)
 {
 	unsigned long prev;
@@ -290,7 +290,7 @@ __xchg_u64_local(volatile void *p, unsigned long val)
  */
 extern void __xchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg(volatile void *ptr, unsigned long x, unsigned int size)
 {
 	switch (size) {
@@ -305,7 +305,7 @@ __xchg(volatile void *ptr, unsigned long x, unsigned int size)
 	return x;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
 {
 	switch (size) {
@@ -338,7 +338,7 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
  */
 #define __HAVE_ARCH_CMPXCHG	1
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 {
 	unsigned int prev;
@@ -361,7 +361,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	return prev;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
 			unsigned long new)
 {
@@ -384,7 +384,7 @@ __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
 }
 
 #ifdef CONFIG_PPC64
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 {
 	unsigned long prev;
@@ -406,7 +406,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 	return prev;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
 			unsigned long new)
 {
@@ -432,7 +432,7 @@ __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
 	  unsigned int size)
 {
@@ -448,7 +448,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
 	return old;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
 	  unsigned int size)
 {
-- 
cgit v1.2.3
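
[Editor's note: the link-time trick the patch above depends on only works
if the compiler actually inlines __xchg()/__cmpxchg(); once inlined with a
constant size, the dead default branch and its call to the undefined
function are folded away.  Below is a minimal standalone sketch of the
idiom, using a GCC __sync builtin in place of the kernel's inline
assembly.  sketch_xchg() and bad_xchg_size() are illustrative names, not
the kernel's.]

/* If the compiler inlines this for a valid constant size, the call to
 * bad_xchg_size() is eliminated as dead code.  A caller that passes an
 * unsupported size leaves a reference to the undefined symbol behind,
 * so the final link fails, turning a logic error into a build error.
 */
extern void bad_xchg_size(void);	/* deliberately never defined */

static inline __attribute__((always_inline)) unsigned long
sketch_xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __sync_lock_test_and_set((volatile unsigned int *)ptr,
						(unsigned int)x);
	case 8:	/* assumes a 64-bit target */
		return __sync_lock_test_and_set((volatile unsigned long *)ptr, x);
	}
	bad_xchg_size();	/* undefined: link error on bad sizes */
	return x;
}

[If inline were only a hint and sketch_xchg() were emitted out of line,
the bad_xchg_size() reference would survive in every translation unit
that includes the header, which is why the patch forces __always_inline.]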
From 85218827cc4ca900867807f19345418164ffc108 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Mon, 28 Apr 2008 16:21:22 +1000
Subject: [POWERPC] Add IRQSTACKS support on ppc32

This makes it possible to use separate stacks for hard and soft IRQs
on 32-bit powerpc as well as on 64-bit.  The code for 32-bit is just
the 32-bit analog of the 64-bit code.

* Added allocation and initialization of the irq stacks.  We limit the
  stacks to be in lowmem for ppc32.
* Implemented ppc32 versions of call_do_softirq() and call_handle_irq()
  to switch the stack pointers
* Reworked how we do stack overflow detection.  We now keep around the
  limit of the stack in the thread_struct and compare against the limit
  to see if we've overflowed.  We can now use this on ppc64 if desired.

[ paulus@samba.org: Fixed bug on 6xx where we need to reload r9 with
  the thread_info pointer. ]

Signed-off-by: Kumar Gala
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/processor.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/asm-powerpc')

diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index fd98ca998b4..cf83f2d7e2a 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -138,6 +138,8 @@ typedef struct {
 
 struct thread_struct {
 	unsigned long	ksp;		/* Kernel stack pointer */
+	unsigned long	ksp_limit;	/* if ksp <= ksp_limit stack overflow */
+
 #ifdef CONFIG_PPC64
 	unsigned long	ksp_vsid;
 #endif
@@ -182,11 +184,14 @@ struct thread_struct {
 
 #define ARCH_MIN_TASKALIGN 16
 
 #define INIT_SP		(sizeof(init_stack) + (unsigned long) &init_stack)
+#define INIT_SP_LIMIT \
+	(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
 
 #ifdef CONFIG_PPC32
 #define INIT_THREAD { \
 	.ksp = INIT_SP, \
+	.ksp_limit = INIT_SP_LIMIT, \
 	.fs = KERNEL_DS, \
 	.pgdir = swapper_pg_dir, \
 	.fpexc_mode = MSR_FE0 | MSR_FE1, \
@@ -194,6 +199,7 @@ struct thread_struct {
 #else
 #define INIT_THREAD  { \
 	.ksp = INIT_SP, \
+	.ksp_limit = INIT_SP_LIMIT, \
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
 	.fpr = {0}, \
-- 
cgit v1.2.3
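
[Editor's note: the new ksp_limit field makes the overflow test a single
comparison.  The kernel stack grows toward lower addresses and the
thread_info structure sits at the bottom of the stack allocation, so the
limit is the address just above that structure, which is what
INIT_SP_LIMIT computes for the initial task.  A hedged sketch of the
check follows; thread_sketch and stack_overflowed() are illustrative
names, not the kernel's actual types or routines.]

/* Sketch only: the stack pointer falling to or below ksp_limit means
 * the stack has run into the thread_info at the bottom of the stack
 * region, i.e. it has overflowed.
 */
struct thread_sketch {
	unsigned long ksp;		/* current kernel stack pointer */
	unsigned long ksp_limit;	/* lowest safe value for ksp */
};

static inline int stack_overflowed(const struct thread_sketch *t)
{
	return t->ksp <= t->ksp_limit;	/* mirrors the ksp_limit comment */
}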
From 5a7b60ed8892756b137496b629f2e7c689fe6d8d Mon Sep 17 00:00:00 2001
From: Zhang Wei
Date: Fri, 18 Apr 2008 13:33:40 -0700
Subject: [RAPIDIO] Move include/asm-ppc/rio.h to asm-powerpc

Signed-off-by: Zhang Wei
Signed-off-by: Andrew Morton
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/rio.h | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
 create mode 100644 include/asm-powerpc/rio.h

(limited to 'include/asm-powerpc')

diff --git a/include/asm-powerpc/rio.h b/include/asm-powerpc/rio.h
new file mode 100644
index 00000000000..0018bf80cb2
--- /dev/null
+++ b/include/asm-powerpc/rio.h
@@ -0,0 +1,18 @@
+/*
+ * RapidIO architecture support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ASM_PPC_RIO_H
+#define ASM_PPC_RIO_H
+
+extern void platform_rio_init(void);
+
+#endif				/* ASM_PPC_RIO_H */
-- 
cgit v1.2.3