From 293666b7a17cb7a389fc274980439212386a19c4 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Sat, 15 Nov 2008 13:33:25 -0800
Subject: sparc64: Stop using memory barriers for atomics and locks.

The kernel always executes in the TSO memory model now, so none of
this stuff is necessary any more.

With helpful feedback from Nick Piggin.

Signed-off-by: David S. Miller
---
 arch/sparc/include/asm/system_64.h | 35 ++++++++---------------------------
 1 file changed, 8 insertions(+), 27 deletions(-)

(limited to 'arch/sparc/include/asm/system_64.h')

diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 8759f2a1b83..7554ad39b5a 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -59,20 +59,9 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 			     : : : "memory"); \
 } while (0)
 
-#define mb()	\
-	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
-#define rmb()	\
-	membar_safe("#LoadLoad")
-#define wmb()	\
-	membar_safe("#StoreStore")
-#define membar_storeload() \
-	membar_safe("#StoreLoad")
-#define membar_storeload_storestore() \
-	membar_safe("#StoreLoad | #StoreStore")
-#define membar_storeload_loadload() \
-	membar_safe("#StoreLoad | #LoadLoad")
-#define membar_storestore_loadstore() \
-	membar_safe("#StoreStore | #LoadStore")
+#define mb()	membar_safe("#StoreLoad")
+#define rmb()	__asm__ __volatile__("":::"memory")
+#define wmb()	__asm__ __volatile__("":::"memory")
 
 #endif
 
@@ -80,20 +69,20 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 
 #define read_barrier_depends()		do { } while(0)
 #define set_mb(__var, __value) \
-	do { __var = __value; membar_storeload_storestore(); } while(0)
+	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	__asm__ __volatile__("":::"memory")
 #define smp_rmb()	__asm__ __volatile__("":::"memory")
 #define smp_wmb()	__asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends()	do { } while(0)
 #endif
 
+#define smp_read_barrier_depends()	do { } while(0)
+
 #define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
 
 #define flushw_all()	__asm__ __volatile__("flushw")
@@ -209,14 +198,12 @@ static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int va
 	unsigned long tmp1, tmp2;
 
 	__asm__ __volatile__(
-"	membar		#StoreLoad | #LoadLoad\n"
 "	mov		%0, %1\n"
 "1:	lduw		[%4], %2\n"
 "	cas		[%4], %2, %0\n"
 "	cmp		%2, %0\n"
 "	bne,a,pn	%%icc, 1b\n"
 "	 mov		%1, %0\n"
-"	membar		#StoreLoad | #StoreStore\n"
 	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
 	: "0" (val), "r" (m)
 	: "cc", "memory");
@@ -228,14 +215,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 	unsigned long tmp1, tmp2;
 
 	__asm__ __volatile__(
-"	membar		#StoreLoad | #LoadLoad\n"
 "	mov		%0, %1\n"
 "1:	ldx		[%4], %2\n"
 "	casx		[%4], %2, %0\n"
 "	cmp		%2, %0\n"
 "	bne,a,pn	%%xcc, 1b\n"
 "	 mov		%1, %0\n"
-"	membar		#StoreLoad | #StoreStore\n"
 	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
 	: "0" (val), "r" (m)
 	: "cc", "memory");
@@ -272,9 +257,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
 static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {
-	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
-			     "cas [%2], %3, %0\n\t"
-			     "membar #StoreLoad | #StoreStore"
+	__asm__ __volatile__("cas [%2], %3, %0"
 			     : "=&r" (new)
 			     : "0" (new), "r" (m), "r" (old)
 			     : "memory");
@@ -285,9 +268,7 @@ __cmpxchg_u32(volatile int *m, int old, int new)
 static inline unsigned long
 __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 {
-	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
-			     "casx [%2], %3, %0\n\t"
-			     "membar #StoreLoad | #StoreStore"
+	__asm__ __volatile__("casx [%2], %3, %0"
 			     : "=&r" (new)
 			     : "0" (new), "r" (m), "r" (old)
 			     : "memory");
--
cgit v1.2.3


From f9aad60010efa896319ed6e908a5cb5e3a852907 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 25 Nov 2008 22:26:59 -0800
Subject: sparc64: Block NMIs in critical section of context switch.

In these instructions we load the new thread register, switch the
register window, and setup the new frame pointer.  All of these must
appear atomic, and things will explode if we take a PIL=15 NMI
interrupt in the middle of this sequence.

Signed-off-by: David S. Miller
---
 arch/sparc/include/asm/system_64.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/sparc/include/asm/system_64.h')

diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 7554ad39b5a..779cf62d3ce 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -159,6 +159,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	"stb %%o5, [%%g6 + %5]\n\t" \
 	"rdpr %%cwp, %%o5\n\t" \
 	"stb %%o5, [%%g6 + %8]\n\t" \
+	"wrpr %%g0, 15, %%pil\n\t" \
 	"mov %4, %%g6\n\t" \
 	"ldub [%4 + %8], %%g1\n\t" \
 	"wrpr %%g1, %%cwp\n\t" \
@@ -169,6 +170,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	"ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
 	"ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
 	"ldx [%%g6 + %9], %%g4\n\t" \
+	"wrpr %%g0, 14, %%pil\n\t" \
 	"brz,pt %%o7, switch_to_pc\n\t" \
 	" mov %%g7, %0\n\t" \
 	"sethi %%hi(ret_from_syscall), %%g1\n\t" \
--
cgit v1.2.3


From 456cad8e4e1754672e6df1e716cff1482ea124ce Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 25 Nov 2008 22:27:50 -0800
Subject: sparc64: Add write_pic() helper.

It writes the %pic register, keeping mind of processor bugs.

Implement reset_pic() in terms of it.

Signed-off-by: David S. Miller
---
 arch/sparc/include/asm/system_64.h | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'arch/sparc/include/asm/system_64.h')

diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 779cf62d3ce..6c077816ab2 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -96,11 +96,12 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
  * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
  * for more information.
  */
-#define reset_pic() \
-	__asm__ __volatile__("ba,pt	%xcc, 99f\n\t"		\
+#define write_pic(__p) \
+	__asm__ __volatile__("ba,pt	%%xcc, 99f\n\t"		\
 			     ".align	64\n"			\
-			     "99:wr	%g0, 0x0, %pic\n\t"	\
-			     "rd	%pic, %g0")
+			     "99:wr	%0, 0x0, %%pic\n\t"	\
+			     "rd	%%pic, %%g0" : : "r" (__p))
+#define reset_pic()	write_pic(0)
 
 #ifndef __ASSEMBLY__
 
--
cgit v1.2.3