author     Linus Torvalds <torvalds@linux-foundation.org>  2008-12-30 17:23:31 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-12-30 17:23:31 -0800
commit     6de71484cf9561edb45224f659a9db38b6056d5e (patch)
tree       588fe6f7c98147b805085503c863d371e2fa497e /arch/sparc/lib/atomic_64.S
parent     1dff81f20cd55ffa5a8ee984da70ce0b99d29606 (diff)
parent     e3c6d4ee545e427b55882d97d3b663c6411645fe (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6: (98 commits)
  sparc: move select of ARCH_SUPPORTS_MSI
  sparc: drop SUN_IO
  sparc: unify sections.h
  sparc: use .data.init_task section for init_thread_union
  sparc: fix array overrun check in of_device_64.c
  sparc: unify module.c
  sparc64: prepare module_64.c for unification
  sparc64: use bit neutral Elf symbols
  sparc: unify module.h
  sparc: introduce CONFIG_BITS
  sparc: fix hardirq.h removal fallout
  sparc64: do not export pus_fs_struct
  sparc: use sparc64 version of scatterlist.h
  sparc: Commonize memcmp assembler.
  sparc: Unify strlen assembler.
  sparc: Add asm/asm.h
  sparc: Kill memcmp_32.S code which has been ifdef'd out for centuries.
  sparc: replace for_each_cpu_mask_nr with for_each_cpu
  sparc: fix sparse warnings in irq_32.c
  sparc: add include guards to kernel.h
  ...
Diffstat (limited to 'arch/sparc/lib/atomic_64.S')
-rw-r--r--  arch/sparc/lib/atomic_64.S  138
1 file changed, 138 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
new file mode 100644
index 00000000000..0268210ca16
--- /dev/null
+++ b/arch/sparc/lib/atomic_64.S
@@ -0,0 +1,138 @@
+/* atomic.S: These things are too big to do inline.
+ *
+ * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#include <asm/asi.h>
+#include <asm/backoff.h>
+
+ .text
+
+ /* Two versions of the atomic routines, one that
+ * does not return a value and does not perform
+ * memory barriers, and a second which returns
+ * a value and does the barriers.
+ */
+ .globl atomic_add
+ .type atomic_add,#function
+atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: lduw [%o1], %g1
+ add %g1, %o0, %g7
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+ nop
+ retl
+ nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add, .-atomic_add
+
+ .globl atomic_sub
+ .type atomic_sub,#function
+atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: lduw [%o1], %g1
+ sub %g1, %o0, %g7
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+ nop
+ retl
+ nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_sub, .-atomic_sub
+
+ .globl atomic_add_ret
+ .type atomic_add_ret,#function
+atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: lduw [%o1], %g1
+ add %g1, %o0, %g7
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+ add %g7, %o0, %g7
+ sra %g7, 0, %o0
+ retl
+ nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add_ret, .-atomic_add_ret
+
+ .globl atomic_sub_ret
+ .type atomic_sub_ret,#function
+atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: lduw [%o1], %g1
+ sub %g1, %o0, %g7
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+ sub %g7, %o0, %g7
+ sra %g7, 0, %o0
+ retl
+ nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_sub_ret, .-atomic_sub_ret
+
+ .globl atomic64_add
+ .type atomic64_add,#function
+atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: ldx [%o1], %g1
+ add %g1, %o0, %g7
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+ nop
+ retl
+ nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add, .-atomic64_add
+
+ .globl atomic64_sub
+ .type atomic64_sub,#function
+atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: ldx [%o1], %g1
+ sub %g1, %o0, %g7
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+ nop
+ retl
+ nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_sub, .-atomic64_sub
+
+ .globl atomic64_add_ret
+ .type atomic64_add_ret,#function
+atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: ldx [%o1], %g1
+ add %g1, %o0, %g7
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+ add %g7, %o0, %g7
+ mov %g7, %o0
+ retl
+ nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add_ret, .-atomic64_add_ret
+
+ .globl atomic64_sub_ret
+ .type atomic64_sub_ret,#function
+atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+1: ldx [%o1], %g1
+ sub %g1, %o0, %g7
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+ sub %g7, %o0, %g7
+ mov %g7, %o0
+ retl
+ nop
+2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_sub_ret, .-atomic64_sub_ret
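
All eight routines share the same lock-free shape: load the current value, compute the new one, and attempt a compare-and-swap (cas for 32-bit, casx for 64-bit); if another CPU modified the word in between, the cas fails and the routine backs off and retries. The sketch below is an illustrative C rendering of atomic_add_ret's loop, not part of the patch: the function name is made up, and GCC's __sync_bool_compare_and_swap builtin stands in for the raw cas instruction, with the backoff path omitted.

	/*
	 * Illustrative C equivalent of atomic_add_ret's CAS loop
	 * (hypothetical sketch, not kernel code).
	 */
	static int atomic_add_ret_sketch(int increment, int *ptr)
	{
		int old, new;

		do {
			old = *ptr;		/* lduw: fetch current value       */
			new = old + increment;	/* add:  compute the desired value */
			/* cas: store new only if *ptr still holds old */
		} while (!__sync_bool_compare_and_swap(ptr, old, new));

		return new;			/* value after the update */
	}

The sra %g7, 0, %o0 in the 32-bit _ret variants sign-extends the 32-bit result into the 64-bit return register, which the sketch's int return type gets for free; the 64-bit variants use a plain mov instead.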
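
The BACKOFF_SETUP/BACKOFF_SPIN pair (from asm/backoff.h) wraps that loop in exponential backoff: a per-call delay counter starts small, the routine busy-waits that many iterations after each failed cas, and the delay grows toward a cap before control jumps back to the 1: label. A hedged sketch of that shape, assuming a doubling policy and an illustrative cap of 4096 since the real limit lives in asm/backoff.h:

	/*
	 * Hypothetical sketch of the exponential-backoff retry shape;
	 * the doubling step and the cap of 4096 are illustrative,
	 * not taken from the patch.
	 */
	static int atomic_add_backoff_sketch(int increment, int *ptr)
	{
		int old, new;
		unsigned long i, delay = 1;		/* BACKOFF_SETUP */

		for (;;) {
			old = *ptr;
			new = old + increment;
			if (__sync_bool_compare_and_swap(ptr, old, new))
				return new;
			/* BACKOFF_SPIN: busy-wait, then grow the delay */
			for (i = 0; i < delay; i++)
				__asm__ __volatile__("" ::: "memory");
			if (delay < 4096)
				delay <<= 1;
		}
	}

Backing off after a failed cas presumably keeps heavily contended atomics from hammering the memory interconnect with back-to-back retries on highly threaded chips.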