author     Ingo Molnar <mingo@elte.hu>  2008-08-14 14:58:01 +0200
committer  Ingo Molnar <mingo@elte.hu>  2008-08-14 14:58:01 +0200
commit     51ca3c679194e7435c25b8e77b0a73c597e41ae9 (patch)
tree       a681dca369607ab0f371d5246b0f75140b860a8a /arch/sh/include/asm/atomic-llsc.h
parent     b55793f7528ce1b73c25b3ac8a86a6cda2a0f9a4 (diff)
parent     b635acec48bcaa9183fcbf4e3955616b0d4119b5 (diff)
Merge branch 'linus' into x86/core
Conflicts:
	arch/x86/kernel/genapic_64.c
	include/asm-x86/kvm_host.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/sh/include/asm/atomic-llsc.h')
-rw-r--r--  arch/sh/include/asm/atomic-llsc.h  107
1 file changed, 107 insertions(+), 0 deletions(-)
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
new file mode 100644
index 00000000000..4b00b78e3f4
--- /dev/null
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -0,0 +1,107 @@
+#ifndef __ASM_SH_ATOMIC_LLSC_H
+#define __ASM_SH_ATOMIC_LLSC_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_add \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_sub \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+}
+
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_add_return \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_sub_return \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_clear_mask \n"
+" and %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (~mask), "r" (&v->counter)
+ : "t");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%2, %0 ! atomic_set_mask \n"
+" or %1, %0 \n"
+" movco.l %0, @%2 \n"
+" bf 1b \n"
+ : "=&z" (tmp)
+ : "r" (mask), "r" (&v->counter)
+ : "t");
+}
+
+#endif /* __ASM_SH_ATOMIC_LLSC_H */
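
A note for readers new to SH-4A's LL/SC primitives: in the sequences above, movli.l performs a load-linked read into r0 and movco.l a store-conditional write, setting the T bit on success, so "bf 1b" branches back to retry whenever the reservation was lost. As an illustrative analogy only (not part of this patch), the same retry pattern can be written portably with C11 atomics; atomic_compare_exchange_weak() may fail spuriously, much as a store-conditional may:

#include <stdatomic.h>

/* Illustrative sketch only: the LL/SC retry loop expressed with C11
 * atomics. The names (example_atomic_add, v) are hypothetical and not
 * part of the kernel header above. Relaxed ordering mirrors the plain
 * atomic_add(), which has no trailing barrier. */
static inline void example_atomic_add(int i, _Atomic int *v)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* On failure, 'old' is refreshed with the current value and we
	 * retry -- the analogue of "bf 1b" after a failed movco.l. */
	while (!atomic_compare_exchange_weak_explicit(
			v, &old, old + i,
			memory_order_relaxed, memory_order_relaxed))
		;
}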
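The SH-4A comment above atomic_add_return() also explains why the *_return variants are nearly free: the movli.l/movco.l encoding forces the working register to be r0, so r0 already holds the post-operation value when the store-conditional succeeds, and the trailing synco serves as a memory barrier. A hedged sketch of the equivalent semantics, using the GCC __atomic builtins purely for illustration rather than this header's implementation:

/* Illustrative sketch only: returns the value *after* the addition,
 * as atomic_add_return() does. __ATOMIC_SEQ_CST stands in for the
 * "synco" barrier in the assembly above. */
static inline int example_atomic_add_return(int i, int *v)
{
	return __atomic_add_fetch(v, i, __ATOMIC_SEQ_CST);
}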