author	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 08:49:07 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-27 08:49:07 -0700
commit	b98adfccdf5f8dd34ae56a2d5adbe2c030bd4674 (patch)
tree	1807a029520f550dd4f90c95ad0063bceb00d645 /include/asm-sh/atomic.h
parent	ba21fe71725f94792330ebc3034ef2b35a36276f (diff)
parent	33573c0e3243aaa38b6ad96942de85a1b713c2ff (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6: (108 commits)
  sh: Fix occasional flush_cache_4096() stack corruption.
  sh: Calculate shm alignment at runtime.
  sh: dma-mapping compile fixes.
  sh: Initial vsyscall page support.
  sh: Clean up PAGE_SIZE definition for assembly use.
  sh: Selective flush_cache_mm() flushing.
  sh: More intelligent entry_mask/way_size calculation.
  sh: Support for L2 cache on newer SH-4A CPUs.
  sh: Update kexec support for API changes.
  sh: Optimized readsl()/writesl() support.
  sh: Report movli.l/movco.l capabilities.
  sh: CPU flags in AT_HWCAP in ELF auxvt.
  sh: Add support for 4K stacks.
  sh: Enable /proc/kcore support.
  sh: stack debugging support.
  sh: select CONFIG_EMBEDDED.
  sh: machvec rework.
  sh: Solution Engine SH7343 board support.
  sh: SH7710VoIPGW board support.
  sh: Enable verbose BUG() support.
  ...
Diffstat (limited to 'include/asm-sh/atomic.h')
-rw-r--r--  include/asm-sh/atomic.h | 106
1 file changed, 97 insertions(+), 9 deletions(-)
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index fb627de217f..8bdc1ba56f7 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -14,6 +14,7 @@ typedef struct { volatile int counter; } atomic_t;
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
+#include <linux/compiler.h>
#include <asm/system.h>
/*
@@ -21,49 +22,110 @@ typedef struct { volatile int counter; } atomic_t;
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
-
-static __inline__ void atomic_add(int i, atomic_t * v)
+static inline void atomic_add(int i, atomic_t *v)
{
+#ifdef CONFIG_CPU_SH4A
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%3, %0 ! atomic_add \n"
+" add %2, %0 \n"
+" movco.l %0, @%3 \n"
+" bf 1b \n"
+ : "=&z" (tmp), "=r" (&v->counter)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+#else
unsigned long flags;
local_irq_save(flags);
*(long *)v += i;
local_irq_restore(flags);
+#endif
}
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
{
+#ifdef CONFIG_CPU_SH4A
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%3, %0 ! atomic_sub \n"
+" sub %2, %0 \n"
+" movco.l %0, @%3 \n"
+" bf 1b \n"
+ : "=&z" (tmp), "=r" (&v->counter)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+#else
unsigned long flags;
local_irq_save(flags);
*(long *)v -= i;
local_irq_restore(flags);
+#endif
}
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
{
- unsigned long temp, flags;
+ unsigned long temp;
+
+#ifdef CONFIG_CPU_SH4A
+ __asm__ __volatile__ (
+"1: movli.l @%3, %0 ! atomic_add_return \n"
+" add %2, %0 \n"
+" movco.l %0, @%3 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp), "=r" (&v->counter)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+#else
+ unsigned long flags;
local_irq_save(flags);
temp = *(long *)v;
temp += i;
*(long *)v = temp;
local_irq_restore(flags);
+#endif
return temp;
}
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
- unsigned long temp, flags;
+ unsigned long temp;
+
+#ifdef CONFIG_CPU_SH4A
+ __asm__ __volatile__ (
+"1: movli.l @%3, %0 ! atomic_sub_return \n"
+" sub %2, %0 \n"
+" movco.l %0, @%3 \n"
+" bf 1b \n"
+" synco \n"
+ : "=&z" (temp), "=r" (&v->counter)
+ : "r" (i), "r" (&v->counter)
+ : "t");
+#else
+ unsigned long flags;
local_irq_save(flags);
temp = *(long *)v;
temp -= i;
*(long *)v = temp;
local_irq_restore(flags);
+#endif
return temp;
}
@@ -118,22 +180,48 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
+#ifdef CONFIG_CPU_SH4A
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%3, %0 ! atomic_clear_mask \n"
+" and %2, %0 \n"
+" movco.l %0, @%3 \n"
+" bf 1b \n"
+ : "=&z" (tmp), "=r" (&v->counter)
+ : "r" (~mask), "r" (&v->counter)
+ : "t");
+#else
unsigned long flags;
local_irq_save(flags);
*(long *)v &= ~mask;
local_irq_restore(flags);
+#endif
}
-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
+#ifdef CONFIG_CPU_SH4A
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+"1: movli.l @%3, %0 ! atomic_set_mask \n"
+" or %2, %0 \n"
+" movco.l %0, @%3 \n"
+" bf 1b \n"
+ : "=&z" (tmp), "=r" (&v->counter)
+ : "r" (mask), "r" (&v->counter)
+ : "t");
+#else
unsigned long flags;
local_irq_save(flags);
*(long *)v |= mask;
local_irq_restore(flags);
+#endif
}
/* Atomic operations are already serializing on SH */
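Note on the SH-4A paths in the diff above: each movli.l/movco.l pair forms a load-linked/store-conditional retry loop, and the "bf 1b" branch re-runs the sequence whenever the conditional store fails because another CPU modified the counter in between. The result ends up in r0 (the "z" constraint), which is why the *_return variants come for free, as the in-file comment notes. The sketch below is an illustrative analogue only, not part of this patch or of the kernel API: it expresses the same retry idiom as a portable compare-and-swap loop using the GCC/Clang __atomic builtins, and the names sketch_atomic_t and atomic_add_return_sketch are hypothetical.

#include <stdbool.h>

/* Same layout as the kernel's atomic_t in this file (hypothetical name). */
typedef struct { volatile int counter; } sketch_atomic_t;

static inline int atomic_add_return_sketch(int i, sketch_atomic_t *v)
{
	int old, new;

	do {
		/* Corresponds to movli.l: read the current value. */
		old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
		new = old + i;
		/*
		 * Corresponds to movco.l + "bf 1b": attempt the store and
		 * loop again if another CPU changed the counter meanwhile.
		 * Here the interference is detected by compare-and-swap
		 * rather than by a failed conditional store.
		 */
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new,
					      false, __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));

	/*
	 * Like r0 after the loop: it already holds the updated value, and
	 * the SEQ_CST ordering stands in for the trailing synco barrier.
	 */
	return new;
}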