author	Paul Mundt <lethal@linux-sh.org>	2006-12-01 14:32:54 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2006-12-06 10:45:40 +0900
commit	c03c69610bfa728805deceeb624ee4268c722a5a (patch)
tree	07085eab7196c1e7219473d5a164a0569bce9611 /include/asm-sh/atomic.h
parent	bd156147eb63ae525e0ac67868e41a808f03c532 (diff)
sh: Fixup movli.l/movco.l atomic ops for gcc4.
gcc4 gets a bit pissy about the outputs:

include/asm/atomic.h: In function 'atomic_add':
include/asm/atomic.h:37: error: invalid lvalue in asm statement
include/asm/atomic.h:30: error: invalid lvalue in asm output 1
...

this ended up being a thinko anyways, so just fix it up.

Verified for proper behaviour with the older toolchains, too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
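For reference, this is roughly how atomic_add() reads after the patch. Only the asm statement is taken verbatim from the hunks below; the function braces, the CONFIG_CPU_SH4A guard, and the fallback comment are assumptions reconstructed from the visible #ifdef/#else context lines:

/* Sketch of the patched atomic_add(); the asm body is verbatim from the
 * diff below, the surrounding structure is inferred from its context lines.
 */
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"	/* load-linked from v->counter */
"	add	%1, %0				\n"	/* tmp += i */
"	movco.l	%0, @%2				\n"	/* store-conditional back */
"	bf	1b				\n"	/* retry if the store failed */
	: "=&z" (tmp)			/* sole output: the temporary */
	: "r" (i), "r" (&v->counter)	/* the address is now an input */
	: "t");
#else
	/* non-SH4A fallback elided */
#endif
}

The thinko was listing "=r" (&v->counter) as a second output: the asm never writes a new address back, and &v->counter is not something the compiler can store into anyway. Dropping that output and passing the address purely as an input also renumbers the operands, which is why every @%3/%2 becomes @%2/%1 in the hunks.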
Diffstat (limited to 'include/asm-sh/atomic.h')
-rw-r--r--	include/asm-sh/atomic.h	48
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 8bdc1ba56f7..28305c3cbdd 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -28,11 +28,11 @@ static inline void atomic_add(int i, atomic_t *v)
unsigned long tmp;
__asm__ __volatile__ (
-"1: movli.l @%3, %0 ! atomic_add \n"
-" add %2, %0 \n"
-" movco.l %0, @%3 \n"
+"1: movli.l @%2, %0 ! atomic_add \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
" bf 1b \n"
- : "=&z" (tmp), "=r" (&v->counter)
+ : "=&z" (tmp)
: "r" (i), "r" (&v->counter)
: "t");
#else
@@ -50,11 +50,11 @@ static inline void atomic_sub(int i, atomic_t *v)
unsigned long tmp;
__asm__ __volatile__ (
-"1: movli.l @%3, %0 ! atomic_sub \n"
-" sub %2, %0 \n"
-" movco.l %0, @%3 \n"
+"1: movli.l @%2, %0 ! atomic_sub \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
" bf 1b \n"
- : "=&z" (tmp), "=r" (&v->counter)
+ : "=&z" (tmp)
: "r" (i), "r" (&v->counter)
: "t");
#else
@@ -80,12 +80,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
#ifdef CONFIG_CPU_SH4A
__asm__ __volatile__ (
-"1: movli.l @%3, %0 ! atomic_add_return \n"
-" add %2, %0 \n"
-" movco.l %0, @%3 \n"
+"1: movli.l @%2, %0 ! atomic_add_return \n"
+" add %1, %0 \n"
+" movco.l %0, @%2 \n"
" bf 1b \n"
" synco \n"
- : "=&z" (temp), "=r" (&v->counter)
+ : "=&z" (temp)
: "r" (i), "r" (&v->counter)
: "t");
#else
@@ -109,12 +109,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
#ifdef CONFIG_CPU_SH4A
__asm__ __volatile__ (
-"1: movli.l @%3, %0 ! atomic_sub_return \n"
-" sub %2, %0 \n"
-" movco.l %0, @%3 \n"
+"1: movli.l @%2, %0 ! atomic_sub_return \n"
+" sub %1, %0 \n"
+" movco.l %0, @%2 \n"
" bf 1b \n"
" synco \n"
- : "=&z" (temp), "=r" (&v->counter)
+ : "=&z" (temp)
: "r" (i), "r" (&v->counter)
: "t");
#else
@@ -186,11 +186,11 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
unsigned long tmp;
__asm__ __volatile__ (
-"1: movli.l @%3, %0 ! atomic_clear_mask \n"
-" and %2, %0 \n"
-" movco.l %0, @%3 \n"
+"1: movli.l @%2, %0 ! atomic_clear_mask \n"
+" and %1, %0 \n"
+" movco.l %0, @%2 \n"
" bf 1b \n"
- : "=&z" (tmp), "=r" (&v->counter)
+ : "=&z" (tmp)
: "r" (~mask), "r" (&v->counter)
: "t");
#else
@@ -208,11 +208,11 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
unsigned long tmp;
__asm__ __volatile__ (
-"1: movli.l @%3, %0 ! atomic_set_mask \n"
-" or %2, %0 \n"
-" movco.l %0, @%3 \n"
+"1: movli.l @%2, %0 ! atomic_set_mask \n"
+" or %1, %0 \n"
+" movco.l %0, @%2 \n"
" bf 1b \n"
- : "=&z" (tmp), "=r" (&v->counter)
+ : "=&z" (tmp)
: "r" (mask), "r" (&v->counter)
: "t");
#else
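The underlying gcc rule, for context: an asm output operand must be an lvalue the compiler can store a result into, and an address-of expression such as &v->counter is a value, not storage, hence "invalid lvalue in asm output". A minimal standalone sketch of the broken and fixed constraint usage (hypothetical names, not from the patch):

int counter;

void broken(void)
{
	/* gcc4 rejects this: "&counter" is not an lvalue, so it cannot
	 * be the destination of an output operand.
	 *
	 * __asm__("" : "=r" (&counter));  // error: invalid lvalue in asm output
	 */
}

void fixed(void)
{
	/* Pass the address as an input instead; the asm body dereferences
	 * it itself, as the movli.l/movco.l pairs above do via @%2.
	 */
	__asm__ __volatile__("" : : "r" (&counter) : "memory");
}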