author	Ingo Molnar <mingo@elte.hu>	2006-03-27 01:16:26 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-27 08:44:49 -0800
commit	8fdd6c6df7889dc89df3d9fe0f5bbe6733e39f48 (patch)
tree	e037a0eb7214818ad4ef177c618ef1e302c2aed9 /include
parent	dfd4e3ec246355274c9cf62c6b04a1ee6fa3caba (diff)
[PATCH] lightweight robust futexes: x86_64
x86_64: add the futex_atomic_cmpxchg_inuser() assembly implementation, and wire up the new syscalls.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Acked-by: Ulrich Drepper <drepper@redhat.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
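As an aside (not part of the commit): the two syscalls wired up below take a pointer to a per-thread list head that the kernel walks when the thread dies. Below is a minimal userspace sketch of that registration. It assumes no glibc wrapper is available yet, so it invokes the raw x86_64 number 273 from the unistd.h hunk directly; the struct layout is the one introduced by the robust-futex core patches, so prefer <linux/futex.h> if your headers already provide it.

/* Illustrative only -- not part of this patch. Registers an (empty)
 * robust list head for the calling thread. */
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct robust_list {
	struct robust_list *next;
};
struct robust_list_head {
	struct robust_list list;	/* circular list of held robust futexes */
	long futex_offset;		/* offset from a list entry to its futex word */
	struct robust_list *list_op_pending;
};

#ifndef __NR_set_robust_list
#define __NR_set_robust_list 273	/* x86_64 number from the hunk below */
#endif

static struct robust_list_head head;

int main(void)
{
	long ret;

	/* An empty list points back at itself. */
	head.list.next = &head.list;
	head.futex_offset = 0;
	head.list_op_pending = NULL;

	ret = syscall(__NR_set_robust_list, &head, sizeof(head));
	printf("set_robust_list: %ld\n", ret);
	return ret == 0 ? 0 : 1;
}

futex_offset tells the kernel how to get from each list entry to the futex word it protects; later glibc versions fill these fields in automatically for robust pthread mutexes.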
Diffstat (limited to 'include')
-rw-r--r--	include/asm-x86_64/futex.h	23
-rw-r--r--	include/asm-x86_64/unistd.h	6
2 files changed, 27 insertions, 2 deletions
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 4f4cb3410d0..7d9eb1a8454 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -97,7 +97,28 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
static inline int
futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval)
{
- return -ENOSYS;
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ __asm__ __volatile__(
+ "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
+
+ "2: .section .fixup, \"ax\" \n"
+ "3: mov %2, %0 \n"
+ " jmp 2b \n"
+ " .previous \n"
+
+ " .section __ex_table, \"a\" \n"
+ " .align 8 \n"
+ " .quad 1b,3b \n"
+ " .previous \n"
+
+ : "=a" (oldval), "=m" (*uaddr)
+ : "i" (-EFAULT), "r" (newval), "0" (oldval)
+ : "memory"
+ );
+
+ return oldval;
}

#endif
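Stripped of the access_ok() check and the exception-table fixup (which only matter because the operand is a user-space address touched from kernel mode), the inline assembly above is a plain locked compare-and-exchange: %eax carries the expected old value in, and the value actually found at the address comes back out. A hedged userspace sketch of the same pattern, x86/x86_64 only, with illustrative names:

/* Userspace analog of the cmpxchg above, minus the fault fixup:
 * atomically do "if (*ptr == oldval) *ptr = newval;" and return the
 * value that was actually found at *ptr (delivered in %eax). */
#include <stdio.h>

static inline int cmpxchg_int(int *ptr, int oldval, int newval)
{
	__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
		: "=a" (oldval), "+m" (*ptr)
		: "r" (newval), "0" (oldval)
		: "memory");
	return oldval;
}

int main(void)
{
	int word = 42;

	/* Succeeds: word was 42, becomes 43; returns 42. */
	printf("%d\n", cmpxchg_int(&word, 42, 43));
	/* Fails: word is now 43, not 42; word unchanged; returns 43. */
	printf("%d\n", cmpxchg_int(&word, 42, 99));
	return 0;
}

What the kernel version adds is fault recovery: the __ex_table entry maps a fault at label 1 (the cmpxchgl) to the fixup code at label 3, which loads -EFAULT into the return register and jumps back to label 2, so a bad user address produces an error return instead of an oops.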
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index da0341c5794..fcc51635308 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -605,8 +605,12 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */
__SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */
#define __NR_unshare 272
__SYSCALL(__NR_unshare, sys_unshare)
+#define __NR_set_robust_list 273
+__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
+#define __NR_get_robust_list 274
+__SYSCALL(__NR_get_robust_list, sys_get_robust_list)

-#define __NR_syscall_max __NR_unshare
+#define __NR_syscall_max __NR_get_robust_list

#ifndef __NO_STUBS
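For completeness, the second number wired up above (274) can be exercised the same way. An illustrative sketch, not part of the patch, that reads back whatever list head the calling thread has registered (pid 0 means the calling thread):

/* Illustrative only: query the calling thread's registered robust list. */
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_get_robust_list
#define __NR_get_robust_list 274	/* x86_64 number from the hunk above */
#endif

struct robust_list_head;		/* opaque here; layout in <linux/futex.h> */

int main(void)
{
	struct robust_list_head *head = NULL;
	size_t len = 0;
	long ret = syscall(__NR_get_robust_list, 0, &head, &len);

	if (ret == 0)
		printf("robust list head=%p len=%zu\n", (void *)head, len);
	else
		perror("get_robust_list");
	return ret == 0 ? 0 : 1;
}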