From 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@ppc970.osdl.org>
Date: Sat, 16 Apr 2005 15:20:36 -0700
Subject: Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
---
 include/asm-m68k/semaphore-helper.h | 143 ++++++++++++++++++++++++++++++++++++
 1 file changed, 143 insertions(+)
 create mode 100644 include/asm-m68k/semaphore-helper.h

(limited to 'include/asm-m68k/semaphore-helper.h')

diff --git a/include/asm-m68k/semaphore-helper.h b/include/asm-m68k/semaphore-helper.h
new file mode 100644
index 00000000000..1516a642f9a
--- /dev/null
+++ b/include/asm-m68k/semaphore-helper.h
@@ -0,0 +1,143 @@
+#ifndef _M68K_SEMAPHORE_HELPER_H
+#define _M68K_SEMAPHORE_HELPER_H
+
+/*
+ * SMP- and interrupt-safe semaphores helper functions.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ *
+ * m68k version by Andreas Schwab
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ */
+static inline void wake_one_more(struct semaphore * sem)
+{
+        atomic_inc(&sem->waking);
+}
+
+#ifndef CONFIG_RMW_INSNS
+extern spinlock_t semaphore_wake_lock;
+#endif
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+        int ret;
+#ifndef CONFIG_RMW_INSNS
+        unsigned long flags;
+
+        spin_lock_irqsave(&semaphore_wake_lock, flags);
+        ret = 0;
+        if (atomic_read(&sem->waking) > 0) {
+                atomic_dec(&sem->waking);
+                ret = 1;
+        }
+        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+#else
+        int tmp1, tmp2;
+
+        __asm__ __volatile__
+          ("1:  movel   %1,%2\n"
+           "    jle     2f\n"
+           "    subql   #1,%2\n"
+           "    casl    %1,%2,%3\n"
+           "    jne     1b\n"
+           "    moveq   #1,%0\n"
+           "2:"
+           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
+           : "m" (sem->waking), "0" (0), "1" (sem->waking));
+#endif
+
+        return ret;
+}
+
+/*
+ * waking_non_zero_interruptible:
+ *      1       got the lock
+ *      0       go to sleep
+ *      -EINTR  interrupted
+ */
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
+                                                struct task_struct *tsk)
+{
+        int ret;
+#ifndef CONFIG_RMW_INSNS
+        unsigned long flags;
+
+        spin_lock_irqsave(&semaphore_wake_lock, flags);
+        ret = 0;
+        if (atomic_read(&sem->waking) > 0) {
+                atomic_dec(&sem->waking);
+                ret = 1;
+        } else if (signal_pending(tsk)) {
+                atomic_inc(&sem->count);
+                ret = -EINTR;
+        }
+        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
+        int tmp1, tmp2;
+
+        __asm__ __volatile__
+          ("1:  movel   %1,%2\n"
+           "    jle     2f\n"
+           "    subql   #1,%2\n"
+           "    casl    %1,%2,%3\n"
+           "    jne     1b\n"
+           "    moveq   #1,%0\n"
+           "    jra     %a4\n"
+           "2:"
+           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
+           : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
+        if (signal_pending(tsk)) {
+                atomic_inc(&sem->count);
+                ret = -EINTR;
+        }
+next:
+#endif
+
+        return ret;
+}
+
+/*
+ * waking_non_zero_trylock:
+ *      1       failed to lock
+ *      0       got the lock
+ */
+static inline int waking_non_zero_trylock(struct semaphore *sem)
+{
+        int ret;
+#ifndef CONFIG_RMW_INSNS
+        unsigned long flags;
+
+        spin_lock_irqsave(&semaphore_wake_lock, flags);
+        ret = 1;
+        if (atomic_read(&sem->waking) > 0) {
+                atomic_dec(&sem->waking);
+                ret = 0;
+        } else
+                atomic_inc(&sem->count);
+        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+#else
+        int tmp1, tmp2;
+
+        __asm__ __volatile__
+          ("1:  movel   %1,%2\n"
+           "    jle     2f\n"
+           "    subql   #1,%2\n"
+           "    casl    %1,%2,%3\n"
+           "    jne     1b\n"
+           "    moveq   #0,%0\n"
+           "2:"
+           : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
+           : "m" (sem->waking), "0" (1), "1" (sem->waking));
+        if (ret)
+                atomic_inc(&sem->count);
+#endif
+        return ret;
+}
+
+#endif
--
cgit v1.2.3
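
Note (not part of the patch above): these helpers implement the "waking" hand-off that the m68k semaphore slow paths poll after being woken, following the return convention documented in the header (1 = got the semaphore, 0 = go back to sleep, -EINTR = interrupted by a signal). The sketch below is a minimal, hypothetical illustration of how a sleeper loop could consume waking_non_zero_interruptible(); the function name and loop shape are assumptions for the example, not the kernel's actual __down_interruptible() implementation.

/*
 * Illustrative sketch only.  A real slow path would already have put the
 * task on sem->wait and set its state to TASK_INTERRUPTIBLE before each
 * schedule(); that bookkeeping is omitted here for brevity.
 */
static int example_wait_for_semaphore(struct semaphore *sem,
                                      struct task_struct *tsk)
{
        int ret;

        for (;;) {
                ret = waking_non_zero_interruptible(sem, tsk);
                if (ret == 1)
                        return 0;          /* consumed a wakeup: semaphore acquired */
                if (ret == -EINTR)
                        return -EINTR;     /* signal pending: give up, count already fixed up */
                schedule();                /* ret == 0: no wakeup yet, keep sleeping */
        }
}

The key property the helpers provide to such a loop is that consuming a wakeup (decrementing sem->waking) is atomic with respect to wake_one_more(), either via the spinlock fallback or the casl-based path when CONFIG_RMW_INSNS is set.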