#ifndef _H8300_SEMAPHORE_HELPER_H
#define _H8300_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * based on
 * m68k version by Andreas Schwab
 */

#include <linux/errno.h>

/*
 * These two _must_ execute atomically wrt each other.
 */
static inline void wake_one_more(struct semaphore * sem)
{
	/* record one more pending wakeup; consumed by the waking_non_zero helpers */
	atomic_inc((atomic_t *)&sem->sleepers);
}
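
/*
 * Usage sketch (illustrative, not part of the original header): in the
 * sleepers/waking scheme these helpers serve, the architecture's __up()
 * wakeup path is the typical caller of wake_one_more().  __up() and
 * sem->wait belong to the companion semaphore implementation and are
 * assumed here, roughly:
 *
 *	wake_one_more(sem);
 *	wake_up(&sem->wait);
 */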

/*
 * waking_non_zero:
 *	1	got the lock
 *	0	go to sleep
 */
static inline int waking_non_zero(struct semaphore *sem)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	ret = 0;
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 */
static inline int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	ret = 0;
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 */
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	ret = 1;
	if (sem->sleepers <= 0)
		atomic_inc(&sem->count);
	else {
		sem->sleepers--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
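
/*
 * Usage sketch (illustrative, not part of the original header): the
 * sleep loop of the architecture's __down_interruptible() is the
 * typical caller of waking_non_zero_interruptible().  The function
 * __down_interruptible(), sem->wait and the wait-queue setup/teardown
 * live in the companion semaphore code and are assumed here; a
 * simplified shape of that loop:
 *
 *	struct task_struct *tsk = current;
 *	int ret = 0;
 *
 *	tsk->state = TASK_INTERRUPTIBLE;
 *	for (;;) {
 *		int waking = waking_non_zero_interruptible(sem, tsk);
 *
 *		if (waking > 0)			// 1: got the semaphore
 *			break;
 *		if (waking < 0) {		// -EINTR: signal arrived
 *			ret = waking;
 *			break;
 *		}
 *		schedule();			// 0: go back to sleep
 *		tsk->state = TASK_INTERRUPTIBLE;
 *	}
 *	tsk->state = TASK_RUNNING;
 */
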
#endif