/* include/asm-sh/semaphore-helper.h */
#ifndef __ASM_SH_SEMAPHORE_HELPER_H
#define __ASM_SH_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore *sem)
{
	atomic_inc((atomic_t *)&sem->sleepers);
}
static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
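
/*
 * Usage sketch (illustrative only, not part of this header's interface):
 * these two helpers are the waker/sleeper pair used by the architecture's
 * semaphore slow paths.  Assuming the classic __up()/__down() split and a
 * wait queue head at sem->wait, the pairing looks roughly like this:
 *
 *	void __up(struct semaphore *sem)
 *	{
 *		wake_one_more(sem);
 *		wake_up(&sem->wait);
 *	}
 *
 *	void __down(struct semaphore *sem)
 *	{
 *		DECLARE_WAITQUEUE(wait, current);
 *
 *		add_wait_queue_exclusive(&sem->wait, &wait);
 *		for (;;) {
 *			set_current_state(TASK_UNINTERRUPTIBLE);
 *			if (waking_non_zero(sem))
 *				break;
 *			schedule();
 *		}
 *		set_current_state(TASK_RUNNING);
 *		remove_wait_queue(&sem->wait, &wait);
 *	}
 *
 * The exact wait-queue handling lives in the arch's semaphore.c and may
 * differ; the point is that every wakeup credited by wake_one_more() is
 * consumed by exactly one successful waking_non_zero() under
 * semaphore_wake_lock.
 */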
/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count decrement done by down_interruptible() while
 * we are protected by the spinlock, so that this atomic_inc() is atomic with
 * the atomic_read() in wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						    struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
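
/*
 * Illustrative sketch (not part of this header): a __down_interruptible()
 * slow path would loop on the three-way return value documented above.
 * The surrounding wait-queue handling is assumed to follow the classic
 * semaphore implementation and may differ per architecture:
 *
 *	int __down_interruptible(struct semaphore *sem)
 *	{
 *		int ret = 0;
 *		DECLARE_WAITQUEUE(wait, current);
 *
 *		add_wait_queue_exclusive(&sem->wait, &wait);
 *		for (;;) {
 *			int state;
 *
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			state = waking_non_zero_interruptible(sem, current);
 *			if (state) {
 *				if (state == -EINTR)
 *					ret = -EINTR;
 *				break;
 *			}
 *			schedule();
 *		}
 *		set_current_state(TASK_RUNNING);
 *		remove_wait_queue(&sem->wait, &wait);
 *		return ret;
 *	}
 *
 * A return of 1 means the wakeup credit was consumed and the semaphore is
 * ours; -EINTR means the signal_pending() path above already re-incremented
 * sem->count, so the caller only has to report the error.
 */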
/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count decrement done by down_trylock() while we are
 * protected by the spinlock, so that this atomic_inc() is atomic with the
 * atomic_read() in wake_one_more(); otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers <= 0)
		atomic_inc(&sem->count);
	else {
		sem->sleepers--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
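
/*
 * Illustrative sketch (not part of this header): unlike the helpers above,
 * waking_non_zero_trylock() never sleeps, so a __down_trylock() slow path
 * is a single call, assuming the usual convention that down_trylock()
 * returns 0 on success and 1 when the semaphore is busy:
 *
 *	int __down_trylock(struct semaphore *sem)
 *	{
 *		return waking_non_zero_trylock(sem);
 *	}
 *
 * down_trylock() only reaches this path after its optimistic decrement of
 * sem->count has gone negative, which is why the helper re-increments
 * sem->count when there is no wakeup credit to hand over.
 */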

#endif /* __ASM_SH_SEMAPHORE_HELPER_H */