/* include/asm-m68k/semaphore-helper.h */
#ifndef _M68K_SEMAPHORE_HELPER_H
#define _M68K_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * m68k version by Andreas Schwab
 */

#include <linux/errno.h>

/*
 * These two _must_ execute atomically wrt each other.
 * (An illustrative caller sketch follows waking_non_zero() below.)
 */
static inline void wake_one_more(struct semaphore * sem)
{
	atomic_inc(&sem->waking);
}
#ifndef CONFIG_RMW_INSNS
extern spinlock_t semaphore_wake_lock;
#endif
static inline int waking_non_zero(struct semaphore *sem)
{
	int ret;
#ifndef CONFIG_RMW_INSNS
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	ret = 0;
	if (atomic_read(&sem->waking) > 0) {
		atomic_dec(&sem->waking);
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
	int tmp1, tmp2;

	/* casl retry loop: decrement sem->waking only if it is positive. */
	__asm__ __volatile__
	  ("1:	movel	%1,%2\n"
	   "	jle	2f\n"
	   "	subql	#1,%2\n"
	   "	casl	%1,%2,%3\n"
	   "	jne	1b\n"
	   "	moveq	#1,%0\n"
	   "2:"
	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
	   : "m" (sem->waking), "0" (0), "1" (sem->waking));
#endif

	return ret;
}
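
/*
 * Illustrative sketch only, not part of the original header: one way a
 * sleeper loop could pair waking_non_zero() with a wake_one_more() done
 * on the up() path.  The helper name is hypothetical, and it assumes
 * struct semaphore carries a wait queue member named "wait" and that
 * <linux/sched.h> / <linux/wait.h> are available to the caller.
 */
static inline void example_down_sketch(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&sem->wait, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* a wake_one_more() from up() lets exactly one sleeper through */
		if (waking_non_zero(sem))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&sem->wait, &wait);
}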
/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * (An illustrative caller sketch follows the function below.)
 */
static inline int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	int ret;
#ifndef CONFIG_RMW_INSNS
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	ret = 0;
	if (atomic_read(&sem->waking) > 0) {
		atomic_dec(&sem->waking);
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	} else if (signal_pending(tsk)) {
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
	int tmp1, tmp2;

	/*
	 * Same casl loop as above, but on success jump straight to
	 * "next" so the signal_pending() check is skipped.
	 */
	__asm__ __volatile__
	  ("1:	movel	%1,%2\n"
	   "	jle	2f\n"
	   "	subql	#1,%2\n"
	   "	casl	%1,%2,%3\n"
	   "	jne	1b\n"
	   "	moveq	#1,%0\n"
	   "	jra	%a4\n"
	   "2:"
	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
	   : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
	if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
next:
#endif

	return ret;
}
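
/*
 * Illustrative sketch only, not part of the original header: handling
 * the three return values documented above.  Hypothetical helper name;
 * same assumptions as the sketch after waking_non_zero().  Note that on
 * -EINTR the helper has already re-incremented sem->count, so the
 * caller only has to report the error.
 */
static inline int example_down_interruptible_sketch(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue_exclusive(&sem->wait, &wait);
	for (;;) {
		int r;

		set_current_state(TASK_INTERRUPTIBLE);
		r = waking_non_zero_interruptible(sem, current);
		if (r) {
			if (r < 0)	/* -EINTR: interrupted, give up */
				ret = r;
			break;		/* 1: got the lock */
		}
		schedule();		/* 0: go back to sleep */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&sem->wait, &wait);
	return ret;
}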
/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * (An illustrative caller sketch follows the function below.)
 */
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
	int ret;
#ifndef CONFIG_RMW_INSNS
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	ret = 1;
	if (atomic_read(&sem->waking) > 0) {
		atomic_dec(&sem->waking);
		ret = 0;
	} else
		atomic_inc(&sem->count);
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
#else
	int tmp1, tmp2;

	/* Same casl loop; ret starts at 1 ("failed") and is cleared on success. */
	__asm__ __volatile__
	  ("1:	movel	%1,%2\n"
	   "	jle	2f\n"
	   "	subql	#1,%2\n"
	   "	casl	%1,%2,%3\n"
	   "	jne	1b\n"
	   "	moveq	#0,%0\n"
	   "2:"
	   : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
	   : "m" (sem->waking), "0" (1), "1" (sem->waking));
	if (ret)
		atomic_inc(&sem->count);
#endif
	return ret;
}
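
/*
 * Illustrative sketch only, not part of the original header: a trylock
 * caller (hypothetical name) first decrements sem->count; only when
 * that goes negative does it fall back to waking_non_zero_trylock(),
 * which either consumes a pending wakeup (returns 0, lock taken) or
 * restores sem->count and reports failure (returns 1).
 */
static inline int example_down_trylock_sketch(struct semaphore *sem)
{
	if (atomic_dec_return(&sem->count) < 0)
		return waking_non_zero_trylock(sem);
	return 0;
}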

#endif /* _M68K_SEMAPHORE_HELPER_H */