include/asm-m68knommu/semaphore.h

#ifndef _M68K_SEMAPHORE_H
#define _M68K_SEMAPHORE_H

#define RW_LOCK_BIAS	0x01000000

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>

#include <asm/system.h>
#include <asm/atomic.h>

/*
 * Interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * m68k version by Andreas Schwab
 */

struct semaphore {
	atomic_t count;		/* semaphore count; negative when tasks are waiting */
	atomic_t waking;	/* wakeups granted but not yet consumed by a waiter */
	wait_queue_head_t wait;	/* tasks sleeping in __down*() */
};

#define __SEMAPHORE_INITIALIZER(name, n)			\
{								\
	.count		= ATOMIC_INIT(n),			\
	.waking		= ATOMIC_INIT(0),			\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
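
/*
 * A minimal usage sketch (the name "my_lock" is just an example):
 * DECLARE_MUTEX defines a statically initialized semaphore with
 * count 1, i.e. a mutex.
 *
 *	static DECLARE_MUTEX(my_lock);
 */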

static inline void sema_init (struct semaphore *sem, int val)
{
	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
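
/*
 * A minimal usage sketch of run-time initialization, assuming a semaphore
 * embedded in a caller-defined structure (the names are examples only):
 *
 *	struct my_dev {
 *		struct semaphore lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		init_MUTEX(&dev->lock);	// equivalent to sema_init(&dev->lock, 1)
 *	}
 */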

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void /* params in registers */);
asmlinkage int  __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;

/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/m68k/lib/semaphore.S
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();
	__asm__ __volatile__(
		"| atomic down operation\n\t"
		"movel	%0, %%a1\n\t"
		"lea	%%pc@(1f), %%a0\n\t"
		"subql	#1, %%a1@\n\t"
		"jmi __down_failed\n"
		"1:"
		: /* no outputs */
		: "g" (sem)
		: "cc", "%a0", "%a1", "memory");
}

static inline int down_interruptible(struct semaphore * sem)
{
	int ret;

	might_sleep();
	__asm__ __volatile__(
		"| atomic down operation\n\t"
		"movel	%1, %%a1\n\t"
		"lea	%%pc@(1f), %%a0\n\t"
		"subql	#1, %%a1@\n\t"
		"jmi __down_failed_interruptible\n\t"
		"clrl	%%d0\n"
		"1: movel	%%d0, %0\n"
		: "=d" (ret)
		: "g" (sem)
		: "cc", "%d0", "%a0", "%a1", "memory");
	return(ret);
}
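
/*
 * A minimal usage sketch (the semaphore name "sem" is just an example):
 * a non-zero return means the sleep was interrupted by a signal and the
 * semaphore was not acquired, so the caller normally backs out.
 *
 *	if (down_interruptible(&sem))
 *		return -ERESTARTSYS;
 *	...
 *	up(&sem);
 */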

static inline int down_trylock(struct semaphore * sem)
{
	register struct semaphore *sem1 __asm__ ("%a1") = sem;
	register int result __asm__ ("%d0");

	__asm__ __volatile__(
		"| atomic down trylock operation\n\t"
		"subql #1,%1@\n\t"
		"jmi 2f\n\t"
		"clrl %0\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		".even\n"
		"2:\tpea 1b\n\t"
		"jbra __down_failed_trylock\n"
		".previous"
		: "=d" (result)
		: "a" (sem1)
		: "memory");
	return result;
}
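
/*
 * A minimal usage sketch (names are examples only): down_trylock() never
 * sleeps and returns non-zero when the semaphore could not be taken.
 *
 *	if (down_trylock(&sem))
 *		return -EBUSY;
 *	...
 *	up(&sem);
 */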

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore * sem)
{
	__asm__ __volatile__(
		"| atomic up operation\n\t"
		"movel	%0, %%a1\n\t"
		"lea	%%pc@(1f), %%a0\n\t"
		"addql	#1, %%a1@\n\t"
		"jle __up_wakeup\n"
		"1:"
		: /* no outputs */
		: "g" (sem)
		: "cc", "%a0", "%a1", "memory");
}

#endif /* __ASSEMBLY__ */

#endif