x86: cpa: make self-test depend on DEBUG_KERNEL
[wrt350n-kernel.git] / include / asm-alpha / semaphore.h
blob f1e9278a9fe22aeb149665bdcbe39d4aaefbed16
1 #ifndef _ALPHA_SEMAPHORE_H
2 #define _ALPHA_SEMAPHORE_H
4 /*
5 * SMP- and interrupt-safe semaphores..
7 * (C) Copyright 1996 Linus Torvalds
8 * (C) Copyright 1996, 2000 Richard Henderson
9 */
11 #include <asm/current.h>
12 #include <asm/system.h>
13 #include <asm/atomic.h>
14 #include <linux/compiler.h>
15 #include <linux/wait.h>
16 #include <linux/rwsem.h>
18 struct semaphore {
19 atomic_t count;
20 wait_queue_head_t wait;
/* Static initializer: count set to n, wait queue initialized for (name). */
#define __SEMAPHORE_INITIALIZER(name, n)			\
{								\
	.count	= ATOMIC_INIT(n),				\
	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait),	\
}
/* Define a semaphore variable "name" statically initialized to "count". */
#define __DECLARE_SEMAPHORE_GENERIC(name,count)		\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* A mutex is simply a semaphore with an initial count of 1. */
#define DECLARE_MUTEX(name)	__DECLARE_SEMAPHORE_GENERIC(name,1)
34 static inline void sema_init(struct semaphore *sem, int val)
37 * Logically,
38 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
39 * except that gcc produces better initializing by parts yet.
42 atomic_set(&sem->count, val);
43 init_waitqueue_head(&sem->wait);
/* Initialize a semaphore as an unlocked mutex (count = 1). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
/* Initialize a semaphore as a locked mutex (count = 0). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
/*
 * Public entry points (defined out of line when CONFIG_DEBUG_SEMAPHORE,
 * otherwise provided as extern inline wrappers below), plus the
 * slow-path helpers implemented in arch/alpha/kernel/semaphore.c.
 */
extern void down(struct semaphore *);
extern void __down_failed(struct semaphore *);
extern int  down_interruptible(struct semaphore *);
extern int  __down_failed_interruptible(struct semaphore *);
extern int  down_trylock(struct semaphore *);
extern void up(struct semaphore *);
extern void __up_wakeup(struct semaphore *);
/*
 * Hidden out of line code is fun, but extremely messy.  Rely on newer
 * compilers to do a respectable job with this.  The contention cases
 * are handled out of line in arch/alpha/kernel/semaphore.c.
 */
70 static inline void __down(struct semaphore *sem)
72 long count;
73 might_sleep();
74 count = atomic_dec_return(&sem->count);
75 if (unlikely(count < 0))
76 __down_failed(sem);
79 static inline int __down_interruptible(struct semaphore *sem)
81 long count;
82 might_sleep();
83 count = atomic_dec_return(&sem->count);
84 if (unlikely(count < 0))
85 return __down_failed_interruptible(sem);
86 return 0;
90 * down_trylock returns 0 on success, 1 if we failed to get the lock.
93 static inline int __down_trylock(struct semaphore *sem)
95 long ret;
97 /* "Equivalent" C:
99 do {
100 ret = ldl_l;
101 --ret;
102 if (ret < 0)
103 break;
104 ret = stl_c = ret;
105 } while (ret == 0);
107 __asm__ __volatile__(
108 "1: ldl_l %0,%1\n"
109 " subl %0,1,%0\n"
110 " blt %0,2f\n"
111 " stl_c %0,%1\n"
112 " beq %0,3f\n"
113 " mb\n"
114 "2:\n"
115 ".subsection 2\n"
116 "3: br 1b\n"
117 ".previous"
118 : "=&r" (ret), "=m" (sem->count)
119 : "m" (sem->count));
121 return ret < 0;
124 static inline void __up(struct semaphore *sem)
126 if (unlikely(atomic_inc_return(&sem->count) <= 0))
127 __up_wakeup(sem);
130 #if !defined(CONFIG_DEBUG_SEMAPHORE)
131 extern inline void down(struct semaphore *sem)
133 __down(sem);
135 extern inline int down_interruptible(struct semaphore *sem)
137 return __down_interruptible(sem);
139 extern inline int down_trylock(struct semaphore *sem)
141 return __down_trylock(sem);
143 extern inline void up(struct semaphore *sem)
145 __up(sem);
147 #endif
149 #endif