/* MN10300 Semaphores
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SEMAPHORE_H
#define _ASM_SEMAPHORE_H

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>

#define SEMAPHORE_DEBUG		0

/*
 * the semaphore definition
 * - if count is >0 then there are tokens available on the semaphore for down
 *   to collect
 * - if count is <=0 then there are no spare tokens, and anyone that wants one
 *   must wait
 * - if wait_list is not empty, then there are processes waiting for the
 *   semaphore
 */
struct semaphore {
	atomic_t		count;		/* it's not really atomic, it's
						 * just that certain modules
						 * expect to be able to access
						 * it directly */
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if SEMAPHORE_DEBUG
	unsigned		__magic;
#endif
};

#if SEMAPHORE_DEBUG
# define __SEM_DEBUG_INIT(name)	, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name, init_count)			\
{									\
	.count		= ATOMIC_INIT(init_count),			\
	.wait_lock	= __SPIN_LOCK_UNLOCKED((name).wait_lock),	\
	.wait_list	= LIST_HEAD_INIT((name).wait_list)		\
	__SEM_DEBUG_INIT(name)						\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count)			\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)

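/*
 * Illustrative declaration sketch, not part of the original header; the
 * variable names are hypothetical.  DECLARE_MUTEX() yields a semaphore
 * holding one token (a mutex-style lock); DECLARE_MUTEX_LOCKED() yields
 * one with no tokens, so the first down() blocks until somebody calls up().
 */
#if 0
static DECLARE_MUTEX(example_lock);		/* one token: free at boot */
static DECLARE_MUTEX_LOCKED(example_loaded);	/* no tokens: "held" at boot */
#endif
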
static inline void sema_init(struct semaphore *sem, int val)
{
	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}

extern void __down(struct semaphore *sem, unsigned long flags);
extern int  __down_interruptible(struct semaphore *sem, unsigned long flags);
extern void __up(struct semaphore *sem);

static inline void down(struct semaphore *sem)
{
	unsigned long flags;
	int count;

#if SEMAPHORE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	spin_lock_irqsave(&sem->wait_lock, flags);
	count = atomic_read(&sem->count);
	if (likely(count > 0)) {
		/* a token is available: take it and go */
		atomic_set(&sem->count, count - 1);
		spin_unlock_irqrestore(&sem->wait_lock, flags);
	} else {
		/* no tokens: sleep until up() gives us one */
		__down(sem, flags);
	}
}

static inline int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int count, ret = 0;

#if SEMAPHORE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	spin_lock_irqsave(&sem->wait_lock, flags);
	count = atomic_read(&sem->count);
	if (likely(count > 0)) {
		atomic_set(&sem->count, count - 1);
		spin_unlock_irqrestore(&sem->wait_lock, flags);
	} else {
		/* wait for a token, but give up if a signal arrives */
		ret = __down_interruptible(sem, flags);
	}
	return ret;
}

/*
 * non-blockingly attempt to down() a semaphore.
 * - returns zero if we acquired it
 */
static inline int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count, success = 0;

#if SEMAPHORE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	spin_lock_irqsave(&sem->wait_lock, flags);
	count = atomic_read(&sem->count);
	if (likely(count > 0)) {
		atomic_set(&sem->count, count - 1);
		success = 1;
	}
	spin_unlock_irqrestore(&sem->wait_lock, flags);
	return !success;
}

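/*
 * Illustrative sketch, not part of the original header; the function and
 * parameter names are hypothetical.  down_trylock() follows the usual
 * inverted convention: it returns 0 when the semaphore was acquired and
 * non-zero when it was not (the -EAGAIN below assumes <linux/errno.h>
 * is already pulled in through other includes).
 */
#if 0
static int example_try(struct semaphore *sem)
{
	if (down_trylock(sem))
		return -EAGAIN;		/* could not get a token right now */

	/* ... critical section ... */

	up(sem);
	return 0;
}
#endif
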
static inline void up(struct semaphore *sem)
{
	unsigned long flags;

#if SEMAPHORE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	spin_lock_irqsave(&sem->wait_lock, flags);
	if (!list_empty(&sem->wait_list))
		/* hand the token straight to the first waiter */
		__up(sem);
	else
		/* nobody waiting: return the token to the pool */
		atomic_set(&sem->count, atomic_read(&sem->count) + 1);
	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

static inline int sem_getcount(struct semaphore *sem)
{
	return atomic_read(&sem->count);
}

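/*
 * Usage sketch, not part of the original header; all names below are
 * hypothetical.  It illustrates the token semantics documented above:
 * a semaphore initialised to 2 admits two concurrent holders, and a third
 * caller of down_interruptible() sleeps on wait_list until up() either
 * wakes it directly or returns a token to count.
 */
#if 0
static struct semaphore example_pool_sem;

static void example_init(void)
{
	sema_init(&example_pool_sem, 2);	/* two tokens available */
}

static int example_use(void)
{
	int ret;

	ret = down_interruptible(&example_pool_sem);
	if (ret)
		return ret;		/* -EINTR: interrupted by a signal */

	/* ... at most two tasks run this section at once ... */

	up(&example_pool_sem);
	return 0;
}
#endif
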
#endif /* __ASSEMBLY__ */

#endif