/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by
 * reiser4/README */

/* Long term locking data structures. See lock.c for details. */

#ifndef __LOCK_H__
#define __LOCK_H__

#include "forward.h"
#include "debug.h"
#include "dformat.h"
#include "key.h"
#include "coord.h"
#include "plugin/node/node.h"
#include "txnmgr.h"
#include "readahead.h"

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pagemap.h>	/* for PAGE_CACHE_SIZE */
#include <asm/atomic.h>
#include <linux/wait.h>
/* Per-znode lock object */
struct zlock {
	spinlock_t guard;
	/* The number of readers if positive; the number of recursively taken
	   write locks if negative. Protected by zlock spin lock. */
	int nr_readers;
	/* The number of processes (lock_stacks) that have this object
	   locked with high priority */
	unsigned nr_hipri_owners;
	/* The number of attempts to lock znode in high priority direction */
	unsigned nr_hipri_requests;
	/* The number of write lock requests in high priority direction */
	unsigned nr_hipri_write_requests;
	/* A linked list of lock_handle objects that contains pointers
	   for all lock_stacks which have this lock object locked */
	struct list_head owners;
	/* A linked list of lock_stacks that wait for this lock */
	struct list_head requestors;
};
static inline void spin_lock_zlock(zlock *lock)
{
	/* check that zlock is not locked */
	assert("", LOCK_CNT_NIL(spin_locked_zlock));
	/* check that spinlocks of lower priorities are not held */
	assert("", LOCK_CNT_NIL(spin_locked_stack));

	spin_lock(&lock->guard);

	LOCK_CNT_INC(spin_locked_zlock);
	LOCK_CNT_INC(spin_locked);
}
static inline void spin_unlock_zlock(zlock *lock)
{
	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_zlock));
	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));

	LOCK_CNT_DEC(spin_locked_zlock);
	LOCK_CNT_DEC(spin_locked);

	spin_unlock(&lock->guard);
}
#define lock_is_locked(lock)		((lock)->nr_readers != 0)
#define lock_is_rlocked(lock)		((lock)->nr_readers > 0)
#define lock_is_wlocked(lock)		((lock)->nr_readers < 0)
#define lock_is_wlocked_once(lock)	((lock)->nr_readers == -1)
#define lock_can_be_rlocked(lock)	((lock)->nr_readers >= 0)
#define lock_mode_compatible(lock, mode)				\
	(((mode) == ZNODE_WRITE_LOCK && !lock_is_locked(lock)) ||	\
	 ((mode) == ZNODE_READ_LOCK && lock_can_be_rlocked(lock)))
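
/*
 * Illustration (not part of the original header): how ->nr_readers encodes
 * lock state for the macros above. The concrete values are hypothetical.
 *
 *	nr_readers ==  0   unlocked; both ZNODE_READ_LOCK and ZNODE_WRITE_LOCK
 *			   are lock_mode_compatible()
 *	nr_readers ==  3   three readers hold the lock; only ZNODE_READ_LOCK
 *			   is compatible (lock_can_be_rlocked() holds)
 *	nr_readers == -2   one owner holds the write lock, taken recursively
 *			   twice; no mode is compatible until it returns to 0
 */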
/* Since we have R/W znode locks we need additional bidirectional `link'
   objects to implement the n<->m relationship between lock owners and lock
   objects. We call them `lock handles'.

   Locking: see lock.c/"SHORT-TERM LOCKING"
*/
struct lock_handle {
	/* This flag indicates that a signal to yield a lock was passed to
	   the lock owner and counted in owner->nr_signaled

	   Locking: this is accessed under spin lock on ->node.
	*/
	int signaled;
	/* A link to the owner of a lock */
	lock_stack *owner;
	/* A link to the znode locked */
	znode *node;
	/* A list of all locks for a process */
	struct list_head locks_link;
	/* A list of all owners for a znode */
	struct list_head owners_link;
};
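
/*
 * Illustrative sketch (not part of the original header): the usual life
 * cycle of a lock_handle, using init_lh()/done_lh() declared below. The
 * ZNODE_LOCK_LOPRI request flag is assumed here; see znode_lock_request.
 *
 *	lock_handle lh;
 *	int ret;
 *
 *	init_lh(&lh);
 *	ret = longterm_lock_znode(&lh, node, ZNODE_READ_LOCK,
 *				  ZNODE_LOCK_LOPRI);
 *	if (ret == 0) {
 *		... read from node ...
 *		done_lh(&lh);	releases via longterm_unlock_znode()
 *	}
 */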
struct lock_request {
	/* A pointer to an uninitialized link object */
	lock_handle *handle;
	/* A pointer to the object we want to lock */
	znode *node;
	/* Lock mode (ZNODE_READ_LOCK or ZNODE_WRITE_LOCK) */
	znode_lock_mode mode;
	/* the field through which dispatch_lock_requests() returns the lock
	   request result code */
	int ret_code;
};
/* A lock stack structure for accumulating locks owned by a process */
struct lock_stack {
	/* A guard lock protecting a lock stack */
	spinlock_t sguard;
	/* number of znodes which were requested by high priority processes */
	atomic_t nr_signaled;
	/* Current priority of a process

	   This is only accessed by the current thread and thus requires no
	   locking.
	*/
	int curpri;
	/* A list of all locks owned by this process. Elements can be added to
	 * this list only by the current thread. ->node pointers in this list
	 * can be only changed by the current thread. */
	struct list_head locks;
	/* When a lock_stack waits for a lock, it puts itself on the
	   doubly-linked requestors list of that lock */
	struct list_head requestors_link;
	/* Current lock request info.

	   This is only accessed by the current thread and thus requires no
	   locking.
	*/
	struct lock_request request;
	/* the following two fields are the lock stack's
	 * synchronization object to use with the standard linux/wait.h
	 * interface. See reiser4_go_to_sleep and __reiser4_wake_up for
	 * usage details. */
	wait_queue_head_t wait;
	atomic_t wakeup;
#if REISER4_DEBUG
	int nr_locks;		/* number of lock handles in the above list */
#endif
};

/*
   User-visible znode locking functions
*/
extern int longterm_lock_znode(lock_handle * handle,
			       znode * node,
			       znode_lock_mode mode,
			       znode_lock_request request);

extern void longterm_unlock_znode(lock_handle * handle);

extern int reiser4_check_deadlock(void);

extern lock_stack *get_current_lock_stack(void);

extern void init_lock_stack(lock_stack * owner);
extern void reiser4_init_lock(zlock * lock);
static inline void init_lh(lock_handle *lh)
{
#if REISER4_DEBUG
	memset(lh, 0, sizeof *lh);
	INIT_LIST_HEAD(&lh->locks_link);
	INIT_LIST_HEAD(&lh->owners_link);
#else
	lh->node = NULL;
#endif
}
static inline void done_lh(lock_handle *lh)
{
	assert("zam-342", lh != NULL);
	if (lh->node != NULL)
		longterm_unlock_znode(lh);
}
extern void move_lh(lock_handle * new, lock_handle * old);
extern void copy_lh(lock_handle * new, lock_handle * old);

extern int reiser4_prepare_to_sleep(lock_stack * owner);
extern void reiser4_go_to_sleep(lock_stack * owner);
extern void __reiser4_wake_up(lock_stack * owner);

extern int lock_stack_isclean(lock_stack * owner);
/* zlock object state checks: only used in assertions. Both forms imply
   that the lock is held by the current thread. */
extern int znode_is_write_locked(const znode *);
extern void reiser4_invalidate_lock(lock_handle *);
/* lock ordering is: first take zlock spin lock, then lock stack spin lock */
#define spin_ordering_pred_stack(stack)			\
	(LOCK_CNT_NIL(spin_locked_stack) &&		\
	 LOCK_CNT_NIL(spin_locked_txnmgr) &&		\
	 LOCK_CNT_NIL(spin_locked_inode) &&		\
	 LOCK_CNT_NIL(rw_locked_cbk_cache) &&		\
	 LOCK_CNT_NIL(spin_locked_super_eflush))
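
/*
 * Illustration (not part of the original header) of the ordering rule
 * above; `node' and `owner' are hypothetical, and ->lock is assumed to be
 * the znode's embedded zlock.
 *
 *	spin_lock_zlock(&node->lock);	zlock first...
 *	spin_lock_stack(owner);		...then the lock stack spin lock
 *	...
 *	spin_unlock_stack(owner);
 *	spin_unlock_zlock(&node->lock);
 *
 * Nesting them the other way around would trip the
 * LOCK_CNT_NIL(spin_locked_stack) assertion in spin_lock_zlock().
 */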
static inline void spin_lock_stack(lock_stack *stack)
{
	assert("", spin_ordering_pred_stack(stack));
	spin_lock(&(stack->sguard));
	LOCK_CNT_INC(spin_locked_stack);
	LOCK_CNT_INC(spin_locked);
}
static inline void spin_unlock_stack(lock_stack *stack)
{
	assert_spin_locked(&(stack->sguard));
	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_stack));
	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
	LOCK_CNT_DEC(spin_locked_stack);
	LOCK_CNT_DEC(spin_locked);
	spin_unlock(&(stack->sguard));
}
static inline void reiser4_wake_up(lock_stack * owner)
{
	spin_lock_stack(owner);
	__reiser4_wake_up(owner);
	spin_unlock_stack(owner);
}
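
/*
 * Illustrative sketch (not part of the original header): how the
 * ->wait/->wakeup pair is meant to be used, per the comments in struct
 * lock_stack above. Exact semantics live in lock.c; the waiter side below
 * is hypothetical pseudo-usage:
 *
 *	if (reiser4_prepare_to_sleep(owner) == 0)
 *		reiser4_go_to_sleep(owner);	blocks on ->wait until some
 *						other thread calls
 *						reiser4_wake_up(owner)
 *
 * reiser4_wake_up() wraps __reiser4_wake_up() in the stack spin lock so
 * that a wake-up cannot race with a waiter preparing to sleep.
 */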
const char *lock_mode_name(znode_lock_mode lock);

#if REISER4_DEBUG
extern void check_lock_data(void);
extern void check_lock_node_data(znode * node);
#else
#define check_lock_data() noop
#define check_lock_node_data(node) noop
#endif

/* __LOCK_H__ */
#endif

/* Make Linus happy.
   Local variables:
   c-indentation-style: "K&R"
   mode-name: "LC"
   c-basic-offset: 8
   tab-width: 8
   fill-column: 120
   End:
*/