/* Copyright 2001, 2002, 2003 by Hans Reiser, licensing governed by reiser4/README */

/* Long term locking data structures. See lock.c for details. */

#ifndef __LOCK_H__
#define __LOCK_H__

#include "forward.h"
#include "debug.h"
#include "dformat.h"
#include "key.h"
#include "coord.h"
#include "plugin/node/node.h"
#include "txnmgr.h"
#include "readahead.h"

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pagemap.h>	/* for PAGE_CACHE_SIZE */
#include <asm/atomic.h>
#include <linux/wait.h>
/* Per-znode lock object */
struct zlock {
	spinlock_t guard;
	/* The number of readers if positive; the number of recursively taken
	   write locks if negative. Protected by zlock spin lock. */
	int nr_readers;
	/* A number of processes (lock_stacks) that have this object
	   locked with high priority */
	unsigned nr_hipri_owners;
	/* A number of attempts to lock znode in high priority direction */
	unsigned nr_hipri_requests;
	/* A number of write lock requests in high priority direction */
	unsigned nr_hipri_write_requests;
	/* A linked list of lock_handle objects that contains pointers
	   for all lock_stacks which have this lock object locked */
	struct list_head owners;
	/* A linked list of lock_stacks that wait for this lock */
	struct list_head requestors;
};
static inline void spin_lock_zlock(zlock *lock)
{
	/* check that zlock is not locked */
	assert("", LOCK_CNT_NIL(spin_locked_zlock));
	/* check that spinlocks of lower priorities are not held */
	assert("", LOCK_CNT_NIL(spin_locked_stack));

	spin_lock(&lock->guard);

	LOCK_CNT_INC(spin_locked_zlock);
	LOCK_CNT_INC(spin_locked);
}
static inline void spin_unlock_zlock(zlock *lock)
{
	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_zlock));
	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));

	LOCK_CNT_DEC(spin_locked_zlock);
	LOCK_CNT_DEC(spin_locked);

	spin_unlock(&lock->guard);
}
#define lock_is_locked(lock)       ((lock)->nr_readers != 0)
#define lock_is_rlocked(lock)      ((lock)->nr_readers > 0)
#define lock_is_wlocked(lock)      ((lock)->nr_readers < 0)
#define lock_is_wlocked_once(lock) ((lock)->nr_readers == -1)
#define lock_can_be_rlocked(lock)  ((lock)->nr_readers >= 0)
#define lock_mode_compatible(lock, mode)				\
	(((mode) == ZNODE_WRITE_LOCK && !lock_is_locked(lock)) ||	\
	 ((mode) == ZNODE_READ_LOCK && lock_can_be_rlocked(lock)))
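/* To illustrate the nr_readers encoding (a sketch, not part of the locking
   protocol proper; `z->lock' assumes the zlock is embedded in struct znode
   as ->lock): a node read-locked by two threads has nr_readers == 2, so a
   further ZNODE_READ_LOCK request is compatible while a ZNODE_WRITE_LOCK
   request is not; a node write-locked twice recursively by one thread has
   nr_readers == -2, which is compatible with neither mode:

	lock_mode_compatible(&z->lock, ZNODE_READ_LOCK);   // nr_readers >= 0 ?
	lock_mode_compatible(&z->lock, ZNODE_WRITE_LOCK);  // nr_readers == 0 ?
*/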
/* Since we have R/W znode locks we need additional bidirectional `link'
   objects to implement n<->m relationship between lock owners and lock
   objects. We call them `lock handles'.

   Locking: see lock.c/"SHORT-TERM LOCKING"
*/
struct lock_handle {
	/* This flag indicates that a signal to yield a lock was passed to
	   lock owner and counted in owner->nr_signaled

	   Locking: this is accessed under spin lock on ->node.
	*/
	int signaled;
	/* A link to owner of a lock */
	lock_stack *owner;
	/* A link to znode locked */
	znode *node;
	/* A list of all locks for a process */
	struct list_head locks_link;
	/* A list of all owners for a znode */
	struct list_head owners_link;
};
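/* A sketch of the n<->m bookkeeping a lock handle enables (illustrative
   only; the real linking is done in lock.c, and `node->lock.owners' assumes
   the zlock is embedded in struct znode as ->lock): when a lock is granted,
   the handle is threaded onto both sides at once, so a lock stack can
   enumerate its locks and a znode can enumerate its owners:

	list_add(&lh->locks_link, &owner->locks);        // stack -> its locks
	list_add(&lh->owners_link, &node->lock.owners);  // znode -> its owners
*/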
struct lock_request {
	/* A pointer to uninitialized link object */
	lock_handle *handle;
	/* A pointer to the object we want to lock */
	znode *node;
	/* Lock mode (ZNODE_READ_LOCK or ZNODE_WRITE_LOCK) */
	znode_lock_mode mode;
	/* result code of the lock request, set by dispatch_lock_requests() */
	int ret_code;
};
/* A lock stack structure for accumulating locks owned by a process */
struct lock_stack {
	/* A guard lock protecting a lock stack */
	spinlock_t sguard;
	/* number of znodes which were requested by high priority processes */
	atomic_t nr_signaled;
	/* Current priority of a process

	   This is only accessed by the current thread and thus requires no
	   locking.
	*/
	int curpri;
	/* A list of all locks owned by this process. Elements can be added to
	 * this list only by the current thread. ->node pointers in this list
	 * can be only changed by the current thread. */
	struct list_head locks;
	/* When lock_stack waits for the lock, it puts itself on double-linked
	   requestors list of that lock */
	struct list_head requestors_link;
	/* Current lock request info.

	   This is only accessed by the current thread and thus requires no
	   locking.
	*/
	struct lock_request request;
	/* the following two fields are the lock stack's
	 * synchronization object to use with the standard linux/wait.h
	 * interface. See reiser4_go_to_sleep and __reiser4_wake_up for
	 * usage details. */
	wait_queue_head_t wait;
	atomic_t wakeup;
#if REISER4_DEBUG
	int nr_locks;		/* number of lock handles in the above list */
#endif
};
/*
   User-visible znode locking functions
*/
extern int longterm_lock_znode(lock_handle * handle,
			       znode * node,
			       znode_lock_mode mode,
			       znode_lock_request request);

extern void longterm_unlock_znode(lock_handle * handle);

extern int reiser4_check_deadlock(void);

extern lock_stack *get_current_lock_stack(void);

extern void init_lock_stack(lock_stack * owner);
extern void reiser4_init_lock(zlock * lock);
static inline void init_lh(lock_handle *lh)
{
#if REISER4_DEBUG
	memset(lh, 0, sizeof *lh);
	INIT_LIST_HEAD(&lh->locks_link);
	INIT_LIST_HEAD(&lh->owners_link);
#else
	lh->node = NULL;
#endif
}
static inline void done_lh(lock_handle *lh)
{
	assert("zam-342", lh != NULL);
	if (lh->node != NULL)
		longterm_unlock_znode(lh);
}
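/* A typical long-term locking sequence, sketched from the interfaces above
   (illustrative only; `some_znode' is a placeholder, and error handling
   beyond the return code check is elided):

	lock_handle lh;

	init_lh(&lh);
	if (longterm_lock_znode(&lh, some_znode, ZNODE_READ_LOCK,
				ZNODE_LOCK_LOPRI) == 0) {
		... access the node under the lock ...
		done_lh(&lh);
	}
*/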
extern void move_lh(lock_handle * new, lock_handle * old);
extern void copy_lh(lock_handle * new, lock_handle * old);

extern int reiser4_prepare_to_sleep(lock_stack * owner);
extern void reiser4_go_to_sleep(lock_stack * owner);
extern void __reiser4_wake_up(lock_stack * owner);

extern int lock_stack_isclean(lock_stack * owner);
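/* The sleep entry points above follow the usual prepare/check/sleep wait
   pattern (a sketch; the authoritative protocol is in lock.c, and 0 on
   success from reiser4_prepare_to_sleep is an assumption based on its
   int return type):

	lock_stack *owner = get_current_lock_stack();

	if (reiser4_prepare_to_sleep(owner) == 0)
		reiser4_go_to_sleep(owner);	// until reiser4_wake_up()
*/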
/* zlock object state check macros: only used in assertions. Both forms imply
   that the lock is held by the current thread. */
extern int znode_is_write_locked(const znode *);
extern void reiser4_invalidate_lock(lock_handle *);
/* lock ordering is: first take zlock spin lock, then lock stack spin lock */
#define spin_ordering_pred_stack(stack)			\
	(LOCK_CNT_NIL(spin_locked_stack) &&		\
	 LOCK_CNT_NIL(spin_locked_txnmgr) &&		\
	 LOCK_CNT_NIL(spin_locked_inode) &&		\
	 LOCK_CNT_NIL(rw_locked_cbk_cache) &&		\
	 LOCK_CNT_NIL(spin_locked_super_eflush))
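/* The ordering rule above means nested acquisition must always take the
   zlock first and the lock stack second; the reverse order would trip the
   LOCK_CNT_NIL(spin_locked_stack) assertion in spin_lock_zlock(). A sketch
   of the legal nesting (`node->lock' again assumes the zlock is embedded in
   struct znode as ->lock):

	spin_lock_zlock(&node->lock);
	spin_lock_stack(owner);
	...
	spin_unlock_stack(owner);
	spin_unlock_zlock(&node->lock);
*/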
static inline void spin_lock_stack(lock_stack *stack)
{
	assert("", spin_ordering_pred_stack(stack));
	spin_lock(&(stack->sguard));
	LOCK_CNT_INC(spin_locked_stack);
	LOCK_CNT_INC(spin_locked);
}
static inline void spin_unlock_stack(lock_stack *stack)
{
	assert_spin_locked(&(stack->sguard));
	assert("nikita-1375", LOCK_CNT_GTZ(spin_locked_stack));
	assert("nikita-1376", LOCK_CNT_GTZ(spin_locked));
	LOCK_CNT_DEC(spin_locked_stack);
	LOCK_CNT_DEC(spin_locked);
	spin_unlock(&(stack->sguard));
}
static inline void reiser4_wake_up(lock_stack * owner)
{
	spin_lock_stack(owner);
	__reiser4_wake_up(owner);
	spin_unlock_stack(owner);
}
const char *lock_mode_name(znode_lock_mode lock);

#if REISER4_DEBUG
extern void check_lock_data(void);
extern void check_lock_node_data(znode * node);
#else
#define check_lock_data() noop
#define check_lock_node_data() noop
#endif
/* __LOCK_H__ */
#endif

/* Make Linus happy.
   Local variables:
   c-indentation-style: "K&R"
   mode-name: "LC"
   c-basic-offset: 8
   tab-width: 8
   fill-column: 120
   End:
*/