/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index */
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct gfs2_glock *gl;		/* current glock struct */
	char string[512];		/* scratch space */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock the glock belongs to
 * @name: The glock name (lock number and type)
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}
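
/*
 * Note: an hlist_bl_head embeds its lock in bit zero of the first
 * pointer (via bit_spin_lock()), so locking a bucket costs no extra
 * memory. That matters with 2^15 buckets, where an array of spinlock_t
 * would be far larger, and it composes with the RCU list traversal
 * used by search_bucket() below.
 */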

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl))
		gfs2_glock_add_to_lru(gl);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_test(&gl->gl_ref)) {
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		gfs2_glock_remove_from_lru(gl);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the superblock to match
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
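
/*
 * To summarise the rules above: a request is granted only if it is
 * compatible with both the current glock state and the holder at the
 * head of the queue. Exclusive requests (or any request behind a
 * queued exclusive head) must be at the head of the queue; GL_EXACT
 * requires the state to match exactly; SH and DF requests can be
 * granted under an EX glock state provided the queue head agrees;
 * and LM_FLAG_ANY accepts any state other than unlocked.
 */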

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}
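
/*
 * The glock spinlock is dropped across go_xmote_th/go_inval and the
 * lock module call itself, since all of these may sleep (for example
 * to flush or invalidate cached data). GLF_INVALIDATE_IN_PROGRESS
 * marks the window so that "try" requests queued meanwhile fail in
 * add_to_queue() instead of racing the invalidation.
 */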

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}
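
/*
 * delete_work_func() runs from gl_delete. Its job is to find the
 * in-core inode for this glock (via the cache when gl_object suggests
 * it is there, otherwise by on-disk number) and prune its dentry
 * aliases, so that an inode unlinked on another node can be evicted
 * and its blocks deallocated promptly.
 */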

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}
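
/*
 * Reference counting above follows the convention used throughout
 * this file: whoever queues gl_work takes a glock reference on behalf
 * of the work item, so the work function drops one reference per
 * event it has consumed - one for the reply (drop_ref) and one for
 * the original queueing, unless it requeued itself with a delay.
 */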

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
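
/*
 * Illustrative usage (a sketch, not part of the original file; error
 * handling trimmed). A typical caller obtains the glock structure and
 * then queues a holder on it, dropping the extra reference once the
 * holder owns one of its own - compare gfs2_glock_nq_num() below:
 *
 *	struct gfs2_glock *gl;
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_meta_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	gfs2_glock_put(gl);
 *	...
 *	gfs2_glock_dq_uninit(&gh);
 */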

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if non-zero, defer the demote rather than applying it at once
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}
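
/*
 * Note the merging rule above: the first demote request records its
 * target state, and a second, conflicting request widens the demote
 * to LM_ST_UNLOCKED, the only state which satisfies both callers.
 */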

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		/* Bound the write to the scratch buffer and emit it verbatim,
		   so a '%' in the formatted output cannot be re-expanded */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
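
/*
 * Illustrative GL_ASYNC pattern (a sketch, not part of the original
 * file): queue the request without blocking, do other work, then poll
 * or wait for the result:
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	error = gfs2_glock_nq(&gh);	(never fails for GL_ASYNC)
 *	...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */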

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
		__gfs2_glock_schedule_for_reclaim(gl);
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
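
/*
 * Sorting the holders by lock number (glock_compare() above) imposes
 * a single global acquisition order, which is what makes acquiring
 * several glocks at once deadlock free: if two nodes both want locks
 * 5 and 9, each asks for 5 first, so neither can hold 9 while waiting
 * for 5.
 */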

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq_uninit(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_ops->go_min_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}
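
/*
 * gfs2_glock_cb() is the entry point for demote requests coming from
 * the lock module. The delay derived from go_min_hold_time means a
 * recently used glock is not bounced to another node the instant it
 * is asked for.
 */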

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	spin_unlock(&gl->gl_spin);
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	smp_wmb();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}
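
/*
 * DFL_BLOCK_LOCKS is set by the lock module while recovery is in
 * progress. Replies which arrive in that window are frozen
 * (GLF_FROZEN) and replayed later via thaw_glock(), rather than being
 * processed against possibly stale state.
 */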

static int gfs2_shrink_glock_memory(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	int nr = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		clear_bit(GLF_LRU, &gl->gl_flags);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
		set_bit(GLF_LRU, &gl->gl_flags);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}
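
/*
 * The return value follows the shrinker convention of this kernel
 * generation: the approximate number of reclaimable objects left,
 * scaled by sysctl_vfs_cache_pressure, or -1 to tell the VM that no
 * progress can be made without __GFP_FS.
 */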

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			   unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}
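
/*
 * Illustrative output (made-up values, following the format described
 * above): one G: line per glock, then one H: line per holder:
 *
 *	G:  s:SH n:2/27bc f:qL t:SH d:EX/0 a:0 v:0 r:3
 *	 H: s:SH f:H e:0 p:4465 [postmark] gfs2_inode_lookup+0x14e/0x260
 */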

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	/* alloc_workqueue() returns NULL on failure, not an ERR_PTR */
	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
		} else {
			gi->gl = glock_hash_chain(gi->hash);
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);

	return 0;
}
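
/*
 * The iterator holds rcu_read_lock() across the walk (taken in
 * ->start, released either above when the table is exhausted or in
 * ->stop), so producing the debugfs glocks file never blocks writers;
 * dead entries (gl_ref == 0) and glocks belonging to other
 * superblocks are skipped.
 */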

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}