/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Local GFS2 headers; the code below depends on these for gfs2_assert,
   gfs2_lm_*, the glops tables and the incore structures. */
#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct glock_iter {
        int hash;                     /* hash bucket index         */
        struct gfs2_sbd *sdp;         /* incore superblock         */
        struct gfs2_glock *gl;        /* current glock struct      */
        struct seq_file *seq;         /* sequence file for debugfs */
        char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */
static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}

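/*
 * For example, a glock already held in LM_ST_EXCLUSIVE satisfies a new
 * LM_ST_SHARED request (unless the caller passed GL_EXACT), and any held
 * state satisfies a request carrying LM_FLAG_ANY.
 */
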
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */
static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

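/*
 * Note that the superblock pointer itself is folded into the hash: the
 * single global hash table is shared by every mounted GFS2 filesystem,
 * so identical lock numbers on different filesystems must not collide.
 */
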
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */
static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */
void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */
int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                BUG_ON(spin_is_locked(&gl->gl_spin));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the bucket to search
 * @sdp: the filesystem the glock belongs to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */
static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */
static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

static void glock_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
                set_bit(GLF_DEMOTE, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}

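/*
 * The work item is queued with a delay when a remote demote request
 * arrives before the minimum hold time has expired; by the time it runs,
 * any GLF_PENDING_DEMOTE is promoted to a real GLF_DEMOTE and the queue
 * is run to carry it out.
 */
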
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}

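/*
 * A sketch of typical usage (not taken verbatim from any one caller):
 * look up or create the glock, then queue a holder on it through the
 * nq/dq interface, mirroring what gfs2_glock_nq_num() below does:
 *
 *      struct gfs2_glock *gl;
 *      struct gfs2_holder gh;
 *      int error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *      if (!error) {
 *              error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *              gfs2_glock_put(gl);
 *      }
 *      ...
 *      gfs2_glock_dq_uninit(&gh);
 */
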
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = current->pid;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */
void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

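/*
 * A holder normally lives through the sequence
 * gfs2_holder_init() -> gfs2_glock_nq() -> (lock held) -> gfs2_glock_dq()
 * -> gfs2_holder_uninit(), with the init/uninit pair bracketing the glock
 * reference the holder keeps.
 */
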
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */
static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */
static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);
                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_wake(gh);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */
static int rq_demote(struct gfs2_glock *gl)
{
        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gl->gl_demote_state ||
            gl->gl_state == LM_ST_UNLOCKED) {
                gfs2_demote_wake(gl);
                return 0;
        }

        set_bit(GLF_LOCK, &gl->gl_flags);
        set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        if (gl->gl_demote_state == LM_ST_UNLOCKED ||
            gl->gl_state != LM_ST_EXCLUSIVE) {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_drop_th(gl);
        } else {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_xmote_th(gl, NULL);
        }

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);
                        blocked = rq_mutex(gh);
                } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                        blocked = rq_demote(gl);
                        if (gl->gl_waiters2 && !blocked) {
                                set_bit(GLF_DEMOTE, &gl->gl_flags);
                                gl->gl_demote_state = LM_ST_UNLOCKED;
                        }
                        gl->gl_waiters2 = 0;
                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);
                        blocked = rq_promote(gh);
                } else
                        break;

                if (blocked)
                        break;
        }
}

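/*
 * Queue priority, as encoded above: glmutex requests on gl_waiters1 go
 * first, then any pending demote, and only then promotion requests on
 * gl_waiters3; the loop stops as soon as one request blocks.
 */
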
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */
static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
                BUG();

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                clear_bit(HIF_WAIT, &gh.gh_iflags);
                smp_mb();
                wake_up_bit(&gh.gh_iflags, HIF_WAIT);
        }
        spin_unlock(&gl->gl_spin);

        wait_on_holder(&gh);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */
static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */
static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}

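/*
 * The "glmutex" is not a kernel mutex but the GLF_LOCK bit plus the
 * gl_waiters1 queue: lock() parks a dummy holder on gl_waiters1 until
 * rq_mutex() hands the bit over, trylock() just test-and-sets the bit,
 * and unlock() clears it and re-runs the queue.
 */
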
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: 1 if the request came from another node
 * @delay: jiffies to wait before queuing the demote
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            int remote, unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        spin_lock(&gl->gl_spin);
        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object) {
                        gfs2_glock_schedule_for_reclaim(gl);
                        spin_unlock(&gl->gl_spin);
                        return;
                }
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                   gl->gl_demote_state != state) {
                if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                        gl->gl_waiters2 = 1;
                else
                        gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        spin_unlock(&gl->gl_spin);
}

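/*
 * A demote request that arrives while another demote is already in
 * progress cannot simply overwrite gl_demote_state; the gl_waiters2 flag
 * records it so that run_queue() can requeue a demote to LM_ST_UNLOCKED
 * once the current transition finishes.
 */
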
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */
static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /*  Deal with each possible exit condition  */

        if (!gh) {
                gl->gl_stamp = jiffies;
                if (ret & LM_OUT_CANCELED) {
                        op_done = 0;
                } else {
                        spin_lock(&gl->gl_spin);
                        if (gl->gl_state != gl->gl_demote_state) {
                                gl->gl_req_bh = NULL;
                                spin_unlock(&gl->gl_spin);
                                gfs2_glock_drop_th(gl);
                                gfs2_glock_put(gl);
                                return;
                        }
                        gfs2_demote_wake(gl);
                        spin_unlock(&gl->gl_spin);
                }
        } else {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        goto out;
                gh->gh_error = GLR_CANCELED;
                if (ret & LM_OUT_CANCELED)
                        goto out;
                if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                        list_add_tail(&gh->gh_list, &gl->gl_holders);
                        gh->gh_error = 0;
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        set_bit(HIF_FIRST, &gh->gh_iflags);
                        op_done = 0;
                        goto out;
                }
                gh->gh_error = GLR_TRYFAILED;
                if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        goto out;
                gh->gh_error = -EINVAL;
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder driving the state change (or NULL for a demote)
 *
 */
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh ? gh->gh_flags : 0;
        unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Wakes up the process waiting on the struct gfs2_holder (if any) and
 * drops the reference on the glock that the top half took out.
 *
 */
static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        spin_lock(&gl->gl_spin);
        gfs2_demote_wake(gl);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */
static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */
static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh && !(gl->gl_req_gh &&
                                       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */
static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, pid_t pid)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner_pid == pid)
                        return gh;
        }

        return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (gi) {
                vsprintf(gi->string, fmt, args);
                seq_printf(gi->seq, gi->string);
        } else
                vprintk(fmt, args);
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */
static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner_pid);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (!(gh->gh_flags & GL_FLOCK)) {
                existing = find_holder_by_owner(&gl->gl_holders,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               existing->gh_gl->gl_name.ln_type,
                               existing->gh_gl->gl_state);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               gl->gl_name.ln_type, gl->gl_state);
                        BUG();
                }

                existing = find_holder_by_owner(&gl->gl_waiters3,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        BUG();
                }
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */
int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */
int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */
int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */
void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                if (glops->go_unlock) {
                        spin_unlock(&gl->gl_spin);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_spin);
                }
                gl->gl_stamp = jiffies;
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

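/*
 * If a remote demote is pending when the last holder drops the lock, the
 * demote is deferred by go_min_hold_time jiffies rather than run at once,
 * so a glock in active use is not bounced between nodes on every release.
 */
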
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */
void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */
static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach an LVB to a glock
 * @gl: The glock in question
 *
 * Returns: errno
 */
int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach an LVB from a glock
 * @gl: The glock in question
 *
 */
void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;

        handle_callback(gl, state, 1, delay);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

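/*
 * For illustration: with a go_min_hold_time of, say, HZ/10 and a callback
 * arriving 20ms after the last state change (HZ=1000), holdtime is
 * gl_tchange + 100ms and now is gl_tchange + 20ms, so the demote work is
 * queued with an 80ms delay instead of running immediately.
 */
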
/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */
void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */
void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 * Returns: 1 if the bucket has entries
 */
static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while(1) {
                if (!sdp || gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        has_entries = 1;
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        cond_resched();
        return has_entries;
}

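/*
 * The bucket is walked holding a reference on the current glock rather
 * than keeping the bucket lock across the whole pass, so the examiner may
 * sleep; `prev' keeps the previous entry pinned until we have safely
 * stepped past it.
 */
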
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */
static void scan_glock(struct gfs2_glock *gl)
{
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
                return;

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        goto out_schedule;
                gfs2_glmutex_unlock(gl);
        }
        return;

out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */
static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                        if (examine_bucket(clear_glock, sdp, x))
                                cont = 1;
                }

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                down_write(&gfs2_umount_flush_sem);
                invalidate_inodes(sdp->sd_vfs);
                up_write(&gfs2_umount_flush_sem);
                msleep(10);
        }
}

/*
 *  Diagnostic routines to help debug distributed deadlock
 */
static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
                              unsigned long address)
{
        char buffer[KSYM_SYMBOL_LEN];

        sprint_symbol(buffer, address);
        print_dbg(gi, fmt, buffer);
}

/**
 * dump_holder - print information about a glock holder
 * @gi: the glock iterator (NULL to print to the console)
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */
static int dump_holder(struct glock_iter *gi, char *str,
                       struct gfs2_holder *gh)
{
        unsigned int x;
        struct task_struct *gh_owner;

        print_dbg(gi, "  %s\n", str);
        if (gh->gh_owner_pid) {
                print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
                gh_owner = find_task_by_pid(gh->gh_owner_pid);
                if (gh_owner)
                        print_dbg(gi, "(%s)\n", gh_owner->comm);
                else
                        print_dbg(gi, "(ended)\n");
        } else
                print_dbg(gi, "    owner = -1\n");
        print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
        print_dbg(gi, "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        print_dbg(gi, "    error = %d\n", gh->gh_error);
        print_dbg(gi, "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);

        return 0;
}

/**
 * dump_inode - print information about an inode
 * @gi: the glock iterator
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */
static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
{
        unsigned int x;

        print_dbg(gi, "  Inode:\n");
        print_dbg(gi, "    num = %llu/%llu\n",
                  (unsigned long long)ip->i_no_formal_ino,
                  (unsigned long long)ip->i_no_addr);
        print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
        print_dbg(gi, "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");

        return 0;
}

/**
 * dump_glock - print information about a glock
 * @gi: the glock iterator
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;
        struct task_struct *gl_owner;

        spin_lock(&gl->gl_spin);

        print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
                  (unsigned long long)gl->gl_name.ln_number);
        print_dbg(gi, "  gl_flags =");
        for (x = 0; x < 32; x++) {
                if (test_bit(x, &gl->gl_flags))
                        print_dbg(gi, " %u", x);
        }
        if (!test_bit(GLF_LOCK, &gl->gl_flags))
                print_dbg(gi, " (unlocked)");
        print_dbg(gi, " \n");
        print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
        print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
        if (gl->gl_owner_pid) {
                gl_owner = find_task_by_pid(gl->gl_owner_pid);
                if (gl_owner)
                        print_dbg(gi, "  gl_owner = pid %d (%s)\n",
                                  gl->gl_owner_pid, gl_owner->comm);
                else
                        print_dbg(gi, "  gl_owner = %d (ended)\n",
                                  gl->gl_owner_pid);
        } else
                print_dbg(gi, "  gl_owner = -1\n");
        print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
        print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        print_dbg(gi, "  le = %s\n",
                  (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        print_dbg(gi, "  reclaim = %s\n",
                  (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
                          gl->gl_aspace->i_mapping->nrpages);
        else
                print_dbg(gi, "  aspace = no\n");
        print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder(gi, "Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder(gi, "Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder(gi, "Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder(gi, "Waiter3", gh);
                if (error)
                        goto out;
        }
        if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
                          gl->gl_demote_state, (unsigned long long)
                          (jiffies - gl->gl_demote_time)*(1000000/HZ));
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gi, gl->gl_object);
                        if (error)
                                goto out;
                } else
                        print_dbg(gi, "  Inode: busy\n");
        }

        error = 0;

out:
        spin_unlock(&gl->gl_spin);
        return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * The state is dumped to the console, since there is no seq_file iterator
 * to copy the information into.
 */
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

                read_lock(gl_lock_addr(x));

                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
                        if (gl->gl_sbd != sdp)
                                continue;

                        error = dump_glock(NULL, gl);
                        if (error)
                                break;
                }

                read_unlock(gl_lock_addr(x));

                if (error)
                        break;
        }

        return error;
}

/**
 * gfs2_scand - Look for cached glocks and inodes to toss from memory
 * @data: unused thread argument
 *
 * One of these daemons runs, finding candidates to add to sd_reclaim_list.
 */
static int gfs2_scand(void *data)
{
        unsigned int x;
        unsigned int delay;

        while (!kthread_should_stop()) {
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                        examine_bucket(scan_glock, NULL, x);
                if (freezing(current))
                        refrigerator();
                delay = scand_secs;
                if (delay < 1)
                        delay = 1;
                schedule_timeout_interruptible(delay * HZ);
        }

        return 0;
}

int __init gfs2_glock_init(void)
{
        unsigned i;

        for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
        }
#ifdef GL_HASH_LOCK_SZ
        for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
                rwlock_init(&gl_hash_locks[i]);
        }
#endif

        scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
        if (IS_ERR(scand_process))
                return PTR_ERR(scand_process);

        glock_workqueue = create_workqueue("glock_workqueue");
        if (IS_ERR(glock_workqueue)) {
                kthread_stop(scand_process);
                return PTR_ERR(glock_workqueue);
        }

        return 0;
}

void gfs2_glock_exit(void)
{
        destroy_workqueue(glock_workqueue);
        kthread_stop(scand_process);
}

module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");

static int gfs2_glock_iter_next(struct glock_iter *gi)
{
        struct gfs2_glock *gl;

restart:
        read_lock(gl_lock_addr(gi->hash));
        gl = gi->gl;
        if (gl) {
                gi->gl = hlist_entry(gl->gl_list.next,
                                     struct gfs2_glock, gl_list);
                if (gi->gl)
                        gfs2_glock_hold(gi->gl);
        }
        read_unlock(gl_lock_addr(gi->hash));
        if (gl)
                gfs2_glock_put(gl);
        if (gl && gi->gl == NULL)
                gi->hash++;
        while(gi->gl == NULL) {
                if (gi->hash >= GFS2_GL_HASH_SIZE)
                        return 1;
                read_lock(gl_lock_addr(gi->hash));
                gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
                                     struct gfs2_glock, gl_list);
                if (gi->gl)
                        gfs2_glock_hold(gi->gl);
                read_unlock(gl_lock_addr(gi->hash));
                gi->hash++;
        }

        if (gi->sdp != gi->gl->gl_sbd)
                goto restart;

        return 0;
}

static void gfs2_glock_iter_free(struct glock_iter *gi)
{
        if (gi->gl)
                gfs2_glock_put(gi->gl);
        kfree(gi);
}

static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
{
        struct glock_iter *gi;

        gi = kmalloc(sizeof (*gi), GFP_KERNEL);
        if (!gi)
                return NULL;

        gi->sdp = sdp;
        gi->hash = 0;
        gi->seq = NULL;
        gi->gl = NULL;
        memset(gi->string, 0, sizeof(gi->string));

        if (gfs2_glock_iter_next(gi)) {
                gfs2_glock_iter_free(gi);
                return NULL;
        }

        return gi;
}

static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
{
        struct glock_iter *gi;
        loff_t n = *pos;

        gi = gfs2_glock_iter_init(file->private);
        if (!gi)
                return NULL;

        while(n--) {
                if (gfs2_glock_iter_next(gi)) {
                        gfs2_glock_iter_free(gi);
                        return NULL;
                }
        }

        return gi;
}

static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
                                 loff_t *pos)
{
        struct glock_iter *gi = iter_ptr;

        (*pos)++;

        if (gfs2_glock_iter_next(gi)) {
                gfs2_glock_iter_free(gi);
                return NULL;
        }

        return gi;
}

static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
        struct glock_iter *gi = iter_ptr;

        if (gi)
                gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
{
        struct glock_iter *gi = iter_ptr;

        gi->seq = file;
        dump_glock(gi, gi->gl);

        return 0;
}

static const struct seq_operations gfs2_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

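/*
 * With debugfs mounted in the usual place, the resulting table can be
 * read as e.g. /sys/kernel/debug/gfs2/<fsname>/glocks (the exact mount
 * point is site specific); each entry is printed by dump_glock() above.
 */
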
static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        int ret;

        ret = seq_open(file, &gfs2_glock_seq_ops);
        if (ret)
                return ret;

        seq = file->private_data;
        seq->private = inode->i_private;

        return 0;
}

static const struct file_operations gfs2_debug_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_debugfs_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
};

*sdp
)
2207 sdp
->debugfs_dir
= debugfs_create_dir(sdp
->sd_table_name
, gfs2_root
);
2208 if (!sdp
->debugfs_dir
)
2210 sdp
->debugfs_dentry_glocks
= debugfs_create_file("glocks",
2212 sdp
->debugfs_dir
, sdp
,
2214 if (!sdp
->debugfs_dentry_glocks
)
2220 void gfs2_delete_debugfs_file(struct gfs2_sbd
*sdp
)
2222 if (sdp
&& sdp
->debugfs_dir
) {
2223 if (sdp
->debugfs_dentry_glocks
) {
2224 debugfs_remove(sdp
->debugfs_dentry_glocks
);
2225 sdp
->debugfs_dentry_glocks
= NULL
;
2227 debugfs_remove(sdp
->debugfs_dir
);
2228 sdp
->debugfs_dir
= NULL
;
int gfs2_register_debugfs(void)
{
        gfs2_root = debugfs_create_dir("gfs2", NULL);
        return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
        debugfs_remove(gfs2_root);
        gfs2_root = NULL;
}