// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>
#include <linux/pid_namespace.h>
#include <linux/file.h>
#include <linux/random.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"
struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
static void request_demote(struct gfs2_glock *gl, unsigned int state,
			   unsigned long delay, bool remote);

static struct dentry *gfs2_root;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)
static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};
static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};
static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}
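/*
 * Glocks have no per-glock wait queue for lookups.  Instead, waiters are
 * spread across a fixed table of 2^GLOCK_WAIT_TABLE_BITS shared wait
 * queues, indexed by a hash of the lock name.  Since unrelated glocks
 * can hash to the same bucket, glock_wake_function() above filters out
 * wakeups meant for a different lock name.
 */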
/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	kfree(gl->gl_lksb.sb_lvbptr);
	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			container_of(gl, struct gfs2_glock_aspace, glock);
		kmem_cache_free(gfs2_glock_aspace_cachep, gla);
	} else {
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}
/**
 * glock_blocked_by_withdraw - determine if we can still use a glock
 * @gl: the glock
 *
 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
 * when we're withdrawn. For example, to maintain metadata integrity, we should
 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
 * the iopen or freeze glock may be safely used because none of their
 * metadata goes through the journal. So in general, we should disallow all
 * glocks that are journaled, and allow all the others. One exception is:
 * we need to allow our active journal to be promoted and demoted so others
 * may recover it and we can reacquire it when they're done.
 */
static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!gfs2_withdrawing_or_withdrawn(sdp))
		return false;
	if (gl->gl_ops->go_flags & GLOF_NONDISK)
		return false;
	if (!sdp->sd_jdesc ||
	    gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
		return false;
	return true;
}

static void __gfs2_glock_free(struct gfs2_glock *gl)
{
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
}
void gfs2_glock_free(struct gfs2_glock *gl) {
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	__gfs2_glock_free(gl);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_kill_wait);
}

void gfs2_glock_free_later(struct gfs2_glock *gl) {
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	spin_lock(&lru_lock);
	list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
	spin_unlock(&lru_lock);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_kill_wait);
}

static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
{
	struct list_head *list = &sdp->sd_dead_glocks;

	while(!list_empty(list)) {
		struct gfs2_glock *gl;

		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		__gfs2_glock_free(gl);
	}
}
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 * Returns: The glock, with its reference count incremented
 */
struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
	return gl;
}

static void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	list_move_tail(&gl->gl_lru, &lru_list);

	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
		set_bit(GLF_LRU, &gl->gl_flags);
		atomic_inc(&lru_count);
	}

	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (test_bit(GLF_LRU, &gl->gl_flags)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}
/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */
static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above.  The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);
	spin_unlock(&gl->gl_lockref.lock);
	gfs2_glock_remove_from_lru(gl);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	if (mapping) {
		truncate_inode_pages_final(mapping);
		if (!gfs2_withdrawing_or_withdrawn(sdp))
			GLOCK_BUG_ON(gl, !mapping_empty(mapping));
	}
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return true;
	GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
	if (gl->gl_state != LM_ST_UNLOCKED) {
		gl->gl_lockref.count--;
		gfs2_glock_add_to_lru(gl);
		spin_unlock(&gl->gl_lockref.lock);
		return true;
	}
	return false;
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */
void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (__gfs2_glock_put_or_lock(gl))
		return;

	__gfs2_glock_put(gl);
}

/**
 * gfs2_glock_put_async - Decrement reference count without sleeping
 * @gl: The glock to put
 *
 * Decrement the reference count on glock immediately unless it is the last
 * reference.  Defer putting the last reference to work queue context.
 */
void gfs2_glock_put_async(struct gfs2_glock *gl)
{
	if (__gfs2_glock_put_or_lock(gl))
		return;

	gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}
/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @current_gh: One of the current holders of @gl
 * @gh: The lock request which we wish to grant
 *
 * With our current compatibility rules, if a glock has one or more active
 * holders (HIF_HOLDER flag set), any of those holders can be passed in as
 * @current_gh; they are all the same as far as compatibility with the new @gh
 * goes.
 *
 * Returns true if it's ok to grant the lock.
 */
static inline bool may_grant(struct gfs2_glock *gl,
			     struct gfs2_holder *current_gh,
			     struct gfs2_holder *gh)
{
	if (current_gh) {
		GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags));

		switch(current_gh->gh_state) {
		case LM_ST_EXCLUSIVE:
			/*
			 * Here we make a special exception to grant holders
			 * who agree to share the EX lock with other holders
			 * who also have the bit set. If the original holder
			 * has the LM_FLAG_NODE_SCOPE bit set, we grant more
			 * holders with the bit set.
			 */
			return gh->gh_state == LM_ST_EXCLUSIVE &&
			       (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
			       (gh->gh_flags & LM_FLAG_NODE_SCOPE);

		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			return gh->gh_state == current_gh->gh_state;

		default:
			return false;
		}
	}

	if (gl->gl_state == gh->gh_state)
		return true;
	if (gh->gh_flags & GL_EXACT)
		return false;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		return gh->gh_state == LM_ST_SHARED ||
		       gh->gh_state == LM_ST_DEFERRED;
	}
	if (gh->gh_flags & LM_FLAG_ANY)
		return gl->gl_state != LM_ST_UNLOCKED;
	return false;
}
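/*
 * In short: with an active holder, only an identical mode is compatible
 * (EX only when both sides carry LM_FLAG_NODE_SCOPE).  Without one, the
 * request is checked against the cached DLM state: an exact match always
 * works, a cached EX lock also covers SH and DF requests (unless GL_EXACT
 * is set), and LM_FLAG_ANY accepts anything except UNLOCKED.
 */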
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
	if (gh->gh_flags & GL_ASYNC) {
		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;

		wake_up(&sdp->sd_async_glock_wait);
	}
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM
 */
static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}
/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
				      gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * gfs2_instantiate - Call the glops instantiate function
 * @gh: The glock holder
 *
 * Returns: 0 if instantiate was successful, or error.
 */
int gfs2_instantiate(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int ret;

again:
	if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
		goto done;

	/*
	 * Since we unlock the lockref lock, we set a flag to indicate
	 * instantiate is in progress.
	 */
	if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
		wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
			    TASK_UNINTERRUPTIBLE);
		/*
		 * Here we just waited for a different instantiate to finish.
		 * But that may not have been successful, as when a process
		 * locks an inode glock _before_ it has an actual inode to
		 * instantiate into. So we check again. This process might
		 * have an inode to instantiate, so might be successful.
		 */
		goto again;
	}

	ret = glops->go_instantiate(gl);
	if (!ret)
		clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
	clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
	if (ret)
		return ret;

done:
	if (glops->go_held)
		return glops->go_held(gh);
	return 0;
}
/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns true on success (i.e., progress was made or there are no waiters).
 */
static bool do_promote(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh, *current_gh;

	current_gh = find_first_holder(gl);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (!may_grant(gl, current_gh, gh)) {
			/*
			 * If we get here, it means we may not grant this
			 * holder for some reason. If this holder is at the
			 * head of the list, it means we have a blocked holder
			 * at the head, so return false.
			 */
			if (list_is_first(&gh->gh_list, &gl->gl_holders))
				return false;
			do_error(gl, 0);
			break;
		}
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		trace_gfs2_promote(gh);
		gfs2_holder_wake(gh);
		if (!current_gh)
			current_gh = gh;
	}
	return true;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * find_last_waiter - find the last gh that's waiting for the glock
 * @gl: the glock
 *
 * This also is a fast way of finding out if there are any waiters.
 */
static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (list_empty(&gl->gl_holders))
		return NULL;
	gh = list_last_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
	return test_bit(HIF_HOLDER, &gh->gh_iflags) ? NULL : gh;
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	set_bit(nr, &gl->gl_flags);
	smp_mb();
	wake_up(&sdp->sd_async_glock_wait);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;

	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && (ret & LM_OUT_CANCELED))
			gfs2_holder_wake(gh);
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				if (do_promote(gl))
					goto out;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
			       gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			int rv;

			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		do_promote(gl);
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
}
static bool is_system_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

	if (gl == m_ip->i_gl)
		return true;
	return false;
}
/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
		     unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
		goto skip_inval;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		/*
		 * If another process is already doing the invalidate, let that
		 * finish first.  The glock state machine will get back to this
		 * holder again later.
		 */
		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
				     &gl->gl_flags))
			return;
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	if (!glops->go_inval && !glops->go_sync)
		goto skip_inval;

	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync) {
		ret = glops->go_sync(gl);
		/* If we had a problem syncing (due to io errors or whatever),
		 * we should not invalidate the metadata or tell dlm to
		 * release the glock to other nodes.
		 */
		if (ret) {
			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
				fs_err(sdp, "Error %d syncing glock\n", ret);
				gfs2_dump_glock(NULL, gl, true);
			}
			spin_lock(&gl->gl_lockref.lock);
			goto skip_inval;
		}
	}
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
		/*
		 * The call to go_sync should have cleared out the ail list.
		 * If there are still items, we have a problem. We ought to
		 * withdraw, but we can't because the withdraw code also uses
		 * glocks. Warn about the error, dump the glock, then fall
		 * through and wait for logd to do the withdraw for us.
		 */
		if ((atomic_read(&gl->gl_ail_count) != 0) &&
		    (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
			gfs2_glock_assert_warn(gl,
					       !atomic_read(&gl->gl_ail_count));
			gfs2_dump_glock(NULL, gl, true);
		}
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
	}
	spin_lock(&gl->gl_lockref.lock);

skip_inval:
	gl->gl_lockref.count++;
	/*
	 * Check for an error encountered since we called go_sync and go_inval.
	 * If so, we can't withdraw from the glock code because the withdraw
	 * code itself uses glocks (see function signal_our_withdraw) to
	 * change the mount to read-only. Most importantly, we must not call
	 * dlm to unlock the glock until the journal is in a known good state
	 * (after journal replay) otherwise other nodes may use the object
	 * (rgrp or dinode) and then later, journal replay will corrupt the
	 * file system. The best we can do here is wait for the logd daemon
	 * to see sd_log_error and withdraw, and in the meantime, requeue the
	 * work for later.
	 *
	 * We make a special exception for some system glocks, such as the
	 * system statfs inode glock, which needs to be granted before the
	 * gfs2_quotad daemon can exit, and that exit needs to finish before
	 * we can unmount the withdrawn file system.
	 *
	 * However, if we're just unlocking the lock (say, for unmount, when
	 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
	 * then it's okay to tell dlm to unlock it.
	 */
	if (unlikely(sdp->sd_log_error) && !gfs2_withdrawing_or_withdrawn(sdp))
		gfs2_withdraw_delayed(sdp);
	if (glock_blocked_by_withdraw(gl) &&
	    (target != LM_ST_UNLOCKED ||
	     test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
		if (!is_system_glock(gl)) {
			request_demote(gl, LM_ST_UNLOCKED, 0, false);
			/*
			 * Ordinarily, we would call dlm and its callback would call
			 * finish_xmote, which would call state_change() to the new state.
			 * Since we withdrew, we won't call dlm, so call state_change
			 * manually, but to the UNLOCKED state we desire.
			 */
			state_change(gl, LM_ST_UNLOCKED);
			/*
			 * We skip telling dlm to do the locking, so we won't get a
			 * reply that would otherwise clear GLF_LOCK. So we clear it here.
			 */
			clear_bit(GLF_LOCK, &gl->gl_flags);
			clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
			return;
		} else {
			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		}
	}

	if (ls->ls_ops->lm_lock) {
		spin_unlock(&gl->gl_lockref.lock);
		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
		spin_lock(&gl->gl_lockref.lock);

		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
			/*
			 * The lockspace has been released and the lock has
			 * been unlocked implicitly.
			 */
		} else if (ret) {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			target = gl->gl_state | LM_OUT_ERROR;
		} else {
			/* The operation will be completed asynchronously. */
			return;
		}
	}

	/* Complete the operation now. */
	finish_xmote(gl, target);
	gfs2_glock_queue_work(gl, 0);
}
/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */
static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;

	if (test_bit(GLF_LOCK, &gl->gl_flags))
		return;
	set_bit(GLF_LOCK, &gl->gl_flags);

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		if (do_promote(gl))
			goto out_unlock;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
}
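/*
 * Overview of how the pieces above fit together: lock requests enter
 * through gfs2_glock_nq() -> add_to_queue() -> run_queue(); when a state
 * change is needed, run_queue() calls do_xmote(), which hands the request
 * to the DLM.  The DLM reply arrives via gfs2_glock_complete(), which
 * schedules glock_work_func(), which in turn calls finish_xmote() to
 * grant waiting holders via do_promote().
 */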
/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
void glock_set_object(struct gfs2_glock *gl, void *object)
{
	void *prev_object;

	spin_lock(&gl->gl_lockref.lock);
	prev_object = gl->gl_object;
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) {
		pr_warn("glock=%u/%llx\n",
			gl->gl_name.ln_type,
			(unsigned long long)gl->gl_name.ln_number);
		gfs2_dump_glock(NULL, gl, true);
	}
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: object the glock currently points at
 */
void glock_clear_object(struct gfs2_glock *gl, void *object)
{
	void *prev_object;

	spin_lock(&gl->gl_lockref.lock);
	prev_object = gl->gl_object;
	gl->gl_object = NULL;
	spin_unlock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) {
		pr_warn("glock=%u/%llx\n",
			gl->gl_name.ln_type,
			(unsigned long long)gl->gl_name.ln_number);
		gfs2_dump_glock(NULL, gl, true);
	}
}
void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
{
	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;

	if (ri->ri_magic == 0)
		ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
	if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
		ri->ri_generation_deleted = cpu_to_be64(generation);
}

bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
{
	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;

	if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
		return false;
	return generation <= be64_to_cpu(ri->ri_generation_deleted);
}
static void gfs2_glock_poke(struct gfs2_glock *gl)
{
	int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
	struct gfs2_holder gh;
	int error;

	__gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_);
	error = gfs2_glock_nq(&gh);
	if (!error)
		gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
}
static void gfs2_try_evict(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	/*
	 * If there is contention on the iopen glock and we have an inode, try
	 * to grab and release the inode so that it can be evicted.  The
	 * GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
	 * should not be deleted locally.  This will allow the remote node to
	 * go ahead and delete the inode without us having to do it, which will
	 * avoid rgrp glock thrashing.
	 *
	 * The remote node is likely still holding the corresponding inode
	 * glock, so it will run before we get to verify that the delete has
	 * happened below.  (Verification is triggered by the call to
	 * gfs2_queue_verify_delete() in gfs2_evict_inode().)
	 */
	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip && !igrab(&ip->i_inode))
		ip = NULL;
	spin_unlock(&gl->gl_lockref.lock);
	if (ip) {
		wait_on_inode(&ip->i_inode);
		if (is_bad_inode(&ip->i_inode)) {
			iput(&ip->i_inode);
			ip = NULL;
		}
	}
	if (ip) {
		set_bit(GIF_DEFER_DELETE, &ip->i_flags);
		d_prune_aliases(&ip->i_inode);
		iput(&ip->i_inode);

		/* If the inode was evicted, gl->gl_object will now be NULL. */
		spin_lock(&gl->gl_lockref.lock);
		ip = gl->gl_object;
		if (ip) {
			clear_bit(GIF_DEFER_DELETE, &ip->i_flags);
			if (!igrab(&ip->i_inode))
				ip = NULL;
		}
		spin_unlock(&gl->gl_lockref.lock);
		if (ip) {
			gfs2_glock_poke(ip->i_gl);
			iput(&ip->i_inode);
		}
	}
}
bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
		return false;
	return !mod_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, 0);
}

bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned long delay;

	if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
		return false;
	delay = later ? HZ + get_random_long() % (HZ * 9) : 0;
	return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay);
}
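/*
 * For the "later" case above, the requeue delay is one second plus a
 * random value below nine seconds, i.e. uniformly spread over
 * [HZ, 10 * HZ).  The randomization keeps the verification work from
 * being retried in lockstep, e.g. by several cluster nodes at once.
 */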
static void delete_work_func(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);

	if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
		gfs2_try_evict(gl);

	if (verify_delete) {
		u64 no_addr = gl->gl_name.ln_number;
		struct inode *inode;

		inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
					    GFS2_BLKST_UNLINKED);
		if (IS_ERR(inode)) {
			if (PTR_ERR(inode) == -EAGAIN &&
			    !test_bit(SDF_KILL, &sdp->sd_flags) &&
			    gfs2_queue_verify_delete(gl, true))
				return;
		} else {
			d_prune_aliases(inode);
			iput(inode);
		}
	}

	gfs2_glock_put(gl);
}
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) {
		clear_bit(GLF_HAVE_REPLY, &gl->gl_flags);
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		if (gl->gl_name.ln_type == LM_TYPE_INODE) {
			unsigned long holdtime, now = jiffies;

			holdtime = gl->gl_tchange + gl->gl_hold_time;
			if (time_before(now, holdtime))
				delay = holdtime - now;
		}

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			gfs2_set_demote(GLF_DEMOTE, gl);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		gfs2_glock_queue_work(gl, delay);
	}

	/* Drop the remaining glock references manually. */
	GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs);
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		if (gl->gl_state == LM_ST_UNLOCKED) {
			__gfs2_glock_put(gl);
			return;
		}
		gfs2_glock_add_to_lru(gl);
	}
	spin_unlock(&gl->gl_lockref.lock);
}
static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	if (gl)
		gfs2_glock_remove_from_lru(gl);
	return gl;
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;

	gl = find_insert_glock(&name, NULL);
	if (gl)
		goto found;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
		if (!gla)
			return -ENOMEM;
		gl = &gla->glock;
	} else {
		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
		if (!gl)
			return -ENOMEM;
	}
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_ops = glops;

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			gfs2_glock_dealloc(&gl->gl_rcu);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = BIT(GLF_INITIAL);
	if (glops->go_instantiate)
		gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED);
	gl->gl_name = name;
	lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
		INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_mapping->host;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->i_private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (tmp) {
		gfs2_glock_dealloc(&gl->gl_rcu);
		if (atomic_dec_and_test(&sdp->sd_glock_disposal))
			wake_up(&sdp->sd_kill_wait);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);
		gl = tmp;
	}

found:
	*glp = gl;
	return 0;
}
/**
 * __gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 * @ip: the ip address of the caller
 */
void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
			struct gfs2_holder *gh, unsigned long ip)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gfs2_glock_hold(gl);
	gh->gh_ip = ip;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */
void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}
static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
					unsigned long start_time)
{
	/* Have we waited longer than a second? */
	if (time_after(jiffies, start_time + HZ)) {
		/* Lengthen the minimum hold time. */
		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
				       GL_GLOCK_MAX_HOLD);
	}
}
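/*
 * Together with state_change() above, this implements a simple adaptive
 * hold time: an acquisition that took more than a second lengthens
 * gl_hold_time by GL_GLOCK_HOLD_INCR, and a state change away from the
 * requested target shortens it by GL_GLOCK_HOLD_DECR, clamped between
 * GL_GLOCK_MIN_HOLD and GL_GLOCK_MAX_HOLD (see glock.h).
 */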
/**
 * gfs2_glock_holder_ready - holder is ready and its error code can be collected
 * @gh: the glock holder
 *
 * Called when a glock holder no longer needs to be waited for because it is
 * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
 * failed (gh_error != 0).
 */
int gfs2_glock_holder_ready(struct gfs2_holder *gh)
{
	if (gh->gh_error || (gh->gh_flags & GL_SKIP))
		return gh->gh_error;
	gh->gh_error = gfs2_instantiate(gh);
	if (gh->gh_error)
		gfs2_glock_dq(gh);
	return gh->gh_error;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */
int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long start_time = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
	return gfs2_glock_holder_ready(gh);
}
static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int i;

	for (i = 0; i < num_gh; i++)
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
			return 1;
	return 0;
}
/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 *          -ESTALE if the request timed out, meaning all glocks were released,
 *          and the caller should retry the operation.
 */
int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
	int i, ret = 0, timeout = 0;
	unsigned long start_time = jiffies;

	might_sleep();
	/*
	 * Total up the (minimum hold time * 2) of all glocks and use that to
	 * determine the max amount of time we should wait.
	 */
	for (i = 0; i < num_gh; i++)
		timeout += ghs[i].gh_gl->gl_hold_time << 1;

	if (!wait_event_timeout(sdp->sd_async_glock_wait,
				!glocks_pending(num_gh, ghs), timeout)) {
		ret = -ESTALE; /* request timed out. */
		goto out;
	}

	for (i = 0; i < num_gh; i++) {
		struct gfs2_holder *gh = &ghs[i];
		int ret2;

		if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
			gfs2_glock_update_hold_time(gh->gh_gl,
						    start_time);
		}
		ret2 = gfs2_glock_holder_ready(gh);
		if (!ret)
			ret = ret2;
	}

out:
	if (ret) {
		for (i = 0; i < num_gh; i++) {
			struct gfs2_holder *gh = &ghs[i];

			gfs2_glock_dq(gh);
		}
	}
	return ret;
}
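/*
 * Typical use of the async interface (illustrative sketch only): queue
 * several requests with GL_ASYNC set, then collect them in a single wait.
 * Assuming two already initialized holders gh[0] and gh[1]:
 *
 *	gh[0].gh_flags |= GL_ASYNC;
 *	gh[1].gh_flags |= GL_ASYNC;
 *	gfs2_glock_nq(&gh[0]);
 *	gfs2_glock_nq(&gh[1]);
 *	error = gfs2_glock_async_wait(2, gh);
 *	if (error == -ESTALE)
 *		... all glocks were released; retry the operation ...
 */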
/**
 * request_demote - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately; otherwise pending demote
 * @remote: true if this came from a different cluster node
 *
 * There are only two requests that we are going to see in actual
 * practise: LM_ST_SHARED and LM_ST_UNLOCKED
 */
static void request_demote(struct gfs2_glock *gl, unsigned int state,
			   unsigned long delay, bool remote)
{
	gfs2_set_demote(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, gl);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
{
	if (!(gh->gh_flags & GL_NOPID))
		return true;
	if (gh->gh_state == LM_ST_UNLOCKED)
		return true;
	return false;
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		GLOCK_BUG_ON(gl, true);

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags)) {
			struct gfs2_holder *current_gh;

			current_gh = find_first_holder(gl);
			try_futile = !may_grant(gl, current_gh, gh);
		}
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (likely(gh2->gh_owner_pid != gh->gh_owner_pid))
			continue;
		if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK)
			continue;
		if (!pid_is_meaningful(gh2))
			continue;
		goto trap_recursive;
	}
	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
	}
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
	spin_unlock(&gl->gl_lockref.lock);
	if (sdp->sd_lockstruct.ls_ops->lm_cancel)
		sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
	spin_lock(&gl->gl_lockref.lock);
	return;

trap_recursive:
	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl, true);
	BUG();
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */
int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int error;

	if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
		return -EIO;

	if (gh->gh_flags & GL_NOBLOCK) {
		struct gfs2_holder *current_gh;

		error = -ECHILD;
		spin_lock(&gl->gl_lockref.lock);
		if (find_last_waiter(gl))
			goto unlock;
		current_gh = find_first_holder(gl);
		if (!may_grant(gl, current_gh, gh))
			goto unlock;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		trace_gfs2_promote(gh);
		error = 0;
unlock:
		spin_unlock(&gl->gl_lockref.lock);
		return error;
	}

	gh->gh_error = 0;
	spin_lock(&gl->gl_lockref.lock);
	add_to_queue(gh);
	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
		     test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) {
		set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
		gl->gl_lockref.count++;
		gfs2_glock_queue_work(gl, 0);
	}
	run_queue(gl, 1);
	spin_unlock(&gl->gl_lockref.lock);

	error = 0;
	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
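/*
 * Example of the synchronous fast path (illustrative sketch only):
 * gfs2_glock_nq_init() in glock.h wraps gfs2_holder_init() and
 * gfs2_glock_nq(), and gfs2_glock_dq_uninit() below undoes both:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... critical section under the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 */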
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */
int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

static void __gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	unsigned delay = 0;
	int fast_path = 0;

	/*
	 * This holder should not be cached, so mark it for demote.
	 * Note: this should be done before the glock_needs_demote
	 * check below.
	 */
	if (gh->gh_flags & GL_NOCACHE)
		request_demote(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	clear_bit(HIF_HOLDER, &gh->gh_iflags);
	trace_gfs2_glock_queue(gh, 0);

	/*
	 * If there hasn't been a demote request we are done.
	 * (Let the remaining holders, if any, keep holding it.)
	 */
	if (!glock_needs_demote(gl)) {
		if (list_empty(&gl->gl_holders))
			fast_path = 1;
	}

	if (unlikely(!fast_path)) {
		gl->gl_lockref.count++;
		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
		    gl->gl_name.ln_type == LM_TYPE_INODE)
			delay = gl->gl_hold_time;
		gfs2_glock_queue_work(gl, delay);
	}
}
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */
void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	spin_lock(&gl->gl_lockref.lock);
	if (!gfs2_holder_queued(gh)) {
		/*
		 * May have already been dequeued because the locking request
		 * was GL_ASYNC and it has failed in the meantime.
		 */
		goto out;
	}

	if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
	    !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		spin_unlock(&gl->gl_lockref.lock);
		gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
		wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
		spin_lock(&gl->gl_lockref.lock);
	}

	/*
	 * If we're in the process of file system withdraw, we cannot just
	 * dequeue any glocks until our journal is recovered, lest we introduce
	 * file system corruption. We need two exceptions to this rule: We need
	 * to allow unlocking of nondisk glocks and the glock for our own
	 * journal that needs recovery.
	 */
	if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
	    glock_blocked_by_withdraw(gl) &&
	    gh->gh_gl != sdp->sd_jinode_gl) {
		sdp->sd_glock_dqs_held++;
		spin_unlock(&gl->gl_lockref.lock);
		might_sleep();
		wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		spin_lock(&gl->gl_lockref.lock);
	}

	__gfs2_glock_dq(gh);
out:
	spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */
void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */
static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the holder structure to pass back
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
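/*
 * Sorting by lock number gives every caller the same global acquisition
 * order, which is what makes nq_m_sync() deadlock free: two tasks that
 * both need glocks 5 and 9 will both take 5 first, so neither can hold 9
 * while waiting for 5.
 */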
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
				    GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;

	gfs2_glock_hold(gl);
	spin_lock(&gl->gl_lockref.lock);
	if (!list_empty(&gl->gl_holders) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		unsigned long now = jiffies;
		unsigned long holdtime;

		holdtime = gl->gl_tchange + gl->gl_hold_time;

		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}
	request_demote(gl, state, delay, true);
	gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}
/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */
static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags);
			spin_unlock(&gl->gl_lockref.lock);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}
static int glock_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

static bool can_free_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	return !test_bit(GLF_LOCK, &gl->gl_flags) &&
	       !gl->gl_lockref.count &&
	       (!test_bit(GLF_LFLUSH, &gl->gl_flags) ||
		test_bit(SDF_KILL, &sdp->sd_flags));
}
/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */
static unsigned long gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;
	unsigned long freed = 0;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
			list_move(&gl->gl_lru, &lru_list);
			continue;
		}
		if (!can_free_glock(gl)) {
			spin_unlock(&gl->gl_lockref.lock);
			goto add_back_to_lru;
		}
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
		freed++;
		gl->gl_lockref.count++;
		if (gl->gl_state != LM_ST_UNLOCKED)
			request_demote(gl, LM_ST_UNLOCKED, 0, false);
		gfs2_glock_queue_work(gl, 0);
		spin_unlock(&gl->gl_lockref.lock);
		cond_resched_lock(&lru_lock);
	}
	return freed;
}
/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */
static unsigned long gfs2_scan_glock_lru(unsigned long nr)
{
	struct gfs2_glock *gl, *next;
	LIST_HEAD(dispose);
	unsigned long freed = 0;

	spin_lock(&lru_lock);
	list_for_each_entry_safe(gl, next, &lru_list, gl_lru) {
		if (!nr--)
			break;
		if (can_free_glock(gl))
			list_move(&gl->gl_lru, &dispose);
	}
	if (!list_empty(&dispose))
		freed = gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker *glock_shrinker;
/**
 * glock_hash_walk - Call a function for glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * object.  So the user must ensure that the function can cope with
 * that.
 */
static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&gl_hash_table, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
			if (gl->gl_name.ln_sbd == sdp)
				examiner(gl);
		}

		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

	rhashtable_walk_exit(&iter);
}

void gfs2_cancel_delete_work(struct gfs2_glock *gl)
{
	clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags);
	clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
	if (cancel_delayed_work(&gl->gl_delete))
		gfs2_glock_put(gl);
}
static void flush_delete_work(struct gfs2_glock *gl)
{
	if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
		struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

		if (cancel_delayed_work(&gl->gl_delete)) {
			queue_delayed_work(sdp->sd_delete_wq,
					   &gl->gl_delete, 0);
		}
	}
}

void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
{
	glock_hash_walk(flush_delete_work, sdp);
	flush_workqueue(sdp->sd_delete_wq);
}
/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */
static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))
		return;
	if (!lockref_get_not_dead(&gl->gl_lockref))
		return;

	gfs2_glock_remove_from_lru(gl);
	spin_lock(&gl->gl_lockref.lock);
	set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
	gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */
static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_lockref.lock);
	if (!__lockref_is_dead(&gl->gl_lockref)) {
		gl->gl_lockref.count++;
		if (gl->gl_state != LM_ST_UNLOCKED)
			request_demote(gl, LM_ST_UNLOCKED, 0, false);
		gfs2_glock_queue_work(gl, 0);
	}
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */
void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}
static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
	spin_lock(&gl->gl_lockref.lock);
	gfs2_dump_glock(seq, gl, fsid);
	spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl, true);
}

static void withdraw_dq(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_lockref.lock);
	if (!__lockref_is_dead(&gl->gl_lockref) &&
	    glock_blocked_by_withdraw(gl))
		do_error(gl, LM_OUT_ERROR); /* remove pending waiters */
	spin_unlock(&gl->gl_lockref.lock);
}

void gfs2_gl_dq_holders(struct gfs2_sbd *sdp)
{
	glock_hash_walk(withdraw_dq, sdp);
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned long start = jiffies;
	bool timed_out = false;

	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(sdp->sd_glock_wq);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(sdp->sd_glock_wq);

	while (!timed_out) {
		wait_event_timeout(sdp->sd_kill_wait,
				   !atomic_read(&sdp->sd_glock_disposal),
				   HZ * 60);
		if (!atomic_read(&sdp->sd_glock_disposal))
			break;
		timed_out = time_after(jiffies, start + (HZ * 600));
		fs_warn(sdp, "%u glocks left after %u seconds%s\n",
			atomic_read(&sdp->sd_glock_disposal),
			jiffies_to_msecs(jiffies - start) / 1000,
			timed_out ? ":" : "; still waiting");
	}

	gfs2_lm_unmount(sdp);
	gfs2_free_dead_glocks(sdp);
	glock_hash_walk(dump_glock_func, sdp);
	destroy_workqueue(sdp->sd_glock_wq);
	sdp->sd_glock_wq = NULL;
}
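/*
 * The wait loop above re-checks every 60 seconds, warning each time how
 * many glocks are still awaiting disposal.  After 600 seconds total it
 * stops waiting and proceeds anyway; whatever glocks then remain are
 * dumped via dump_glock_func() as a debugging aid.
 */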
static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}
static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
	char *p = buf;

	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_NODE_SCOPE)
		*p++ = 'n';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (flags & GL_SKIP)
		*p++ = 's';
	*p = 0;
	return buf;
}
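/*
 * For example (illustrative): a holder queued asynchronously that is
 * still waiting for the lock shows flags "aW", while a granted holder
 * shows "H".
 */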
/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 * @fs_id_buf: pointer to file system id (if requested)
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
			const char *fs_id_buf)
{
	const char *comm = "(none)";
	pid_t owner_pid = 0;
	char flags_buf[32];

	rcu_read_lock();
	if (pid_is_meaningful(gh)) {
		struct task_struct *gh_owner;

		comm = "(ended)";
		owner_pid = pid_nr(gh->gh_owner_pid);
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
		if (gh_owner)
			comm = gh_owner->comm;
	}
	gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       fs_id_buf, state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip);
	rcu_read_unlock();
}
static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_HAVE_REPLY, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_HAVE_FROZEN_REPLY, gflags))
		*p++ = 'F';
	if (!list_empty(&gl->gl_holders))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	if (test_bit(GLF_UNLOCKED, gflags))
		*p++ = 'x';
	if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
		*p++ = 'n';
	if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
		*p++ = 'N';
	if (test_bit(GLF_TRY_TO_EVICT, gflags))
		*p++ = 'e';
	if (test_bit(GLF_VERIFY_DELETE, gflags))
		*p++ = 'E';
	*p = 0;
	return buf;
}
/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings, which
 * are enclosed in [] so that it is possible to see if they are composed
 * of spaces, for example. The fields are n = number (id of the object),
 * f = flags, t = type, s = state, r = refcount, e = error, p = pid.
 */
void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
	unsigned long nrpages = 0;

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct address_space *mapping = gfs2_glock2aspace(gl);

		nrpages = mapping->nrpages;
	}
	memset(fs_id_buf, 0, sizeof(fs_id_buf));
	if (fsid && sdp) /* safety precaution */
		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
		       "v:%d r:%d m:%ld p:%lu\n",
		       fs_id_buf, state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);

	list_for_each_entry(gh, &gl->gl_holders, gh_list)
		dump_holder(seq, gh, fs_id_buf);

	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		glops->go_dump(seq, gl, fs_id_buf);
}
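/*
 * Sample output (illustrative values only):
 *
 *   G: s:EX n:2/10034 f:yIq t:EX d:UN/0 a:0 v:0 r:3 m:10 p:1
 *    H: s:EX f:H e:0 p:1329 [sync] gfs2_sync_fs+0x30/0x60 [gfs2]
 *
 * With fsid == true, every line is prefixed with "fsid=<fsname>: " so
 * that output from several file systems can be told apart in the log.
 */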
static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}
static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
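/*
 * Each sbstats sequence position encodes a (glock type, stat) pair:
 * the low three bits select one of the eight stats in gfs2_stype[] and
 * the remaining bits select the row in gfs2_gltype[].  That is,
 * pos = (index << 3) | subindex, decoded below in
 * gfs2_sbstats_seq_show() as index = pos >> 3 and subindex = pos & 0x07.
 * Row 0 ("type") is the per-cpu header line.
 */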
static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_sbd *sdp = seq->private;
	loff_t pos = *(loff_t *)iter_ptr;
	unsigned index = pos >> 3;
	unsigned subindex = pos & 0x07;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

		if (index == 0)
			seq_printf(seq, " %15u", i);
		else
			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
				   lkstats[index - 1].stats[subindex]);
	}
	seq_putc(seq, '\n');
	return 0;
}
int __init gfs2_glock_init(void)
{
	int i, ret;

	ret = rhashtable_init(&gl_hash_table, &ht_parms);
	if (ret < 0)
		return ret;

	glock_shrinker = shrinker_alloc(0, "gfs2-glock");
	if (!glock_shrinker) {
		rhashtable_destroy(&gl_hash_table);
		return -ENOMEM;
	}

	glock_shrinker->count_objects = gfs2_glock_shrink_count;
	glock_shrinker->scan_objects = gfs2_glock_shrink_scan;

	shrinker_register(glock_shrinker);

	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(glock_wait_table + i);

	return 0;
}
void gfs2_glock_exit(void)
{
	shrinker_free(glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
}
static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{
	struct gfs2_glock *gl = gi->gl;

	if (gl) {
		if (n == 0)
			return;
		gfs2_glock_put_async(gl);
	}
	for (;;) {
		gl = rhashtable_walk_next(&gi->hti);
		if (IS_ERR_OR_NULL(gl)) {
			if (gl == ERR_PTR(-EAGAIN)) {
				n = 1;
				continue;
			}
			gl = NULL;
			break;
		}
		if (gl->gl_name.ln_sbd != gi->sdp)
			continue;
		if (n <= 1) {
			if (!lockref_get_not_dead(&gl->gl_lockref))
				continue;
			break;
		} else {
			if (__lockref_is_dead(&gl->gl_lockref))
				continue;
			n--;
		}
	}
	gi->gl = gl;
}
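/*
 * The reference on the previous glock is dropped with
 * gfs2_glock_put_async() rather than gfs2_glock_put(): this function
 * runs between rhashtable_walk_start() and rhashtable_walk_stop(),
 * i.e. under the RCU read lock, where the potentially sleeping
 * final-put path must not run synchronously.
 */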
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n;

	/*
	 * We can either stay where we are, skip to the next hash table
	 * entry, or start from the beginning.
	 */
	if (*pos < gi->last_pos) {
		rhashtable_walk_exit(&gi->hti);
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
		n = *pos + 1;
	} else {
		n = *pos - gi->last_pos;
	}

	rhashtable_walk_start(&gi->hti);

	gfs2_glock_iter_next(gi, n);
	gi->last_pos = *pos;
	return gi->gl;
}
static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	gfs2_glock_iter_next(gi, 1);
	return gi->gl;
}
static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{
	struct gfs2_glock_iter *gi = seq->private;

	rhashtable_walk_stop(&gi->hti);
}
static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	dump_glock(seq, iter_ptr, false);
	return 0;
}
static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	preempt_disable();
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	(*pos)++;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}
static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_sops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};
#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{
	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;

		gi->gl = NULL;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
		gi->last_pos = 0;
		/*
		 * Initially, we are "before" the first hash table entry; the
		 * first call to rhashtable_walk_next gets us the first entry.
		 */
		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
	}
	return ret;
}
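/*
 * Preallocating a large seq_file buffer keeps a single read() from
 * restarting the expensive glock dump many times as the default buffer
 * repeatedly overflows.  The allocation is best-effort (__GFP_NOWARN):
 * if it fails, seq_file falls back to its normal page-sized buffer and
 * the dump still works, just more slowly.
 */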
static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}
static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		gfs2_glock_put(gi->gl);
	rhashtable_walk_exit(&gi->hti);
	return seq_release_private(inode, file);
}
static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}
static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};
struct gfs2_glockfd_iter {
	struct super_block *sb;
	unsigned int tgid;
	struct task_struct *task;
	unsigned int fd;
	struct file *file;
};
static struct task_struct *gfs2_glockfd_next_task(struct gfs2_glockfd_iter *i)
{
	struct pid_namespace *ns = task_active_pid_ns(current);
	struct pid *pid;

	if (i->task)
		put_task_struct(i->task);

	rcu_read_lock();
retry:
	i->task = NULL;
	pid = find_ge_pid(i->tgid, ns);
	if (pid) {
		i->tgid = pid_nr_ns(pid, ns);
		i->task = pid_task(pid, PIDTYPE_TGID);
		if (!i->task) {
			i->tgid++;
			goto retry;
		}
		get_task_struct(i->task);
	}
	rcu_read_unlock();
	return i->task;
}
static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
{
	for (;; i->fd++) {
		i->file = fget_task_next(i->task, &i->fd);
		if (!i->file) {
			i->fd = 0;
			break;
		}

		if (file_inode(i->file)->i_sb == i->sb)
			break;

		fput(i->file);
	}
	return i->file;
}
static void *gfs2_glockfd_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glockfd_iter *i = seq->private;

	if (*pos)
		return NULL;
	while (gfs2_glockfd_next_task(i)) {
		if (gfs2_glockfd_next_file(i))
			return i;
		i->tgid++;
	}
	return NULL;
}
static void *gfs2_glockfd_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	struct gfs2_glockfd_iter *i = seq->private;

	(*pos)++;
	i->fd++;
	do {
		if (gfs2_glockfd_next_file(i))
			return i;
		i->tgid++;
	} while (gfs2_glockfd_next_task(i));
	return NULL;
}
static void gfs2_glockfd_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glockfd_iter *i = seq->private;

	if (i->file)
		fput(i->file);
	if (i->task)
		put_task_struct(i->task);
}
static void gfs2_glockfd_seq_show_flock(struct seq_file *seq,
					struct gfs2_glockfd_iter *i)
{
	struct gfs2_file *fp = i->file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct lm_lockname gl_name = { .ln_type = LM_TYPE_RESERVED };

	if (!READ_ONCE(fl_gh->gh_gl))
		return;

	spin_lock(&i->file->f_lock);
	if (gfs2_holder_initialized(fl_gh))
		gl_name = fl_gh->gh_gl->gl_name;
	spin_unlock(&i->file->f_lock);

	if (gl_name.ln_type != LM_TYPE_RESERVED) {
		seq_printf(seq, "%d %u %u/%llx\n",
			   i->tgid, i->fd, gl_name.ln_type,
			   (unsigned long long)gl_name.ln_number);
	}
}
static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glockfd_iter *i = seq->private;
	struct inode *inode = file_inode(i->file);
	struct gfs2_glock *gl;

	inode_lock_shared(inode);
	gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
	if (gl) {
		seq_printf(seq, "%d %u %u/%llx\n",
			   i->tgid, i->fd, gl->gl_name.ln_type,
			   (unsigned long long)gl->gl_name.ln_number);
	}
	gfs2_glockfd_seq_show_flock(seq, i);
	inode_unlock_shared(inode);
	return 0;
}
static const struct seq_operations gfs2_glockfd_seq_ops = {
	.start = gfs2_glockfd_seq_start,
	.next  = gfs2_glockfd_seq_next,
	.stop  = gfs2_glockfd_seq_stop,
	.show  = gfs2_glockfd_seq_show,
};
static int gfs2_glockfd_open(struct inode *inode, struct file *file)
{
	struct gfs2_glockfd_iter *i;
	struct gfs2_sbd *sdp = inode->i_private;

	i = __seq_open_private(file, &gfs2_glockfd_seq_ops,
			       sizeof(struct gfs2_glockfd_iter));
	if (!i)
		return -ENOMEM;
	i->sb = sdp->sd_vfs;
	return 0;
}
static const struct file_operations gfs2_glockfd_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glockfd_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);

	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glocks_fops);

	debugfs_create_file("glockfd", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glockfd_fops);

	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_glstats_fops);

	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
			    &gfs2_sbstats_fops);
}
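/*
 * With debugfs mounted in the usual place, the files created above for
 * a file system whose lock table is named e.g. "mycluster:myfs"
 * (illustrative) appear as:
 *
 *   /sys/kernel/debug/gfs2/mycluster:myfs/glocks
 *   /sys/kernel/debug/gfs2/mycluster:myfs/glockfd
 *   /sys/kernel/debug/gfs2/mycluster:myfs/glstats
 *   /sys/kernel/debug/gfs2/mycluster:myfs/sbstats
 */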
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	debugfs_remove_recursive(sdp->debugfs_dir);
	sdp->debugfs_dir = NULL;
}

void gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}