/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmglue.c
 *
 * Code which implements an OCFS2 specific interface to our DLM.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/quotaops.h>
#include <linux/sched/signal.h>

#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_lockingver.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "stackglue.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "refcounttree.h"
#include "acl.h"

#include "buffer_head_io.h"
struct ocfs2_mask_waiter {
	struct list_head	mw_item;
	int			mw_status;
	struct completion	mw_complete;
	unsigned long		mw_mask;
	unsigned long		mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
	ktime_t			mw_lock_start;
#endif
};
static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock().
 */
enum ocfs2_unblock_action {
	UNBLOCK_CONTINUE	= 0, /* Continue downconvert */
	UNBLOCK_CONTINUE_POST	= 1, /* Continue downconvert, fire
				      * ->post_unlock callback */
	UNBLOCK_STOP_POST	= 2, /* Do not downconvert, fire
				      * ->post_unlock() callback */
};

struct ocfs2_unblock_ctl {
	int requeue;
	enum ocfs2_unblock_action unblock_action;
};
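
/*
 * Illustrative sketch only (not part of the original file, hence not
 * compiled): the shape of a ->downconvert_worker() using the return
 * values above.  The function name and the EX-specific condition are
 * hypothetical.
 */
#if 0
static int example_convert_worker(struct ocfs2_lock_res *lockres,
				  int blocking)
{
	/* A blocking EX request means we lose the lock entirely; ask
	 * the downconvert thread to fire ->post_unlock() so private
	 * state can be torn down once the lockres is unreferenced. */
	if (blocking == DLM_LOCK_EX)
		return UNBLOCK_CONTINUE_POST;

	/* Otherwise just let the downconvert proceed. */
	return UNBLOCK_CONTINUE;
}
#endif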
/* Lockdep class keys */
struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking);

static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking);

static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres);

static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
					    int new_level);
static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
					 int blocking);
#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)

/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
				     const char *function,
				     unsigned int line,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	mlog(level, "LVB information for %s (called from %s:%u):\n",
	     lockres->l_name, function, line);
	mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
	     lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
	     be32_to_cpu(lvb->lvb_igeneration));
	mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
	     (unsigned long long)be64_to_cpu(lvb->lvb_isize),
	     be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
	     be16_to_cpu(lvb->lvb_imode));
	mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
	     "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
	     (long long)be64_to_cpu(lvb->lvb_iatime_packed),
	     (long long)be64_to_cpu(lvb->lvb_ictime_packed),
	     (long long)be64_to_cpu(lvb->lvb_imtime_packed),
	     be32_to_cpu(lvb->lvb_iattr));
}
/*
 * OCFS2 Lock Resource Operations
 *
 * These fine tune the behavior of the generic dlmglue locking infrastructure.
 *
 * The most basic of lock types can point ->l_priv to their respective
 * struct ocfs2_super and allow the default actions to manage things.
 *
 * Right now, each lock type also needs to implement an init function,
 * and trivial lock/unlock wrappers.  ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
 */
struct ocfs2_lock_res_ops {
	/*
	 * Translate an ocfs2_lock_res * into an ocfs2_super *.  Define
	 * this callback if ->l_priv is not an ocfs2_super pointer.
	 */
	struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);

	/*
	 * Optionally called in the downconvert thread after a
	 * successful downconvert.  The lockres will not be referenced
	 * after this callback is called, so it is safe to free
	 * memory, etc.
	 *
	 * The exact semantics of when this is called are controlled
	 * by ->downconvert_worker().
	 */
	void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);

	/*
	 * Allow a lock type to add checks to determine whether it is
	 * safe to downconvert a lock.  Return 0 to re-queue the
	 * downconvert at a later time, nonzero to continue.
	 *
	 * For most locks, the default checks that there are no
	 * incompatible holders are sufficient.
	 *
	 * Called with the lockres spinlock held.
	 */
	int (*check_downconvert)(struct ocfs2_lock_res *, int);

	/*
	 * Allows a lock type to populate the lock value block.  This
	 * is called on downconvert, and when we drop a lock.
	 *
	 * Locks that want to use this should set LOCK_TYPE_USES_LVB
	 * in the flags field.
	 *
	 * Called with the lockres spinlock held.
	 */
	void (*set_lvb)(struct ocfs2_lock_res *);

	/*
	 * Called from the downconvert thread when it is determined
	 * that a lock will be downconverted.  This is called without
	 * any locks held so the function can do work that might
	 * schedule (syncing out data, etc).
	 *
	 * This should return any one of the ocfs2_unblock_action
	 * values, depending on what it wants the thread to do.
	 */
	int (*downconvert_worker)(struct ocfs2_lock_res *, int);

	/*
	 * LOCK_TYPE_* flags which describe the specific requirements
	 * of a lock type.  Descriptions of each individual flag follow.
	 */
	int flags;
};

/*
 * Some locks want to "refresh" potentially stale data when a
 * meaningful (PRMODE or EXMODE) lock level is first obtained.  If this
 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
 * individual lockres l_flags member from the ast function.  It is
 * expected that the locking wrapper will clear the
 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
 */
#define LOCK_TYPE_REQUIRES_REFRESH 0x1

/*
 * Indicate that a lock type makes use of the lock value block.  The
 * ->set_lvb lock type callback must be defined.
 */
#define LOCK_TYPE_USES_LVB 0x2
static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.check_downconvert = ocfs2_check_meta_downconvert,
	.set_lvb	= ocfs2_set_meta_lvb,
	.downconvert_worker = ocfs2_data_convert_worker,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_super_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
};

static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
	.get_osb	= ocfs2_get_dentry_osb,
	.post_unlock	= ocfs2_dentry_post_unlock,
	.downconvert_worker = ocfs2_dentry_convert_worker,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
	.get_osb	= ocfs2_get_file_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
	.set_lvb	= ocfs2_set_qinfo_lvb,
	.get_osb	= ocfs2_get_qinfo_osb,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
	.check_downconvert = ocfs2_check_refcount_downconvert,
	.downconvert_worker = ocfs2_refcount_convert_worker,
	.flags		= 0,
};
static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
	return lockres->l_type == OCFS2_LOCK_TYPE_META ||
		lockres->l_type == OCFS2_LOCK_TYPE_RW ||
		lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}

static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{
	return container_of(lksb, struct ocfs2_lock_res, l_lksb);
}

static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!ocfs2_is_inode_lock(lockres));

	return (struct inode *) lockres->l_priv;
}

static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);

	return (struct ocfs2_dentry_lock *)lockres->l_priv;
}

static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);

	return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
}

static inline struct ocfs2_refcount_tree *
ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
{
	return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
}

static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
	if (lockres->l_ops->get_osb)
		return lockres->l_ops->get_osb(lockres);

	return (struct ocfs2_super *)lockres->l_priv;
}
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted);
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
				   struct ocfs2_lock_res *lockres,
				   int level, unsigned long caller_ip);
static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres,
					int level)
{
	__ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
}

static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert);
#define ocfs2_log_dlm_error(_func, _err, _lockres) do {					\
	if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY)				\
		mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n",	\
		     _err, _func, _lockres->l_name);					\
	else										\
		mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n",	\
		     _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name,	\
		     (unsigned int)ocfs2_get_dentry_lock_ino(_lockres));		\
} while (0)
static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static int ocfs2_inode_lock_update(struct inode *inode,
				   struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level);
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation);
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres);
static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
				  u64 blkno,
				  u32 generation,
				  char *name)
{
	int len;

	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);

	len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
		       ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
		       (long long)blkno, generation);

	BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));

	mlog(0, "built lock resource with name: %s\n", name);
}
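
/*
 * Illustrative sketch only (not part of the original file, hence not
 * compiled): the layout the format string above produces, assuming
 * the six-character OCFS2_LOCK_ID_PAD used by the stack headers.  The
 * helper name is hypothetical.
 */
#if 0
static void example_parse_lock_name(const char *name)
{
	/* name[0]       lock type character, e.g. 'M' for META	*/
	/* name[1..6]    OCFS2_LOCK_ID_PAD ("000000")			*/
	/* name[7..22]   block number, 16 hex digits			*/
	/* name[23..30]  generation, 8 hex digits			*/
	/* name[31]      terminating NUL (OCFS2_LOCK_ID_MAX_LEN is 32)	*/
}
#endif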
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);

static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
				       struct ocfs2_dlm_debug *dlm_debug)
{
	mlog(0, "Add tracking for lockres %s\n", res->l_name);

	spin_lock(&ocfs2_dlm_tracking_lock);
	list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
	spin_lock(&ocfs2_dlm_tracking_lock);
	if (!list_empty(&res->l_debug_list))
		list_del_init(&res->l_debug_list);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}
#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
	res->l_lock_refresh = 0;
	memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
	memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
}

static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
				    struct ocfs2_mask_waiter *mw, int ret)
{
	ktime_t kt;
	u64 usec;
	struct ocfs2_lock_stats *stats;

	if (level == LKM_PRMODE)
		stats = &res->l_lock_prmode;
	else if (level == LKM_EXMODE)
		stats = &res->l_lock_exmode;
	else
		return;

	kt = ktime_sub(ktime_get(), mw->mw_lock_start);
	usec = ktime_to_us(kt);

	stats->ls_gets++;
	stats->ls_total += ktime_to_ns(kt);
	/* overflow */
	if (unlikely(stats->ls_gets == 0)) {
		stats->ls_gets++;
		stats->ls_total = ktime_to_ns(kt);
	}

	if (stats->ls_max < usec)
		stats->ls_max = usec;

	if (ret)
		stats->ls_fail++;
}

static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
	lockres->l_lock_refresh++;
}

static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
	mw->mw_lock_start = ktime_get();
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
}
static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
					   int level, struct ocfs2_mask_waiter *mw, int ret)
{
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
#endif
static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *res,
				       enum ocfs2_lock_type type,
				       struct ocfs2_lock_res_ops *ops,
				       void *priv)
{
	res->l_type          = type;
	res->l_ops           = ops;
	res->l_priv          = priv;

	res->l_level         = DLM_LOCK_IV;
	res->l_requested     = DLM_LOCK_IV;
	res->l_blocking      = DLM_LOCK_IV;
	res->l_action        = OCFS2_AST_INVALID;
	res->l_unlock_action = OCFS2_UNLOCK_INVALID;

	res->l_flags         = OCFS2_LOCK_INITIALIZED;

	ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);

	ocfs2_init_lock_stats(res);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (type != OCFS2_LOCK_TYPE_OPEN)
		lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
				 &lockdep_keys[type], 0);
	else
		res->l_lockdep_map.key = NULL;
#endif
}
void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
	/* This also clears out the lock status block */
	memset(res, 0, sizeof(struct ocfs2_lock_res));
	spin_lock_init(&res->l_lock);
	init_waitqueue_head(&res->l_event);
	INIT_LIST_HEAD(&res->l_blocked_list);
	INIT_LIST_HEAD(&res->l_mask_waiters);
	INIT_LIST_HEAD(&res->l_holders);
}
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
			       enum ocfs2_lock_type type,
			       unsigned int generation,
			       struct inode *inode)
{
	struct ocfs2_lock_res_ops *ops;

	switch(type) {
	case OCFS2_LOCK_TYPE_RW:
		ops = &ocfs2_inode_rw_lops;
		break;
	case OCFS2_LOCK_TYPE_META:
		ops = &ocfs2_inode_inode_lops;
		break;
	case OCFS2_LOCK_TYPE_OPEN:
		ops = &ocfs2_inode_open_lops;
		break;
	default:
		mlog_bug_on_msg(1, "type: %d\n", type);
		ops = NULL; /* thanks, gcc */
		break;
	}

	ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
			      generation, res->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return OCFS2_SB(inode->i_sb);
}

static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_mem_dqinfo *info = lockres->l_priv;

	return OCFS2_SB(info->dqi_gi.dqi_sb);
}

static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_file_private *fp = lockres->l_priv;

	return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
}

static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
	__be64 inode_blkno_be;

	memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
	       sizeof(__be64));

	return be64_to_cpu(inode_blkno_be);
}

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = lockres->l_priv;

	return OCFS2_SB(dl->dl_inode->i_sb);
}
void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
				u64 parent, struct inode *inode)
{
	int len;
	u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
	__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
	struct ocfs2_lock_res *lockres = &dl->dl_lockres;

	ocfs2_lock_res_init_once(lockres);

	/*
	 * Unfortunately, the standard lock naming scheme won't work
	 * here because we have two 16 byte values to use.  Instead,
	 * we'll stuff the inode number as a binary value.  We still
	 * want error prints to show something without garbling the
	 * display, so drop a null byte in there before the inode
	 * number.  A future version of OCFS2 will likely use all
	 * binary lock names.  The stringified names have been a
	 * tremendous aid in debugging, but now that the debugfs
	 * interface exists, we can mangle things there if need be.
	 *
	 * NOTE: We also drop the standard "pad" value (the total lock
	 * name size stays the same though - the last part is all
	 * zeros due to the memset in ocfs2_lock_res_init_once()
	 */
	len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
		       "%c%016llx",
		       ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
		       (long long)parent);

	BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));

	memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
	       sizeof(__be64));

	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
				   dl);
}
static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
				      struct ocfs2_super *osb)
{
	/* Superblock lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
			      0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
				   &ocfs2_super_lops, osb);
}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
				       struct ocfs2_super *osb)
{
	/* Rename lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
				   &ocfs2_rename_lops, osb);
}

static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
					 struct ocfs2_super *osb)
{
	/* nfs_sync lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
				   &ocfs2_nfs_sync_lops, osb);
}

void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;

	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_TRIM_FS, 0, 0, lockres->l_name);
	ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_TRIM_FS,
				   &ocfs2_trim_fs_lops, osb);
}

void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;

	ocfs2_simple_drop_lockres(osb, lockres);
	ocfs2_lock_res_free(lockres);
}

static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
					    struct ocfs2_super *osb)
{
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
				   &ocfs2_orphan_scan_lops, osb);
}

void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
			      struct ocfs2_file_private *fp)
{
	struct inode *inode = fp->fp_file->f_mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
			      inode->i_generation, lockres->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
				   fp);
	lockres->l_flags |= OCFS2_LOCK_NOCACHE;
}

void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
			       struct ocfs2_mem_dqinfo *info)
{
	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
			      0, lockres->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
				   OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
				   info);
}

void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
				  struct ocfs2_super *osb, u64 ref_blkno,
				  unsigned int generation)
{
	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
			      generation, lockres->l_name);
	ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
				   &ocfs2_refcount_block_lops, osb);
}
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
		return;

	ocfs2_remove_lockres_tracking(res);

	mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
			"Lockres %s is on the blocked list\n",
			res->l_name);
	mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
			"Lockres %s has mask waiters pending\n",
			res->l_name);
	mlog_bug_on_msg(spin_is_locked(&res->l_lock),
			"Lockres %s is locked\n",
			res->l_name);
	mlog_bug_on_msg(res->l_ro_holders,
			"Lockres %s has %u ro holders\n",
			res->l_name, res->l_ro_holders);
	mlog_bug_on_msg(res->l_ex_holders,
			"Lockres %s has %u ex holders\n",
			res->l_name, res->l_ex_holders);

	/* Need to clear out the lock status block for the dlm */
	memset(&res->l_lksb, 0, sizeof(res->l_lksb));

	res->l_flags = 0UL;
}
/*
 * Keep a list of processes who have interest in a lockres.
 * Note: this is now only used for checking recursive cluster locking.
 */
static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
				    struct ocfs2_lock_holder *oh)
{
	INIT_LIST_HEAD(&oh->oh_list);
	oh->oh_owner_pid = get_pid(task_pid(current));

	spin_lock(&lockres->l_lock);
	list_add_tail(&oh->oh_list, &lockres->l_holders);
	spin_unlock(&lockres->l_lock);
}

static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
				       struct ocfs2_lock_holder *oh)
{
	spin_lock(&lockres->l_lock);
	list_del(&oh->oh_list);
	spin_unlock(&lockres->l_lock);

	put_pid(oh->oh_owner_pid);
}
static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_lock_holder *oh;
	struct pid *pid;

	/* look in the list of holders for one with the current task as owner */
	spin_lock(&lockres->l_lock);
	pid = task_pid(current);
	list_for_each_entry(oh, &lockres->l_holders, oh_list) {
		if (oh->oh_owner_pid == pid) {
			spin_unlock(&lockres->l_lock);
			return 1;
		}
	}
	spin_unlock(&lockres->l_lock);

	return 0;
}
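
/*
 * Illustrative sketch only (not part of the original file, hence not
 * compiled): how callers use the holder list above to allow recursive
 * cluster locking.  The helper below is hypothetical; real users wrap
 * this pattern around their cluster lock and unlock calls.
 */
#if 0
static int example_lock_with_tracking(struct ocfs2_lock_res *lockres,
				      struct ocfs2_lock_holder *oh)
{
	if (ocfs2_is_locked_by_me(lockres))
		return 0;	/* this task already holds it; don't relock */

	/* ... take the cluster lock here ... */

	ocfs2_add_holder(lockres, oh);	/* record this task as a holder */
	return 1;	/* caller must unlock and ocfs2_remove_holder() */
}
#endif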
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	BUG_ON(!lockres);

	switch(level) {
	case DLM_LOCK_EX:
		lockres->l_ex_holders++;
		break;
	case DLM_LOCK_PR:
		lockres->l_ro_holders++;
		break;
	default:
		BUG();
	}
}

static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	BUG_ON(!lockres);

	switch(level) {
	case DLM_LOCK_EX:
		BUG_ON(!lockres->l_ex_holders);
		lockres->l_ex_holders--;
		break;
	case DLM_LOCK_PR:
		BUG_ON(!lockres->l_ro_holders);
		lockres->l_ro_holders--;
		break;
	default:
		BUG();
	}
}
/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL.  It *will* have to be adjusted when more
 * lock types are added. */
static inline int ocfs2_highest_compat_lock_level(int level)
{
	int new_level = DLM_LOCK_EX;

	if (level == DLM_LOCK_EX)
		new_level = DLM_LOCK_NL;
	else if (level == DLM_LOCK_PR)
		new_level = DLM_LOCK_PR;
	return new_level;
}
static void lockres_set_flags(struct ocfs2_lock_res *lockres,
			      unsigned long newflags)
{
	struct ocfs2_mask_waiter *mw, *tmp;

	assert_spin_locked(&lockres->l_lock);

	lockres->l_flags = newflags;

	list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			continue;

		list_del_init(&mw->mw_item);
		mw->mw_status = 0;
		complete(&mw->mw_complete);
	}
}
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
	lockres_set_flags(lockres, lockres->l_flags | or);
}
static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
				unsigned long clear)
{
	lockres_set_flags(lockres, lockres->l_flags & ~clear);
}
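
/*
 * Illustrative sketch only (not part of the original file, hence not
 * compiled): what a (mask, goal) pair means to lockres_set_flags()
 * above.  A waiter queued with mask OCFS2_LOCK_BUSY and goal 0
 * completes once BUSY is clear.  The helpers referenced here are
 * defined later in this file.
 */
#if 0
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);
	/* With lockres->l_lock held: */
	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
	/* Drop l_lock, then block until (l_flags & OCFS2_LOCK_BUSY) == 0: */
	ocfs2_wait_for_mask(&mw);
#endif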
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);

	lockres->l_level = lockres->l_requested;
	if (lockres->l_level <=
	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
		lockres->l_blocking = DLM_LOCK_NL;
		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
	}
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));

	/* Convert from RO to EX doesn't really need anything as our
	 * information is already up to date.  Convert from NL to
	 * *anything* however should mark ourselves as needing an
	 * update */
	if (lockres->l_level == DLM_LOCK_NL &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;

	/*
	 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
	 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
	 * downconverting the lock before the upconvert has fully completed.
	 * Do not prevent the dc thread from downconverting if NONBLOCK lock
	 * had already returned.
	 */
	if (!(lockres->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
		lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
	else
		lockres_clear_flags(lockres, OCFS2_LOCK_NONBLOCK_FINISHED);

	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);

	if (lockres->l_requested > DLM_LOCK_NL &&
	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
				     int level)
{
	int needs_downconvert = 0;

	assert_spin_locked(&lockres->l_lock);

	if (level > lockres->l_blocking) {
		/* only schedule a downconvert if we haven't already scheduled
		 * one that goes low enough to satisfy the level we're
		 * blocking.  this also catches the case where we get
		 * duplicate BASTs */
		if (ocfs2_highest_compat_lock_level(level) <
		    ocfs2_highest_compat_lock_level(lockres->l_blocking))
			needs_downconvert = 1;

		lockres->l_blocking = level;
	}

	mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
	     lockres->l_name, level, lockres->l_level, lockres->l_blocking,
	     needs_downconvert);

	if (needs_downconvert)
		lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
	mlog(0, "needs_downconvert = %d\n", needs_downconvert);
	return needs_downconvert;
}
/*
 * OCFS2_LOCK_PENDING and l_pending_gen.
 *
 * Why does OCFS2_LOCK_PENDING exist?  To close a race between setting
 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock().  See ocfs2_unblock_lock()
 * for more details on the race.
 *
 * OCFS2_LOCK_PENDING closes the race quite nicely.  However, it introduces
 * a race on itself.  In o2dlm, we can get the ast before ocfs2_dlm_lock()
 * returns.  The ast clears OCFS2_LOCK_BUSY, and must therefore clear
 * OCFS2_LOCK_PENDING at the same time.  When ocfs2_dlm_lock() returns,
 * the caller is going to try to clear PENDING again.  If nothing else is
 * happening, __lockres_clear_pending() sees PENDING is unset and does
 * nothing.
 *
 * But what if another path (eg downconvert thread) has just started a
 * new locking action?  The other path has re-set PENDING.  Our path
 * cannot clear PENDING, because that will re-open the original race
 * window.
 *
 * [Example]
 *
 * ocfs2_cluster_lock()
 *  take l_lock
 *  set BUSY, set PENDING
 *  drop l_lock
 *  ocfs2_dlm_lock()
 *   ocfs2_locking_ast()		ocfs2_downconvert_thread()
 *    clear PENDING			 ocfs2_unblock_lock()
 *					  take l_lock
 *					  !BUSY
 *					  ocfs2_prepare_downconvert()
 *					   set BUSY, set PENDING
 *					  drop l_lock
 *   take l_lock
 *   clear PENDING
 *   drop l_lock
 *			<window>
 *					  ocfs2_dlm_lock()
 *
 * So as you can see, we now have a window where l_lock is not held,
 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
 *
 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
 * set by ocfs2_prepare_downconvert().  That wasn't nice.
 *
 * To solve this we introduce l_pending_gen.  A call to
 * lockres_clear_pending() will only do so when it is passed a generation
 * number that matches the lockres.  lockres_set_pending() will return the
 * current generation number.  When ocfs2_cluster_lock() goes to clear
 * PENDING, it passes the generation it got from set_pending().  In our
 * example above, the generation numbers will *not* match.  Thus,
 * ocfs2_cluster_lock() will not clear the PENDING set by
 * ocfs2_prepare_downconvert().
 */
/* Unlocked version for ocfs2_locking_ast() */
static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
				    unsigned int generation,
				    struct ocfs2_super *osb)
{
	assert_spin_locked(&lockres->l_lock);

	/*
	 * The ast and locking functions can race us here.  The winner
	 * will clear pending, the loser will not.
	 */
	if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
	    (lockres->l_pending_gen != generation))
		return;

	lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
	lockres->l_pending_gen++;

	/*
	 * The downconvert thread may have skipped us because we
	 * were PENDING.  Wake it up.
	 */
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
		ocfs2_wake_downconvert_thread(osb);
}

/* Locked version for callers of ocfs2_dlm_lock() */
static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
				  unsigned int generation,
				  struct ocfs2_super *osb)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	__lockres_clear_pending(lockres, generation, osb);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
{
	assert_spin_locked(&lockres->l_lock);
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));

	lockres_or_flags(lockres, OCFS2_LOCK_PENDING);

	return lockres->l_pending_gen;
}
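
/*
 * Illustrative sketch only (not part of the original file, hence not
 * compiled): the set/clear pairing the comment above describes.  Every
 * ocfs2_dlm_lock() caller in this file follows this shape; see
 * ocfs2_lock_create() below for a real instance.
 */
#if 0
	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	gen = lockres_set_pending(lockres);	/* remember our generation */
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, dlm_flags,
			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);

	/* Only clears PENDING if no newer locking action has re-set it. */
	lockres_clear_pending(lockres, gen, osb);
#endif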
static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
{
	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	int needs_downconvert;
	unsigned long flags;

	BUG_ON(level <= DLM_LOCK_NL);

	mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
	     "type %s\n", lockres->l_name, level, lockres->l_level,
	     ocfs2_lock_type_string(lockres->l_type));

	/*
	 * We can skip the bast for locks which don't enable caching -
	 * they'll be dropped at the earliest possible time anyway.
	 */
	if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
		return;

	spin_lock_irqsave(&lockres->l_lock, flags);
	needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
	if (needs_downconvert)
		ocfs2_schedule_blocked_lock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);

	ocfs2_wake_downconvert_thread(osb);
}
static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
{
	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	unsigned long flags;
	int status;

	spin_lock_irqsave(&lockres->l_lock, flags);

	status = ocfs2_dlm_lock_status(&lockres->l_lksb);

	if (status == -EAGAIN) {
		lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
		goto out;
	}

	if (status) {
		mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
		     lockres->l_name, status);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		return;
	}

	mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
	     "level %d => %d\n", lockres->l_name, lockres->l_action,
	     lockres->l_unlock_action, lockres->l_level, lockres->l_requested);

	switch(lockres->l_action) {
	case OCFS2_AST_ATTACH:
		ocfs2_generic_handle_attach_action(lockres);
		lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
		break;
	case OCFS2_AST_CONVERT:
		ocfs2_generic_handle_convert_action(lockres);
		break;
	case OCFS2_AST_DOWNCONVERT:
		ocfs2_generic_handle_downconvert_action(lockres);
		break;
	default:
		mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
		     "flags 0x%lx, unlock: %u\n",
		     lockres->l_name, lockres->l_action, lockres->l_flags,
		     lockres->l_unlock_action);
		BUG();
	}
out:
	/* set it to something invalid so if we get called again we
	 * can catch it. */
	lockres->l_action = OCFS2_AST_INVALID;

	/* Did we try to cancel this lock?  Clear that state */
	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;

	/*
	 * We may have beaten the locking functions here.  We certainly
	 * know that dlm_lock() has been called :-)
	 * Because we can't have two lock calls in flight at once, we
	 * can use lockres->l_pending_gen.
	 */
	__lockres_clear_pending(lockres, lockres->l_pending_gen, osb);

	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
{
	struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
	unsigned long flags;

	mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
	     lockres->l_name, lockres->l_unlock_action);

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (error) {
		mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
		     "unlock_action %d\n", error, lockres->l_name,
		     lockres->l_unlock_action);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		return;
	}

	switch(lockres->l_unlock_action) {
	case OCFS2_UNLOCK_CANCEL_CONVERT:
		mlog(0, "Cancel convert success for %s\n", lockres->l_name);
		lockres->l_action = OCFS2_AST_INVALID;
		/* Downconvert thread may have requeued this lock, we
		 * need to wake it. */
		if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
			ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
		break;
	case OCFS2_UNLOCK_DROP_LOCK:
		lockres->l_level = DLM_LOCK_IV;
		break;
	default:
		BUG();
	}

	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}
/*
 * This is the filesystem locking protocol.  It provides the lock handling
 * hooks for the underlying DLM.  It has a maximum version number.
 * The version number allows interoperability with systems running at
 * the same major number and an equal or smaller minor number.
 *
 * Whenever the filesystem does new things with locks (adds or removes a
 * lock, orders them differently, does different things underneath a lock),
 * the version must be changed.  The protocol is negotiated when joining
 * the dlm domain.  A node may join the domain if its major version is
 * identical to all other nodes and its minor version is greater than
 * or equal to all other nodes.  When its minor version is greater than
 * the other nodes, it will run at the minor version specified by the
 * other nodes.
 *
 * If a locking change is made that will not be compatible with older
 * versions, the major number must be increased and the minor version set
 * to zero.  If a change merely adds a behavior that can be disabled when
 * speaking to older versions, the minor version must be increased.  If a
 * change adds a fully backwards compatible change (eg, LVB changes that
 * are just ignored by older versions), the version does not need to be
 * updated.
 */
static struct ocfs2_locking_protocol lproto = {
	.lp_max_version = {
		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
	},
	.lp_lock_ast		= ocfs2_locking_ast,
	.lp_blocking_ast	= ocfs2_blocking_ast,
	.lp_unlock_ast		= ocfs2_unlock_ast,
};

void ocfs2_set_locking_protocol(void)
{
	ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
}
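
/*
 * Illustrative sketch only (hypothetical helper, not a dlmglue API,
 * hence not compiled): the join rule described above expressed as a
 * predicate.  A joining node then runs at the smaller of the two
 * minor versions.
 */
#if 0
static int example_may_join_domain(struct ocfs2_protocol_version *ours,
				   struct ocfs2_protocol_version *domain)
{
	if (ours->pv_major != domain->pv_major)
		return 0;	/* major mismatch: never compatible */
	if (ours->pv_minor < domain->pv_minor)
		return 0;	/* the domain speaks something newer than us */
	return 1;		/* join, running at domain->pv_minor */
}
#endif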
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
	if (convert)
		lockres->l_action = OCFS2_AST_INVALID;
	else
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
}
/* Note: If we detect another process working on the lock (i.e.,
 * OCFS2_LOCK_BUSY), we'll bail out returning 0.  It's up to the caller
 * to do the right thing in that case.
 */
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags)
{
	int ret = 0;
	unsigned long flags;
	unsigned int gen;

	mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
	     dlm_flags);

	spin_lock_irqsave(&lockres->l_lock, flags);
	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	lockres->l_action = OCFS2_AST_ATTACH;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	gen = lockres_set_pending(lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn,
			     level,
			     &lockres->l_lksb,
			     dlm_flags,
			     lockres->l_name,
			     OCFS2_LOCK_ID_MAX_LEN - 1);
	lockres_clear_pending(lockres, gen, osb);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 1);
	}

	mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);

bail:
	return ret;
}
static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
					int flag)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = lockres->l_flags & flag;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}

static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}
/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));

	return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}
static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
	INIT_LIST_HEAD(&mw->mw_item);
	init_completion(&mw->mw_complete);
	ocfs2_init_start_time(mw);
}

static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
	wait_for_completion(&mw->mw_complete);
	/* Re-arm the completion in case we want to wait on it again */
	reinit_completion(&mw->mw_complete);
	return mw->mw_status;
}

static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
				    struct ocfs2_mask_waiter *mw,
				    unsigned long mask,
				    unsigned long goal)
{
	BUG_ON(!list_empty(&mw->mw_item));

	assert_spin_locked(&lockres->l_lock);

	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
	mw->mw_mask = mask;
	mw->mw_goal = goal;
}

/* returns 0 if the mw that was removed was already satisfied, -EBUSY
 * if the mask still hadn't reached its goal */
static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
					struct ocfs2_mask_waiter *mw)
{
	int ret = 0;

	assert_spin_locked(&lockres->l_lock);
	if (!list_empty(&mw->mw_item)) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			ret = -EBUSY;

		list_del_init(&mw->mw_item);
		init_completion(&mw->mw_complete);
	}

	return ret;
}

static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = __lockres_remove_mask_waiter(lockres, mw);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
					     struct ocfs2_lock_res *lockres)
{
	int ret;

	ret = wait_for_completion_interruptible(&mw->mw_complete);
	if (ret)
		lockres_remove_mask_waiter(lockres, mw);
	else
		ret = mw->mw_status;
	/* Re-arm the completion in case we want to wait on it again */
	reinit_completion(&mw->mw_complete);
	return ret;
}
static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres,
				int level,
				u32 lkm_flags,
				int arg_flags,
				int l_subclass,
				unsigned long caller_ip)
{
	struct ocfs2_mask_waiter mw;
	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
	unsigned long flags;
	unsigned int gen;
	int noqueue_attempted = 0;
	int dlm_locked = 0;
	int kick_dc = 0;

	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
		mlog_errno(-EINVAL);
		return -EINVAL;
	}

	ocfs2_init_mask_waiter(&mw);

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= DLM_LKF_VALBLK;

again:
	wait = 0;

	spin_lock_irqsave(&lockres->l_lock, flags);

	if (catch_signals && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto unlock;
	}

	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
			"Cluster lock called on freeing lockres %s! flags "
			"0x%lx\n", lockres->l_name, lockres->l_flags);

	/* We only compare against the currently granted level
	 * here.  If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
	    level > lockres->l_level) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		wait = 1;
		goto unlock;
	}

	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
		/*
		 * We've upconverted. If the lock now has a level we can
		 * work with, we take it. If, however, the lock is not at the
		 * required level, we go thru the full cycle. One way this could
		 * happen is if a process requesting an upconvert to PR is
		 * closely followed by another requesting upconvert to an EX.
		 * If the process requesting EX lands here, we want it to
		 * continue attempting to upconvert and let the process
		 * requesting PR take the lock.
		 * If multiple processes request upconvert to PR, the first one
		 * here will take the lock. The others will have to go thru the
		 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
		 * downconvert request.
		 */
		if (level <= lockres->l_level)
			goto update_holders;
	}

	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
		/* is the lock currently blocked on behalf of
		 * another node? */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
		wait = 1;
		goto unlock;
	}

	if (level > lockres->l_level) {
		if (noqueue_attempted > 0) {
			ret = -EAGAIN;
			goto unlock;
		}
		if (lkm_flags & DLM_LKF_NOQUEUE)
			noqueue_attempted = 1;

		if (lockres->l_action != OCFS2_AST_INVALID)
			mlog(ML_ERROR, "lockres %s has action %u pending\n",
			     lockres->l_name, lockres->l_action);

		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
			lockres->l_action = OCFS2_AST_ATTACH;
			lkm_flags &= ~DLM_LKF_CONVERT;
		} else {
			lockres->l_action = OCFS2_AST_CONVERT;
			lkm_flags |= DLM_LKF_CONVERT;
		}

		lockres->l_requested = level;
		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
		gen = lockres_set_pending(lockres);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		BUG_ON(level == DLM_LOCK_IV);
		BUG_ON(level == DLM_LOCK_NL);

		mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
		     lockres->l_name, lockres->l_level, level);

		/* call dlm_lock to upgrade lock now */
		ret = ocfs2_dlm_lock(osb->cconn,
				     level,
				     &lockres->l_lksb,
				     lkm_flags,
				     lockres->l_name,
				     OCFS2_LOCK_ID_MAX_LEN - 1);
		lockres_clear_pending(lockres, gen, osb);
		if (ret) {
			if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
			    (ret != -EAGAIN)) {
				ocfs2_log_dlm_error("ocfs2_dlm_lock",
						    ret, lockres);
			}
			ocfs2_recover_from_dlm_error(lockres, 1);
			goto out;
		}
		dlm_locked = 1;

		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
		     lockres->l_name);

		/* At this point we've gone inside the dlm and need to
		 * complete our work regardless. */
		catch_signals = 0;

		/* wait for busy to clear and carry on */
		goto again;
	}

update_holders:
	/* Ok, if we get here then we're good to go. */
	ocfs2_inc_holders(lockres, level);

	ret = 0;
unlock:
	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);

	/* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
	kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);

	spin_unlock_irqrestore(&lockres->l_lock, flags);
	if (kick_dc)
		ocfs2_wake_downconvert_thread(osb);
out:
	/*
	 * This is helping work around a lock inversion between the page lock
	 * and dlm locks.  One path holds the page lock while calling aops
	 * which block acquiring dlm locks.  The voting thread holds dlm
	 * locks while acquiring page locks while down converting data locks.
	 * This block is helping an aop path notice the inversion and back
	 * off to unlock its page lock before trying the dlm lock again.
	 */
	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
		wait = 0;
		spin_lock_irqsave(&lockres->l_lock, flags);
		if (__lockres_remove_mask_waiter(lockres, &mw)) {
			if (dlm_locked)
				lockres_or_flags(lockres,
					OCFS2_LOCK_NONBLOCK_FINISHED);
			spin_unlock_irqrestore(&lockres->l_lock, flags);
			ret = -EAGAIN;
		} else {
			spin_unlock_irqrestore(&lockres->l_lock, flags);
			goto again;
		}
	}
	if (wait) {
		ret = ocfs2_wait_for_mask(&mw);
		if (ret == 0)
			goto again;
		mlog_errno(ret);
	}
	ocfs2_update_lock_stats(lockres, level, &mw, ret);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!ret && lockres->l_lockdep_map.key != NULL) {
		if (level == DLM_LOCK_PR)
			rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
				caller_ip);
		else
			rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
				!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
				caller_ip);
	}
#endif
	return ret;
}
static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres,
				     int level,
				     u32 lkm_flags,
				     int arg_flags)
{
	return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
				    0, _RET_IP_);
}
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
				   struct ocfs2_lock_res *lockres,
				   int level,
				   unsigned long caller_ip)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ocfs2_dec_holders(lockres, level);
	ocfs2_downconvert_on_unlock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (lockres->l_lockdep_map.key != NULL)
		rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
#endif
}
static int ocfs2_create_new_lock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int ex,
				 int local)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	unsigned long flags;
	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}
/* Grants us an EX lock on the data and metadata resources, skipping
 * the normal cluster directory lookup.  Use this ONLY on newly created
 * inodes which other nodes can't possibly see, and which haven't been
 * hashed in the inode hash yet.  This can give us a good performance
 * increase as it'll skip the network broadcast normally associated
 * with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!ocfs2_inode_is_new(inode));

	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);

	/* NOTE: That we don't increment any of the holder counts, nor
	 * do we add anything to a journal handle.  Since this is
	 * supposed to be a new inode which the cluster doesn't know
	 * about yet, there is no need to.  As far as the LVB handling
	 * is concerned, this is basically like acquiring an EX lock
	 * on a resource which has an invalid one -- we'll set it
	 * valid when we release the EX. */

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	/*
	 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
	 * don't use a generation in their lock names.
	 */
	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
	if (ret)
		mlog_errno(ret);

bail:
	return ret;
}
int ocfs2_rw_lock(struct inode *inode, int write)
{
	int status, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu take %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		return 0;

	lockres = &OCFS2_I(inode)->ip_rw_lockres;

	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
	if (status < 0)
		mlog_errno(status);

	return status;
}

int ocfs2_try_rw_lock(struct inode *inode, int write)
{
	int status, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu try to take %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		return 0;

	lockres = &OCFS2_I(inode)->ip_rw_lockres;

	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

	status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
	return status;
}

void ocfs2_rw_unlock(struct inode *inode, int write)
{
	int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}
/*
 * ocfs2_open_lock always gets a PR mode lock.
 */
int ocfs2_open_lock(struct inode *inode)
{
	int status = 0;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu take PRMODE open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_PR, 0, 0);
	if (status < 0)
		mlog_errno(status);

out:
	return status;
}

int ocfs2_try_open_lock(struct inode *inode, int write)
{
	int status = 0, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu try to take %s open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_is_hard_readonly(osb)) {
		if (write)
			status = -EROFS;
		goto out;
	}

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

	/*
	 * The file system may already be holding a PRMODE/EXMODE open lock.
	 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
	 * other nodes and the -EAGAIN will indicate to the caller that
	 * this inode is still in use.
	 */
	status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);

out:
	return status;
}

/*
 * ocfs2_open_unlock unlocks PR and EX mode open locks.
 */
void ocfs2_open_unlock(struct inode *inode)
{
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		goto out;

	if (lockres->l_ro_holders)
		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_PR);
	if (lockres->l_ex_holders)
		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);

out:
	return;
}
static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
				     int level)
{
	int ret;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	unsigned long flags;
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

retry_cancel:
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
		ret = ocfs2_prepare_cancel_convert(osb, lockres);
		if (ret) {
			spin_unlock_irqrestore(&lockres->l_lock, flags);
			ret = ocfs2_cancel_convert(osb, lockres);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}
			goto retry_cancel;
		}
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_for_mask(&mw);
		goto retry_cancel;
	}

	ret = -ERESTARTSYS;
	/*
	 * We may still have gotten the lock, in which case there's no
	 * point to restarting the syscall.
	 */
	if (lockres->l_level == level)
		ret = 0;

	mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
	     lockres->l_flags, lockres->l_level, lockres->l_action);

	spin_unlock_irqrestore(&lockres->l_lock, flags);

out:
	return ret;
}
1932 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1933 * flock() calls. The locking approach this requires is sufficiently
1934 * different from all other cluster lock types that we implement a
1935 * separate path to the "low-level" dlm calls. In particular:
1937 * - No optimization of lock levels is done - we take at exactly
1938 * what's been requested.
1940 * - No lock caching is employed. We immediately downconvert to
1941 * no-lock at unlock time. This also means flock locks never go on
1942 * the blocking list).
1944 * - Since userspace can trivially deadlock itself with flock, we make
1945 * sure to allow cancellation of a misbehaving applications flock()
1948 * - Access to any flock lockres doesn't require concurrency, so we
1949 * can simplify the code by requiring the caller to guarantee
1950 * serialization of dlmglue flock calls.
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{
	int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
	unsigned long flags;
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_lock_res *lockres = &fp->fp_flock;
	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

	if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
	    (lockres->l_level > DLM_LOCK_NL)) {
		mlog(ML_ERROR,
		     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
		     "level: %u\n", lockres->l_name, lockres->l_flags,
		     lockres->l_level);
		return -EINVAL;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		/*
		 * Get the lock at NLMODE to start - that way we
		 * can cancel the upconvert request if need be.
		 */
		ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_wait_for_mask(&mw);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	lockres->l_action = OCFS2_AST_CONVERT;
	lkm_flags |= DLM_LKF_CONVERT;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);

	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
	if (ret) {
		if (!trylock || (ret != -EAGAIN)) {
			ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
			ret = -EINVAL;
		}

		ocfs2_recover_from_dlm_error(lockres, 1);
		lockres_remove_mask_waiter(lockres, &mw);
		goto out;
	}

	ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
	if (ret == -ERESTARTSYS) {
		/*
		 * Userspace can cause deadlock itself with
		 * flock(). Current behavior locally is to allow the
		 * deadlock, but abort the system call if a signal is
		 * received. We follow this example, otherwise a
		 * poorly written program could sit in kernel until
		 * reboot.
		 *
		 * Handling this is a bit more complicated for Ocfs2
		 * though. We can't exit this function with an
		 * outstanding lock request, so a cancel convert is
		 * required. We intentionally overwrite 'ret' - if the
		 * cancel fails and the lock was granted, it's easier
		 * to just bubble success back up to the user.
		 */
		ret = ocfs2_flock_handle_signal(lockres, level);
	} else if (!ret && (level > lockres->l_level)) {
		/* Trylock failed asynchronously */
		BUG_ON(!trylock);
		ret = -EAGAIN;
	}

out:
	mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
	     lockres->l_name, ex, trylock, ret);
	return ret;
}
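/*
 * Illustrative usage sketch (not part of the original source): the
 * flock entry point in ocfs2's file locking code is expected to pair
 * these calls roughly as below, with the caller providing the
 * serialization required by the comment above. The 'fl' fields and
 * the trylock decision are hypothetical:
 *
 *	int ex = (fl->fl_type == F_WRLCK);
 *	int trylock = !(fl->fl_flags & FL_SLEEP);
 *
 *	ret = ocfs2_file_lock(file, ex, trylock);
 *	if (!ret) {
 *		... flock granted, do the VFS bookkeeping ...
 *		ocfs2_file_unlock(file);
 *	}
 */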
void ocfs2_file_unlock(struct file *file)
{
	int ret;
	unsigned int gen;
	unsigned long flags;
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_lock_res *lockres = &fp->fp_flock;
	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
		return;

	if (lockres->l_level == DLM_LOCK_NL)
		return;

	mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
	     lockres->l_name, lockres->l_flags, lockres->l_level,
	     lockres->l_action);

	spin_lock_irqsave(&lockres->l_lock, flags);
	/*
	 * Fake a blocking ast for the downconvert code.
	 */
	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
	lockres->l_blocking = DLM_LOCK_EX;

	gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
	if (ret) {
		mlog_errno(ret);
		return;
	}

	ret = ocfs2_wait_for_mask(&mw);
	if (ret)
		mlog_errno(ret);
}

static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{
	int kick = 0;

	/* If we know that another node is waiting on our lock, kick
	 * the downconvert thread pre-emptively when we reach a release
	 * condition. */
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
		switch(lockres->l_blocking) {
		case DLM_LOCK_EX:
			if (!lockres->l_ex_holders && !lockres->l_ro_holders)
				kick = 1;
			break;
		case DLM_LOCK_PR:
			if (!lockres->l_ex_holders)
				kick = 1;
			break;
		default:
			BUG();
		}
	}

	if (kick)
		ocfs2_wake_downconvert_thread(osb);
}
#define OCFS2_SEC_BITS   34
#define OCFS2_SEC_SHIFT  (64 - 34)
#define OCFS2_NSEC_MASK  ((1ULL << OCFS2_SEC_SHIFT) - 1)

/* LVB only has room for 64 bits of time here so we pack it for
 * now. */
static u64 ocfs2_pack_timespec(struct timespec *spec)
{
	u64 res;
	u64 sec = spec->tv_sec;
	u32 nsec = spec->tv_nsec;

	res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);

	return res;
}
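/*
 * Worked example (illustrative, not from the original source): with
 * OCFS2_SEC_SHIFT == 30, the packed value keeps the low 34 bits of
 * tv_sec in bits 63..30 and tv_nsec (always < 10^9 < 2^30) in bits
 * 29..0, so pack/unpack round-trips losslessly:
 *
 *	struct timespec ts = { .tv_sec = 1234567890, .tv_nsec = 987654321 };
 *	u64 packed = ocfs2_pack_timespec(&ts);
 *	// packed == (1234567890ULL << 30) | 987654321
 *	ocfs2_unpack_timespec(&ts, packed);	// restores both fields
 */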
/* Call this with the lockres locked. I am reasonably sure we don't
 * need ip_lock in this function as anyone who would be changing those
 * values is supposed to be blocked in ocfs2_inode_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_meta_lvb *lvb;

	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	/*
	 * Invalidate the LVB of a deleted inode - this way other
	 * nodes are forced to go to disk and discover the new inode
	 * status.
	 */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		lvb->lvb_version = 0;
		goto out;
	}

	lvb->lvb_version   = OCFS2_LVB_VERSION;
	lvb->lvb_isize	   = cpu_to_be64(i_size_read(inode));
	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
	lvb->lvb_iuid      = cpu_to_be32(i_uid_read(inode));
	lvb->lvb_igid      = cpu_to_be32(i_gid_read(inode));
	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
	lvb->lvb_iatime_packed  =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
	lvb->lvb_ictime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
	lvb->lvb_imtime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
	lvb->lvb_iattr    = cpu_to_be32(oi->ip_attr);
	lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
	lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);

out:
	mlog_meta_lvb(0, lockres);
}

static void ocfs2_unpack_timespec(struct timespec *spec,
				  u64 packed_time)
{
	spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
	spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
}

static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_meta_lvb *lvb;

	mlog_meta_lvb(0, lockres);

	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	/* We're safe here without the lockres lock... */
	spin_lock(&oi->ip_lock);
	oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
	i_size_write(inode, be64_to_cpu(lvb->lvb_isize));

	oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
	oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
	ocfs2_set_inode_flags(inode);

	/* fast-symlinks are a special case */
	if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
		inode->i_blocks = 0;
	else
		inode->i_blocks = ocfs2_inode_sector_count(inode);

	i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
	i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
	set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
	ocfs2_unpack_timespec(&inode->i_atime,
			      be64_to_cpu(lvb->lvb_iatime_packed));
	ocfs2_unpack_timespec(&inode->i_mtime,
			      be64_to_cpu(lvb->lvb_imtime_packed));
	ocfs2_unpack_timespec(&inode->i_ctime,
			      be64_to_cpu(lvb->lvb_ictime_packed));
	spin_unlock(&oi->ip_lock);
}

static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
					      struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
	    && lvb->lvb_version == OCFS2_LVB_VERSION
	    && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
		return 1;
	return 0;
}

/* Determine whether a lock resource needs to be refreshed, and
 * arbitrate who gets to refresh it.
 *
 *   0 means no refresh needed.
 *
 *   > 0 means you need to refresh this and you MUST call
 *   ocfs2_complete_lock_res_refresh afterwards. */
static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{
	unsigned long flags;
	int status = 0;

refresh_check:
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_on_refreshing_lock(lockres);
		goto refresh_check;
	}

	/* Ok, I'll be the one to refresh this lock. */
	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = 1;
bail:
	mlog(0, "status %d\n", status);
	return status;
}
/* If status is non zero, I'll mark the lock as not being in refresh
 * anymore, but I won't clear the needs refresh flag. */
static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
						   int status)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
	if (!status)
		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
}
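/*
 * Illustrative usage sketch (not part of the original source): the
 * refresh arbitration above is always used as a pair. Exactly one
 * caller wins the right to refresh; everyone else either sees no
 * refresh needed or blocks until the winner completes:
 *
 *	status = ocfs2_should_refresh_lock_res(lockres);
 *	if (status) {				// we won the refresh
 *		status = my_refresh_from_disk();	// hypothetical helper
 *		ocfs2_complete_lock_res_refresh(lockres, status);
 *	}
 */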
/* may or may not return a bh if it went to disk. */
static int ocfs2_inode_lock_update(struct inode *inode,
				   struct buffer_head **bh)
{
	int status = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_dinode *fe;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_mount_local(osb))
		goto bail;

	spin_lock(&oi->ip_lock);
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		mlog(0, "Orphaned inode %llu was deleted while we "
		     "were waiting on a lock. ip_flags = 0x%x\n",
		     (unsigned long long)oi->ip_blkno, oi->ip_flags);
		spin_unlock(&oi->ip_lock);
		status = -ENOENT;
		goto bail;
	}
	spin_unlock(&oi->ip_lock);

	if (!ocfs2_should_refresh_lock_res(lockres))
		goto bail;

	/* This will discard any caching information we might have had
	 * for the inode metadata. */
	ocfs2_metadata_cache_purge(INODE_CACHE(inode));

	ocfs2_extent_map_trunc(inode, 0);

	if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
		mlog(0, "Trusting LVB on inode %llu\n",
		     (unsigned long long)oi->ip_blkno);
		ocfs2_refresh_inode_from_lvb(inode);
	} else {
		/* Boo, we have to go to disk. */
		/* read bh, cast, ocfs2_refresh_inode */
		status = ocfs2_read_inode_block(inode, bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail_refresh;
		}
		fe = (struct ocfs2_dinode *) (*bh)->b_data;

		/* This is a good chance to make sure we're not
		 * locking an invalid object. ocfs2_read_inode_block()
		 * already checked that the inode block is sane.
		 *
		 * We bug on a stale inode here because we checked
		 * above whether it was wiped from disk. The wiping
		 * node provides a guarantee that we receive that
		 * message and can mark the inode before dropping any
		 * locks associated with it. */
		mlog_bug_on_msg(inode->i_generation !=
				le32_to_cpu(fe->i_generation),
				"Invalid dinode %llu disk generation: %u "
				"inode->i_generation: %u\n",
				(unsigned long long)oi->ip_blkno,
				le32_to_cpu(fe->i_generation),
				inode->i_generation);
		mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
				!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
				"Stale dinode %llu dtime: %llu flags: 0x%x\n",
				(unsigned long long)oi->ip_blkno,
				(unsigned long long)le64_to_cpu(fe->i_dtime),
				le32_to_cpu(fe->i_flags));

		ocfs2_refresh_inode(inode, fe);
		ocfs2_track_lock_refresh(lockres);
	}

	status = 0;
bail_refresh:
	ocfs2_complete_lock_res_refresh(lockres, status);
bail:
	return status;
}

static int ocfs2_assign_bh(struct inode *inode,
			   struct buffer_head **ret_bh,
			   struct buffer_head *passed_bh)
{
	int status;

	if (passed_bh) {
		/* Ok, the update went to disk for us, use the
		 * returned bh. */
		*ret_bh = passed_bh;
		get_bh(*ret_bh);

		return 0;
	}

	status = ocfs2_read_inode_block(inode, ret_bh);
	if (status < 0)
		mlog_errno(status);

	return status;
}

/*
 * returns < 0 error if the callback will never be called, otherwise
 * the result of the lock will be communicated via the callback.
 */
int ocfs2_inode_lock_full_nested(struct inode *inode,
				 struct buffer_head **ret_bh,
				 int ex,
				 int arg_flags,
				 int subclass)
{
	int status, level, acquired;
	u32 dlm_flags;
	struct ocfs2_lock_res *lockres = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *local_bh = NULL;

	mlog(0, "inode %llu, take %s META lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     ex ? "EXMODE" : "PRMODE");

	status = 0;
	acquired = 0;
	/* We'll allow faking a readonly metadata lock for
	 * rodevices. */
	if (ocfs2_is_hard_readonly(osb)) {
		if (ex)
			status = -EROFS;
		goto getbh;
	}

	if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
	    ocfs2_mount_local(osb))
		goto update;

	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		ocfs2_wait_for_recovery(osb);

	lockres = &OCFS2_I(inode)->ip_inode_lockres;
	level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	dlm_flags = 0;
	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
		dlm_flags |= DLM_LKF_NOQUEUE;

	status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
				      arg_flags, subclass, _RET_IP_);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		goto bail;
	}

	/* Notify the error cleanup path to drop the cluster lock. */
	acquired = 1;

	/* We wait twice because a node may have died while we were in
	 * the lower dlm layers. The second time though, we've
	 * committed to owning this lock so we don't allow signals to
	 * abort the operation. */
	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		ocfs2_wait_for_recovery(osb);

update:
	/*
	 * We only see this flag if we're being called from
	 * ocfs2_read_locked_inode(). It means we're locking an inode
	 * which hasn't been populated yet, so clear the refresh flag
	 * and let the caller handle it.
	 */
	if (inode->i_state & I_NEW) {
		status = 0;
		if (lockres)
			ocfs2_complete_lock_res_refresh(lockres, 0);
		goto bail;
	}

	/* This is fun. The caller may want a bh back, or it may
	 * not. ocfs2_inode_lock_update definitely wants one in, but
	 * may or may not read one, depending on what's in the
	 * LVB. The result of all of this is that we've *only* gone to
	 * disk if we have to, so the complexity is worthwhile. */
	status = ocfs2_inode_lock_update(inode, &local_bh);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail;
	}
getbh:
	if (ret_bh) {
		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
		if (status < 0)
			mlog_errno(status);
	}

bail:
	if (status < 0) {
		if (ret_bh && (*ret_bh)) {
			brelse(*ret_bh);
			*ret_bh = NULL;
		}
		if (acquired)
			ocfs2_inode_unlock(inode, ex);
	}

	brelse(local_bh);

	return status;
}
/*
 * This is working around a lock inversion between tasks acquiring DLM
 * locks while holding a page lock and the downconvert thread which
 * blocks dlm lock acquisition while acquiring page locks.
 *
 * ** These _with_page variants are only intended to be called from aop
 * methods that hold page locks and return a very specific *positive* error
 * code that aop methods pass up to the VFS -- test for errors with != 0. **
 *
 * The DLM is called such that it returns -EAGAIN if it would have
 * blocked waiting for the downconvert thread. In that case we unlock
 * our page so the downconvert thread can make progress. Once we've
 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
 * that called us can bubble that back up into the VFS who will then
 * immediately retry the aop call.
 */
int ocfs2_inode_lock_with_page(struct inode *inode,
			       struct buffer_head **ret_bh,
			       int ex,
			       struct page *page)
{
	int ret;

	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
	if (ret == -EAGAIN) {
		unlock_page(page);
		/*
		 * If we can't get the inode lock immediately, we should not
		 * return directly here, since this will lead to a softlockup
		 * problem. The method is to get a blocking lock and
		 * immediately unlock before returning; this avoids wasting
		 * CPU on lots of retries and improves fairness in acquiring
		 * the lock.
		 */
		if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
			ocfs2_inode_unlock(inode, ex);
		ret = AOP_TRUNCATED_PAGE;
	}

	return ret;
}
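/*
 * Illustrative usage sketch (not part of the original source): an aop
 * method is expected to test the _with_page return against 0 and pass
 * the positive AOP_TRUNCATED_PAGE code straight up, e.g.:
 *
 *	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
 *	if (ret != 0)
 *		return ret;	// may be AOP_TRUNCATED_PAGE, not an errno
 *	... page is still locked, inode cluster lock is held ...
 *	ocfs2_inode_unlock(inode, 0);
 */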
int ocfs2_inode_lock_atime(struct inode *inode,
			   struct vfsmount *vfsmnt,
			   int *level, int wait)
{
	int ret;

	if (wait)
		ret = ocfs2_inode_lock(inode, NULL, 0);
	else
		ret = ocfs2_try_inode_lock(inode, NULL, 0);

	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		return ret;
	}

	/*
	 * If we should update atime, we will get EX lock,
	 * otherwise we just get PR lock.
	 */
	if (ocfs2_should_update_atime(inode, vfsmnt)) {
		struct buffer_head *bh = NULL;

		ocfs2_inode_unlock(inode, 0);
		if (wait)
			ret = ocfs2_inode_lock(inode, &bh, 1);
		else
			ret = ocfs2_try_inode_lock(inode, &bh, 1);

		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			return ret;
		}
		*level = 1;
		if (ocfs2_should_update_atime(inode, vfsmnt))
			ocfs2_update_inode_atime(inode, bh);
		brelse(bh);
	} else
		*level = 0;

	return ret;
}

void ocfs2_inode_unlock(struct inode *inode,
			int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop %s META lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     ex ? "EXMODE" : "PRMODE");

	if (!ocfs2_is_hard_readonly(osb) &&
	    !ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}
/*
 * These _tracker variants are introduced to deal with the recursive cluster
 * locking issue. The idea is to keep track of a lock holder on the stack of
 * the current process. If there's a lock holder on the stack, we know the
 * task context is already protected by cluster locking. Currently, they're
 * used in some VFS entry routines.
 *
 * return < 0 on error, return == 0 if there's no lock holder on the stack
 * before this call, return == 1 if this call would be a recursive locking.
 */
int ocfs2_inode_lock_tracker(struct inode *inode,
			     struct buffer_head **ret_bh,
			     int ex,
			     struct ocfs2_lock_holder *oh)
{
	int status;
	int arg_flags = 0, has_locked;
	struct ocfs2_lock_res *lockres;

	lockres = &OCFS2_I(inode)->ip_inode_lockres;
	has_locked = ocfs2_is_locked_by_me(lockres);
	/* Just get buffer head if the cluster lock has been taken */
	if (has_locked)
		arg_flags = OCFS2_META_LOCK_GETBH;

	if (likely(!has_locked || ret_bh)) {
		status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
		if (status < 0) {
			if (status != -ENOENT)
				mlog_errno(status);
			return status;
		}
	}
	if (!has_locked)
		ocfs2_add_holder(lockres, oh);

	return has_locked;
}

void ocfs2_inode_unlock_tracker(struct inode *inode,
				int ex,
				struct ocfs2_lock_holder *oh,
				int had_lock)
{
	struct ocfs2_lock_res *lockres;

	lockres = &OCFS2_I(inode)->ip_inode_lockres;
	/* had_lock means that the current process already took the cluster
	 * lock previously. If had_lock is 1, we have nothing to do here;
	 * the lock will be released where it was originally taken. */
	if (!had_lock) {
		ocfs2_remove_holder(lockres, oh);
		ocfs2_inode_unlock(inode, ex);
	}
}
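/*
 * Illustrative usage sketch (not part of the original source): a VFS
 * entry point that may be re-entered while already holding the inode
 * cluster lock uses the tracker pair like this:
 *
 *	struct ocfs2_lock_holder oh;
 *	int had_lock;
 *
 *	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 1, &oh);
 *	if (had_lock < 0)
 *		return had_lock;
 *	... protected region; recursion returns had_lock == 1 ...
 *	ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
 */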
int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{
	struct ocfs2_lock_res *lockres;
	struct ocfs2_orphan_scan_lvb *lvb;
	int status = 0;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	lockres = &osb->osb_orphan_scan.os_lockres;
	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
	if (status < 0)
		return status;

	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
	    lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
		*seqno = be32_to_cpu(lvb->lvb_os_seqno);
	else
		*seqno = osb->osb_orphan_scan.os_seqno + 1;

	return status;
}

void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
{
	struct ocfs2_lock_res *lockres;
	struct ocfs2_orphan_scan_lvb *lvb;

	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
		lockres = &osb->osb_orphan_scan.os_lockres;
		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
		lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
		lvb->lvb_os_seqno = cpu_to_be32(seqno);
		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
	}
}
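/*
 * Illustrative note (not part of the original source): the orphan scan
 * sequence number travels through the LVB so the per-node scanners stay
 * loosely in step. A caller would typically read the current seqno under
 * the lock, decide whether a scan is still needed, and publish an
 * advanced seqno on unlock:
 *
 *	u32 seqno = 0;
 *	status = ocfs2_orphan_scan_lock(osb, &seqno);	// reads LVB
 *	if (!status) {
 *		... scan only if seqno matches our expected value ...
 *		ocfs2_orphan_scan_unlock(osb, seqno + 1);	// publishes
 *	}
 */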
int ocfs2_super_lock(struct ocfs2_super *osb,
		     int ex)
{
	int status = 0;
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		goto bail;

	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* The super block lock path is really in the best position to
	 * know when resources covered by the lock need to be
	 * refreshed, so we do it here. Of course, making sense of
	 * everything is up to the caller :) */
	status = ocfs2_should_refresh_lock_res(lockres);
	if (status) {
		status = ocfs2_refresh_slot_info(osb);

		ocfs2_complete_lock_res_refresh(lockres, status);

		if (status < 0) {
			ocfs2_cluster_unlock(osb, lockres, level);
			mlog_errno(status);
		}
		ocfs2_track_lock_refresh(lockres);
	}
bail:
	return status;
}

void ocfs2_super_unlock(struct ocfs2_super *osb,
			int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}

int ocfs2_rename_lock(struct ocfs2_super *osb)
{
	int status;
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
	if (status < 0)
		mlog_errno(status);

	return status;
}

void ocfs2_rename_unlock(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}

int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
{
	int status;
	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
				    0, 0);
	if (status < 0)
		mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);

	return status;
}

void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
{
	struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres,
				     ex ? LKM_EXMODE : LKM_PRMODE);
}
int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
		       struct ocfs2_trim_fs_info *info, int trylock)
{
	int status;
	struct ocfs2_trim_fs_lvb *lvb;
	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;

	if (info)
		info->tf_valid = 0;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX,
				    trylock ? DLM_LKF_NOQUEUE : 0, 0);
	if (status < 0) {
		if (status != -EAGAIN)
			mlog_errno(status);
		return status;
	}

	if (info) {
		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
		if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
		    lvb->lvb_version == OCFS2_TRIMFS_LVB_VERSION) {
			info->tf_valid = 1;
			info->tf_success = lvb->lvb_success;
			info->tf_nodenum = be32_to_cpu(lvb->lvb_nodenum);
			info->tf_start = be64_to_cpu(lvb->lvb_start);
			info->tf_len = be64_to_cpu(lvb->lvb_len);
			info->tf_minlen = be64_to_cpu(lvb->lvb_minlen);
			info->tf_trimlen = be64_to_cpu(lvb->lvb_trimlen);
		}
	}

	return status;
}

void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
			  struct ocfs2_trim_fs_info *info)
{
	struct ocfs2_trim_fs_lvb *lvb;
	struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;

	if (ocfs2_mount_local(osb))
		return;

	if (info) {
		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
		lvb->lvb_version = OCFS2_TRIMFS_LVB_VERSION;
		lvb->lvb_success = info->tf_success;
		lvb->lvb_nodenum = cpu_to_be32(info->tf_nodenum);
		lvb->lvb_start = cpu_to_be64(info->tf_start);
		lvb->lvb_len = cpu_to_be64(info->tf_len);
		lvb->lvb_minlen = cpu_to_be64(info->tf_minlen);
		lvb->lvb_trimlen = cpu_to_be64(info->tf_trimlen);
	}

	ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}
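/*
 * Illustrative usage sketch (not part of the original source): FITRIM
 * handling can use the trylock mode to discover, via the LVB, that
 * another node has already trimmed an overlapping range:
 *
 *	struct ocfs2_trim_fs_info info;
 *
 *	ret = ocfs2_trim_fs_lock(osb, &info, 1);	// trylock
 *	if (ret == -EAGAIN) {
 *		... another node holds the lock; wait or retry ...
 *	} else if (!ret) {
 *		... trim, record what was done in 'info' ...
 *		ocfs2_trim_fs_unlock(osb, &info);	// publish via LVB
 *	}
 */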
int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{
	int ret;
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	BUG_ON(!dl);

	if (ocfs2_is_hard_readonly(osb)) {
		if (ex)
			return -EROFS;
		return 0;
	}

	if (ocfs2_mount_local(osb))
		return 0;

	ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
	if (ret < 0)
		mlog_errno(ret);

	return ret;
}

void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
/* Reference counting of the dlm debug structure. We want this because
 * open references on the debug inodes can live on after a mount, so
 * we can't rely on the ocfs2_super to always exist. */
static void ocfs2_dlm_debug_free(struct kref *kref)
{
	struct ocfs2_dlm_debug *dlm_debug;

	dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);

	kfree(dlm_debug);
}

void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
{
	if (dlm_debug)
		kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
}

static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
{
	kref_get(&debug->d_refcnt);
}

struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
{
	struct ocfs2_dlm_debug *dlm_debug;

	dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
	if (!dlm_debug) {
		mlog_errno(-ENOMEM);
		goto out;
	}

	kref_init(&dlm_debug->d_refcnt);
	INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
	dlm_debug->d_locking_state = NULL;
out:
	return dlm_debug;
}

/* Access to this is arbitrated for us via seq_file->sem. */
struct ocfs2_dlm_seq_priv {
	struct ocfs2_dlm_debug *p_dlm_debug;
	struct ocfs2_lock_res p_iter_res;
	struct ocfs2_lock_res p_tmp_res;
};

static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
						 struct ocfs2_dlm_seq_priv *priv)
{
	struct ocfs2_lock_res *iter, *ret = NULL;
	struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;

	assert_spin_locked(&ocfs2_dlm_tracking_lock);

	list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
		/* discover the head of the list */
		if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
			mlog(0, "End of list found, %p\n", ret);
			break;
		}

		/* We track our "dummy" iteration lockres' by a NULL
		 * l_ops field. */
		if (iter->l_ops != NULL) {
			ret = iter;
			break;
		}
	}

	return ret;
}

static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
{
	struct ocfs2_dlm_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter;

	spin_lock(&ocfs2_dlm_tracking_lock);
	iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
	if (iter) {
		/* Since lockres' have the lifetime of their container
		 * (which can be inodes, ocfs2_supers, etc) we want to
		 * copy this out to a temporary lockres while still
		 * under the spinlock. Obviously after this we can't
		 * trust any pointers on the copy returned, but that's
		 * ok as the information we want isn't typically held
		 * in them. */
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;
	}
	spin_unlock(&ocfs2_dlm_tracking_lock);

	return iter;
}

static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
{
}

static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ocfs2_dlm_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter = v;
	struct ocfs2_lock_res *dummy = &priv->p_iter_res;

	spin_lock(&ocfs2_dlm_tracking_lock);
	iter = ocfs2_dlm_next_res(iter, priv);
	list_del_init(&dummy->l_debug_list);
	if (iter) {
		list_add(&dummy->l_debug_list, &iter->l_debug_list);
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;
	}
	spin_unlock(&ocfs2_dlm_tracking_lock);

	return iter;
}
/*
 * Version is used by debugfs.ocfs2 to determine the format being used
 *
 * New in version 2
 *	- Lock stats printed
 * New in version 3
 *	- Max time in lock stats is in usecs (instead of nsecs)
 */
#define OCFS2_DLM_DEBUG_STR_VERSION 3
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
	int i;
	char *lvb;
	struct ocfs2_lock_res *lockres = v;

	if (!lockres)
		return -EINVAL;

	seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);

	if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
		seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
			   lockres->l_name,
			   (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
	else
		seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);

	seq_printf(m, "%d\t"
		   "0x%lx\t"
		   "0x%x\t"
		   "0x%x\t"
		   "%u\t"
		   "%u\t"
		   "%d\t"
		   "%d\t",
		   lockres->l_level,
		   lockres->l_flags,
		   lockres->l_action,
		   lockres->l_unlock_action,
		   lockres->l_ro_holders,
		   lockres->l_ex_holders,
		   lockres->l_requested,
		   lockres->l_blocking);

	/* Dump the raw LVB */
	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	for(i = 0; i < DLM_LVB_LEN; i++)
		seq_printf(m, "0x%x\t", lvb[i]);

#ifdef CONFIG_OCFS2_FS_STATS
# define lock_num_prmode(_l)		((_l)->l_lock_prmode.ls_gets)
# define lock_num_exmode(_l)		((_l)->l_lock_exmode.ls_gets)
# define lock_num_prmode_failed(_l)	((_l)->l_lock_prmode.ls_fail)
# define lock_num_exmode_failed(_l)	((_l)->l_lock_exmode.ls_fail)
# define lock_total_prmode(_l)		((_l)->l_lock_prmode.ls_total)
# define lock_total_exmode(_l)		((_l)->l_lock_exmode.ls_total)
# define lock_max_prmode(_l)		((_l)->l_lock_prmode.ls_max)
# define lock_max_exmode(_l)		((_l)->l_lock_exmode.ls_max)
# define lock_refresh(_l)		((_l)->l_lock_refresh)
#else
# define lock_num_prmode(_l)		(0)
# define lock_num_exmode(_l)		(0)
# define lock_num_prmode_failed(_l)	(0)
# define lock_num_exmode_failed(_l)	(0)
# define lock_total_prmode(_l)		(0ULL)
# define lock_total_exmode(_l)		(0ULL)
# define lock_max_prmode(_l)		(0)
# define lock_max_exmode(_l)		(0)
# define lock_refresh(_l)		(0)
#endif
	/* The following seq_print was added in version 2 of this output */
	seq_printf(m, "%u\t"
		   "%u\t"
		   "%u\t"
		   "%u\t"
		   "%llu\t"
		   "%llu\t"
		   "%u\t"
		   "%u\t"
		   "%u\t",
		   lock_num_prmode(lockres),
		   lock_num_exmode(lockres),
		   lock_num_prmode_failed(lockres),
		   lock_num_exmode_failed(lockres),
		   lock_total_prmode(lockres),
		   lock_total_exmode(lockres),
		   lock_max_prmode(lockres),
		   lock_max_exmode(lockres),
		   lock_refresh(lockres));

	/* End the line */
	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations ocfs2_dlm_seq_ops = {
	.start =	ocfs2_dlm_seq_start,
	.stop =		ocfs2_dlm_seq_stop,
	.next =		ocfs2_dlm_seq_next,
	.show =		ocfs2_dlm_seq_show,
};
static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ocfs2_dlm_seq_priv *priv = seq->private;
	struct ocfs2_lock_res *res = &priv->p_iter_res;

	ocfs2_remove_lockres_tracking(res);
	ocfs2_put_dlm_debug(priv->p_dlm_debug);
	return seq_release_private(inode, file);
}

static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
{
	struct ocfs2_dlm_seq_priv *priv;
	struct ocfs2_super *osb;

	priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv));
	if (!priv) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}

	osb = inode->i_private;
	ocfs2_get_dlm_debug(osb->osb_dlm_debug);
	priv->p_dlm_debug = osb->osb_dlm_debug;
	INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);

	ocfs2_add_lockres_tracking(&priv->p_iter_res,
				   priv->p_dlm_debug);

	return 0;
}

static const struct file_operations ocfs2_dlm_debug_fops = {
	.open =		ocfs2_dlm_debug_open,
	.release =	ocfs2_dlm_debug_release,
	.read =		seq_read,
	.llseek =	seq_lseek,
};

static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{
	int ret = 0;
	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;

	dlm_debug->d_locking_state = debugfs_create_file("locking_state",
							 S_IFREG|S_IRUSR,
							 osb->osb_debug_root,
							 osb,
							 &ocfs2_dlm_debug_fops);
	if (!dlm_debug->d_locking_state) {
		ret = -EINVAL;
		mlog(ML_ERROR,
		     "Unable to create locking state debugfs file.\n");
		goto out;
	}

	ocfs2_get_dlm_debug(dlm_debug);
out:
	return ret;
}

static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
{
	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;

	if (dlm_debug) {
		debugfs_remove(dlm_debug->d_locking_state);
		ocfs2_put_dlm_debug(dlm_debug);
	}
}
int ocfs2_dlm_init(struct ocfs2_super *osb)
{
	int status = 0;
	struct ocfs2_cluster_connection *conn = NULL;

	if (ocfs2_mount_local(osb)) {
		osb->node_num = 0;
		goto local;
	}

	status = ocfs2_dlm_init_debug(osb);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* launch downconvert thread */
	osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc-%s",
			osb->uuid_str);
	if (IS_ERR(osb->dc_task)) {
		status = PTR_ERR(osb->dc_task);
		osb->dc_task = NULL;
		mlog_errno(status);
		goto bail;
	}

	/* for now, uuid == domain */
	status = ocfs2_cluster_connect(osb->osb_cluster_stack,
				       osb->osb_cluster_name,
				       strlen(osb->osb_cluster_name),
				       osb->uuid_str,
				       strlen(osb->uuid_str),
				       &lproto, ocfs2_do_node_down, osb,
				       &conn);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_cluster_this_node(conn, &osb->node_num);
	if (status < 0) {
		mlog_errno(status);
		mlog(ML_ERROR,
		     "could not find this host's node number\n");
		ocfs2_cluster_disconnect(conn, 0);
		goto bail;
	}

local:
	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
	ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
	ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);

	osb->cconn = conn;
bail:
	if (status < 0) {
		ocfs2_dlm_shutdown_debug(osb);
		if (osb->dc_task)
			kthread_stop(osb->dc_task);
	}

	return status;
}

void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
			int hangup_pending)
{
	ocfs2_drop_osb_locks(osb);

	/*
	 * Now that we have dropped all locks and ocfs2_dismount_volume()
	 * has disabled recovery, the DLM won't be talking to us. It's
	 * safe to tear things down before disconnecting the cluster.
	 */

	if (osb->dc_task) {
		kthread_stop(osb->dc_task);
		osb->dc_task = NULL;
	}

	ocfs2_lock_res_free(&osb->osb_super_lockres);
	ocfs2_lock_res_free(&osb->osb_rename_lockres);
	ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
	ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);

	ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
	osb->cconn = NULL;

	ocfs2_dlm_shutdown_debug(osb);
}
static int ocfs2_drop_lock(struct ocfs2_super *osb,
			   struct ocfs2_lock_res *lockres)
{
	int ret;
	unsigned long flags;
	u32 lkm_flags = 0;

	/* We didn't get anywhere near actually using this lockres. */
	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
		goto out;

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= DLM_LKF_VALBLK;

	spin_lock_irqsave(&lockres->l_lock, flags);

	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
			"lockres %s, flags 0x%lx\n",
			lockres->l_name, lockres->l_flags);

	while (lockres->l_flags & OCFS2_LOCK_BUSY) {
		mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
		     "%u, unlock_action = %u\n",
		     lockres->l_name, lockres->l_flags, lockres->l_action,
		     lockres->l_unlock_action);

		spin_unlock_irqrestore(&lockres->l_lock, flags);

		/* XXX: Today we just wait on any busy
		 * locks... Perhaps we need to cancel converts in the
		 * future? */
		ocfs2_wait_on_busy_lock(lockres);

		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
		if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
		    lockres->l_level == DLM_LOCK_EX &&
		    !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
			lockres->l_ops->set_lvb(lockres);
	}

	if (lockres->l_flags & OCFS2_LOCK_BUSY)
		mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
		     lockres->l_name);
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
		mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);

	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto out;
	}

	lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);

	/* make sure we never get here while waiting for an ast to
	 * fire. */
	BUG_ON(lockres->l_action != OCFS2_AST_INVALID);

	/* is this necessary? */
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	mlog(0, "lock %s\n", lockres->l_name);

	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
		mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
		ocfs2_dlm_dump_lksb(&lockres->l_lksb);
		BUG();
	}
	mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
	     lockres->l_name);

	ocfs2_wait_on_busy_lock(lockres);
out:
	return 0;
}
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *lockres);

/* Mark the lockres as being dropped. It will no longer be
 * queued if blocking, but we still may have to wait on it
 * being dequeued from the downconvert thread before we can consider
 * it safe to drop.
 *
 * You can *not* attempt to call cluster_lock on this lockres anymore. */
void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres)
{
	int status;
	struct ocfs2_mask_waiter mw;
	unsigned long flags, flags2;

	ocfs2_init_mask_waiter(&mw);

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres->l_flags |= OCFS2_LOCK_FREEING;
	if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
		/*
		 * We know the downconvert is queued but not in progress
		 * because we are the downconvert thread and processing a
		 * different lock. So we can just remove the lock from the
		 * queue. This is not only an optimization but also a way
		 * to avoid the following deadlock:
		 *   ocfs2_dentry_post_unlock()
		 *     ocfs2_dentry_lock_put()
		 *       ocfs2_drop_dentry_lock()
		 *         iput()
		 *           ocfs2_evict_inode()
		 *             ocfs2_clear_inode()
		 *               ocfs2_mark_lockres_freeing()
		 *                 ... blocks waiting for OCFS2_LOCK_QUEUED
		 *                 since we are the downconvert thread which
		 *                 should clear the flag.
		 */
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		spin_lock_irqsave(&osb->dc_task_lock, flags2);
		list_del_init(&lockres->l_blocked_list);
		osb->blocked_lock_count--;
		spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
		/*
		 * Warn if we recurse into another post_unlock call. Strictly
		 * speaking it isn't a problem but we need to be careful if
		 * that happens (stack overflow, deadlocks, ...) so warn if
		 * ocfs2 grows a path for which this can happen.
		 */
		WARN_ON_ONCE(lockres->l_ops->post_unlock);
		/* Since the lock is freeing we don't do much in the fn below */
		ocfs2_process_blocked_lock(osb, lockres);
		return;
	}
	while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		mlog(0, "Waiting on lockres %s\n", lockres->l_name);

		status = ocfs2_wait_for_mask(&mw);
		if (status)
			mlog_errno(status);

		spin_lock_irqsave(&lockres->l_lock, flags);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}
void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
			       struct ocfs2_lock_res *lockres)
{
	int ret;

	ocfs2_mark_lockres_freeing(osb, lockres);
	ret = ocfs2_drop_lock(osb, lockres);
	if (ret)
		mlog_errno(ret);
}

static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
{
	ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
}

int ocfs2_drop_inode_locks(struct inode *inode)
{
	int status, err;

	/* No need to call ocfs2_mark_lockres_freeing here -
	 * ocfs2_clear_inode has done it for us. */

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_open_lockres);
	if (err < 0)
		mlog_errno(err);

	status = err;

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_inode_lockres);
	if (err < 0)
		mlog_errno(err);
	if (err < 0 && !status)
		status = err;

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_rw_lockres);
	if (err < 0)
		mlog_errno(err);
	if (err < 0 && !status)
		status = err;

	return status;
}
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level)
{
	assert_spin_locked(&lockres->l_lock);

	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);

	if (lockres->l_level <= new_level) {
		mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
		     "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
		     "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
		     new_level, list_empty(&lockres->l_blocked_list),
		     list_empty(&lockres->l_mask_waiters), lockres->l_type,
		     lockres->l_flags, lockres->l_ro_holders,
		     lockres->l_ex_holders, lockres->l_action,
		     lockres->l_unlock_action, lockres->l_requested,
		     lockres->l_blocking, lockres->l_pending_gen);
		BUG();
	}

	mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
	     lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);

	lockres->l_action = OCFS2_AST_DOWNCONVERT;
	lockres->l_requested = new_level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	return lockres_set_pending(lockres);
}

static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation)
{
	int ret;
	u32 dlm_flags = DLM_LKF_CONVERT;

	mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
	     lockres->l_level, new_level);

	/*
	 * On DLM_LKF_VALBLK, fsdlm behaves differently from o2cb. It always
	 * expects DLM_LKF_VALBLK being set if the LKB has an LVB, so that
	 * we can recover correctly from node failure. Otherwise, we may get
	 * an invalid LVB in the LKB, but without DLM_SBF_VALNOTVALID being
	 * set.
	 */
	if (!ocfs2_is_o2cb_active() &&
	    lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lvb = 1;

	if (lvb)
		dlm_flags |= DLM_LKF_VALBLK;

	ret = ocfs2_dlm_lock(osb->cconn,
			     new_level,
			     &lockres->l_lksb,
			     dlm_flags,
			     lockres->l_name,
			     OCFS2_LOCK_ID_MAX_LEN - 1);
	lockres_clear_pending(lockres, generation, osb);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 1);
		goto bail;
	}

	ret = 0;
bail:
	return ret;
}
/* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{
	assert_spin_locked(&lockres->l_lock);

	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
		/* If we're already trying to cancel a lock conversion
		 * then just drop the spinlock and allow the caller to
		 * requeue this lock. */
		mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
		return 0;
	}

	/* were we in a convert when we got the bast fire? */
	BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
	       lockres->l_action != OCFS2_AST_DOWNCONVERT);
	/* set things up for the unlockast to know to just
	 * clear out the ast_action and unset busy, etc. */
	lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;

	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
			"lock %s, invalid flags: 0x%lx\n",
			lockres->l_name, lockres->l_flags);

	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);

	return 1;
}

static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres)
{
	int ret;

	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
			       DLM_LKF_CANCEL);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 0);
	}

	mlog(ML_BASTS, "lockres %s\n", lockres->l_name);

	return ret;
}
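/*
 * Illustrative note (not part of the original source): the cancel
 * protocol is always two steps - decide under l_lock, then act with
 * the spinlock dropped, since ocfs2_cancel_convert() may block:
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	ret = ocfs2_prepare_cancel_convert(osb, lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *	if (ret)
 *		ret = ocfs2_cancel_convert(osb, lockres);
 */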
static int ocfs2_unblock_lock(struct ocfs2_super *osb,
			      struct ocfs2_lock_res *lockres,
			      struct ocfs2_unblock_ctl *ctl)
{
	int blocking;
	int new_level;
	int level;
	int ret = 0;
	int set_lvb = 0;
	unsigned int gen;
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);

recheck:
	/*
	 * Is it still blocking? If not, we have no more work to do.
	 */
	if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
		BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		ret = 0;
		goto leave;
	}

	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
		/*
		 * This is a *big* race. The OCFS2_LOCK_PENDING flag
		 * exists entirely for one reason - another thread has set
		 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
		 *
		 * If we do ocfs2_cancel_convert() before the other thread
		 * calls dlm_lock(), our cancel will do nothing. We will
		 * get no ast, and we will have no way of knowing the
		 * cancel failed. Meanwhile, the other thread will call
		 * into dlm_lock() and wait...forever.
		 *
		 * Why forever? Because another node has asked for the
		 * lock first; that's why we're here in unblock_lock().
		 *
		 * The solution is OCFS2_LOCK_PENDING. When PENDING is
		 * set, we just requeue the unblock. Only when the other
		 * thread has called dlm_lock() and cleared PENDING will
		 * we then cancel their request.
		 *
		 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
		 * at the same time they set OCFS2_LOCK_BUSY. They must
		 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
		 */
		if (lockres->l_flags & OCFS2_LOCK_PENDING) {
			mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
			     lockres->l_name);
			goto leave_requeue;
		}

		ctl->requeue = 1;
		ret = ocfs2_prepare_cancel_convert(osb, lockres);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		if (ret) {
			ret = ocfs2_cancel_convert(osb, lockres);
			if (ret < 0)
				mlog_errno(ret);
		}
		goto leave;
	}

	/*
	 * This prevents livelocks. The OCFS2_LOCK_UPCONVERT_FINISHING flag
	 * is set when the ast is received for an upconvert just before the
	 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
	 * on the heels of the ast, we want to delay the downconvert just
	 * enough to allow the up requestor to do its task. Because this
	 * lock is in the blocked queue, the lock will be downconverted
	 * as soon as the requestor is done with the lock.
	 */
	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
		goto leave_requeue;

	/*
	 * How can we block and yet be at NL? We were trying to upconvert
	 * from NL and got canceled. The code comes back here, and now
	 * we notice and clear BLOCKING.
	 */
	if (lockres->l_level == DLM_LOCK_NL) {
		BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
		mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
		lockres->l_blocking = DLM_LOCK_NL;
		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto leave;
	}

	/* if we're blocking an exclusive and we have *any* holders,
	 * then requeue. */
	if ((lockres->l_blocking == DLM_LOCK_EX)
	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
		mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
		     lockres->l_name, lockres->l_ex_holders,
		     lockres->l_ro_holders);
		goto leave_requeue;
	}

	/* If it's a PR we're blocking, then only
	 * requeue if we've got any EX holders */
	if (lockres->l_blocking == DLM_LOCK_PR &&
	    lockres->l_ex_holders) {
		mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
		     lockres->l_name, lockres->l_ex_holders);
		goto leave_requeue;
	}

	/*
	 * Can we get a lock in this state if the holder counts are
	 * zero? The meta data unblock code used to check this.
	 */
	if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
	    && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
		mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
		     lockres->l_name);
		goto leave_requeue;
	}

	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);

	if (lockres->l_ops->check_downconvert
	    && !lockres->l_ops->check_downconvert(lockres, new_level)) {
		mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
		     lockres->l_name);
		goto leave_requeue;
	}

	/* If we get here, then we know that there are no more
	 * incompatible holders (and anyone asking for an incompatible
	 * lock is blocked). We can now downconvert the lock */
	if (!lockres->l_ops->downconvert_worker)
		goto downconvert;

	/* Some lockres types want to do a bit of work before
	 * downconverting a lock. Allow that here. The worker function
	 * may sleep, so we save off a copy of what we're blocking as
	 * it may change while we're not holding the spin lock. */
	blocking = lockres->l_blocking;
	level = lockres->l_level;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);

	if (ctl->unblock_action == UNBLOCK_STOP_POST) {
		mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
		     lockres->l_name);
		goto leave;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);
	if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
		/* If this changed underneath us, then we can't drop
		 * it just yet. */
		mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
		     "Recheck\n", lockres->l_name, blocking,
		     lockres->l_blocking, level, lockres->l_level);
		goto recheck;
	}

downconvert:
	ctl->requeue = 0;

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
		if (lockres->l_level == DLM_LOCK_EX)
			set_lvb = 1;

		/*
		 * We only set the lvb if the lock has been fully
		 * refreshed - otherwise we risk setting stale
		 * data. If it hasn't, there's no need to actually clear
		 * out the lvb here as its value is still valid.
		 */
		if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
			lockres->l_ops->set_lvb(lockres);
	}

	gen = ocfs2_prepare_downconvert(lockres, new_level);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
				     gen);

leave:
	if (ret)
		mlog_errno(ret);
	return ret;

leave_requeue:
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	ctl->requeue = 1;

	return 0;
}
static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking)
{
	struct inode *inode;
	struct address_space *mapping;
	struct ocfs2_inode_info *oi;

	inode = ocfs2_lock_res_inode(lockres);
	mapping = inode->i_mapping;

	if (S_ISDIR(inode->i_mode)) {
		oi = OCFS2_I(inode);
		oi->ip_dir_lock_gen++;
		mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
		goto out;
	}

	if (!S_ISREG(inode->i_mode))
		goto out;

	/*
	 * We need this before the filemap_fdatawrite() so that it can
	 * transfer the dirty bit from the PTE to the
	 * page. Unfortunately this means that even for EX->PR
	 * downconverts, we'll lose our mappings and have to build
	 * them up again.
	 */
	unmap_mapping_range(mapping, 0, 0, 0);

	if (filemap_fdatawrite(mapping)) {
		mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
	}
	sync_mapping_buffers(mapping);
	if (blocking == DLM_LOCK_EX) {
		truncate_inode_pages(mapping, 0);
	} else {
		/* We only need to wait on the I/O if we're not also
		 * truncating pages because truncate_inode_pages waits
		 * for us above. We don't truncate pages if we're
		 * blocking anything < EXMODE because we want to keep
		 * them around in that case. */
		filemap_fdatawait(mapping);
	}

	forget_all_cached_acls(inode);

out:
	return UNBLOCK_CONTINUE;
}

static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
				 struct ocfs2_lock_res *lockres,
				 int new_level)
{
	int checkpointed = ocfs2_ci_fully_checkpointed(ci);

	BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
	BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);

	if (checkpointed)
		return 1;

	ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
	return 0;
}

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
}

static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	__ocfs2_stuff_meta_lvb(inode);
}
/*
 * Does the final reference drop on our dentry lock. Right now this
 * happens in the downconvert thread, but we could choose to simplify the
 * dlmglue API and push these off to the ocfs2_wq in the future.
 */
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
	ocfs2_dentry_lock_put(osb, dl);
}

/*
 * d_delete() matching dentries before the lock downconvert.
 *
 * At this point, any process waiting to destroy the
 * dentry_lock due to last ref count is stopped by the
 * OCFS2_LOCK_QUEUED flag.
 *
 * We have two potential problems:
 *
 * 1) If we do the last reference drop on our dentry_lock (via dput)
 *    we'll wind up in ocfs2_release_dentry_lock(), waiting on
 *    the downconvert to finish. Instead we take an elevated
 *    reference and push the drop until after we've completed our
 *    unblock processing.
 *
 * 2) There might be another process with a final reference,
 *    waiting on us to finish processing. If this is the case, we
 *    detect it and exit out - there are no more dentries anyway.
 */
static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking)
{
	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
	struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
	struct dentry *dentry;
	unsigned long flags;
	int extra_ref = 0;

	/*
	 * This node is blocking another node from getting a read
	 * lock. This happens when we've renamed within a
	 * directory. We've forced the other nodes to d_delete(), but
	 * we never actually dropped our lock because it's still
	 * valid. The downconvert code will retain a PR for this node,
	 * so there's no further work to do.
	 */
	if (blocking == DLM_LOCK_PR)
		return UNBLOCK_CONTINUE;

	/*
	 * Mark this inode as potentially orphaned. The code in
	 * ocfs2_delete_inode() will figure out whether it actually
	 * needs to be freed or not.
	 */
	spin_lock(&oi->ip_lock);
	oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
	spin_unlock(&oi->ip_lock);

	/*
	 * Yuck. We need to make sure however that the check of
	 * OCFS2_LOCK_FREEING and the extra reference are atomic with
	 * respect to a reference decrement or the setting of that
	 * flag.
	 */
	spin_lock_irqsave(&lockres->l_lock, flags);
	spin_lock(&dentry_attach_lock);
	if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
	    && dl->dl_count) {
		dl->dl_count++;
		extra_ref = 1;
	}
	spin_unlock(&dentry_attach_lock);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	mlog(0, "extra_ref = %d\n", extra_ref);

	/*
	 * We have a process waiting on us in ocfs2_dentry_iput(),
	 * which means we can't have any more outstanding
	 * aliases. There's no need to do any more work.
	 */
	if (!extra_ref)
		return UNBLOCK_CONTINUE;

	spin_lock(&dentry_attach_lock);
	while (1) {
		dentry = ocfs2_find_local_alias(dl->dl_inode,
						dl->dl_parent_blkno, 1);
		if (!dentry)
			break;
		spin_unlock(&dentry_attach_lock);

		if (S_ISDIR(dl->dl_inode->i_mode))
			shrink_dcache_parent(dentry);

		mlog(0, "d_delete(%pd);\n", dentry);

		/*
		 * The following dcache calls may do an
		 * iput(). Normally we don't want that from the
		 * downconverting thread, but in this case it's ok
		 * because the requesting node already has an
		 * exclusive lock on the inode, so it can't be queued
		 * for a downconvert.
		 */
		d_delete(dentry);
		dput(dentry);

		spin_lock(&dentry_attach_lock);
	}
	spin_unlock(&dentry_attach_lock);

	/*
	 * If we are the last holder of this dentry lock, there is no
	 * reason to downconvert so skip straight to the unlock.
	 */
	if (dl->dl_count == 1)
		return UNBLOCK_STOP_POST;

	return UNBLOCK_CONTINUE_POST;
}
static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
					    int new_level)
{
	struct ocfs2_refcount_tree *tree =
				ocfs2_lock_res_refcount_tree(lockres);

	return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
}

static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
					 int blocking)
{
	struct ocfs2_refcount_tree *tree =
				ocfs2_lock_res_refcount_tree(lockres);

	ocfs2_metadata_cache_purge(&tree->rf_ci);

	return UNBLOCK_CONTINUE;
}

static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_qinfo_lvb *lvb;
	struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
					    oinfo->dqi_gi.dqi_type);

	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
	lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
	lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
	lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
	lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
	lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
	lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
}

void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;

	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}
static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
{
	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
					    oinfo->dqi_gi.dqi_type);
	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
	struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	struct buffer_head *bh = NULL;
	struct ocfs2_global_disk_dqinfo *gdinfo;
	int status = 0;

	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
	    lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
		info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
		info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
		oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
		oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
		oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
		oinfo->dqi_gi.dqi_free_entry =
					be32_to_cpu(lvb->lvb_free_entry);
	} else {
		status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
						     oinfo->dqi_giblk, &bh);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
		gdinfo = (struct ocfs2_global_disk_dqinfo *)
					(bh->b_data + OCFS2_GLOBAL_INFO_OFF);
		info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
		info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
		oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
		oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
		oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
		oinfo->dqi_gi.dqi_free_entry =
					le32_to_cpu(gdinfo->dqi_free_entry);
		brelse(bh);
	}
	ocfs2_track_lock_refresh(lockres);

bail:
	return status;
}
/* Lock the global quota info. This function expects at least a shared
 * lock on the quota file so that we can safely refresh the quota info
 * from disk. */
int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	int status = 0;

	/* On RO devices, locking really isn't needed... */
	if (ocfs2_is_hard_readonly(osb)) {
		if (ex)
			status = -EROFS;
		goto bail;
	}
	if (ocfs2_mount_local(osb))
		goto bail;

	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	if (!ocfs2_should_refresh_lock_res(lockres))
		goto bail;
	/* OK, we have the lock but we need to refresh the quota info */
	status = ocfs2_refresh_qinfo(oinfo);
	if (status)
		ocfs2_qinfo_unlock(oinfo, ex);
	ocfs2_complete_lock_res_refresh(lockres, status);
bail:
	return status;
}
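/*
 * Illustrative sketch only (not part of the original file): how a caller
 * would pair ocfs2_qinfo_lock()/ocfs2_qinfo_unlock() around a read of the
 * global quota information.  The helper name and body are hypothetical;
 * the real callers live in the quota code.
 */
static int __maybe_unused example_peek_qinfo(struct ocfs2_mem_dqinfo *oinfo)
{
	int status;

	/* A shared (PR) lock is enough for a read-only look. */
	status = ocfs2_qinfo_lock(oinfo, 0);
	if (status < 0)
		return status;

	/* ... oinfo->dqi_gi and friends are now refreshed and stable ... */

	ocfs2_qinfo_unlock(oinfo, 0);
	return 0;
}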
int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
	int status;
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
	struct ocfs2_super *osb = lockres->l_priv;

	if (ocfs2_is_hard_readonly(osb))
		return -EROFS;

	if (ocfs2_mount_local(osb))
		return 0;

	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
	if (status < 0)
		mlog_errno(status);

	return status;
}
void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
	struct ocfs2_super *osb = lockres->l_priv;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}
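/*
 * Illustrative sketch only (not part of the original file): the refcount
 * tree lock follows the same take/drop pattern as the quota info lock.
 * The helper name and body are hypothetical.
 */
static int __maybe_unused example_with_refcount_lock(struct ocfs2_refcount_tree *ref_tree)
{
	int status;

	status = ocfs2_refcount_lock(ref_tree, 1);	/* exclusive */
	if (status < 0)
		return status;

	/* ... modify the refcount tree under the EX lock ... */

	ocfs2_refcount_unlock(ref_tree, 1);
	return 0;
}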
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *lockres)
{
	int status;
	struct ocfs2_unblock_ctl ctl = {0, 0,};
	unsigned long flags;

	/* Our reference to the lockres in this function can be
	 * considered valid until we remove the OCFS2_LOCK_QUEUED
	 * flag. */

	BUG_ON(!lockres);
	BUG_ON(!lockres->l_ops);

	mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);

	/* Detect whether a lock has been marked as going away while
	 * the downconvert thread was processing other things. A lock can
	 * still be marked with OCFS2_LOCK_FREEING after this check,
	 * but short circuiting here will still save us some
	 * performance. */
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (lockres->l_flags & OCFS2_LOCK_FREEING)
		goto unqueue;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = ocfs2_unblock_lock(osb, lockres, &ctl);
	if (status < 0)
		mlog_errno(status);

	spin_lock_irqsave(&lockres->l_lock, flags);
unqueue:
	if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
		lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
	} else
		ocfs2_schedule_blocked_lock(osb, lockres);

	mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
	     ctl.requeue ? "yes" : "no");
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	if (ctl.unblock_action != UNBLOCK_CONTINUE
	    && lockres->l_ops->post_unlock)
		lockres->l_ops->post_unlock(osb, lockres);
}
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{
	unsigned long flags;

	assert_spin_locked(&lockres->l_lock);

	if (lockres->l_flags & OCFS2_LOCK_FREEING) {
		/* Do not schedule a lock for downconvert when it's on
		 * the way to destruction - any nodes wanting access
		 * to the resource will get it soon. */
		mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
		     lockres->l_name, lockres->l_flags);
		return;
	}

	lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	if (list_empty(&lockres->l_blocked_list)) {
		list_add_tail(&lockres->l_blocked_list,
			      &osb->blocked_lock_list);
		osb->blocked_lock_count++;
	}
	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
}
static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
{
	unsigned long processed;
	unsigned long flags;
	struct ocfs2_lock_res *lockres;

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	/* grab this early so we know to try again if a state change and
	 * wake happens part-way through our work */
	osb->dc_work_sequence = osb->dc_wake_sequence;

	processed = osb->blocked_lock_count;
	/*
	 * blocked lock processing in this loop might call iput which can
	 * remove items off osb->blocked_lock_list. Downconvert up to
	 * 'processed' number of locks, but stop short if we had some
	 * removed in ocfs2_mark_lockres_freeing when downconverting.
	 */
	while (processed && !list_empty(&osb->blocked_lock_list)) {
		lockres = list_entry(osb->blocked_lock_list.next,
				     struct ocfs2_lock_res, l_blocked_list);
		list_del_init(&lockres->l_blocked_list);
		osb->blocked_lock_count--;
		spin_unlock_irqrestore(&osb->dc_task_lock, flags);

		BUG_ON(!processed);
		processed--;

		ocfs2_process_blocked_lock(osb, lockres);

		spin_lock_irqsave(&osb->dc_task_lock, flags);
	}
	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
}
static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
{
	int empty = 0;
	unsigned long flags;

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	if (list_empty(&osb->blocked_lock_list))
		empty = 1;

	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
	return empty;
}
static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
{
	int should_wake = 0;
	unsigned long flags;

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	if (osb->dc_work_sequence != osb->dc_wake_sequence)
		should_wake = 1;
	spin_unlock_irqrestore(&osb->dc_task_lock, flags);

	return should_wake;
}
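/*
 * Note (editorial, not in the original file) on the sequence counters:
 * ocfs2_wake_downconvert_thread() (below) bumps dc_wake_sequence under
 * dc_task_lock, while ocfs2_downconvert_thread_do_work() snapshots it
 * into dc_work_sequence before processing.  A mismatch therefore means
 * "a wakeup arrived after the current work pass began", so the thread
 * makes another pass instead of sleeping through pending work.
 */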
static int ocfs2_downconvert_thread(void *arg)
{
	struct ocfs2_super *osb = arg;

	/* only quit once we've been asked to stop and there is no more
	 * work available */
	while (!(kthread_should_stop() &&
		 ocfs2_downconvert_thread_lists_empty(osb))) {

		wait_event_interruptible(osb->dc_event,
					 ocfs2_downconvert_thread_should_wake(osb) ||
					 kthread_should_stop());

		mlog(0, "downconvert_thread: awoken\n");

		ocfs2_downconvert_thread_do_work(osb);
	}

	osb->dc_task = NULL;
	return 0;
}
void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
{
	unsigned long flags;

	spin_lock_irqsave(&osb->dc_task_lock, flags);
	/* make sure the downconvert thread gets a swipe at whatever
	 * changes the caller may have made to the blocked lock state */
	osb->dc_wake_sequence++;
	spin_unlock_irqrestore(&osb->dc_task_lock, flags);
	wake_up(&osb->dc_event);
}
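/*
 * Illustrative sketch only (not part of the original file): the lifecycle
 * the loop above is written against.  In the real driver the thread is
 * created and torn down elsewhere during mount/unmount; these hypothetical
 * helpers just show the kthread API contract.
 */
static int __maybe_unused example_start_dc_thread(struct ocfs2_super *osb)
{
	struct task_struct *task;

	task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
	if (IS_ERR(task))
		return PTR_ERR(task);
	osb->dc_task = task;
	return 0;
}

static void __maybe_unused example_stop_dc_thread(struct ocfs2_super *osb)
{
	if (osb->dc_task) {
		/* kthread_stop() makes kthread_should_stop() return true
		 * and wakes the thread, so the loop drains and exits. */
		kthread_stop(osb->dc_task);
		osb->dc_task = NULL;
	}
}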