// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

#define dlm_lock_is_remote(dlm, lock)     ((lock)->ml.node != (dlm)->node_num)
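
/*
 * Note: a lock is "remote" when it is held on behalf of another node,
 * i.e. its ml.node differs from this node's number. dlm_flush_asts()
 * below makes the same comparison to choose between network delivery
 * and a direct local callback.
 */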

/* will exit holding res->spinlock, but may drop it inside the function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}
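
/*
 * The loop above is the classic open-coded wait: the task state is set
 * to TASK_UNINTERRUPTIBLE *before* the flags are tested, so a waker that
 * clears a flag and calls wake_up(&res->wq) in the window cannot be
 * missed -- schedule() simply returns without sleeping. Callers always
 * get res->spinlock back before the condition is rechecked.
 */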

int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}

/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits in its refmap.
 * It is then truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	int bit;

	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_has_locks(res))
		return 0;

	/* Locks are in the process of being created */
	if (res->inflight_locks)
		return 0;

	if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
		return 0;

	if (res->state & (DLM_LOCK_RES_RECOVERING|
			  DLM_LOCK_RES_RECOVERY_WAITING))
		return 0;

	/* Another node has this resource with this node as the master */
	bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
	if (bit < O2NM_MAX_NODES)
		return 0;

	return 1;
}
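
/*
 * The refmap test above is what keeps a master from freeing a resource
 * that other nodes still reference: a remote node's bit stays set in
 * res->refmap until that node drops its reference with a deref message
 * (dlm_drop_lockres_ref() is the sending side, used in
 * dlm_purge_lockres() below).
 */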

/* Call whenever you may have added or deleted something from one of
 * the lockres queues. This will figure out whether it belongs on the
 * unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)) {
		if (list_empty(&res->purge)) {
			mlog(0, "%s: Adding res %.*s to purge list\n",
			     dlm->name, res->lockname.len, res->lockname.name);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purge list\n",
		     dlm->name, res->lockname.len, res->lockname.name);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}
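
/*
 * Note the reference pairing: membership on dlm->purge_list holds one
 * reference (dlm_lockres_get() on add, dlm_lockres_put() on removal),
 * so a resource cannot be freed while the purge scan can still reach it.
 */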

void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}

/*
 * Do the real purge work:
 *	unhash the lockres, and
 *	clear flag DLM_LOCK_RES_DROPPING_REF.
 * It requires dlm and lockres spinlock to be taken.
 */
void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/*
	 * lockres is not in the hash now. drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource.
	 */
	res->state &= ~DLM_LOCK_RES_DROPPING_REF;
}

static void dlm_purge_lockres(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	master = (res->owner == dlm->node_num);

	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, master);

	if (!master) {
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
			     dlm->name, res->lockname.len, res->lockname.name);
			spin_unlock(&res->spinlock);
			return;
		}

		res->state |= DLM_LOCK_RES_DROPPING_REF;
		/* drop spinlock...  retake below */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			if (!dlm_is_host_down(ret))
				BUG();
		}
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!master && ret == DLM_DEREF_RESPONSE_INPROG) {
		mlog(0, "%s: deref %.*s in progress\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		spin_unlock(&res->spinlock);
		return;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	} else
		spin_unlock(&res->spinlock);
}
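
/*
 * For a non-master purge, both spinlocks are dropped around the network
 * deref above; DLM_LOCK_RES_DROPPING_REF is set first, so anyone who
 * finds the resource during that window can tell a purge is in flight.
 * That is also why clearing the flag is followed by wake_up(&res->wq):
 * waiters in dlm_get_lock_resource() are parked on exactly this state.
 */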

static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while (run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		spin_lock(&lockres->spinlock);

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we want to be processing this guy at
		 * this time. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anyone added after
			 * him will have a greater last_used value */
			spin_unlock(&lockres->spinlock);
			break;
		}

		/* Status of the lockres *might* change so double
		 * check. If the lockres is unused, holding the dlm
		 * spinlock will prevent people from getting any more
		 * refs on it. */
		unused = __dlm_lockres_unused(lockres);
		if (!unused ||
		    (lockres->state & DLM_LOCK_RES_MIGRATING) ||
		    (lockres->inflight_assert_workers != 0)) {
			mlog(0, "%s: res %.*s is in use or being remastered, "
			     "used %d, state %d, assert master workers %u\n",
			     dlm->name, lockres->lockname.len,
			     lockres->lockname.name,
			     !unused, lockres->state,
			     lockres->inflight_assert_workers);
			list_move_tail(&lockres->purge, &dlm->purge_list);
			spin_unlock(&lockres->spinlock);
			continue;
		}

		dlm_lockres_get(lockres);

		dlm_purge_lockres(dlm, lockres);

		dlm_lockres_put(lockres);

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}
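
/*
 * Because resources are appended to the purge list with last_used set
 * at append time, the list is ordered oldest-first; stopping at the
 * first unexpired entry is therefore safe, and run_max bounds the work
 * per pass even when busy entries keep getting moved back to the tail.
 */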

static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	int can_grant = 1;

	/*
	 * Because this function is called with the lockres
	 * spinlock, and because we know that it is not migrating/
	 * recovering/in-progress, it is fine to reserve asts and
	 * basts right before queueing them all throughout
	 */
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
			      DLM_LOCK_RES_RECOVERING|
			      DLM_LOCK_RES_IN_PROGRESS)));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
	     res->lockname.len, res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		BUG();
	}
	list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type, target->ml.node);

		/* target->ml.type is already correct */
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}
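
/*
 * In short: conversions are serviced ahead of blocked requests, and the
 * head of a queue is granted only when every granted/converting holder
 * is mode-compatible with it. Each incompatible holder gets a BAST,
 * queued at most once and tracked through ml.highest_blocked; the
 * "goto converting" retry matters because one grant may unblock the
 * next head of either queue.
 */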

/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}

void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}

	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
	     res->lockname.name);
}
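
/*
 * Only the master shuffles a resource's queues, so only the master ever
 * places it on dirty_list. The list_empty(&res->dirty) check prevents
 * double insertion, and the reference taken here is dropped by
 * dlm_thread() when it peels the entry off the list.
 */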

/* Launch the NM thread for the mounted volume */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "Starting dlm_thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm-%s",
					   dlm->name);
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}

static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}

static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.type, lock->ml.node);

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "%s: res %.*s, AST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
		     "blocked %d, node %u\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     hi, lock->ml.node);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "%s: res %.*s, BAST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}
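
/*
 * Delivery above happens with dlm->ast_lock dropped, since remote
 * AST/BAST delivery is a network call that may block. The extra
 * dlm_lock_get() taken before dropping the lock keeps the dlm_lock
 * alive across that window, and the !list_empty() recheck afterwards
 * catches callbacks re-queued during delivery, so ast_pending and
 * bast_pending stay accurate.
 */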

#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100
#define DLM_THREAD_MAX_ASTS   10
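
/*
 * DLM_THREAD_TIMEOUT_MS is how long dlm_thread() sleeps when idle, and
 * DLM_THREAD_MAX_DIRTY caps how many dirty lockreses are shuffled per
 * wakeup before the thread yields; DLM_THREAD_MAX_ASTS does not appear
 * to be consulted anywhere in this file.
 */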

static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.  */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&dlm->ast_lock);
			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
				     " dirty %d\n", dlm->name,
				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
				     !!(res->state & DLM_LOCK_RES_MIGRATING),
				     !!(res->state & DLM_LOCK_RES_RECOVERING),
				     !!(res->state & DLM_LOCK_RES_DIRTY));
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING |
					  DLM_LOCK_RES_RECOVERY_WAITING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->ast_lock);
				mlog(0, "%s: res %.*s, inprogress, delay list "
				     "shuffle, state %d\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->ast_lock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "%s: Throttling dlm thread\n",
				     dlm->name);
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		cond_resched();

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}