// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>
28 #include "../cluster/heartbeat.h"
29 #include "../cluster/nodemanager.h"
30 #include "../cluster/tcp.h"
33 #include "dlmcommon.h"
34 #include "dlmdomain.h"
36 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
37 #include "../cluster/masklog.h"
static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);
/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		/* drop the lock while sleeping; retake before rechecking */
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}
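/*
 * Usage sketch (illustrative only; this helper and its flag choice are
 * hypothetical, not part of the original file): callers must already
 * hold res->spinlock.  On return the watched flags have been observed
 * clear and the spinlock is held again, though it may have been
 * dropped and retaken while sleeping.
 */
static inline void dlm_example_wait_recovery_clear(struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
	/* DLM_LOCK_RES_RECOVERING now clear; res->spinlock still held */
	spin_unlock(&res->spinlock);
}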
int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}
72 /* "unused": the lockres has no locks, is not on the dirty list,
73 * has no inflight locks (in the gap between mastery and acquiring
74 * the first lock), and has no bits in its refmap.
75 * truly ready to be freed. */
76 int __dlm_lockres_unused(struct dlm_lock_resource
*res
)
80 assert_spin_locked(&res
->spinlock
);
82 if (__dlm_lockres_has_locks(res
))
85 /* Locks are in the process of being created */
86 if (res
->inflight_locks
)
89 if (!list_empty(&res
->dirty
) || res
->state
& DLM_LOCK_RES_DIRTY
)
92 if (res
->state
& (DLM_LOCK_RES_RECOVERING
|
93 DLM_LOCK_RES_RECOVERY_WAITING
))
96 /* Another node has this resource with this node as the master */
97 bit
= find_next_bit(res
->refmap
, O2NM_MAX_NODES
, 0);
98 if (bit
< O2NM_MAX_NODES
)
/* Call whenever you may have added or deleted something from one of
 * the lockres queues. This will figure out whether it belongs on the
 * unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)) {
		if (list_empty(&res->purge)) {
			mlog(0, "%s: Adding res %.*s to purge list\n",
			     dlm->name, res->lockname.len, res->lockname.name);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purge list\n",
		     dlm->name, res->lockname.len, res->lockname.name);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}
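/*
 * Usage sketch (hypothetical caller, not in the original file): after
 * unlinking the last lock from one of the lockres queues, recompute
 * usage so the lockres moves onto or off of the purge list.
 */
static inline void dlm_example_detach_lock(struct dlm_ctxt *dlm,
					   struct dlm_lock_resource *res,
					   struct dlm_lock *lock)
{
	spin_lock(&res->spinlock);
	list_del_init(&lock->list);	/* off granted/converting/blocked */
	spin_unlock(&res->spinlock);

	/* takes dlm->spinlock then res->spinlock internally */
	dlm_lockres_calc_usage(dlm, res);
}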
/*
 * Do the real purge work:
 *     unhash the lockres, and
 *     clear flag DLM_LOCK_RES_DROPPING_REF.
 * It requires dlm and lockres spinlock to be taken.
 */
void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
		struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/*
	 * lockres is not in the hash now. drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource.
	 */
	res->state &= ~DLM_LOCK_RES_DROPPING_REF;
}
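/*
 * Caller sketch (hypothetical, not in the original file): the locked
 * variant above assumes both spinlocks are held in dlm -> res order;
 * a caller would typically wake res->wq afterwards, since waiters in
 * dlm_get_lock_resource may be blocked on DLM_LOCK_RES_DROPPING_REF.
 */
static inline void dlm_example_do_purge(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res)
{
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);
	__dlm_do_purge_lockres(dlm, res);
	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
	wake_up(&res->wq);
}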
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	master = (res->owner == dlm->node_num);

	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, master);

	if (!master) {
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
				dlm->name, res->lockname.len, res->lockname.name);
			spin_unlock(&res->spinlock);
			return;
		}

		res->state |= DLM_LOCK_RES_DROPPING_REF;
		/* drop spinlock... retake below */
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			if (!dlm_is_host_down(ret))
				BUG();
		}
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}

	if (!master && ret == DLM_DEREF_RESPONSE_INPROG) {
		mlog(0, "%s: deref %.*s in progress\n",
			dlm->name, res->lockname.len, res->lockname.name);
		spin_unlock(&res->spinlock);
		return;
	}

	if (!__dlm_lockres_unused(res)) {
		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		BUG();
	}

	__dlm_unhash_lockres(dlm, res);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
				res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	/* lockres is not in the hash now. drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	} else
		spin_unlock(&res->spinlock);
}
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while (run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		spin_lock(&lockres->spinlock);

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we want to be processing this guy at
		 * this time. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anyone added after
			 * him will have a greater last_used value */
			spin_unlock(&lockres->spinlock);
			break;
		}

		/* Status of the lockres *might* change so double
		 * check. If the lockres is unused, holding the dlm
		 * spinlock will prevent people from getting any more
		 * refs on it. */
		unused = __dlm_lockres_unused(lockres);
		if (!unused ||
		    (lockres->state & DLM_LOCK_RES_MIGRATING) ||
		    (lockres->inflight_assert_workers != 0)) {
			mlog(0, "%s: res %.*s is in use or being remastered, "
			     "used %d, state %d, assert master workers %u\n",
			     dlm->name, lockres->lockname.len,
			     lockres->lockname.name,
			     !unused, lockres->state,
			     lockres->inflight_assert_workers);
			list_move_tail(&lockres->purge, &dlm->purge_list);
			spin_unlock(&lockres->spinlock);
			continue;
		}

		dlm_lockres_get(lockres);

		dlm_purge_lockres(dlm, lockres);

		dlm_lockres_put(lockres);

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}
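/*
 * Timing sketch (illustrative helper, not in the original file): a
 * lockres becomes purgeable once DLM_PURGE_INTERVAL_MS have elapsed
 * since last_used; time_after()/time_after_eq() keep the comparison
 * correct across jiffies wraparound.
 */
static inline int dlm_example_res_purgeable(struct dlm_lock_resource *lockres)
{
	unsigned long expire = lockres->last_used +
		msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

	/* true once "jiffies" has reached or passed "expire" */
	return time_after_eq(jiffies, expire);
}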
static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	int can_grant = 1;

	/*
	 * Because this function is called with the lockres
	 * spinlock, and because we know that it is not migrating/
	 * recovering/in-progress, it is fine to reserve asts and
	 * basts right before queueing them all throughout
	 */
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
			      DLM_LOCK_RES_RECOVERING|
			      DLM_LOCK_RES_IN_PROGRESS)));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
	     res->lockname.len, res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		BUG();
	}
	list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	list_for_each_entry(lock, &res->granted, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	list_for_each_entry(lock, &res->converting, list) {
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				__dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
		     target->ml.type, target->ml.node);

		/* target->ml.type is already correct */
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		__dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}
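/*
 * Compatibility sketch (assumption: the real dlm_lock_compatible()
 * lives outside this file, likely in dlmcommon.h; this re-statement is
 * illustrative only).  o2dlm uses NL, PR and EX modes: NL is
 * compatible with everything, EX with nothing but NL, and PR with PR.
 */
static inline int dlm_example_modes_compatible(int existing, int request)
{
	if (existing == LKM_NLMODE || request == LKM_NLMODE)
		return 1;
	if (existing == LKM_EXMODE || request == LKM_EXMODE)
		return 0;
	return 1;	/* PR vs PR */
}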
/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}

	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
	     res->lockname.name);
}
/* Launch the NM thread for the mounted volume */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "Starting dlm_thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm-%s",
			dlm->name);
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}
void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}
static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
		     "node %u\n", dlm->name, res->lockname.len,
		     res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     lock->ml.type, lock->ml.node);

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "%s: res %.*s, AST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
		     "blocked %d, node %u\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
		     hi, lock->ml.node);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "%s: res %.*s, BAST queued while flushing last "
			     "one\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}
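/*
 * Reservation sketch (hypothetical helper, not in the original file):
 * every __dlm_queue_ast()/__dlm_queue_bast() in dlm_shuffle_lists() is
 * preceded by __dlm_lockres_reserve_ast(); dlm_flush_asts() drops each
 * reservation with dlm_lockres_release_ast() once delivery is done.
 */
static inline void dlm_example_queue_one_ast(struct dlm_ctxt *dlm,
					     struct dlm_lock_resource *res,
					     struct dlm_lock *lock)
{
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&res->spinlock);

	__dlm_lockres_reserve_ast(res);	/* paired with release in flush */
	__dlm_queue_ast(dlm, lock);
}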
#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100
static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run. So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP. Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.
		 * (See the pattern sketch after this function.) */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&dlm->ast_lock);
			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
				     " dirty %d\n", dlm->name,
				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
				     !!(res->state & DLM_LOCK_RES_MIGRATING),
				     !!(res->state & DLM_LOCK_RES_RECOVERING),
				     !!(res->state & DLM_LOCK_RES_DIRTY));
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING |
					  DLM_LOCK_RES_RECOVERY_WAITING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->ast_lock);
				mlog(0, "%s: res %.*s, inprogress, delay list "
				     "shuffle, state %d\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress. we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->ast_lock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "%s: Throttling dlm thread\n",
				     dlm->name);
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}