/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

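/* A lock is "remote" when it was created on behalf of another node; such
 * locks have their asts/basts delivered over the network rather than run
 * locally (see dlm_flush_asts below, which makes the same test inline). */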
#define dlm_lock_is_remote(dlm, lock)   ((lock)->ml.node != (dlm)->node_num)

/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
        DECLARE_WAITQUEUE(wait, current);

        assert_spin_locked(&res->spinlock);

        add_wait_queue(&res->wq, &wait);
repeat:
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (res->state & flags) {
                spin_unlock(&res->spinlock);
                schedule();
                spin_lock(&res->spinlock);
                goto repeat;
        }
        remove_wait_queue(&res->wq, &wait);
        __set_current_state(TASK_RUNNING);
}

int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
        if (list_empty(&res->granted) &&
            list_empty(&res->converting) &&
            list_empty(&res->blocked))
                return 0;
        return 1;
}

90 /* "unused": the lockres has no locks, is not on the dirty list,
91 * has no inflight locks (in the gap between mastery and acquiring
92 * the first lock), and has no bits in its refmap.
93 * truly ready to be freed. */
94 int __dlm_lockres_unused(struct dlm_lock_resource *res)
96 int bit;
98 if (__dlm_lockres_has_locks(res))
99 return 0;
101 if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
102 return 0;
104 if (res->state & DLM_LOCK_RES_RECOVERING)
105 return 0;
107 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
108 if (bit < O2NM_MAX_NODES)
109 return 0;
112 * since the bit for dlm->node_num is not set, inflight_locks better
113 * be zero
115 BUG_ON(res->inflight_locks != 0);
116 return 1;
/* Call whenever you may have added or deleted something from one of
 * the lockres queues. This will figure out whether it belongs on the
 * unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res)
{
        mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);

        if (__dlm_lockres_unused(res)) {
                if (list_empty(&res->purge)) {
                        mlog(0, "putting lockres %.*s:%p onto purge list\n",
                             res->lockname.len, res->lockname.name, res);

                        res->last_used = jiffies;
                        dlm_lockres_get(res);
                        list_add_tail(&res->purge, &dlm->purge_list);
                        dlm->purge_count++;
                }
        } else if (!list_empty(&res->purge)) {
                mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
                     res->lockname.len, res->lockname.name, res, res->owner);

                list_del_init(&res->purge);
                dlm_lockres_put(res);
                dlm->purge_count--;
        }
}

void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                            struct dlm_lock_resource *res)
{
        mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

        spin_lock(&dlm->spinlock);
        spin_lock(&res->spinlock);

        __dlm_lockres_calc_usage(dlm, res);

        spin_unlock(&res->spinlock);
        spin_unlock(&dlm->spinlock);
}

static void dlm_purge_lockres(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res)
{
        int master;
        int ret = 0;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);

        master = (res->owner == dlm->node_num);

        mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
             res->lockname.name, master);

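        /* A non-master node cannot simply unhash the lockres: the master
         * still has our bit set in its refmap.  So we first flag the lockres
         * with DLM_LOCK_RES_DROPPING_REF to hold off new users, drop both
         * spinlocks, and send a deref message to the master before finishing
         * the purge below. */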
        if (!master) {
                res->state |= DLM_LOCK_RES_DROPPING_REF;
                /* drop spinlock...  retake below */
                spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);

                spin_lock(&res->spinlock);
                /* This ensures that clear refmap is sent after the set */
                __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
                spin_unlock(&res->spinlock);

                /* clear our bit from the master's refmap; errors are fatal
                 * unless the master is already down */
                ret = dlm_drop_lockres_ref(dlm, res);
                if (ret < 0) {
                        mlog_errno(ret);
                        if (!dlm_is_host_down(ret))
                                BUG();
                }
                mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
                     dlm->name, res->lockname.len, res->lockname.name, ret);
                spin_lock(&dlm->spinlock);
                spin_lock(&res->spinlock);
        }

        if (!list_empty(&res->purge)) {
                mlog(0, "removing lockres %.*s:%p from purgelist, "
                     "master = %d\n", res->lockname.len, res->lockname.name,
                     res, master);
                list_del_init(&res->purge);
                dlm_lockres_put(res);
                dlm->purge_count--;
        }

        if (!__dlm_lockres_unused(res)) {
                mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
                     dlm->name, res->lockname.len, res->lockname.name);
                __dlm_print_one_lock_resource(res);
                BUG();
        }

        __dlm_unhash_lockres(res);

        /* lockres is not in the hash now.  drop the flag and wake up
         * any processes waiting in dlm_get_lock_resource. */
        if (!master) {
                res->state &= ~DLM_LOCK_RES_DROPPING_REF;
                spin_unlock(&res->spinlock);
                wake_up(&res->wq);
        } else
                spin_unlock(&res->spinlock);
}

static void dlm_run_purge_list(struct dlm_ctxt *dlm,
                               int purge_now)
{
        unsigned int run_max, unused;
        unsigned long purge_jiffies;
        struct dlm_lock_resource *lockres;

        spin_lock(&dlm->spinlock);
        run_max = dlm->purge_count;

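        /* Snapshot purge_count so one pass visits at most the number of
         * entries present when the pass started, even though busy lockreses
         * get re-queued at the tail behind us while we run. */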
        while (run_max && !list_empty(&dlm->purge_list)) {
                run_max--;

                lockres = list_entry(dlm->purge_list.next,
                                     struct dlm_lock_resource, purge);

                spin_lock(&lockres->spinlock);

                purge_jiffies = lockres->last_used +
                        msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

                /* Make sure that we want to be processing this guy at
                 * this time. */
                if (!purge_now && time_after(purge_jiffies, jiffies)) {
                        /* Since resources are added to the purge list
                         * in tail order, we can stop at the first
                         * unpurgable resource -- anyone added after
                         * him will have a greater last_used value */
                        spin_unlock(&lockres->spinlock);
                        break;
                }

                /* Status of the lockres *might* change so double
                 * check. If the lockres is unused, holding the dlm
                 * spinlock will prevent people from getting any more
                 * refs on it. */
                unused = __dlm_lockres_unused(lockres);
                if (!unused ||
                    (lockres->state & DLM_LOCK_RES_MIGRATING)) {
                        mlog(0, "lockres %s:%.*s: is in use or "
                             "being remastered, used %d, state %d\n",
                             dlm->name, lockres->lockname.len,
                             lockres->lockname.name, !unused, lockres->state);
                        /* move the busy lockres to the tail of the
                         * purge list and keep going */
                        list_move_tail(&lockres->purge, &dlm->purge_list);
                        spin_unlock(&lockres->spinlock);
                        continue;
                }

                dlm_lockres_get(lockres);

                dlm_purge_lockres(dlm, lockres);

                dlm_lockres_put(lockres);

                /* Avoid adding any scheduling latencies */
                cond_resched_lock(&dlm->spinlock);
        }

        spin_unlock(&dlm->spinlock);
}

static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res)
{
        struct dlm_lock *lock, *target;
        struct list_head *iter;
        struct list_head *head;
        int can_grant = 1;

        //mlog(0, "res->lockname.len=%d\n", res->lockname.len);
        //mlog(0, "res->lockname.name=%p\n", res->lockname.name);
        //mlog(0, "shuffle res %.*s\n", res->lockname.len,
        //        res->lockname.name);

        /* because this function is called with the lockres
         * spinlock, and because we know that it is not migrating/
         * recovering/in-progress, it is fine to reserve asts and
         * basts right before queueing them all throughout */
        assert_spin_locked(&res->spinlock);
        BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
                              DLM_LOCK_RES_RECOVERING|
                              DLM_LOCK_RES_IN_PROGRESS)));

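        /* The logic below: take the lock at the head of the converting queue
         * and check it against every lock on the granted and converting
         * queues; any incompatible holder gets a BAST queued and its
         * highest_blocked level raised.  If all are compatible, grant the
         * conversion and repeat.  Once the converting queue is drained, try
         * the head of the blocked queue the same way. */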
converting:
        if (list_empty(&res->converting))
                goto blocked;
        mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
             res->lockname.name);

        target = list_entry(res->converting.next, struct dlm_lock, list);
        if (target->ml.convert_type == LKM_IVMODE) {
                mlog(ML_ERROR, "%.*s: converting a lock with no "
                     "convert_type!\n", res->lockname.len, res->lockname.name);
                BUG();
        }
        head = &res->granted;
        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (lock == target)
                        continue;
                if (!dlm_lock_compatible(lock->ml.type,
                                         target->ml.convert_type)) {
                        can_grant = 0;
                        /* queue the BAST if not already */
                        if (lock->ml.highest_blocked == LKM_IVMODE) {
                                __dlm_lockres_reserve_ast(res);
                                dlm_queue_bast(dlm, lock);
                        }
                        /* update the highest_blocked if needed */
                        if (lock->ml.highest_blocked < target->ml.convert_type)
                                lock->ml.highest_blocked =
                                        target->ml.convert_type;
                }
        }
        head = &res->converting;
        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (lock == target)
                        continue;
                if (!dlm_lock_compatible(lock->ml.type,
                                         target->ml.convert_type)) {
                        can_grant = 0;
                        if (lock->ml.highest_blocked == LKM_IVMODE) {
                                __dlm_lockres_reserve_ast(res);
                                dlm_queue_bast(dlm, lock);
                        }
                        if (lock->ml.highest_blocked < target->ml.convert_type)
                                lock->ml.highest_blocked =
                                        target->ml.convert_type;
                }
        }

        /* we can convert the lock */
        if (can_grant) {
                spin_lock(&target->spinlock);
                BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

                mlog(0, "calling ast for converting lock: %.*s, have: %d, "
                     "granting: %d, node: %u\n", res->lockname.len,
                     res->lockname.name, target->ml.type,
                     target->ml.convert_type, target->ml.node);

                target->ml.type = target->ml.convert_type;
                target->ml.convert_type = LKM_IVMODE;
                list_move_tail(&target->list, &res->granted);

                BUG_ON(!target->lksb);
                target->lksb->status = DLM_NORMAL;

                spin_unlock(&target->spinlock);

                __dlm_lockres_reserve_ast(res);
                dlm_queue_ast(dlm, target);
                /* go back and check for more */
                goto converting;
        }

blocked:
        if (list_empty(&res->blocked))
                goto leave;
        target = list_entry(res->blocked.next, struct dlm_lock, list);

        head = &res->granted;
        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (lock == target)
                        continue;
                if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
                        can_grant = 0;
                        if (lock->ml.highest_blocked == LKM_IVMODE) {
                                __dlm_lockres_reserve_ast(res);
                                dlm_queue_bast(dlm, lock);
                        }
                        if (lock->ml.highest_blocked < target->ml.type)
                                lock->ml.highest_blocked = target->ml.type;
                }
        }

        head = &res->converting;
        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (lock == target)
                        continue;
                if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
                        can_grant = 0;
                        if (lock->ml.highest_blocked == LKM_IVMODE) {
                                __dlm_lockres_reserve_ast(res);
                                dlm_queue_bast(dlm, lock);
                        }
                        if (lock->ml.highest_blocked < target->ml.type)
                                lock->ml.highest_blocked = target->ml.type;
                }
        }

        /* we can grant the blocked lock (only
         * possible if converting list empty) */
        if (can_grant) {
                spin_lock(&target->spinlock);
                BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

                mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
                     "node: %u\n", res->lockname.len, res->lockname.name,
                     target->ml.type, target->ml.node);

                // target->ml.type is already correct
                list_move_tail(&target->list, &res->granted);

                BUG_ON(!target->lksb);
                target->lksb->status = DLM_NORMAL;

                spin_unlock(&target->spinlock);

                __dlm_lockres_reserve_ast(res);
                dlm_queue_ast(dlm, target);
                /* go back and check for more */
                goto converting;
        }

leave:
        return;
}

/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
        mlog_entry("dlm=%p, res=%p\n", dlm, res);

        if (res) {
                spin_lock(&dlm->spinlock);
                spin_lock(&res->spinlock);
                __dlm_dirty_lockres(dlm, res);
                spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);
        }

        wake_up(&dlm->dlm_thread_wq);
}

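/* Queue a mastered lockres on the dirty list so dlm_thread will shuffle its
 * queues.  Caller must hold both dlm->spinlock and res->spinlock; a ref is
 * taken for the dirty list and dropped when dlm_thread peels the entry off. */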
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
        mlog_entry("dlm=%p, res=%p\n", dlm, res);

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);

        /* don't shuffle secondary queues */
        if (res->owner == dlm->node_num) {
                if (res->state & (DLM_LOCK_RES_MIGRATING |
                                  DLM_LOCK_RES_BLOCK_DIRTY))
                        return;

                if (list_empty(&res->dirty)) {
                        /* ref for dirty_list */
                        dlm_lockres_get(res);
                        list_add_tail(&res->dirty, &dlm->dirty_list);
                        res->state |= DLM_LOCK_RES_DIRTY;
                }
        }
}

/* Launch the dlm thread for the mounted volume */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
        mlog(0, "starting dlm thread...\n");

        dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
        if (IS_ERR(dlm->dlm_thread_task)) {
                mlog_errno(PTR_ERR(dlm->dlm_thread_task));
                dlm->dlm_thread_task = NULL;
                return -EINVAL;
        }

        return 0;
}

void dlm_complete_thread(struct dlm_ctxt *dlm)
{
        if (dlm->dlm_thread_task) {
                mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
                kthread_stop(dlm->dlm_thread_task);
                dlm->dlm_thread_task = NULL;
        }
}

static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
        int empty;

        spin_lock(&dlm->spinlock);
        empty = list_empty(&dlm->dirty_list);
        spin_unlock(&dlm->spinlock);

        return empty;
}

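/* Drain the pending ast and bast lists, delivering each entry locally or over
 * the network depending on which node owns the lock.  dlm->ast_lock is
 * dropped around each delivery, so a new ast/bast may be queued for the same
 * lock meanwhile; the ast_pending/bast_pending flags below handle that race. */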
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
        int ret;
        struct dlm_lock *lock;
        struct dlm_lock_resource *res;
        u8 hi;

        spin_lock(&dlm->ast_lock);
        while (!list_empty(&dlm->pending_asts)) {
                lock = list_entry(dlm->pending_asts.next,
                                  struct dlm_lock, ast_list);
                /* get an extra ref on lock */
                dlm_lock_get(lock);
                res = lock->lockres;
                mlog(0, "delivering an ast for this lockres\n");

                BUG_ON(!lock->ast_pending);

                /* remove from list (including ref) */
                list_del_init(&lock->ast_list);
                dlm_lock_put(lock);
                spin_unlock(&dlm->ast_lock);

                if (lock->ml.node != dlm->node_num) {
                        ret = dlm_do_remote_ast(dlm, res, lock);
                        if (ret < 0)
                                mlog_errno(ret);
                } else
                        dlm_do_local_ast(dlm, res, lock);

                spin_lock(&dlm->ast_lock);

                /* possible that another ast was queued while
                 * we were delivering the last one */
                if (!list_empty(&lock->ast_list)) {
                        mlog(0, "aha another ast got queued while "
                             "we were finishing the last one.  will "
                             "keep the ast_pending flag set.\n");
                } else
                        lock->ast_pending = 0;

                /* drop the extra ref.
                 * this may drop it completely. */
                dlm_lock_put(lock);
                dlm_lockres_release_ast(dlm, res);
        }

        while (!list_empty(&dlm->pending_basts)) {
                lock = list_entry(dlm->pending_basts.next,
                                  struct dlm_lock, bast_list);
                /* get an extra ref on lock */
                dlm_lock_get(lock);
                res = lock->lockres;

                BUG_ON(!lock->bast_pending);

                /* get the highest blocked lock, and reset */
                spin_lock(&lock->spinlock);
                BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
                hi = lock->ml.highest_blocked;
                lock->ml.highest_blocked = LKM_IVMODE;
                spin_unlock(&lock->spinlock);

                /* remove from list (including ref) */
                list_del_init(&lock->bast_list);
                dlm_lock_put(lock);
                spin_unlock(&dlm->ast_lock);

                mlog(0, "delivering a bast for this lockres "
                     "(blocked = %d)\n", hi);

                if (lock->ml.node != dlm->node_num) {
                        ret = dlm_send_proxy_bast(dlm, res, lock, hi);
                        if (ret < 0)
                                mlog_errno(ret);
                } else
                        dlm_do_local_bast(dlm, res, lock, hi);

                spin_lock(&dlm->ast_lock);

                /* possible that another bast was queued while
                 * we were delivering the last one */
                if (!list_empty(&lock->bast_list)) {
                        mlog(0, "aha another bast got queued while "
                             "we were finishing the last one.  will "
                             "keep the bast_pending flag set.\n");
                } else
                        lock->bast_pending = 0;

                /* drop the extra ref.
                 * this may drop it completely. */
                dlm_lock_put(lock);
                dlm_lockres_release_ast(dlm, res);
        }
        wake_up(&dlm->ast_wq);
        spin_unlock(&dlm->ast_lock);
}

#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100
#define DLM_THREAD_MAX_ASTS   10
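/* One pass of dlm_thread shuffles at most DLM_THREAD_MAX_DIRTY lockreses
 * before yielding, and sleeps up to DLM_THREAD_TIMEOUT_MS between passes.
 * DLM_THREAD_MAX_ASTS appears to be unused in this version of the file. */
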
static int dlm_thread(void *data)
{
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm = data;
        unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

        mlog(0, "dlm thread running for %s...\n", dlm->name);

        while (!kthread_should_stop()) {
                int n = DLM_THREAD_MAX_DIRTY;

                /* dlm_shutting_down is very point-in-time, but that
                 * doesn't matter as we'll just loop back around if we
                 * get false on the leading edge of a state
                 * transition. */
                dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

                /* We really don't want to hold dlm->spinlock while
                 * calling dlm_shuffle_lists on each lockres that
                 * needs to have its queues adjusted and AST/BASTs
                 * run.  So let's pull each entry off the dirty_list
                 * and drop dlm->spinlock ASAP.  Once off the list,
                 * res->spinlock needs to be taken again to protect
                 * the queues while calling dlm_shuffle_lists.  */
                spin_lock(&dlm->spinlock);
                while (!list_empty(&dlm->dirty_list)) {
                        int delay = 0;
                        res = list_entry(dlm->dirty_list.next,
                                         struct dlm_lock_resource, dirty);

                        /* peel a lockres off, remove it from the list,
                         * unset the dirty flag and drop the dlm lock */
                        BUG_ON(!res);
                        dlm_lockres_get(res);

                        spin_lock(&res->spinlock);
                        /* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
                        list_del_init(&res->dirty);
                        spin_unlock(&res->spinlock);
                        spin_unlock(&dlm->spinlock);
                        /* Drop dirty_list ref */
                        dlm_lockres_put(res);

                        /* lockres can be re-dirtied/re-added to the
                         * dirty_list in this gap, but that is ok */

                        spin_lock(&res->spinlock);
                        if (res->owner != dlm->node_num) {
                                __dlm_print_one_lock_resource(res);
                                mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
                                     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
                                     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
                                     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
                                     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
                        }
                        BUG_ON(res->owner != dlm->node_num);

                        /* it is now ok to move lockreses in these states
                         * to the dirty list, assuming that they will only be
                         * dirty for a short while. */
                        BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
                        if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
                                          DLM_LOCK_RES_RECOVERING)) {
                                /* move it to the tail and keep going */
                                res->state &= ~DLM_LOCK_RES_DIRTY;
                                spin_unlock(&res->spinlock);
                                mlog(0, "delaying list shuffling for in-"
                                     "progress lockres %.*s, state=%d\n",
                                     res->lockname.len, res->lockname.name,
                                     res->state);
                                delay = 1;
                                goto in_progress;
                        }

                        /* at this point the lockres is not migrating/
                         * recovering/in-progress.  we have the lockres
                         * spinlock and do NOT have the dlm lock.
                         * safe to reserve/queue asts and run the lists. */

                        mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
                             "res=%.*s\n", dlm->name,
                             res->lockname.len, res->lockname.name);

                        /* called while holding lockres lock */
                        dlm_shuffle_lists(dlm, res);
                        res->state &= ~DLM_LOCK_RES_DIRTY;
                        spin_unlock(&res->spinlock);

                        dlm_lockres_calc_usage(dlm, res);

in_progress:

                        spin_lock(&dlm->spinlock);
                        /* if the lock was in-progress, stick
                         * it on the back of the list */
                        if (delay) {
                                spin_lock(&res->spinlock);
                                __dlm_dirty_lockres(dlm, res);
                                spin_unlock(&res->spinlock);
                        }
                        dlm_lockres_put(res);

                        /* unlikely, but we may need to give time to
                         * other tasks */
                        if (!--n) {
                                mlog(0, "throttling dlm_thread\n");
                                break;
                        }
                }

                spin_unlock(&dlm->spinlock);
                dlm_flush_asts(dlm);

                /* yield and continue right away if there is more work to do */
                if (!n) {
                        cond_resched();
                        continue;
                }

                wait_event_interruptible_timeout(dlm->dlm_thread_wq,
                                                 !dlm_dirty_list_empty(dlm) ||
                                                 kthread_should_stop(),
                                                 timeout);
        }

        mlog(0, "quitting DLM thread\n");
        return 0;
}