fs/ocfs2/dlm/dlmthread.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

#define dlm_lock_is_remote(dlm, lock)   ((lock)->ml.node != (dlm)->node_num)
/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	current->state = TASK_RUNNING;
}
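
/* returns 1 if the lockres has locks on any of its three queues
 * (granted, converting, blocked), 0 otherwise */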
int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}
/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits in its refmap.
 * truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	if (!__dlm_lockres_has_locks(res) &&
	    (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
		/* try not to scan the bitmap unless the first two
		 * conditions are already true */
		int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* since the bit for dlm->node_num is not
			 * set, inflight_locks better be zero */
			BUG_ON(res->inflight_locks != 0);
			return 1;
		}
	}
	return 0;
}
/* Call whenever you may have added or deleted something from one of
 * the lockres queues. This will figure out whether it belongs on the
 * unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)) {
		if (list_empty(&res->purge)) {
			mlog(0, "putting lockres %.*s:%p onto purge list\n",
			     res->lockname.len, res->lockname.name, res);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
		     res->lockname.len, res->lockname.name, res, res->owner);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}
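
/* locking wrapper around __dlm_lockres_calc_usage(): takes and drops
 * dlm->spinlock and res->spinlock in the required order */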
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}
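
/* Drop one lockres that __dlm_lockres_unused() says is truly unused.
 * If this node is not the master, wait for any in-progress refmap set
 * to complete, then message the master to clear our bit from its
 * refmap.  Finally remove the lockres from the purge list and unhash
 * it.  Called with dlm->spinlock held; the spinlock is dropped and
 * retaken around the network call. */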
static int dlm_purge_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	spin_lock(&res->spinlock);
	if (!__dlm_lockres_unused(res)) {
		spin_unlock(&res->spinlock);
		mlog(0, "%s:%.*s: tried to purge but not unused\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		return -ENOTEMPTY;
	}
	master = (res->owner == dlm->node_num);
	if (!master)
		res->state |= DLM_LOCK_RES_DROPPING_REF;
	spin_unlock(&res->spinlock);

	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
	     res->lockname.name, master);

	if (!master) {
		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);
		/* drop spinlock to do messaging, retake below */
		spin_unlock(&dlm->spinlock);
		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
		}
		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, ret);
		spin_lock(&dlm->spinlock);
	}

	if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purgelist, "
		     "master = %d\n", res->lockname.len, res->lockname.name,
		     res, master);
		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
	__dlm_unhash_lockres(res);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}
	return 0;
}
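
/* Walk dlm->purge_list and free lockres entries that are still unused.
 * Unless purge_now is set, stop at the first entry that has been used
 * within DLM_PURGE_INTERVAL_MS; entries are appended in last_used
 * order, so everything after it is younger still. */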
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while (run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		/* Status of the lockres *might* change so double
		 * check. If the lockres is unused, holding the dlm
		 * spinlock will prevent people from getting any more
		 * refs on it -- there's no need to keep the lockres
		 * spinlock. */
		spin_lock(&lockres->spinlock);
		unused = __dlm_lockres_unused(lockres);
		spin_unlock(&lockres->spinlock);

		if (!unused)
			continue;

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we want to be processing this guy at
		 * this time. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anyone added after
			 * him will have a greater last_used value */
			break;
		}

		mlog(0, "removing lockres %.*s:%p from purgelist\n",
		     lockres->lockname.len, lockres->lockname.name, lockres);
		list_del_init(&lockres->purge);
		dlm_lockres_put(lockres);
		dlm->purge_count--;

		/* This may drop and reacquire the dlm spinlock if it
		 * has to do migration. */
		mlog(0, "calling dlm_purge_lockres!\n");
		if (dlm_purge_lockres(dlm, lockres))
			BUG();
		mlog(0, "DONE calling dlm_purge_lockres!\n");

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}
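
/* Core grant/convert pass for one lockres, run with res->spinlock
 * held.  First try to grant the head of the converting queue: it can
 * be granted only if its requested mode is compatible with every other
 * lock on the granted and converting queues.  Any incompatible lock
 * gets a BAST queued and its highest_blocked level raised.  Once the
 * converting queue is drained, the same check grants locks from the
 * blocked queue.  Each grant moves the lock to the granted queue, sets
 * DLM_NORMAL in its lksb and queues an AST. */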
static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	struct list_head *iter;
	struct list_head *head;
	int can_grant = 1;

	//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
	//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
	//mlog(0, "shuffle res %.*s\n", res->lockname.len,
	//	  res->lockname.name);

	/* because this function is called with the lockres
	 * spinlock, and because we know that it is not migrating/
	 * recovering/in-progress, it is fine to reserve asts and
	 * basts right before queueing them all throughout */
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
			      DLM_LOCK_RES_RECOVERING|
			      DLM_LOCK_RES_IN_PROGRESS)));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
	     res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%.*s: converting a lock with no "
		     "convert_type!\n", res->lockname.len, res->lockname.name);
		BUG();
	}
	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}
	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "calling ast for converting lock: %.*s, have: %d, "
		     "granting: %d, node: %u\n", res->lockname.len,
		     res->lockname.name, target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
		     "node: %u\n", res->lockname.len, res->lockname.name,
		     target->ml.type, target->ml.node);

		// target->ml.type is already correct
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}
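
/* Wake the dlm thread, optionally marking a lockres dirty first so the
 * thread will shuffle its queues. */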
/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	mlog_entry("dlm=%p, res=%p\n", dlm, res);

	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}
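
/* Put a lockres this node masters on the dirty list, taking a
 * reference for the list, unless it is migrating or has dirtying
 * blocked.  Caller must hold dlm->spinlock and res->spinlock. */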
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	mlog_entry("dlm=%p, res=%p\n", dlm, res);

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}
}
/* Launch the DLM thread for the mounted volume */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}
void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}
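
/* A sketch of the expected thread lifecycle, assuming the usual domain
 * join/leave call sites in dlmdomain.c (illustrative only, not taken
 * from this file):
 *
 *	ret = dlm_launch_thread(dlm);	-- at domain join
 *	...
 *	dlm_complete_thread(dlm);	-- at domain shutdown
 *
 * dlm_complete_thread() is safe to call even if the launch failed,
 * since dlm_thread_task is NULLed on error above. */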
static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}
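
/* Deliver every pending AST, then every pending BAST, under
 * dlm->ast_lock.  The lock is dropped around each delivery (an extra
 * lock ref keeps the dlm_lock alive); a lock can be re-queued in that
 * window, in which case its pending flag stays set.  Remote locks are
 * messaged, local ones are called directly. */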
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "delivering an ast for this lockres\n");

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "aha another ast got queued while "
			     "we were finishing the last one.  will "
			     "keep the ast_pending flag set.\n");
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "delivering a bast for this lockres "
		     "(blocked = %d)\n", hi);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "aha another bast got queued while "
			     "we were finishing the last one.  will "
			     "keep the bast_pending flag set.\n");
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}
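
/* Tunables for dlm_thread below: how long to sleep when there is no
 * work, and how many dirty lockres entries to shuffle per iteration
 * before throttling.  DLM_THREAD_MAX_ASTS is defined but not
 * referenced in the code shown here. */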
#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100
#define DLM_THREAD_MAX_ASTS   10
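
/* The dlm worker thread: each iteration runs the purge list, shuffles
 * the queues of every lockres on the dirty list, delivers the pending
 * asts/basts that the shuffling queued, then sleeps until kicked via
 * dlm->dlm_thread_wq or until the timeout expires. */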
static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.  */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once
			 * we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
				     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
				     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				mlog(0, "delaying list shuffling for in-"
				     "progress lockres %.*s, state=%d\n",
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
			     "res=%.*s\n", dlm->name,
			     res->lockname.len, res->lockname.name);

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "throttling dlm_thread\n");
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}