/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"

/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status. They need
 * to abort if the lockspace is stopped indicating a node has failed (perhaps
 * the one being waited for).
 */

/*
 * Wait until given function returns non-zero or lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes). When another
 * function thinks it could have completed the waited-on task, they should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timeout.  This uses a timeout so it can check periodically if the wait
 * should abort due to node failure (which doesn't cause a wake_up).
 * This should only be called by the dlm_recoverd thread.
 */

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;
	int rv;

	while (1) {
		rv = wait_event_timeout(ls->ls_wait_general,
					testfn(ls) || dlm_recovery_stopped(ls),
					dlm_config.ci_recover_timer * HZ);
		if (rv)
			break;
		if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) {
			log_debug(ls, "dlm_wait_function timed out");
			return -ETIMEDOUT;
		}
	}

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;
	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	_set_recover_status(ls, status);
	spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
			   int save_slots)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid, 0);
			if (error)
				goto out;

			if (save_slots)
				dlm_slot_save(ls, rc, memb);

			if (rc->rc_result & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
			   uint32_t status_flags)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid, status_flags);
		if (error)
			break;

		if (rc->rc_result & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}

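/* The DLM_RS_X_ALL status bit is defined as the DLM_RS_X bit shifted
   left by one, which is what lets wait_status() derive it below with
   status << 1. */
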
static int wait_status(struct dlm_ls *ls, uint32_t status)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status, 0);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all, 0);

	return error;
}

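/* The low nodeid gathers DLM_RS_NODES status and slot info from each
   member and assigns slots; the other nodes wait for DLM_RS_NODES_ALL
   on the low nodeid and then copy in the slot assignments. */
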
int dlm_recover_members_wait(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int num_slots, slots_size;
	int error, rv;
	uint32_t gen;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memb->slot = -1;
		memb->generation = 0;
	}

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, DLM_RS_NODES, 1);
		if (error)
			goto out;

		/* slots array is sparse, slots_size may be > num_slots */

		rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
		if (!rv) {
			spin_lock(&ls->ls_recover_lock);
			_set_recover_status(ls, DLM_RS_NODES_ALL);
			ls->ls_num_slots = num_slots;
			ls->ls_slots_size = slots_size;
			ls->ls_slots = slots;
			ls->ls_generation = gen;
			spin_unlock(&ls->ls_recover_lock);
		} else {
			dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
		}
	} else {
		error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
		if (error)
			goto out;

		dlm_slots_copy_in(ls);
	}
 out:
	return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		r->res_recover_locks_count = 0;
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

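/* The recover_idr serves the same purpose as the recover_list, but hands
   out a small integer id (r->res_id) that is carried in rcom lookup
   messages in place of the rsb address, so a lookup reply can be matched
   back to its rsb.  It shares ls_recover_list_count with the list above. */
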
static int recover_idr_empty(struct dlm_ls *ls)
{
	int empty = 1;

	spin_lock(&ls->ls_recover_idr_lock);
	if (ls->ls_recover_list_count)
		empty = 0;
	spin_unlock(&ls->ls_recover_idr_lock);

	return empty;
}

static int recover_idr_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int rv;

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_recover_idr_lock);
	if (r->res_id) {
		rv = -1;
		goto out_unlock;
	}
	rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
	if (rv < 0)
		goto out_unlock;

	r->res_id = rv;
	ls->ls_recover_list_count++;
	dlm_hold_rsb(r);
	rv = 0;
out_unlock:
	spin_unlock(&ls->ls_recover_idr_lock);
	idr_preload_end();
	return rv;
}

static void recover_idr_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_idr_lock);
	idr_remove(&ls->ls_recover_idr, r->res_id);
	r->res_id = 0;
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_idr_lock);

	dlm_put_rsb(r);
}

static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r;

	spin_lock(&ls->ls_recover_idr_lock);
	r = idr_find(&ls->ls_recover_idr, (int)id);
	spin_unlock(&ls->ls_recover_idr_lock);
	return r;
}

static void recover_idr_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int id;

	spin_lock(&ls->ls_recover_idr_lock);

	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
		idr_remove(&ls->ls_recover_idr, id);
		r->res_id = 0;
		r->res_recover_locks_count = 0;
		ls->ls_recover_list_count--;

		dlm_put_rsb(r);
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_idr_lock);
}

/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue) {
		if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) {
			lkb->lkb_nodeid = nodeid;
			lkb->lkb_remid = 0;
		}
	}
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r)
{
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 *
 * Another node recovering the master may have sent us a rcom lookup,
 * and our dlm_master_lookup() set it as the new master, along with
 * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
 * equals our_nodeid below).
 */

static int recover_master(struct dlm_rsb *r, unsigned int *count)
{
	struct dlm_ls *ls = r->res_ls;
	int our_nodeid, dir_nodeid;
	int is_removed = 0;
	int error;

	if (is_master(r))
		return 0;

	is_removed = dlm_is_removed(ls, r->res_nodeid);

	if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
		return 0;

	our_nodeid = dlm_our_nodeid();
	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		if (is_removed) {
			r->res_master_nodeid = our_nodeid;
			r->res_nodeid = 0;
		}

		/* set master of lkbs to ourself when is_removed, or to
		   another new master which we set along with NEW_MASTER
		   in dlm_master_lookup */
		set_new_master(r);
		error = 0;
	} else {
		recover_idr_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid);
	}

	(*count)++;
	return error;
}

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
 * This is necessary because recovery can be started, aborted and restarted,
 * causing the master nodeid to briefly change during the aborted recovery, and
 * change back to the original value in the second recovery.  The MSTCPY locks
 * may or may not have been purged during the aborted recovery.  Another node
 * with an outstanding request in waiters list and a request reply saved in the
 * requestqueue, cannot know whether it should ignore the reply and resend the
 * request, or accept the reply and complete the request.  It must do the
 * former if the remote node purged MSTCPY locks, and it must do the latter if
 * the remote node did not.  This is solved by always purging MSTCPY locks, in
 * which case, the request reply would always be ignored and the request
 * resent.
 */

static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
{
	int dir_nodeid = dlm_dir_nodeid(r);
	int new_master = dir_nodeid;

	if (dir_nodeid == dlm_our_nodeid())
		new_master = 0;

	dlm_purge_mstcpy_locks(r);
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	(*count)++;
	return 0;
}

/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int total = 0;
	unsigned int count = 0;
	int nodir = dlm_no_directory(ls);
	int error;

	log_rinfo(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		lock_rsb(r);
		if (nodir)
			error = recover_master_static(r, &count);
		else
			error = recover_master(r, &count);
		unlock_rsb(r);
		cond_resched();
		total++;

		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);

	error = dlm_wait_function(ls, &recover_idr_empty);
 out:
	if (error)
		recover_idr_clear(ls);
	return error;
}

int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int ret_nodeid, new_master;

	r = recover_idr_find(ls, rc->rc_id);
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)rc->rc_id);
		goto out;
	}

	ret_nodeid = rc->rc_result;

	if (ret_nodeid == dlm_our_nodeid())
		new_master = 0;
	else
		new_master = ret_nodeid;

	lock_rsb(r);
	r->res_master_nodeid = ret_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	unlock_rsb(r);
	recover_idr_del(r);

	if (recover_idr_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}

/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/

/*
 * keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies then recovery for the rsb is done
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}

static int recover_locks(struct dlm_rsb *r)
{
	int error = 0;

	lock_rsb(r);

	DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error, count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_locks %d out", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

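/* Called for each rcom lock reply; recovery of the rsb is complete when
   a reply has been received for every lkb sent from recover_locks(). */
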
void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set in two cases:
 *
 * 1. we are master, but not new, and we purged an EX/PW lock held by a
 * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
 *
 * 2. we are a new master, and there are only NL/CR locks left.
 * (We could probably improve this by only invalidating in this way when
 * the previous master left uncleanly.  VMS docs mention that.)
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int big_lock_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
	    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
		/* case 1 above */
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!rsb_flag(r, RSB_NEW_MASTER2))
		return;

	/* we are the new master, so figure out if VALNOTVALID should
	   be set, and set the rsb lvb from the best lkb available. */

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	/* lvb is invalidated if only NL/CR locks remain */
	if (!big_lock_exists)
		rsb_set_flag(r, RSB_VALNOTVALID);

	if (!r->res_lvbptr) {
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lock_exists) {
		r->res_lvbseq = lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	struct dlm_lkb *lkb;
	int grmode = -1;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_grmode == DLM_LOCK_PR ||
		    lkb->lkb_grmode == DLM_LOCK_CW) {
			grmode = lkb->lkb_grmode;
			break;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (lkb->lkb_grmode != DLM_LOCK_IV)
			continue;
		if (grmode == -1) {
			log_debug(ls, "recover_conversion %x set gr to rq %d",
				  lkb->lkb_id, lkb->lkb_rqmode);
			lkb->lkb_grmode = lkb->lkb_rqmode;
		} else {
			log_debug(ls, "recover_conversion %x set gr %d",
				  lkb->lkb_id, grmode);
			lkb->lkb_grmode = grmode;
		}
	}
}

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_recover_grant() due to locks that may have
   existed from a removed node. */

static void recover_grant(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_RECOVER_GRANT);
}

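/* After masters and locks have been recovered, fix up in-progress
   conversions on the rsb's we master, recover their lvb's, flag locks
   for granting, and clear the per-recovery rsb flags. */
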
void dlm_recover_rsbs(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		lock_rsb(r);
		if (is_master(r)) {
			if (rsb_flag(r, RSB_RECOVER_CONVERT))
				recover_conversion(r);

			/* recover lvb before granting locks so the updated
			   lvb/VALNOTVALID is presented in the completion */
			recover_lvb(r);

			if (rsb_flag(r, RSB_NEW_MASTER2))
				recover_grant(r);
			count++;
		} else {
			rsb_clear_flag(r, RSB_VALNOTVALID);
		}
		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
		rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
		rsb_clear_flag(r, RSB_NEW_MASTER2);
		unlock_rsb(r);
	}
	up_read(&ls->ls_root_sem);

	if (count)
		log_rinfo(ls, "dlm_recover_rsbs %d done", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i, error = 0;

	down_write(&ls->ls_root_sem);
	if (!list_empty(&ls->ls_root_list)) {
		log_error(ls, "root list not empty");
		error = -EINVAL;
		goto out;
	}

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			list_add(&r->res_root_list, &ls->ls_root_list);
			dlm_hold_rsb(r);
		}

		if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
			log_error(ls, "dlm_create_root_list toss not empty");
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
 out:
	up_write(&ls->ls_root_sem);
	return error;
}

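/* Drop the references taken in dlm_create_root_list and empty the list */
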
void dlm_release_root_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;

	down_write(&ls->ls_root_sem);
	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
		list_del_init(&r->res_root_list);
		dlm_put_rsb(r);
	}
	up_write(&ls->ls_root_sem);
}

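/* Free all the rsb's sitting on the toss lists; tossed rsb's are unused
   and hold no locks, so they don't take part in recovery. */
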
void dlm_clear_toss(struct dlm_ls *ls)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	unsigned int count = 0;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
			next = rb_next(n);
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			rb_erase(n, &ls->ls_rsbtbl[i].toss);
			dlm_free_rsb(r);
			count++;
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}

	if (count)
		log_rinfo(ls, "dlm_clear_toss %u done", count);
}