// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"

/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They need
 * to abort if the lockspace is stopped indicating a node has failed (perhaps
 * the one being waited for).
 */

/*
 * Wait until given function returns non-zero or lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, they should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timeout.  This uses a timeout so it can check periodically if the wait
 * should abort due to node failure (which doesn't cause a wake_up).
 * This should only be called by the dlm_recoverd thread.
 */

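/*
 * In this file, the testfn passed in is recover_idr_empty() or
 * recover_list_empty(); the matching wake_up(&ls->ls_wait_general) calls
 * are made from dlm_recover_master_reply() and dlm_recovered_lock() below.
 */
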
int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;
	int rv;

	while (1) {
		rv = wait_event_timeout(ls->ls_wait_general,
					testfn(ls) || dlm_recovery_stopped(ls),
					dlm_config.ci_recover_timer * HZ);
		if (rv)
			break;
		if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) {
			log_debug(ls, "dlm_wait_function timed out");
			return -ETIMEDOUT;
		}
	}

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

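/*
 * A sketch of the flag convention (assuming the DLM_RS_* bit layout in
 * dlm_internal.h): each X_ALL flag is the bit directly above X, which is
 * why wait_status() below can derive it as status_all = status << 1,
 * e.g. DLM_RS_DIR -> DLM_RS_DIR_ALL.
 */
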
uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;

	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	_set_recover_status(ls, status);
	spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
			   int save_slots)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid, 0);
			if (error)
				goto out;

			if (save_slots)
				dlm_slot_save(ls, rc, memb);

			if (rc->rc_result & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
			   uint32_t status_flags)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid, status_flags);
		if (error)
			break;

		if (rc->rc_result & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}

static int wait_status(struct dlm_ls *ls, uint32_t status)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status, 0);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all, 0);

	return error;
}

int dlm_recover_members_wait(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int num_slots, slots_size;
	int error, rv;
	uint32_t gen;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memb->slot = -1;
		memb->generation = 0;
	}

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, DLM_RS_NODES, 1);
		if (error)
			goto out;

		/* slots array is sparse, slots_size may be > num_slots */

		rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
		if (!rv) {
			spin_lock(&ls->ls_recover_lock);
			_set_recover_status(ls, DLM_RS_NODES_ALL);
			ls->ls_num_slots = num_slots;
			ls->ls_slots_size = slots_size;
			ls->ls_slots = slots;
			ls->ls_generation = gen;
			spin_unlock(&ls->ls_recover_lock);
		} else {
			dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
		}
	} else {
		error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
		if (error)
			goto out;

		dlm_slots_copy_in(ls);
	}
 out:
	return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

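/*
 * For the master lookups the match is made through ls_recover_idr instead:
 * recover_idr_add() stores the rsb under an idr id kept in res_id, and
 * dlm_recover_master_reply() finds it again with
 * recover_idr_find(ls, rc->rc_id).
 */
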
static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		r->res_recover_locks_count = 0;
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static int recover_idr_empty(struct dlm_ls *ls)
{
	int empty = 1;

	spin_lock(&ls->ls_recover_idr_lock);
	if (ls->ls_recover_list_count)
		empty = 0;
	spin_unlock(&ls->ls_recover_idr_lock);

	return empty;
}

static int recover_idr_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int rv;

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_recover_idr_lock);
	if (r->res_id) {
		rv = -1;
		goto out_unlock;
	}
	rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
	if (rv < 0)
		goto out_unlock;

	r->res_id = rv;
	ls->ls_recover_list_count++;
	dlm_hold_rsb(r);
	rv = 0;
out_unlock:
	spin_unlock(&ls->ls_recover_idr_lock);
	idr_preload_end();
	return rv;
}

static void recover_idr_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_idr_lock);
	idr_remove(&ls->ls_recover_idr, r->res_id);
	r->res_id = 0;
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_idr_lock);

	dlm_put_rsb(r);
}

static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r;

	spin_lock(&ls->ls_recover_idr_lock);
	r = idr_find(&ls->ls_recover_idr, (int)id);
	spin_unlock(&ls->ls_recover_idr_lock);
	return r;
}

static void recover_idr_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int id;

	spin_lock(&ls->ls_recover_idr_lock);

	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
		idr_remove(&ls->ls_recover_idr, id);
		r->res_id = 0;
		r->res_recover_locks_count = 0;
		ls->ls_recover_list_count--;

		dlm_put_rsb(r);
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_idr_lock);
}

/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue.
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue) {
		if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) {
			lkb->lkb_nodeid = nodeid;
			lkb->lkb_remid = 0;
		}
	}
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks.
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r)
{
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 *
 * Another node recovering the master may have sent us a rcom lookup,
 * and our dlm_master_lookup() set it as the new master, along with
 * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
 * equals our_nodeid below).
 */

static int recover_master(struct dlm_rsb *r, unsigned int *count)
{
	struct dlm_ls *ls = r->res_ls;
	int our_nodeid, dir_nodeid;
	int is_removed = 0;
	int error;

	if (is_master(r))
		return 0;

	is_removed = dlm_is_removed(ls, r->res_nodeid);

	if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
		return 0;

	our_nodeid = dlm_our_nodeid();
	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		if (is_removed) {
			r->res_master_nodeid = our_nodeid;
			r->res_nodeid = 0;
		}

		/* set master of lkbs to ourself when is_removed, or to
		   another new master which we set along with NEW_MASTER
		   in dlm_master_lookup */
		set_new_master(r);
		error = 0;
	} else {
		recover_idr_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid);
	}

	(*count)++;
	return error;
}

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
 * This is necessary because recovery can be started, aborted and restarted,
 * causing the master nodeid to briefly change during the aborted recovery, and
 * change back to the original value in the second recovery.  The MSTCPY locks
 * may or may not have been purged during the aborted recovery.  Another node
 * with an outstanding request in the waiters list and a request reply saved in
 * the requestqueue cannot know whether it should ignore the reply and resend
 * the request, or accept the reply and complete the request.  It must do the
 * former if the remote node purged MSTCPY locks, and it must do the latter if
 * the remote node did not.  This is solved by always purging MSTCPY locks, in
 * which case the request reply would always be ignored and the request
 * resent.
 */

static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
{
	int dir_nodeid = dlm_dir_nodeid(r);
	int new_master = dir_nodeid;

	if (dir_nodeid == dlm_our_nodeid())
		new_master = 0;

	dlm_purge_mstcpy_locks(r);
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	(*count)++;
	return 0;
}

/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

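/*
 * Outstanding lookups are tracked in ls_recover_idr; the wait at the end
 * returns when recover_idr_empty() is true or recovery is stopped, and on
 * error recover_idr_clear() drops whatever lookups are still outstanding.
 */
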
int dlm_recover_masters(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int total = 0;
	unsigned int count = 0;
	int nodir = dlm_no_directory(ls);
	int error;

	log_rinfo(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		lock_rsb(r);
		if (nodir)
			error = recover_master_static(r, &count);
		else
			error = recover_master(r, &count);
		unlock_rsb(r);
		cond_resched();
		total++;

		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);

	error = dlm_wait_function(ls, &recover_idr_empty);
 out:
	if (error)
		recover_idr_clear(ls);
	return error;
}

int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int ret_nodeid, new_master;

	r = recover_idr_find(ls, rc->rc_id);
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)rc->rc_id);
		goto out;
	}

	ret_nodeid = rc->rc_result;

	if (ret_nodeid == dlm_our_nodeid())
		new_master = 0;
	else
		new_master = ret_nodeid;

	lock_rsb(r);
	r->res_master_nodeid = ret_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	unlock_rsb(r);
	recover_idr_del(r);

	if (recover_idr_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}

/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/

/*
 * Keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies then recovery for the rsb is done.
 */

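/*
 * The counterpart is dlm_recovered_lock() below: each rcom lock reply
 * decrements res_recover_locks_count, and the final reply clears
 * RSB_NEW_MASTER and removes the rsb from the recover_list.
 */
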
static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}

static int recover_locks(struct dlm_rsb *r)
{
	int error = 0;

	lock_rsb(r);

	DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error, count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_locks %d out", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set in two cases:
 *
 * 1. we are master, but not new, and we purged an EX/PW lock held by a
 * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
 *
 * 2. we are a new master, and there are only NL/CR locks left.
 * (We could probably improve this by only invalidating in this way when
 * the previous master left uncleanly.  VMS docs mention that.)
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */

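/*
 * A note on the lvbseq comparison used below: assuming lkb_lvbseq is a
 * free-running 32-bit counter, the signed subtraction
 *
 *	((int)lkb->lkb_lvbseq - (int)high_seq) >= 0
 *
 * is wraparound-safe serial arithmetic; it still selects the newer
 * sequence after the counter wraps, provided the two values are less
 * than 2^31 apart.
 */
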
static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int big_lock_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
	    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
		/* case 1 above */
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!rsb_flag(r, RSB_NEW_MASTER2))
		return;

	/* we are the new master, so figure out if VALNOTVALID should
	   be set, and set the rsb lvb from the best lkb available. */

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	/* lvb is invalidated if only NL/CR locks remain */
	if (!big_lock_exists)
		rsb_set_flag(r, RSB_VALNOTVALID);

	if (!r->res_lvbptr) {
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lock_exists) {
		r->res_lvbseq = lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	struct dlm_lkb *lkb;
	int grmode = -1;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_grmode == DLM_LOCK_PR ||
		    lkb->lkb_grmode == DLM_LOCK_CW) {
			grmode = lkb->lkb_grmode;
			break;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (lkb->lkb_grmode != DLM_LOCK_IV)
			continue;
		if (grmode == -1) {
			log_debug(ls, "recover_conversion %x set gr to rq %d",
				  lkb->lkb_id, lkb->lkb_rqmode);
			lkb->lkb_grmode = lkb->lkb_rqmode;
		} else {
			log_debug(ls, "recover_conversion %x set gr %d",
				  lkb->lkb_id, grmode);
			lkb->lkb_grmode = grmode;
		}
	}
}

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_recover_grant() due to locks that may have
   existed from a removed node. */

static void recover_grant(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_RECOVER_GRANT);
}

void dlm_recover_rsbs(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		lock_rsb(r);
		if (is_master(r)) {
			if (rsb_flag(r, RSB_RECOVER_CONVERT))
				recover_conversion(r);

			/* recover lvb before granting locks so the updated
			   lvb/VALNOTVALID is presented in the completion */
			recover_lvb(r);

			if (rsb_flag(r, RSB_NEW_MASTER2))
				recover_grant(r);
			count++;
		} else {
			rsb_clear_flag(r, RSB_VALNOTVALID);
		}
		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
		rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
		rsb_clear_flag(r, RSB_NEW_MASTER2);
		unlock_rsb(r);
	}
	up_read(&ls->ls_root_sem);

	if (count)
		log_rinfo(ls, "dlm_recover_rsbs %d done", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i, error = 0;

	down_write(&ls->ls_root_sem);
	if (!list_empty(&ls->ls_root_list)) {
		log_error(ls, "root list not empty");
		error = -EINVAL;
		goto out;
	}

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			list_add(&r->res_root_list, &ls->ls_root_list);
			dlm_hold_rsb(r);
		}

		if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
			log_error(ls, "dlm_create_root_list toss not empty");
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
 out:
	up_write(&ls->ls_root_sem);
	return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;

	down_write(&ls->ls_root_sem);
	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
		list_del_init(&r->res_root_list);
		dlm_put_rsb(r);
	}
	up_write(&ls->ls_root_sem);
}

void dlm_clear_toss(struct dlm_ls *ls)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	unsigned int count = 0;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
			next = rb_next(n);
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			rb_erase(n, &ls->ls_rsbtbl[i].toss);
			dlm_free_rsb(r);
			count++;
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}

	if (count)
		log_rinfo(ls, "dlm_clear_toss %u done", count);
}