/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"
/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.

   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */
static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
        int error = -EINTR;

        down_write(&ls->ls_recv_active);

        spin_lock(&ls->ls_recover_lock);
        if (ls->ls_recover_seq == seq) {
                set_bit(LSFL_RUNNING, &ls->ls_flags);
                /* unblocks processes waiting to enter the dlm */
                up_write(&ls->ls_in_recovery);
                error = 0;
        }
        spin_unlock(&ls->ls_recover_lock);

        up_write(&ls->ls_recv_active);
        return error;
}
static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
        unsigned long start;
        int error, neg = 0;

        log_debug(ls, "recover %llx", (unsigned long long)rv->seq);

        mutex_lock(&ls->ls_recoverd_active);

        /*
         * Suspending and resuming dlm_astd ensures that no lkb's from this ls
         * will be processed by dlm_astd during recovery.
         */

        dlm_astd_suspend();
        dlm_astd_resume();

        /*
         * Free non-master tossed rsb's.  Master rsb's are kept on toss
         * list and put on root list to be included in resdir recovery.
         */

        dlm_clear_toss_list(ls);

        /*
         * This list of root rsb's will be the basis of most of the recovery
         * routines.
         */

        dlm_create_root_list(ls);

        /*
         * Add or remove nodes from the lockspace's ls_nodes list.
         * Also waits for all nodes to complete dlm_recover_members.
         */

        error = dlm_recover_members(ls, rv, &neg);
        if (error) {
                log_debug(ls, "recover_members failed %d", error);
                goto fail;
        }
        start = jiffies;

        /*
         * Rebuild our own share of the directory by collecting from all other
         * nodes their master rsb names that hash to us.
         */

        error = dlm_recover_directory(ls);
        if (error) {
                log_debug(ls, "recover_directory failed %d", error);
                goto fail;
        }

        /*
         * Wait for all nodes to complete directory rebuild.
         */

        error = dlm_recover_directory_wait(ls);
        if (error) {
                log_debug(ls, "recover_directory_wait failed %d", error);
                goto fail;
        }

        /*
         * We may have outstanding operations that are waiting for a reply from
         * a failed node.  Mark these to be resent after recovery.  Unlock and
         * cancel ops can just be completed.
         */

        dlm_recover_waiters_pre(ls);

        error = dlm_recovery_stopped(ls);
        if (error)
                goto fail;
        if (neg || dlm_no_directory(ls)) {
                /*
                 * Clear lkb's for departed nodes.
                 */

                dlm_purge_locks(ls);

                /*
                 * Get new master nodeid's for rsb's that were mastered on
                 * departed nodes.
                 */

                error = dlm_recover_masters(ls);
                if (error) {
                        log_debug(ls, "recover_masters failed %d", error);
                        goto fail;
                }

                /*
                 * Send our locks on remastered rsb's to the new masters.
                 */

                error = dlm_recover_locks(ls);
                if (error) {
                        log_debug(ls, "recover_locks failed %d", error);
                        goto fail;
                }

                error = dlm_recover_locks_wait(ls);
                if (error) {
                        log_debug(ls, "recover_locks_wait failed %d", error);
                        goto fail;
                }

                /*
                 * Finalize state in master rsb's now that all locks can be
                 * checked.  This includes conversion resolution and lvb
                 * settings.
                 */

                dlm_recover_rsbs(ls);
        } else {
                /*
                 * Other lockspace members may be going through the "neg" steps
                 * while also adding us to the lockspace, in which case they'll
                 * be doing the recover_locks (RS_LOCKS) barrier.
                 */
                dlm_set_recover_status(ls, DLM_RS_LOCKS);

                error = dlm_recover_locks_wait(ls);
                if (error) {
                        log_debug(ls, "recover_locks_wait failed %d", error);
                        goto fail;
                }
        }

        dlm_release_root_list(ls);
        /*
         * Purge directory-related requests that are saved in requestqueue.
         * All dir requests from before recovery are invalid now due to the dir
         * rebuild and will be resent by the requesting nodes.
         */

        dlm_purge_requestqueue(ls);

        dlm_set_recover_status(ls, DLM_RS_DONE);
        error = dlm_recover_done_wait(ls);
        if (error) {
                log_debug(ls, "recover_done_wait failed %d", error);
                goto fail;
        }

        dlm_clear_members_gone(ls);

        dlm_adjust_timeouts(ls);

        error = enable_locking(ls, rv->seq);
        if (error) {
                log_debug(ls, "enable_locking failed %d", error);
                goto fail;
        }

        error = dlm_process_requestqueue(ls);
        if (error) {
                log_debug(ls, "process_requestqueue failed %d", error);
                goto fail;
        }

        error = dlm_recover_waiters_post(ls);
        if (error) {
                log_debug(ls, "recover_waiters_post failed %d", error);
                goto fail;
        }

        dlm_grant_after_purge(ls);

        dlm_astd_wake();

        log_debug(ls, "recover %llx done: %u ms",
                  (unsigned long long)rv->seq,
                  jiffies_to_msecs(jiffies - start));
        mutex_unlock(&ls->ls_recoverd_active);

        return 0;

 fail:
        dlm_release_root_list(ls);
        log_debug(ls, "recover %llx error %d",
                  (unsigned long long)rv->seq, error);
        mutex_unlock(&ls->ls_recoverd_active);
        return error;
}
/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */
static void do_ls_recovery(struct dlm_ls *ls)
{
        struct dlm_recover *rv = NULL;

        spin_lock(&ls->ls_recover_lock);
        rv = ls->ls_recover_args;
        ls->ls_recover_args = NULL;
        if (rv && ls->ls_recover_seq == rv->seq)
                clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
        spin_unlock(&ls->ls_recover_lock);

        if (rv) {
                ls_recover(ls, rv);
                kfree(rv->nodeids);
                kfree(rv->new);
                kfree(rv);
        }
}
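
/* Per-lockspace recovery thread: sleeps until dlm_recoverd_kick() sets
   LSFL_WORK, then runs do_ls_recovery() for the pending recovery event. */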
static int dlm_recoverd(void *arg)
{
        struct dlm_ls *ls;

        ls = dlm_find_lockspace_local(arg);
        if (!ls) {
                log_print("dlm_recoverd: no lockspace %p", arg);
                return -1;
        }

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!test_bit(LSFL_WORK, &ls->ls_flags))
                        schedule();
                set_current_state(TASK_RUNNING);

                if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
                        do_ls_recovery(ls);
        }

        dlm_put_lockspace(ls);
        return 0;
}
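
/* Flag recovery work for the lockspace and wake its recovery thread. */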
void dlm_recoverd_kick(struct dlm_ls *ls)
{
        set_bit(LSFL_WORK, &ls->ls_flags);
        wake_up_process(ls->ls_recoverd_task);
}
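
/* Create the recovery thread for a lockspace. */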
int dlm_recoverd_start(struct dlm_ls *ls)
{
        struct task_struct *p;
        int error = 0;

        p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
        if (IS_ERR(p))
                error = PTR_ERR(p);
        else
                ls->ls_recoverd_task = p;
        return error;
}
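
/* Stop the recovery thread; kthread_stop() waits for it to exit. */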
void dlm_recoverd_stop(struct dlm_ls *ls)
{
        kthread_stop(ls->ls_recoverd_task);
}
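
/* Suspend/resume bracket recovery: wake any waiter on ls_wait_general and
   hold ls_recoverd_active so ls_recover() cannot run until resumed. */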
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
        wake_up(&ls->ls_wait_general);
        mutex_lock(&ls->ls_recoverd_active);
}
void dlm_recoverd_resume(struct dlm_ls *ls)
{
        mutex_unlock(&ls->ls_recoverd_active);
}