/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
                                 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
                                 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
                                              u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
                                         void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;
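
/* Migration cookies tag multi-message transfers of a single lockres so
 * the receiver can associate successive packets with the same resource;
 * a fresh cookie is taken in dlm_send_one_lockres() whenever a lockres
 * holds more locks than fit in one message. */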
static u64 dlm_get_next_mig_cookie(void)
{
        u64 c;
        spin_lock(&dlm_mig_cookie_lock);
        c = dlm_mig_cookie;
        if (dlm_mig_cookie == (~0ULL))
                dlm_mig_cookie = 1;
        else
                dlm_mig_cookie++;
        spin_unlock(&dlm_mig_cookie_lock);
        return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
                                          u8 dead_node)
{
        assert_spin_locked(&dlm->spinlock);
        if (dlm->reco.dead_node != dead_node)
                mlog(0, "%s: changing dead_node from %u to %u\n",
                     dlm->name, dlm->reco.dead_node, dead_node);
        dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
                                       u8 master)
{
        assert_spin_locked(&dlm->spinlock);
        mlog(0, "%s: changing new_master from %u to %u\n",
             dlm->name, dlm->reco.new_master, master);
        dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        assert_spin_locked(&dlm->spinlock);
        clear_bit(dlm->reco.dead_node, dlm->recovery_map);
        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        __dlm_reset_recovery(dlm);
        spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
        struct dlm_ctxt *dlm =
                container_of(work, struct dlm_ctxt, dispatched_work);
        LIST_HEAD(tmp_list);
        struct dlm_work_item *item, *next;
        dlm_workfunc_t *workfunc;
        int tot = 0;

        spin_lock(&dlm->work_lock);
        list_splice_init(&dlm->work_list, &tmp_list);
        spin_unlock(&dlm->work_lock);

        list_for_each_entry(item, &tmp_list, list) {
                tot++;
        }
        mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

        list_for_each_entry_safe(item, next, &tmp_list, list) {
                workfunc = item->func;
                list_del_init(&item->list);

                /* already have ref on dlm to avoid having
                 * it disappear.  just double-check. */
                BUG_ON(item->dlm != dlm);

                /* this is allowed to sleep and
                 * call network stuff */
                workfunc(item, item->data);

                dlm_put(dlm);
                kfree(item);
        }
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
        /* wake the recovery thread
         * this will wake the reco thread in one of three places
         * 1) sleeping with no recovery happening
         * 2) sleeping with recovery mastered elsewhere
         * 3) recovery mastered here, waiting on reco data */

        wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
        mlog(0, "starting dlm recovery thread...\n");

        dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
                                                "dlm_reco_thread");
        if (IS_ERR(dlm->dlm_reco_thread_task)) {
                mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
                dlm->dlm_reco_thread_task = NULL;
                return -EINVAL;
        }

        return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
        if (dlm->dlm_reco_thread_task) {
                mlog(0, "waiting for dlm recovery thread to exit\n");
                kthread_stop(dlm->dlm_reco_thread_task);
                dlm->dlm_reco_thread_task = NULL;
        }
}

/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 */

static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
        struct dlm_reco_node_data *ndata;
        struct dlm_lock_resource *res;

        mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
             dlm->reco.dead_node, dlm->reco.new_master);

        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                char *st = "unknown";
                switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                                st = "init";
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                st = "requesting";
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                st = "dead";
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                                st = "receiving";
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                st = "requested";
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                st = "done";
                                break;
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                st = "finalize-sent";
                                break;
                        default:
                                st = "bad";
                                break;
                }
                mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
                     dlm->name, ndata->node_num, st);
        }
        list_for_each_entry(res, &dlm->reco.resources, recovering) {
                mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
                     dlm->name, res->lockname.len, res->lockname.name);
        }
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
        int status;
        struct dlm_ctxt *dlm = data;
        unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

        mlog(0, "dlm thread running for %s...\n", dlm->name);

        while (!kthread_should_stop()) {
                if (dlm_domain_fully_joined(dlm)) {
                        status = dlm_do_recovery(dlm);
                        if (status == -EAGAIN) {
                                /* do not sleep, recheck immediately. */
                                continue;
                        }
                        if (status < 0)
                                mlog_errno(status);
                }

                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                                 kthread_should_stop(),
                                                 timeout);
        }

        mlog(0, "quitting DLM recovery thread\n");
        return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
        int ready;
        spin_lock(&dlm->spinlock);
        ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
        spin_unlock(&dlm->spinlock);
        return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
        int dead;
        spin_lock(&dlm->spinlock);
        dead = !test_bit(node, dlm->domain_map);
        spin_unlock(&dlm->spinlock);
        return dead;
}

/* returns true if node has been cleared from the recovery map,
 * i.e. recovery of its locks has completed */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
        int recovered;
        spin_lock(&dlm->spinlock);
        recovered = !test_bit(node, dlm->recovery_map);
        spin_unlock(&dlm->spinlock);
        return recovered;
}

int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (timeout) {
                mlog(ML_NOTICE, "%s: waiting %dms for notification of "
                     "death of node %u\n", dlm->name, timeout, node);
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_dead(dlm, node),
                                   msecs_to_jiffies(timeout));
        } else {
                mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
                     "of death of node %u\n", dlm->name, node);
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_dead(dlm, node));
        }
        /* for now, return 0 */
        return 0;
}

int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (timeout) {
                mlog(0, "%s: waiting %dms for notification of "
                     "recovery of node %u\n", dlm->name, timeout, node);
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_recovered(dlm, node),
                                   msecs_to_jiffies(timeout));
        } else {
                mlog(0, "%s: waiting indefinitely for notification "
                     "of recovery of node %u\n", dlm->name, node);
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_recovered(dlm, node));
        }
        /* for now, return 0 */
        return 0;
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
        int in_recovery;
        spin_lock(&dlm->spinlock);
        in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        spin_unlock(&dlm->spinlock);
        return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
        if (dlm_in_recovery(dlm)) {
                mlog(0, "%s: reco thread %d in recovery: "
                     "state=%d, master=%u, dead=%u\n",
                     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
                     dlm->reco.state, dlm->reco.new_master,
                     dlm->reco.dead_node);
        }
        wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
        dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
        wake_up(&dlm->reco.event);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
        int status = 0;
        int ret;

        spin_lock(&dlm->spinlock);

        /* check to see if the new master has died */
        if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
            test_bit(dlm->reco.new_master, dlm->recovery_map)) {
                mlog(0, "new master %u died while recovering %u!\n",
                     dlm->reco.new_master, dlm->reco.dead_node);
                /* unset the new_master, leave dead_node */
                dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
        }

        /* select a target to recover */
        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                int bit;

                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES || bit < 0)
                        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
                else
                        dlm_set_reco_dead_node(dlm, bit);
        } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
                /* BUG? */
                mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
                     dlm->reco.dead_node);
                dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        }

        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                // mlog(0, "nothing to recover!  sleeping now!\n");
                spin_unlock(&dlm->spinlock);
                /* return to main thread loop and sleep. */
                return 0;
        }
        mlog(0, "%s(%d): recovery thread found node %u in the recovery map!\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.dead_node);
        spin_unlock(&dlm->spinlock);

        /* take write barrier */
        /* (stops the list reshuffling thread, proxy ast handling) */
        dlm_begin_recovery(dlm);

        if (dlm->reco.new_master == dlm->node_num)
                goto master_here;

        if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                /* choose a new master, returns 0 if this node
                 * is the master, -EEXIST if it's another node.
                 * this does not return until a new master is chosen
                 * or recovery completes entirely. */
                ret = dlm_pick_recovery_master(dlm);
                if (!ret) {
                        /* already notified everyone.  go. */
                        goto master_here;
                }
                mlog(0, "another node will master this recovery session.\n");
        }
        mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master,
             dlm->node_num, dlm->reco.dead_node);

        /* it is safe to start everything back up here
         * because all of the dead node's lock resources
         * have been marked as in-recovery */
        dlm_end_recovery(dlm);

        /* sleep out in main dlm_recovery_thread loop. */
        return 0;

master_here:
        mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
             "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->node_num, dlm->reco.dead_node, dlm->name);

        status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
        if (status < 0) {
                /* we should never hit this anymore */
                mlog(ML_ERROR, "error %d remastering locks for node %u, "
                     "retrying.\n", status, dlm->reco.dead_node);
                /* yield a bit to allow any final network messages
                 * to get handled on remaining nodes */
                msleep(100);
        } else {
                /* success!  see if any other nodes need recovery */
                mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
                     dlm->name, dlm->reco.dead_node, dlm->node_num);
                dlm_reset_recovery(dlm);
        }
        dlm_end_recovery(dlm);

        /* continue and look for another dead node */
        return -EAGAIN;
}

static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
        int status = 0;
        struct dlm_reco_node_data *ndata;
        int all_nodes_done;
        int destroy = 0;
        int pass = 0;

        do {
                /* we have become recovery master.  there is no escaping
                 * this, so just keep trying until we get it. */
                status = dlm_init_recovery_area(dlm, dead_node);
                if (status < 0) {
                        mlog(ML_ERROR, "%s: failed to alloc recovery area, "
                             "retrying\n", dlm->name);
                        msleep(1000);
                }
        } while (status != 0);

        /* safe to access the node data list without a lock, since this
         * process is the only one to change the list */
        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
                ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

                mlog(0, "requesting lock info from node %u\n",
                     ndata->node_num);

                if (ndata->node_num == dlm->node_num) {
                        ndata->state = DLM_RECO_NODE_DATA_DONE;
                        continue;
                }

                do {
                        status = dlm_request_all_locks(dlm, ndata->node_num,
                                                       dead_node);
                        if (status < 0) {
                                mlog_errno(status);
                                if (dlm_is_host_down(status)) {
                                        /* node died, ignore it for recovery */
                                        status = 0;
                                        ndata->state = DLM_RECO_NODE_DATA_DEAD;
                                        /* wait for the domain map to catch up
                                         * with the network state. */
                                        wait_event_timeout(dlm->dlm_reco_thread_wq,
                                                           dlm_is_node_dead(dlm,
                                                                ndata->node_num),
                                                           msecs_to_jiffies(1000));
                                        mlog(0, "waited 1 sec for %u, "
                                             "dead? %s\n", ndata->node_num,
                                             dlm_is_node_dead(dlm, ndata->node_num) ?
                                             "yes" : "no");
                                } else {
                                        /* -ENOMEM on the other node */
                                        mlog(0, "%s: node %u returned "
                                             "%d during recovery, retrying "
                                             "after a short wait\n",
                                             dlm->name, ndata->node_num,
                                             status);
                                        msleep(100);
                                }
                        }
                } while (status != 0);

                spin_lock(&dlm_reco_state_lock);
                switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                BUG();
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                mlog(0, "node %u died after requesting "
                                     "recovery info for node %u\n",
                                     ndata->node_num, dead_node);
                                /* fine.  don't need this node's info.
                                 * continue without it. */
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
                                mlog(0, "now receiving recovery data from "
                                     "node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                                mlog(0, "already receiving recovery data from "
                                     "node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                mlog(0, "already DONE receiving recovery data "
                                     "from node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                }
                spin_unlock(&dlm_reco_state_lock);
        }

        mlog(0, "done requesting all lock info\n");

        /* nodes should be sending reco data now
         * just need to wait */

        while (1) {
                /* check all the nodes now to see if we are
                 * done, or if anyone died */
                all_nodes_done = 1;
                spin_lock(&dlm_reco_state_lock);
                list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                        mlog(0, "checking recovery state of node %u\n",
                             ndata->node_num);
                        switch (ndata->state) {
                                case DLM_RECO_NODE_DATA_INIT:
                                case DLM_RECO_NODE_DATA_REQUESTING:
                                        mlog(ML_ERROR, "bad ndata state for "
                                             "node %u: state=%d\n",
                                             ndata->node_num, ndata->state);
                                        BUG();
                                        break;
                                case DLM_RECO_NODE_DATA_DEAD:
                                        mlog(0, "node %u died after "
                                             "requesting recovery info for "
                                             "node %u\n", ndata->node_num,
                                             dead_node);
                                        break;
                                case DLM_RECO_NODE_DATA_RECEIVING:
                                case DLM_RECO_NODE_DATA_REQUESTED:
                                        mlog(0, "%s: node %u still in state %s\n",
                                             dlm->name, ndata->node_num,
                                             ndata->state == DLM_RECO_NODE_DATA_RECEIVING ?
                                             "receiving" : "requested");
                                        all_nodes_done = 0;
                                        break;
                                case DLM_RECO_NODE_DATA_DONE:
                                        mlog(0, "%s: node %u state is done\n",
                                             dlm->name, ndata->node_num);
                                        break;
                                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                        mlog(0, "%s: node %u state is finalize\n",
                                             dlm->name, ndata->node_num);
                                        break;
                        }
                }
                spin_unlock(&dlm_reco_state_lock);

                mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
                     all_nodes_done ? "yes" : "no");
                if (all_nodes_done) {
                        int ret;

                        /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
                         * just send a finalize message to everyone and
                         * clean up */
                        mlog(0, "all nodes are done! send finalize\n");
                        ret = dlm_send_finalize_reco_message(dlm);
                        if (ret < 0)
                                mlog_errno(ret);

                        spin_lock(&dlm->spinlock);
                        dlm_finish_local_lockres_recovery(dlm, dead_node,
                                                          dlm->node_num);
                        spin_unlock(&dlm->spinlock);
                        mlog(0, "should be done with recovery!\n");

                        mlog(0, "finishing recovery of %s at %lu, "
                             "dead=%u, this=%u, new=%u\n", dlm->name,
                             jiffies, dlm->reco.dead_node,
                             dlm->node_num, dlm->reco.new_master);
                        destroy = 1;
                        status = 0;
                        /* rescan everything marked dirty along the way */
                        dlm_kick_thread(dlm, NULL);
                        break;
                }
                /* wait to be signalled, with periodic timeout
                 * to check for node death */
                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                                 kthread_should_stop(),
                                                 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
        }

        if (destroy)
                dlm_destroy_recovery_area(dlm, dead_node);

        return status;
}

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        int num = 0;
        struct dlm_reco_node_data *ndata;

        spin_lock(&dlm->spinlock);
        memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
        /* nodes can only be removed (by dying) after dropping
         * this lock, and death will be trapped later, so this should do */
        spin_unlock(&dlm->spinlock);

        while (1) {
                num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
                if (num >= O2NM_MAX_NODES) {
                        break;
                }
                BUG_ON(num == dead_node);

                ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
                if (!ndata) {
                        dlm_destroy_recovery_area(dlm, dead_node);
                        return -ENOMEM;
                }
                ndata->node_num = num;
                ndata->state = DLM_RECO_NODE_DATA_INIT;
                spin_lock(&dlm_reco_state_lock);
                list_add_tail(&ndata->list, &dlm->reco.node_data);
                spin_unlock(&dlm_reco_state_lock);
                num++;
        }

        return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        struct dlm_reco_node_data *ndata, *next;
        LIST_HEAD(tmplist);

        spin_lock(&dlm_reco_state_lock);
        list_splice_init(&dlm->reco.node_data, &tmplist);
        spin_unlock(&dlm_reco_state_lock);

        list_for_each_entry_safe(ndata, next, &tmplist, list) {
                list_del_init(&ndata->list);
                kfree(ndata);
        }
}
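
/* Ask request_from to send us all of its locks for dead_node.  The reply
 * is not in-band: the remote node queues dlm_request_all_locks_worker(),
 * which streams the lock data back as DLM_MIG_LOCKRES_MSG messages. */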
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
                                 u8 dead_node)
{
        struct dlm_lock_request lr;
        int ret;

        mlog(0, "\n");


        mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
             "to %u\n", dead_node, request_from);

        memset(&lr, 0, sizeof(lr));
        lr.node_idx = dlm->node_num;
        lr.dead_node = dead_node;

        // send message
        ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
                                 &lr, sizeof(lr), request_from, NULL);

        /* negative status is handled by caller */
        if (ret < 0)
                mlog(ML_ERROR, "Error %d when sending message %u (key "
                     "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG,
                     dlm->key, request_from);

        // return from here, then
        // sleep until all received or error
        return ret;
}

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
                                  void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        if (lr->dead_node != dlm->reco.dead_node) {
                mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
                     "dead_node is %u\n", dlm->name, lr->node_idx,
                     lr->dead_node, dlm->reco.dead_node);
                dlm_print_reco_node_status(dlm);
                /* this is a hack */
                dlm_put(dlm);
                return -ENOMEM;
        }
        BUG_ON(lr->dead_node != dlm->reco.dead_node);

        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!item) {
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* this will get freed by dlm_request_all_locks_worker */
        buf = (char *) __get_free_page(GFP_NOFS);
        if (!buf) {
                kfree(item);
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* queue up work for dlm_request_all_locks_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
        item->u.ral.reco_master = lr->node_idx;
        item->u.ral.dead_node = lr->dead_node;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        queue_work(dlm->dlm_worker, &dlm->dispatched_work);

        dlm_put(dlm);
        return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_migratable_lockres *mres;
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm;
        LIST_HEAD(resources);
        int ret;
        u8 dead_node, reco_master;
        int skip_all_done = 0;

        dlm = item->dlm;
        dead_node = item->u.ral.dead_node;
        reco_master = item->u.ral.reco_master;
        mres = (struct dlm_migratable_lockres *)data;

        mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
             dlm->name, dead_node, reco_master);

        if (dead_node != dlm->reco.dead_node ||
            reco_master != dlm->reco.new_master) {
                /* worker could have been created before the recovery master
                 * died.  if so, do not continue, but do not error. */
                if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                        mlog(ML_NOTICE, "%s: will not send recovery state, "
                             "recovery master %u died, thread=(dead=%u,mas=%u)"
                             " current=(dead=%u,mas=%u)\n", dlm->name,
                             reco_master, dead_node, reco_master,
                             dlm->reco.dead_node, dlm->reco.new_master);
                } else {
                        mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
                             "master=%u), request(dead=%u, master=%u)\n",
                             dlm->name, dlm->reco.dead_node,
                             dlm->reco.new_master, dead_node, reco_master);
                }
                goto leave;
        }

        /* lock resources should have already been moved to the
         * dlm->reco.resources list.  now move items from that list
         * to a temp list if the dead owner matches.  note that the
         * whole cluster recovers only one node at a time, so we
         * can safely move UNKNOWN lock resources for each recovery
         * session. */
        dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

        /* now we can begin blasting lockreses without the dlm lock */

        /* any errors returned will be due to the new_master dying,
         * the dlm_reco_thread should detect this */
        list_for_each_entry(res, &resources, recovering) {
                ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
                                           DLM_MRES_RECOVERY);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery state for dead node %u, ret=%d\n", dlm->name,
                             reco_master, dead_node, ret);
                        skip_all_done = 1;
                        break;
                }
        }

        /* move the resources back to the list */
        spin_lock(&dlm->spinlock);
        list_splice_init(&resources, &dlm->reco.resources);
        spin_unlock(&dlm->spinlock);

        if (!skip_all_done) {
                ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery all-done for dead node %u, ret=%d\n",
                             dlm->name, reco_master, dead_node, ret);
                }
        }
leave:
        free_page((unsigned long)data);
}

static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
        int ret, tmpret;
        struct dlm_reco_data_done done_msg;

        memset(&done_msg, 0, sizeof(done_msg));
        done_msg.node_idx = dlm->node_num;
        done_msg.dead_node = dead_node;
        mlog(0, "sending DATA DONE message to %u, "
             "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
             done_msg.dead_node);

        ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
                                 sizeof(done_msg), send_to, &tmpret);
        if (ret < 0) {
                mlog(ML_ERROR, "Error %d when sending message %u (key "
                     "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG,
                     dlm->key, send_to);
                if (!dlm_is_host_down(ret)) {
                        BUG();
                }
        } else
                ret = tmpret;
        return ret;
}

int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
        struct dlm_reco_node_data *ndata = NULL;
        int ret = -EINVAL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
             "node_idx=%u, this node=%u\n", done->dead_node,
             dlm->reco.dead_node, done->node_idx, dlm->node_num);

        mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
                        "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
                        "node_idx=%u, this node=%u\n", done->dead_node,
                        dlm->reco.dead_node, done->node_idx, dlm->node_num);

        spin_lock(&dlm_reco_state_lock);
        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                if (ndata->node_num != done->node_idx)
                        continue;

                switch (ndata->state) {
                        /* should have moved beyond INIT but not to FINALIZE yet */
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_DEAD:
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                mlog(ML_ERROR, "bad ndata state for node %u:"
                                     " state=%d\n", ndata->node_num,
                                     ndata->state);
                                BUG();
                                break;
                        /* these states are possible at this point, anywhere along
                         * the line of recovery */
                        case DLM_RECO_NODE_DATA_DONE:
                        case DLM_RECO_NODE_DATA_RECEIVING:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                mlog(0, "node %u is DONE sending "
                                     "recovery data!\n",
                                     ndata->node_num);

                                ndata->state = DLM_RECO_NODE_DATA_DONE;
                                ret = 0;
                                break;
                }
        }
        spin_unlock(&dlm_reco_state_lock);

        /* wake the recovery thread, some node is done */
        if (!ret)
                dlm_kick_recovery_thread(dlm);

        if (ret < 0)
                mlog(ML_ERROR, "failed to find recovery node data for node "
                     "%u\n", done->node_idx);
        dlm_put(dlm);

        mlog(0, "leaving reco data done handler, ret=%d\n", ret);
        return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list,
                                        u8 dead_node)
{
        struct dlm_lock_resource *res, *next;
        struct dlm_lock *lock;

        spin_lock(&dlm->spinlock);
        list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
                /* always prune any $RECOVERY entries for dead nodes,
                 * otherwise hangs can occur during later recovery */
                if (dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                        spin_lock(&res->spinlock);
                        list_for_each_entry(lock, &res->granted, list) {
                                if (lock->ml.node == dead_node) {
                                        mlog(0, "AHA! there was "
                                             "a $RECOVERY lock for dead "
                                             "node %u (%s)!\n",
                                             dead_node, dlm->name);
                                        list_del_init(&lock->list);
                                        dlm_lock_put(lock);
                                        break;
                                }
                        }
                        spin_unlock(&res->spinlock);
                        continue;
                }

                if (res->owner == dead_node) {
                        mlog(0, "found lockres owned by dead node while "
                             "doing recovery for node %u. sending it.\n",
                             dead_node);
                        list_move_tail(&res->recovering, list);
                } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "found UNKNOWN owner while doing recovery "
                             "for node %u. sending it.\n", dead_node);
                        list_move_tail(&res->recovering, list);
                }
        }
        spin_unlock(&dlm->spinlock);
}
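
/* Count the locks on all three queues.  granted, converting and blocked
 * are consecutive list_heads in struct dlm_lock_resource, so queue++
 * steps through them in order. */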
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
        int total_locks = 0;
        struct list_head *iter, *queue = &res->granted;
        int i;

        for (i = 0; i < 3; i++) {
                list_for_each(iter, queue)
                        total_locks++;
                queue++;
        }
        return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks)
{
        u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
        int mres_total_locks = be32_to_cpu(mres->total_locks);
        int sz, ret = 0, status = 0;
        u8 orig_flags = mres->flags,
           orig_master = mres->master;

        BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
        if (!mres->num_locks)
                return 0;

        sz = sizeof(struct dlm_migratable_lockres) +
             (mres->num_locks * sizeof(struct dlm_migratable_lock));

        /* add an all-done flag if we reached the last lock */
        orig_flags = mres->flags;
        BUG_ON(total_locks > mres_total_locks);
        if (total_locks == mres_total_locks)
                mres->flags |= DLM_MRES_ALL_DONE;

        mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
             send_to);

        /* send it */
        ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
                                 sz, send_to, &status);
        if (ret < 0) {
                /* XXX: negative status is not handled.
                 * this will end up killing this node. */
                mlog(ML_ERROR, "Error %d when sending message %u (key "
                     "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG,
                     dlm->key, send_to);
        } else {
                /* might get an -ENOMEM back here */
                ret = status;
                if (ret < 0) {
                        mlog_errno(ret);

                        if (ret == -EFAULT) {
                                mlog(ML_ERROR, "node %u told me to kill "
                                     "myself!\n", send_to);
                                BUG();
                        }
                }
        }

        /* zero and reinit the message buffer */
        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, mres_total_locks,
                                    mig_cookie, orig_flags, orig_master);
        return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master)
{
        /* mres here is one full page */
        clear_page(mres);
        mres->lockname_len = namelen;
        memcpy(mres->lockname, lockname, namelen);
        mres->num_locks = 0;
        mres->total_locks = cpu_to_be32(total_locks);
        mres->mig_cookie = cpu_to_be64(cookie);
        mres->flags = flags;
        mres->master = master;
}
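
/* Copy this lock's lvb into the migration message if it may hold a
 * valid one (granted EX/PR locks only), and sanity-check that every
 * valid lvb on the lockres agrees with the one already copied. */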
static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
                                          struct dlm_migratable_lockres *mres,
                                          int queue)
{
        if (!lock->lksb)
                return;

        /* Ignore lvb in all locks in the blocked list */
        if (queue == DLM_BLOCKED_LIST)
                return;

        /* Only consider lvbs in locks with granted EX or PR lock levels */
        if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
                return;

        if (dlm_lvb_is_empty(mres->lvb)) {
                memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
                return;
        }

        /* Ensure the lvb copied for migration matches in other valid locks */
        if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
                return;

        mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
             "node=%u\n",
             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
             lock->lockres->lockname.len, lock->lockres->lockname.name,
             lock->ml.node);
        dlm_print_one_lock_resource(lock->lockres);
        BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
                                 struct dlm_migratable_lockres *mres, int queue)
{
        struct dlm_migratable_lock *ml;
        int lock_num = mres->num_locks;

        ml = &(mres->ml[lock_num]);
        ml->cookie = lock->ml.cookie;
        ml->type = lock->ml.type;
        ml->convert_type = lock->ml.convert_type;
        ml->highest_blocked = lock->ml.highest_blocked;
        ml->list = queue;
        if (lock->lksb) {
                ml->flags = lock->lksb->flags;
                dlm_prepare_lvb_for_migration(lock, mres, queue);
        }
        ml->node = lock->ml.node;
        mres->num_locks++;
        /* we reached the max, send this network message */
        if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
                return 1;
        return 0;
}
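
/* A dummy lock (zero cookie, LKM_IVMODE everywhere, on the blocked
 * list) tells the new master that this node holds only a mastery
 * (refmap) reference on the lockres, with no actual locks.
 * dlm_is_dummy_lock() below is its receive-side counterpart. */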
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
                               struct dlm_migratable_lockres *mres)
{
        struct dlm_lock dummy;
        memset(&dummy, 0, sizeof(dummy));
        dummy.ml.cookie = 0;
        dummy.ml.type = LKM_IVMODE;
        dummy.ml.convert_type = LKM_IVMODE;
        dummy.ml.highest_blocked = LKM_IVMODE;
        dummy.lksb = NULL;
        dummy.ml.node = dlm->node_num;
        dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lock *ml,
                                    u8 *nodenum)
{
        if (unlikely(ml->cookie == 0 &&
            ml->type == LKM_IVMODE &&
            ml->convert_type == LKM_IVMODE &&
            ml->highest_blocked == LKM_IVMODE &&
            ml->list == DLM_BLOCKED_LIST)) {
                *nodenum = ml->node;
                return 1;
        }
        return 0;
}

int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                         struct dlm_migratable_lockres *mres,
                         u8 send_to, u8 flags)
{
        struct list_head *queue;
        int total_locks, i;
        u64 mig_cookie = 0;
        struct dlm_lock *lock;
        int ret = 0;

        BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        mlog(0, "sending to %u\n", send_to);

        total_locks = dlm_num_locks_in_lockres(res);
        if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
                /* rare, but possible */
                mlog(0, "argh.  lockres has %d locks.  this will "
                     "require more than one network packet to "
                     "migrate\n", total_locks);
                mig_cookie = dlm_get_next_mig_cookie();
        }

        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, total_locks,
                                    mig_cookie, flags, res->owner);

        total_locks = 0;
        for (i = DLM_GRANTED_LIST; i <= DLM_BLOCKED_LIST; i++) {
                queue = dlm_list_idx_to_ptr(res, i);
                list_for_each_entry(lock, queue, list) {
                        /* add another lock. */
                        total_locks++;
                        if (!dlm_add_lock_to_array(lock, mres, i))
                                continue;

                        /* this filled the lock message,
                         * we must send it immediately. */
                        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
                                                       res, total_locks);
                        if (ret < 0)
                                goto error;
                }
        }
        if (total_locks == 0) {
                /* send a dummy lock to indicate a mastery reference only */
                mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
                     dlm->name, res->lockname.len, res->lockname.name,
                     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
                     "migration");
                dlm_add_dummy_lock(dlm, mres);
        }
        /* flush any remaining locks */
        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
        if (ret < 0)
                goto error;
        return ret;

error:
        mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
             dlm->name, ret);
        if (!dlm_is_host_down(ret))
                BUG();
        mlog(0, "%s: node %u went down while sending %s "
             "lockres %.*s\n", dlm->name, send_to,
             flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
             res->lockname.len, res->lockname.name);
        return ret;
}


/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                            void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_migratable_lockres *mres =
                (struct dlm_migratable_lockres *)msg->buf;
        int ret = 0;
        u8 real_master;
        u8 extra_refs = 0;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;
        struct dlm_lock_resource *res = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        real_master = mres->master;
        if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* cannot migrate a lockres with no master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
        }

        mlog(0, "%s message received from node %u\n",
             (mres->flags & DLM_MRES_RECOVERY) ?
             "recovery" : "migration", mres->master);
        if (mres->flags & DLM_MRES_ALL_DONE)
                mlog(0, "all done flag.  all lockres data received!\n");

        ret = -ENOMEM;
        buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!buf || !item)
                goto leave;

        /* lookup the lock to see if we have a secondary queue for this
         * already...  just add the locks in and this will have its owner
         * and RECOVERY flag changed when it completes. */
        res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
        if (res) {
                /* this will get a ref on res */
                /* mark it as recovering/migrating and hash it */
                spin_lock(&res->spinlock);
                if (mres->flags & DLM_MRES_RECOVERY) {
                        res->state |= DLM_LOCK_RES_RECOVERING;
                } else {
                        if (res->state & DLM_LOCK_RES_MIGRATING) {
                                /* this is at least the second
                                 * lockres message */
                                mlog(0, "lock %.*s is already migrating\n",
                                     mres->lockname_len,
                                     mres->lockname);
                        } else if (res->state & DLM_LOCK_RES_RECOVERING) {
                                /* caller should BUG */
                                mlog(ML_ERROR, "node is attempting to migrate "
                                     "lock %.*s, but marked as recovering!\n",
                                     mres->lockname_len, mres->lockname);
                                ret = -EFAULT;
                                spin_unlock(&res->spinlock);
                                goto leave;
                        }
                        res->state |= DLM_LOCK_RES_MIGRATING;
                }
                spin_unlock(&res->spinlock);
        } else {
                /* need to allocate, just like if it was
                 * mastered here normally */
                res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
                if (!res)
                        goto leave;

                /* to match the ref that we would have gotten if
                 * dlm_lookup_lockres had succeeded */
                dlm_lockres_get(res);

                /* mark it as recovering/migrating and hash it */
                if (mres->flags & DLM_MRES_RECOVERY)
                        res->state |= DLM_LOCK_RES_RECOVERING;
                else
                        res->state |= DLM_LOCK_RES_MIGRATING;

                spin_lock(&dlm->spinlock);
                __dlm_insert_lockres(dlm, res);
                spin_unlock(&dlm->spinlock);

                /* Add an extra ref for this lock-less lockres lest the
                 * dlm_thread purges it before we get the chance to add
                 * locks to it */
                dlm_lockres_get(res);

                /* There are three refs that need to be put.
                 * 1. Taken above.
                 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
                 * 3. dlm_lookup_lockres()
                 * The first one is handled at the end of this function. The
                 * other two are handled in the worker thread after locks have
                 * been attached. Yes, we don't wait for purge time to match
                 * kref_init. The lockres will still have at least one ref
                 * added because it is in the hash __dlm_insert_lockres() */
                extra_refs++;

                /* now that the new lockres is inserted,
                 * make it usable by other processes */
                spin_lock(&res->spinlock);
                res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
                spin_unlock(&res->spinlock);
                wake_up(&res->wq);
        }

        /* at this point we have allocated everything we need,
         * and we have a hashed lockres with an extra ref and
         * the proper res->state flags. */
        ret = 0;
        spin_lock(&res->spinlock);
        /* drop this either when master requery finds a different master
         * or when a lock is added by the recovery worker */
        dlm_lockres_grab_inflight_ref(dlm, res);
        if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* migration cannot have an unknown master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
                mlog(0, "recovery has passed me a lockres with an "
                     "unknown owner.. will need to requery: "
                     "%.*s\n", mres->lockname_len, mres->lockname);
        } else {
                /* take a reference now to pin the lockres, drop it
                 * when locks are added in the worker */
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
        }
        spin_unlock(&res->spinlock);

        /* queue up work for dlm_mig_lockres_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
        dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
        item->u.ml.lockres = res; /* already have a ref */
        item->u.ml.real_master = real_master;
        item->u.ml.extra_ref = extra_refs;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
        /* One extra ref taken needs to be put here */
        if (extra_refs)
                dlm_lockres_put(res);

        dlm_put(dlm);
        if (ret < 0) {
                kfree(buf);
                kfree(item);
                mlog_errno(ret);
        }

        return ret;
}
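
/* Runs in dlm_worker context: requery the master if it was unknown at
 * receive time, then attach the locks carried in mres to the local
 * lockres via dlm_process_recovery_data(). */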
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_ctxt *dlm;
        struct dlm_migratable_lockres *mres;
        int ret = 0;
        struct dlm_lock_resource *res;
        u8 real_master;
        u8 extra_ref;

        dlm = item->dlm;
        mres = (struct dlm_migratable_lockres *)data;

        res = item->u.ml.lockres;
        real_master = item->u.ml.real_master;
        extra_ref = item->u.ml.extra_ref;

        if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* this case is super-rare.  only occurs if
                 * node death happens during migration. */
again:
                ret = dlm_lockres_master_requery(dlm, res, &real_master);
                if (ret < 0) {
                        mlog(0, "dlm_lockres_master_requery ret=%d\n",
                             ret);
                        goto again;
                }
                if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "lockres %.*s not claimed.  "
                             "this node will take it.\n",
                             res->lockname.len, res->lockname.name);
                } else {
                        spin_lock(&res->spinlock);
                        dlm_lockres_drop_inflight_ref(dlm, res);
                        spin_unlock(&res->spinlock);
                        mlog(0, "master needs to respond to sender "
                             "that node %u still owns %.*s\n",
                             real_master, res->lockname.len,
                             res->lockname.name);
                        /* cannot touch this lockres */
                        goto leave;
                }
        }

        ret = dlm_process_recovery_data(dlm, res, mres);
        if (ret < 0)
                mlog(0, "dlm_process_recovery_data returned %d\n", ret);
        else
                mlog(0, "dlm_process_recovery_data succeeded\n");

        if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
                           (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
                ret = dlm_finish_migration(dlm, res, mres->master);
                if (ret < 0)
                        mlog_errno(ret);
        }

leave:
        /* See comment in dlm_mig_lockres_handler() */
        if (res) {
                if (extra_ref)
                        dlm_lockres_put(res);
                dlm_lockres_put(res);
        }
        kfree(data);
}


static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 *real_master)
{
        struct dlm_node_iter iter;
        int nodenum;
        int ret = 0;

        *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

        /* we only reach here if one of the two nodes in a
         * migration died while the migration was in progress.
         * at this point we need to requery the master.  we
         * know that the new_master got as far as creating
         * an mle on at least one node, but we do not know
         * if any nodes had actually cleared the mle and set
         * the master to the new_master.  the old master
         * is supposed to set the owner to UNKNOWN in the
         * event of a new_master death, so the only possible
         * responses that we can get from nodes here are
         * that the master is new_master, or that the master
         * is UNKNOWN.
         * if all nodes come back with UNKNOWN then we know
         * the lock needs remastering here.
         * if any node comes back with a valid master, check
         * to see if that master is the one that we are
         * recovering.  if so, then the new_master died and
         * we need to remaster this lock.  if not, then the
         * new_master survived and that node will respond to
         * other nodes about the owner.
         * if there is an owner, this node needs to dump this
         * lockres and alert the sender that this lockres
         * was rejected. */
        spin_lock(&dlm->spinlock);
        dlm_node_iter_init(dlm->domain_map, &iter);
        spin_unlock(&dlm->spinlock);

        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
                /* do not send to self */
                if (nodenum == dlm->node_num)
                        continue;
                ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
                if (ret < 0) {
                        mlog_errno(ret);
                        if (!dlm_is_host_down(ret))
                                BUG();
                        /* host is down, so answer for that node would be
                         * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
                }
                if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "lock master is %u\n", *real_master);
                        break;
                }
        }
        return ret;
}


int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                          u8 nodenum, u8 *real_master)
{
        int ret = -EINVAL;
        struct dlm_master_requery req;
        int status = DLM_LOCK_RES_OWNER_UNKNOWN;

        memset(&req, 0, sizeof(req));
        req.node_idx = dlm->node_num;
        req.namelen = res->lockname.len;
        memcpy(req.name, res->lockname.name, res->lockname.len);

        ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
                                 &req, sizeof(req), nodenum, &status);
        /* XXX: negative status not handled properly here. */
        if (ret < 0)
                mlog(ML_ERROR, "Error %d when sending message %u (key "
                     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
                     dlm->key, nodenum);
        else {
                BUG_ON(status < 0);
                BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
                *real_master = (u8) (status & 0xff);
                mlog(0, "node %u responded to master requery with %u\n",
                     nodenum, *real_master);
                ret = 0;
        }
        return ret;
}


/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
        struct dlm_lock_resource *res = NULL;
        unsigned int hash;
        int master = DLM_LOCK_RES_OWNER_UNKNOWN;
        u32 flags = DLM_ASSERT_MASTER_REQUERY;

        if (!dlm_grab(dlm)) {
                /* since the domain has gone away on this
                 * node, the proper response is UNKNOWN */
                return master;
        }

        hash = dlm_lockid_hash(req->name, req->namelen);

        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
        if (res) {
                spin_lock(&res->spinlock);
                master = res->owner;
                if (master == dlm->node_num) {
                        int ret = dlm_dispatch_assert_master(dlm, res,
                                                             0, 0, flags);
                        if (ret < 0) {
                                mlog_errno(-ENOMEM);
                                /* retry!? */
                                BUG();
                        }
                } else /* put.. in case we are not the master */
                        dlm_lockres_put(res);
                spin_unlock(&res->spinlock);
        }
        spin_unlock(&dlm->spinlock);

        dlm_put(dlm);
        return master;
}
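
/* Map a wire list number (0, 1, 2) to the granted, converting or
 * blocked queue; like dlm_num_locks_in_lockres(), this relies on the
 * three list_heads being laid out consecutively in the structure. */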
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
        struct list_head *ret;
        BUG_ON(list_num < 0);
        BUG_ON(list_num > 2);
        ret = &(res->granted);
        ret += list_num;
        return ret;
}

/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that he doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */

static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_migratable_lockres *mres)
{
        struct dlm_migratable_lock *ml;
        struct list_head *queue;
        struct list_head *tmpq = NULL;
        struct dlm_lock *newlock = NULL;
        struct dlm_lockstatus *lksb = NULL;
        int ret = 0;
        int i, j, bad;
        struct dlm_lock *lock = NULL;
        u8 from = O2NM_MAX_NODES;
        unsigned int added = 0;
        __be64 c;

        mlog(0, "running %d locks for this lockres\n", mres->num_locks);
        for (i = 0; i < mres->num_locks; i++) {
                ml = &(mres->ml[i]);

                if (dlm_is_dummy_lock(dlm, ml, &from)) {
                        /* placeholder, just need to set the refmap bit */
                        BUG_ON(mres->num_locks != 1);
                        mlog(0, "%s:%.*s: dummy lock for %u\n",
                             dlm->name, mres->lockname_len, mres->lockname,
                             from);
                        spin_lock(&res->spinlock);
                        dlm_lockres_set_refmap_bit(from, res);
                        spin_unlock(&res->spinlock);
                        added++;
                        break;
                }
                BUG_ON(ml->highest_blocked != LKM_IVMODE);
                newlock = NULL;
                lksb = NULL;

                queue = dlm_list_num_to_pointer(res, ml->list);
                tmpq = NULL;

                /* if the lock is for the local node it needs to
                 * be moved to the proper location within the queue.
                 * do not allocate a new lock structure. */
                if (ml->node == dlm->node_num) {
                        /* MIGRATION ONLY! */
                        BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

                        spin_lock(&res->spinlock);
                        for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
                                tmpq = dlm_list_idx_to_ptr(res, j);
                                list_for_each_entry(lock, tmpq, list) {
                                        if (lock->ml.cookie != ml->cookie)
                                                lock = NULL;
                                        else
                                                break;
                                }
                                if (lock)
                                        break;
                        }

                        /* lock is always created locally first, and
                         * destroyed locally last.  it must be on the list */
                        if (!lock) {
                                c = ml->cookie;
                                mlog(ML_ERROR, "Could not find local lock "
                                     "with cookie %u:%llu, node %u, "
                                     "list %u, flags 0x%x, type %d, "
                                     "conv %d, highest blocked %d\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
                                     ml->node, ml->list, ml->flags, ml->type,
                                     ml->convert_type, ml->highest_blocked);
                                __dlm_print_one_lock_resource(res);
                                BUG();
                        }

                        if (lock->ml.node != ml->node) {
                                c = lock->ml.cookie;
                                mlog(ML_ERROR, "Mismatched node# in lock "
                                     "cookie %u:%llu, name %.*s, node %u\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
                                     res->lockname.len, res->lockname.name,
                                     lock->ml.node);
                                c = ml->cookie;
                                mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
                                     "node %u, list %u, flags 0x%x, type %d, "
                                     "conv %d, highest blocked %d\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
                                     ml->node, ml->list, ml->flags, ml->type,
                                     ml->convert_type, ml->highest_blocked);
                                __dlm_print_one_lock_resource(res);
                                BUG();
                        }

                        if (tmpq != queue) {
                                c = ml->cookie;
                                mlog(0, "Lock cookie %u:%llu was on list %u "
                                     "instead of list %u for %.*s\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
                                     j, ml->list, res->lockname.len,
                                     res->lockname.name);
                                __dlm_print_one_lock_resource(res);
                                spin_unlock(&res->spinlock);
                                continue;
                        }

                        /* see NOTE above about why we do not update
                         * to match the master here */

                        /* move the lock to its proper place */
                        /* do not alter lock refcount.  switching lists. */
                        list_move_tail(&lock->list, queue);
                        spin_unlock(&res->spinlock);
                        added++;

                        mlog(0, "just reordered a local lock!\n");
                        continue;
                }

                /* lock is for another node. */
                newlock = dlm_new_lock(ml->type, ml->node,
                                       be64_to_cpu(ml->cookie), NULL);
                if (!newlock) {
                        ret = -ENOMEM;
                        goto leave;
                }
                lksb = newlock->lksb;
                dlm_lock_attach_lockres(newlock, res);

                if (ml->convert_type != LKM_IVMODE) {
                        BUG_ON(queue != &res->converting);
                        newlock->ml.convert_type = ml->convert_type;
                }
                lksb->flags |= (ml->flags &
                                (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

                if (ml->type == LKM_NLMODE)
                        goto skip_lvb;

                if (!dlm_lvb_is_empty(mres->lvb)) {
                        if (lksb->flags & DLM_LKSB_PUT_LVB) {
                                /* other node was trying to update
                                 * lvb when node died.  recreate the
                                 * lksb with the updated lvb. */
                                memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
                                /* the lock resource lvb update must happen
                                 * NOW, before the spinlock is dropped.
                                 * we no longer wait for the AST to update
                                 * the lvb. */
                                memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
                        } else {
                                /* otherwise, the node is sending its
                                 * most recent valid lvb info */
                                BUG_ON(ml->type != LKM_EXMODE &&
                                       ml->type != LKM_PRMODE);
                                if (!dlm_lvb_is_empty(res->lvb) &&
                                    (ml->type == LKM_EXMODE ||
                                     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
                                        int i;
                                        mlog(ML_ERROR, "%s:%.*s: received bad "
                                             "lvb! type=%d\n", dlm->name,
                                             res->lockname.len,
                                             res->lockname.name, ml->type);
                                        printk("lockres lvb=[");
                                        for (i = 0; i < DLM_LVB_LEN; i++)
                                                printk("%02x", res->lvb[i]);
                                        printk("]\nmigrated lvb=[");
                                        for (i = 0; i < DLM_LVB_LEN; i++)
                                                printk("%02x", mres->lvb[i]);
                                        printk("]\n");
                                        dlm_print_one_lock_resource(res);
                                        BUG();
                                }
                                memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
                        }
                }
skip_lvb:

                /* NOTE:
                 * wrt lock queue ordering and recovery:
                 *    1. order of locks on granted queue is
                 *       meaningless.
                 *    2. order of locks on converting queue is
                 *       LOST with the node death.  sorry charlie.
                 *    3. order of locks on the blocked queue is
                 *       also LOST.
                 * order of locks does not affect integrity, it
                 * just means that a lock request may get pushed
                 * back in line as a result of the node death.
                 * also note that for a given node the lock order
                 * for its secondary queue locks is preserved
                 * relative to each other, but clearly *not*
                 * preserved relative to locks from other nodes.
                 */
                bad = 0;
                spin_lock(&res->spinlock);
                list_for_each_entry(lock, queue, list) {
                        if (lock->ml.cookie == ml->cookie) {
                                c = lock->ml.cookie;
                                mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
                                     "exists on this lockres!\n", dlm->name,
                                     res->lockname.len, res->lockname.name,
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)));

                                mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
                                     "node=%u, cookie=%u:%llu, queue=%d\n",
1952 ml->type, ml->convert_type, ml->node,
1953 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
1954 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
1955 ml->list);
1957 __dlm_print_one_lock_resource(res);
1958 bad = 1;
1959 break;
1962 if (!bad) {
1963 dlm_lock_get(newlock);
1964 list_add_tail(&newlock->list, queue);
1965 mlog(0, "%s:%.*s: added lock for node %u, "
1966 "setting refmap bit\n", dlm->name,
1967 res->lockname.len, res->lockname.name, ml->node);
1968 dlm_lockres_set_refmap_bit(ml->node, res);
1969 added++;
1971 spin_unlock(&res->spinlock);
1973 mlog(0, "done running all the locks\n");
1975 leave:
1976 /* balance the ref taken when the work was queued */
1977 spin_lock(&res->spinlock);
1978 dlm_lockres_drop_inflight_ref(dlm, res);
1979 spin_unlock(&res->spinlock);
1981 if (ret < 0) {
1982 mlog_errno(ret);
1983 if (newlock)
1984 dlm_lock_put(newlock);
1987 return ret;
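/*
 * Editor's note: the diagnostics in dlm_process_recovery_data() above
 * repeatedly split a lock cookie into "%u:%llu" (node:sequence) form.
 * A minimal sketch of that decoding, assuming the cookie packs the
 * owning node number into the top byte of a 64-bit value (the
 * canonical dlm_get_lock_cookie_node()/dlm_get_lock_cookie_seq()
 * helpers live in dlmcommon.h):
 */
static inline u8 dlm_get_lock_cookie_node_sketch(u64 cookie)
{
	/* node number occupies the most significant byte */
	return (u8)((cookie >> 56) & 0xffULL);
}

static inline u64 dlm_get_lock_cookie_seq_sketch(u64 cookie)
{
	/* remaining 56 bits are the per-node sequence number */
	return cookie & ((1ULL << 56) - 1);
}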
1990 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1991 struct dlm_lock_resource *res)
1993 int i;
1994 struct list_head *queue;
1995 struct dlm_lock *lock, *next;
1997 assert_spin_locked(&dlm->spinlock);
1998 assert_spin_locked(&res->spinlock);
1999 res->state |= DLM_LOCK_RES_RECOVERING;
2000 if (!list_empty(&res->recovering)) {
2001 mlog(0,
2002 "Recovering res %s:%.*s, is already on recovery list!\n",
2003 dlm->name, res->lockname.len, res->lockname.name);
2004 list_del_init(&res->recovering);
2005 dlm_lockres_put(res);
2007 /* We need to hold a reference while on the recovery list */
2008 dlm_lockres_get(res);
2009 list_add_tail(&res->recovering, &dlm->reco.resources);
2011 /* find any pending locks and put them back on the proper list */
2012 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2013 queue = dlm_list_idx_to_ptr(res, i);
2014 list_for_each_entry_safe(lock, next, queue, list) {
2015 dlm_lock_get(lock);
2016 if (lock->convert_pending) {
2017 /* move converting lock back to granted */
2018 BUG_ON(i != DLM_CONVERTING_LIST);
2019 mlog(0, "node died with convert pending "
2020 "on %.*s. move back to granted list.\n",
2021 res->lockname.len, res->lockname.name);
2022 dlm_revert_pending_convert(res, lock);
2023 lock->convert_pending = 0;
2024 } else if (lock->lock_pending) {
2025 /* remove pending lock requests completely */
2026 BUG_ON(i != DLM_BLOCKED_LIST);
2027 mlog(0, "node died with lock pending "
2028 "on %.*s. remove from blocked list and skip.\n",
2029 res->lockname.len, res->lockname.name);
2030 /* lock will be floating until ref in
2031 * dlmlock_remote is freed after the network
2032 * call returns. ok for it to not be on any
2033 * list since no ast can be called
2034 * (the master is dead). */
2035 dlm_revert_pending_lock(res, lock);
2036 lock->lock_pending = 0;
2037 } else if (lock->unlock_pending) {
2038 /* if an unlock was in progress, treat as
2039 * if this had completed successfully
2040 * before sending this lock state to the
2041 * new master. note that the dlm_unlock
2042 * call is still responsible for calling
2043 * the unlockast. that will happen after
2044 * the network call times out. for now,
2045 * just move lists to prepare the new
2046 * recovery master. */
2047 BUG_ON(i != DLM_GRANTED_LIST);
2048 mlog(0, "node died with unlock pending "
2049 "on %.*s. remove from blocked list and skip.\n",
2050 res->lockname.len, res->lockname.name);
2051 dlm_commit_pending_unlock(res, lock);
2052 lock->unlock_pending = 0;
2053 } else if (lock->cancel_pending) {
2054 /* if a cancel was in progress, treat as
2055 * if this had completed successfully
2056 * before sending this lock state to the
2057 * new master */
2058 BUG_ON(i != DLM_CONVERTING_LIST);
2059 mlog(0, "node died with cancel pending "
2060 "on %.*s. move back to granted list.\n",
2061 res->lockname.len, res->lockname.name);
2062 dlm_commit_pending_cancel(res, lock);
2063 lock->cancel_pending = 0;
2065 dlm_lock_put(lock);
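/*
 * Editor's note: both dlm_process_recovery_data() and the loop above
 * address the three lock queues by index. A sketch of the index-to-queue
 * mapping they rely on, assumed to match dlm_list_idx_to_ptr() /
 * dlm_list_num_to_pointer() in the dlm headers:
 */
static inline struct list_head *
dlm_list_idx_to_ptr_sketch(struct dlm_lock_resource *res, int idx)
{
	switch (idx) {
	case DLM_GRANTED_LIST:
		return &res->granted;
	case DLM_CONVERTING_LIST:
		return &res->converting;
	case DLM_BLOCKED_LIST:
		return &res->blocked;
	default:
		BUG();
	}
	return NULL;	/* unreachable */
}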
2072 /* removes all recovered locks from the recovery list.
2073 * sets the res->owner to the new master.
2074 * unsets the RECOVERY flag and wakes waiters. */
2075 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2076 u8 dead_node, u8 new_master)
2078 int i;
2079 struct hlist_node *hash_iter;
2080 struct hlist_head *bucket;
2081 struct dlm_lock_resource *res, *next;
2083 assert_spin_locked(&dlm->spinlock);
2085 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2086 if (res->owner == dead_node) {
2087 list_del_init(&res->recovering);
2088 spin_lock(&res->spinlock);
2089 /* new_master has our reference from
2090 * the lock state sent during recovery */
2091 dlm_change_lockres_owner(dlm, res, new_master);
2092 res->state &= ~DLM_LOCK_RES_RECOVERING;
2093 if (__dlm_lockres_has_locks(res))
2094 __dlm_dirty_lockres(dlm, res);
2095 spin_unlock(&res->spinlock);
2096 wake_up(&res->wq);
2097 dlm_lockres_put(res);
2101 /* this will become unnecessary eventually, but
2102 * for now we need to run the whole hash, clear
2103 * the RECOVERING state and set the owner
2104 * if necessary */
2105 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2106 bucket = dlm_lockres_hash(dlm, i);
2107 hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
2108 if (res->state & DLM_LOCK_RES_RECOVERING) {
2109 if (res->owner == dead_node) {
2110 mlog(0, "(this=%u) res %.*s owner=%u "
2111 "was not on recovering list, but "
2112 "clearing state anyway\n",
2113 dlm->node_num, res->lockname.len,
2114 res->lockname.name, res->owner);
2115 } else if (res->owner == dlm->node_num) {
2116 mlog(0, "(this=%u) res %.*s owner=%u "
2117 "was not on recovering list, "
2118 "owner is THIS node, clearing\n",
2119 dlm->node_num, res->lockname.len,
2120 res->lockname.name, res->owner);
2121 } else
2122 continue;
2124 if (!list_empty(&res->recovering)) {
2125 mlog(0, "%s:%.*s: lockres was "
2126 "marked RECOVERING, owner=%u\n",
2127 dlm->name, res->lockname.len,
2128 res->lockname.name, res->owner);
2129 list_del_init(&res->recovering);
2130 dlm_lockres_put(res);
2132 spin_lock(&res->spinlock);
2133 /* new_master has our reference from
2134 * the lock state sent during recovery */
2135 dlm_change_lockres_owner(dlm, res, new_master);
2136 res->state &= ~DLM_LOCK_RES_RECOVERING;
2137 if (__dlm_lockres_has_locks(res))
2138 __dlm_dirty_lockres(dlm, res);
2139 spin_unlock(&res->spinlock);
2140 wake_up(&res->wq);
2146 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2148 if (local) {
2149 if (lock->ml.type != LKM_EXMODE &&
2150 lock->ml.type != LKM_PRMODE)
2151 return 1;
2152 } else if (lock->ml.type == LKM_EXMODE)
2153 return 1;
2154 return 0;
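/*
 * Editor's note: dlm_lvb_needs_invalidation() reduces to this truth
 * table. "local" means we are scanning our own locks on a lockres we
 * do not master; otherwise we are the master scanning the dead node's
 * locks:
 *
 *	local	lock type	invalidate lvb?
 *	-----	---------	---------------
 *	yes	EX or PR	no  (a granted reader/writer keeps it valid)
 *	yes	anything else	yes (no PR/EX held, lvb cannot be trusted)
 *	no	EX		yes (dead node may have written a newer lvb)
 *	no	anything else	no
 */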
2157 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2158 struct dlm_lock_resource *res, u8 dead_node)
2160 struct list_head *queue;
2161 struct dlm_lock *lock;
2162 int blank_lvb = 0, local = 0;
2163 int i;
2164 u8 search_node;
2166 assert_spin_locked(&dlm->spinlock);
2167 assert_spin_locked(&res->spinlock);
2169 if (res->owner == dlm->node_num)
2170 /* if this node owned the lockres, and if the dead node
2171 * had an EX when it died, blank out the lvb */
2172 search_node = dead_node;
2173 else {
2174 /* if this is a secondary lockres, and we had no EX or PR
2175 * locks granted, we can no longer trust the lvb */
2176 search_node = dlm->node_num;
2177 local = 1; /* check local state for valid lvb */
2180 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2181 queue = dlm_list_idx_to_ptr(res, i);
2182 list_for_each_entry(lock, queue, list) {
2183 if (lock->ml.node == search_node) {
2184 if (dlm_lvb_needs_invalidation(lock, local)) {
2185 /* zero the lksb lvb and lockres lvb */
2186 blank_lvb = 1;
2187 memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2193 if (blank_lvb) {
2194 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2195 res->lockname.len, res->lockname.name, dead_node);
2196 memset(res->lvb, 0, DLM_LVB_LEN);
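/*
 * Editor's note: recovery treats an all-zero lvb as "empty" -- the
 * blanking above and the dlm_lvb_is_empty() checks in
 * dlm_process_recovery_data() agree on that convention. A sketch of
 * the check, assumed to match the helper in dlmcommon.h:
 */
static inline int dlm_lvb_is_empty_sketch(const char *lvb)
{
	int i;

	for (i = 0; i < DLM_LVB_LEN; i++)
		if (lvb[i])
			return 0;
	return 1;
}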
2200 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2201 struct dlm_lock_resource *res, u8 dead_node)
2203 struct dlm_lock *lock, *next;
2204 unsigned int freed = 0;
2206 /* this node is the lockres master:
2207 * 1) remove any stale locks for the dead node
2208 * 2) if the dead node had an EX when it died, blank out the lvb
2210 assert_spin_locked(&dlm->spinlock);
2211 assert_spin_locked(&res->spinlock);
2213 /* We call dlm_lock_put() twice: once to remove the lock from its list and
2214 * once to force the DLM_UNLOCK_FREE_LOCK action that actually frees it */
2216 /* TODO: check pending_asts, pending_basts here */
2217 list_for_each_entry_safe(lock, next, &res->granted, list) {
2218 if (lock->ml.node == dead_node) {
2219 list_del_init(&lock->list);
2220 dlm_lock_put(lock);
2221 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2222 dlm_lock_put(lock);
2223 freed++;
2226 list_for_each_entry_safe(lock, next, &res->converting, list) {
2227 if (lock->ml.node == dead_node) {
2228 list_del_init(&lock->list);
2229 dlm_lock_put(lock);
2230 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2231 dlm_lock_put(lock);
2232 freed++;
2235 list_for_each_entry_safe(lock, next, &res->blocked, list) {
2236 if (lock->ml.node == dead_node) {
2237 list_del_init(&lock->list);
2238 dlm_lock_put(lock);
2239 /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2240 dlm_lock_put(lock);
2241 freed++;
2245 if (freed) {
2246 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2247 "dropping ref from lockres\n", dlm->name,
2248 res->lockname.len, res->lockname.name, freed, dead_node);
2249 if (!test_bit(dead_node, res->refmap)) {
2250 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2251 "but ref was not set\n", dlm->name,
2252 res->lockname.len, res->lockname.name, freed, dead_node);
2253 __dlm_print_one_lock_resource(res);
2255 dlm_lockres_clear_refmap_bit(dead_node, res);
2256 } else if (test_bit(dead_node, res->refmap)) {
2257 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2258 "no locks and had not purged before dying\n", dlm->name,
2259 res->lockname.len, res->lockname.name, dead_node);
2260 dlm_lockres_clear_refmap_bit(dead_node, res);
2263 /* do not kick thread yet */
2264 __dlm_dirty_lockres(dlm, res);
2267 /* if this node is the recovery master, and there are no
2268 * locks for a given lockres owned by this node that are in
2269 * either PR or EX mode, zero out the lvb before requesting.
2274 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2276 struct hlist_node *iter;
2277 struct dlm_lock_resource *res;
2278 int i;
2279 struct hlist_head *bucket;
2280 struct dlm_lock *lock;
2283 /* purge any stale mles */
2284 dlm_clean_master_list(dlm, dead_node);
2287 * now clean up all lock resources. there are two rules:
2289 * 1) if the dead node was the master, move the lockres
2290 * to the recovering list. set the RECOVERING flag.
2291 * this lockres needs to be cleaned up before it can
2292 * be used further.
2294 * 2) if this node was the master, remove all locks from
2295 * each of the lockres queues that were owned by the
2296 * dead node. once recovery finishes, the dlm thread
2297 * can be kicked again to see if any ASTs or BASTs
2298 * need to be fired as a result.
2300 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2301 bucket = dlm_lockres_hash(dlm, i);
2302 hlist_for_each_entry(res, iter, bucket, hash_node) {
2303 /* always prune any $RECOVERY entries for dead nodes,
2304 * otherwise hangs can occur during later recovery */
2305 if (dlm_is_recovery_lock(res->lockname.name,
2306 res->lockname.len)) {
2307 spin_lock(&res->spinlock);
2308 list_for_each_entry(lock, &res->granted, list) {
2309 if (lock->ml.node == dead_node) {
2310 mlog(0, "AHA! there was "
2311 "a $RECOVERY lock for dead "
2312 "node %u (%s)!\n",
2313 dead_node, dlm->name);
2314 list_del_init(&lock->list);
2315 dlm_lock_put(lock);
2316 break;
2319 spin_unlock(&res->spinlock);
2320 continue;
2322 spin_lock(&res->spinlock);
2323 /* zero the lvb if necessary */
2324 dlm_revalidate_lvb(dlm, res, dead_node);
2325 if (res->owner == dead_node) {
2326 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2327 mlog(ML_NOTICE, "Ignore %.*s for "
2328 "recovery as it is being freed\n",
2329 res->lockname.len,
2330 res->lockname.name);
2331 } else
2332 dlm_move_lockres_to_recovery_list(dlm,
2333 res);
2335 } else if (res->owner == dlm->node_num) {
2336 dlm_free_dead_locks(dlm, res, dead_node);
2337 __dlm_lockres_calc_usage(dlm, res);
2339 spin_unlock(&res->spinlock);
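/*
 * Editor's note: the $RECOVERY pruning above keys purely off the
 * lockres name. A sketch of that name check, assumed to match
 * dlm_is_recovery_lock() in dlmcommon.h:
 */
static inline int dlm_is_recovery_lock_sketch(const char *lock_name,
					      int name_len)
{
	return (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
		memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0);
}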
2345 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2347 assert_spin_locked(&dlm->spinlock);
2349 if (dlm->reco.new_master == idx) {
2350 mlog(0, "%s: recovery master %d just died\n",
2351 dlm->name, idx);
2352 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2353 /* finalize1 was reached, so it is safe to clear
2354 * the new_master and dead_node. that recovery
2355 * is complete. */
2356 mlog(0, "%s: dead master %d had reached "
2357 "finalize1 state, clearing\n", dlm->name, idx);
2358 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2359 __dlm_reset_recovery(dlm);
2363 /* Clean up join state on node death. */
2364 if (dlm->joining_node == idx) {
2365 mlog(0, "Clearing join state for node %u\n", idx);
2366 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2369 /* check to see if the node is already considered dead */
2370 if (!test_bit(idx, dlm->live_nodes_map)) {
2371 mlog(0, "for domain %s, node %d is already dead. "
2372 "another node likely did recovery already.\n",
2373 dlm->name, idx);
2374 return;
2377 /* check to see if we do not care about this node */
2378 if (!test_bit(idx, dlm->domain_map)) {
2379 /* This also catches the case where we see a node down
2380 * but haven't joined the domain yet. */
2381 mlog(0, "node %u already removed from domain!\n", idx);
2382 return;
2385 clear_bit(idx, dlm->live_nodes_map);
2387 /* make sure local cleanup occurs before the heartbeat events */
2388 if (!test_bit(idx, dlm->recovery_map))
2389 dlm_do_local_recovery_cleanup(dlm, idx);
2391 /* notify anything attached to the heartbeat events */
2392 dlm_hb_event_notify_attached(dlm, idx, 0);
2394 mlog(0, "node %u being removed from domain map!\n", idx);
2395 clear_bit(idx, dlm->domain_map);
2396 /* wake up migration waiters if a node goes down.
2397 * perhaps later we can genericize this for other waiters. */
2398 wake_up(&dlm->migration_wq);
2400 if (test_bit(idx, dlm->recovery_map))
2401 mlog(0, "domain %s, node %u already added "
2402 "to recovery map!\n", dlm->name, idx);
2403 else
2404 set_bit(idx, dlm->recovery_map);
2407 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2409 struct dlm_ctxt *dlm = data;
2411 if (!dlm_grab(dlm))
2412 return;
2415 * This will notify any dlm users that a node in our domain
2416 * went away without notifying us first.
2418 if (test_bit(idx, dlm->domain_map))
2419 dlm_fire_domain_eviction_callbacks(dlm, idx);
2421 spin_lock(&dlm->spinlock);
2422 __dlm_hb_node_down(dlm, idx);
2423 spin_unlock(&dlm->spinlock);
2425 dlm_put(dlm);
2428 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2430 struct dlm_ctxt *dlm = data;
2432 if (!dlm_grab(dlm))
2433 return;
2435 spin_lock(&dlm->spinlock);
2436 set_bit(idx, dlm->live_nodes_map);
2437 /* do NOT notify mles attached to the heartbeat events.
2438 * new nodes are not interested in mastery until joined. */
2439 spin_unlock(&dlm->spinlock);
2441 dlm_put(dlm);
2444 static void dlm_reco_ast(void *astdata)
2446 struct dlm_ctxt *dlm = astdata;
2447 mlog(0, "ast for recovery lock fired, this=%u, dlm=%s\n",
2448 dlm->node_num, dlm->name);
2450 static void dlm_reco_bast(void *astdata, int blocked_type)
2452 struct dlm_ctxt *dlm = astdata;
2453 mlog(0, "bast for recovery lock fired, this=%u, dlm=%s\n",
2454 dlm->node_num, dlm->name);
2456 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2458 mlog(0, "unlockast for recovery lock fired!\n");
2462 * dlm_pick_recovery_master will continually attempt to use
2463 * dlmlock() on the special "$RECOVERY" lockres with the
2464 * LKM_NOQUEUE flag to get an EX. every thread that enters
2465 * this function on each node racing to become the recovery
2466 * master will not stop attempting this until either:
2467 * a) this node gets the EX (and becomes the recovery master),
2468 * or b) dlm->reco.new_master gets set to some nodenum
2469 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2470 * so each time a recovery master is needed, the entire cluster
2471 * will sync at this point. if the new master dies, that will
2472 * be detected in dlm_do_recovery */
2473 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2475 enum dlm_status ret;
2476 struct dlm_lockstatus lksb;
2477 int status = -EINVAL;
2479 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2480 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2481 again:
2482 memset(&lksb, 0, sizeof(lksb));
2484 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2485 DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2486 dlm_reco_ast, dlm, dlm_reco_bast);
2488 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb.status=%d\n",
2489 dlm->name, ret, lksb.status);
2491 if (ret == DLM_NORMAL) {
2492 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2493 dlm->name, dlm->node_num);
2495 /* got the EX lock. check to see if another node
2496 * just became the reco master */
2497 if (dlm_reco_master_ready(dlm)) {
2498 mlog(0, "%s: got reco EX lock, but %u will "
2499 "do the recovery\n", dlm->name,
2500 dlm->reco.new_master);
2501 status = -EEXIST;
2502 } else {
2503 status = 0;
2505 /* see if recovery was already finished elsewhere */
2506 spin_lock(&dlm->spinlock);
2507 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2508 status = -EINVAL;
2509 mlog(0, "%s: got reco EX lock, but "
2510 "node got recovered already\n", dlm->name);
2511 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2512 mlog(ML_ERROR, "%s: new master is %u "
2513 "but no dead node!\n",
2514 dlm->name, dlm->reco.new_master);
2515 BUG();
2518 spin_unlock(&dlm->spinlock);
2521 /* if this node has actually become the recovery master,
2522 * set the master and send the messages to begin recovery */
2523 if (!status) {
2524 mlog(0, "%s: dead=%u, this=%u, sending "
2525 "begin_reco now\n", dlm->name,
2526 dlm->reco.dead_node, dlm->node_num);
2527 status = dlm_send_begin_reco_message(dlm,
2528 dlm->reco.dead_node);
2529 /* this always succeeds */
2530 BUG_ON(status);
2532 /* set the new_master to this node */
2533 spin_lock(&dlm->spinlock);
2534 dlm_set_reco_master(dlm, dlm->node_num);
2535 spin_unlock(&dlm->spinlock);
2538 /* recovery lock is a special case. ast will not get fired,
2539 * so just go ahead and unlock it. */
2540 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2541 if (ret == DLM_DENIED) {
2542 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2543 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2545 if (ret != DLM_NORMAL) {
2546 /* this would really suck. this could only happen
2547 * if there was a network error during the unlock
2548 * because of node death. this means the unlock
2549 * is actually "done" and the lock structure has
2550 * even been freed. we can continue, but only
2551 * because this specific lock name is special. */
2552 mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2554 } else if (ret == DLM_NOTQUEUED) {
2555 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2556 dlm->name, dlm->node_num);
2557 /* another node is master. wait on
2558 * reco.new_master != O2NM_INVALID_NODE_NUM
2559 * for at most one second */
2560 wait_event_timeout(dlm->dlm_reco_thread_wq,
2561 dlm_reco_master_ready(dlm),
2562 msecs_to_jiffies(1000));
2563 if (!dlm_reco_master_ready(dlm)) {
2564 mlog(0, "%s: reco master taking a while\n",
2565 dlm->name);
2566 goto again;
2568 /* another node has informed this one that it is reco master */
2569 mlog(0, "%s: reco master %u is ready to recover %u\n",
2570 dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2571 status = -EEXIST;
2572 } else if (ret == DLM_RECOVERING) {
2573 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2574 dlm->name, dlm->node_num);
2575 goto again;
2576 } else {
2577 struct dlm_lock_resource *res;
2579 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2580 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2581 "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2582 dlm_errname(lksb.status));
2583 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2584 DLM_RECOVERY_LOCK_NAME_LEN);
2585 if (res) {
2586 dlm_print_one_lock_resource(res);
2587 dlm_lockres_put(res);
2588 } else {
2589 mlog(ML_ERROR, "recovery lock not found\n");
2591 BUG();
2594 return status;
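/*
 * Editor's note: dlm_pick_recovery_master() waits on
 * dlm_reco_master_ready(), defined earlier in this file. A sketch of
 * what that readiness check is assumed to be -- a spinlock-protected
 * test that some node has claimed the recovery mastership:
 */
static int dlm_reco_master_ready_sketch(struct dlm_ctxt *dlm)
{
	int ready;

	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}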
2597 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2599 struct dlm_begin_reco br;
2600 int ret = 0;
2601 struct dlm_node_iter iter;
2602 int nodenum;
2603 int status;
2605 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2607 spin_lock(&dlm->spinlock);
2608 dlm_node_iter_init(dlm->domain_map, &iter);
2609 spin_unlock(&dlm->spinlock);
2611 clear_bit(dead_node, iter.node_map);
2613 memset(&br, 0, sizeof(br));
2614 br.node_idx = dlm->node_num;
2615 br.dead_node = dead_node;
2617 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2618 ret = 0;
2619 if (nodenum == dead_node) {
2620 mlog(0, "not sending begin reco to dead node "
2621 "%u\n", dead_node);
2622 continue;
2624 if (nodenum == dlm->node_num) {
2625 mlog(0, "not sending begin reco to self\n");
2626 continue;
2628 retry:
2629 ret = -EINVAL;
2630 mlog(0, "attempting to send begin reco msg to %d\n",
2631 nodenum);
2632 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2633 &br, sizeof(br), nodenum, &status);
2634 /* negative status is handled ok by caller here */
2635 if (ret >= 0)
2636 ret = status;
2637 if (dlm_is_host_down(ret)) {
2638 /* node is down. not involved in recovery
2639 * so just keep going */
2640 mlog(ML_NOTICE, "%s: node %u was down when sending "
2641 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2642 ret = 0;
2646 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2647 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2648 * We are handling both for compatibility reasons.
2650 if (ret == -EAGAIN || ret == EAGAIN) {
2651 mlog(0, "%s: trying to start recovery of node "
2652 "%u, but node %u is waiting for last recovery "
2653 "to complete, backoff for a bit\n", dlm->name,
2654 dead_node, nodenum);
2655 msleep(100);
2656 goto retry;
2658 if (ret < 0) {
2659 struct dlm_lock_resource *res;
2661 /* this is now a serious problem, possibly ENOMEM
2662 * in the network stack. must retry */
2663 mlog_errno(ret);
2664 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2665 "returned %d\n", dlm->name, nodenum, ret);
2666 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2667 DLM_RECOVERY_LOCK_NAME_LEN);
2668 if (res) {
2669 dlm_print_one_lock_resource(res);
2670 dlm_lockres_put(res);
2671 } else {
2672 mlog(ML_ERROR, "recovery lock not found\n");
2674 /* sleep for a bit in hopes that we can avoid
2675 * another ENOMEM */
2676 msleep(100);
2677 goto retry;
2681 return ret;
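/*
 * Editor's note: the begin-reco payload sent above carries only the
 * initiating node and the dead node. A sketch of the wire format,
 * assumed to mirror struct dlm_begin_reco in dlmcommon.h (the padding
 * keeps the message size fixed and aligned):
 */
struct dlm_begin_reco_sketch {
	u8 node_idx;	/* node initiating recovery (the new master) */
	u8 dead_node;	/* node being recovered */
	__be16 pad1;
	__be32 pad2;
};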
2684 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2685 void **ret_data)
2687 struct dlm_ctxt *dlm = data;
2688 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2690 /* ok to return 0, domain has gone away */
2691 if (!dlm_grab(dlm))
2692 return 0;
2694 spin_lock(&dlm->spinlock);
2695 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2696 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2697 "but this node is in finalize state, waiting on finalize2\n",
2698 dlm->name, br->node_idx, br->dead_node,
2699 dlm->reco.dead_node, dlm->reco.new_master);
2700 spin_unlock(&dlm->spinlock);
2701 return -EAGAIN;
2703 spin_unlock(&dlm->spinlock);
2705 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2706 dlm->name, br->node_idx, br->dead_node,
2707 dlm->reco.dead_node, dlm->reco.new_master);
2709 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2711 spin_lock(&dlm->spinlock);
2712 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2713 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2714 mlog(0, "%s: new_master %u died, changing "
2715 "to %u\n", dlm->name, dlm->reco.new_master,
2716 br->node_idx);
2717 } else {
2718 mlog(0, "%s: new_master %u NOT DEAD, changing "
2719 "to %u\n", dlm->name, dlm->reco.new_master,
2720 br->node_idx);
2721 /* may not have seen the new master as dead yet */
2724 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2725 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2726 "node %u changing it to %u\n", dlm->name,
2727 dlm->reco.dead_node, br->node_idx, br->dead_node);
2729 dlm_set_reco_master(dlm, br->node_idx);
2730 dlm_set_reco_dead_node(dlm, br->dead_node);
2731 if (!test_bit(br->dead_node, dlm->recovery_map)) {
2732 mlog(0, "recovery master %u sees %u as dead, but this "
2733 "node has not yet. marking %u as dead\n",
2734 br->node_idx, br->dead_node, br->dead_node);
2735 if (!test_bit(br->dead_node, dlm->domain_map) ||
2736 !test_bit(br->dead_node, dlm->live_nodes_map))
2737 mlog(0, "%u not in domain/live_nodes map "
2738 "so setting it in reco map manually\n",
2739 br->dead_node);
2740 /* force the recovery cleanup in __dlm_hb_node_down
2741 * both of these will be cleared in a moment */
2742 set_bit(br->dead_node, dlm->domain_map);
2743 set_bit(br->dead_node, dlm->live_nodes_map);
2744 __dlm_hb_node_down(dlm, br->dead_node);
2746 spin_unlock(&dlm->spinlock);
2748 dlm_kick_recovery_thread(dlm);
2750 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2751 dlm->name, br->node_idx, br->dead_node,
2752 dlm->reco.dead_node, dlm->reco.new_master);
2754 dlm_put(dlm);
2755 return 0;
2758 #define DLM_FINALIZE_STAGE2 0x01
2759 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2761 int ret = 0;
2762 struct dlm_finalize_reco fr;
2763 struct dlm_node_iter iter;
2764 int nodenum;
2765 int status;
2766 int stage = 1;
2768 mlog(0, "finishing recovery for node %s:%u, "
2769 "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2771 spin_lock(&dlm->spinlock);
2772 dlm_node_iter_init(dlm->domain_map, &iter);
2773 spin_unlock(&dlm->spinlock);
2775 stage2:
2776 memset(&fr, 0, sizeof(fr));
2777 fr.node_idx = dlm->node_num;
2778 fr.dead_node = dlm->reco.dead_node;
2779 if (stage == 2)
2780 fr.flags |= DLM_FINALIZE_STAGE2;
2782 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2783 if (nodenum == dlm->node_num)
2784 continue;
2785 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2786 &fr, sizeof(fr), nodenum, &status);
2787 if (ret >= 0)
2788 ret = status;
2789 if (ret < 0) {
2790 mlog(ML_ERROR, "Error %d when sending message %u (key "
2791 "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
2792 dlm->key, nodenum);
2793 if (dlm_is_host_down(ret)) {
2794 /* this has no effect on this recovery
2795 * session, so set the status to zero to
2796 * finish out the last recovery */
2797 mlog(ML_ERROR, "node %u went down after this "
2798 "node finished recovery.\n", nodenum);
2799 ret = 0;
2800 continue;
2802 break;
2805 if (stage == 1) {
2806 /* reset the node_iter back to the top and send finalize2 */
2807 iter.curnode = -1;
2808 stage = 2;
2809 goto stage2;
2812 return ret;
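/*
 * Editor's note: the finalize payload is the begin-reco payload plus a
 * flags byte; stage two sets DLM_FINALIZE_STAGE2 in it. A sketch,
 * assumed to mirror struct dlm_finalize_reco in dlmcommon.h:
 */
struct dlm_finalize_reco_sketch {
	u8 node_idx;	/* the recovery master */
	u8 dead_node;	/* node that was recovered */
	u8 flags;	/* DLM_FINALIZE_STAGE2 on the second pass */
	u8 pad1;
	__be32 pad2;
};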
2815 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2816 void **ret_data)
2818 struct dlm_ctxt *dlm = data;
2819 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2820 int stage = 1;
2822 /* ok to return 0, domain has gone away */
2823 if (!dlm_grab(dlm))
2824 return 0;
2826 if (fr->flags & DLM_FINALIZE_STAGE2)
2827 stage = 2;
2829 mlog(0, "%s: node %u finalizing recovery stage%d of "
2830 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2831 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2833 spin_lock(&dlm->spinlock);
2835 if (dlm->reco.new_master != fr->node_idx) {
2836 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2837 "%u is supposed to be the new master, dead=%u\n",
2838 fr->node_idx, dlm->reco.new_master, fr->dead_node);
2839 BUG();
2841 if (dlm->reco.dead_node != fr->dead_node) {
2842 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2843 "node %u, but node %u is supposed to be dead\n",
2844 fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2845 BUG();
2848 switch (stage) {
2849 case 1:
2850 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2851 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2852 mlog(ML_ERROR, "%s: received finalize1 from "
2853 "new master %u for dead node %u, but "
2854 "this node has already received it!\n",
2855 dlm->name, fr->node_idx, fr->dead_node);
2856 dlm_print_reco_node_status(dlm);
2857 BUG();
2859 dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2860 spin_unlock(&dlm->spinlock);
2861 break;
2862 case 2:
2863 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2864 mlog(ML_ERROR, "%s: received finalize2 from "
2865 "new master %u for dead node %u, but "
2866 "this node did not have finalize1!\n",
2867 dlm->name, fr->node_idx, fr->dead_node);
2868 dlm_print_reco_node_status(dlm);
2869 BUG();
2871 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2872 spin_unlock(&dlm->spinlock);
2873 dlm_reset_recovery(dlm);
2874 dlm_kick_recovery_thread(dlm);
2875 break;
2876 default:
2877 BUG();
2880 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2881 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2883 dlm_put(dlm);
2884 return 0;
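/*
 * Editor's note: taken together, the two handlers above implement a
 * two-phase finalize broadcast from the recovery master:
 *
 *   stage 1: each node points the dead node's lockreses at the new
 *            master and latches DLM_RECO_STATE_FINALIZE, so any late
 *            begin-reco for the same death is bounced with -EAGAIN.
 *   stage 2: each node drops DLM_RECO_STATE_FINALIZE and resets its
 *            recovery state (dead_node/new_master back to invalid).
 *
 * This is why __dlm_hb_node_down() may simply reset recovery when a
 * dying recovery master had already reached finalize1: at that point
 * the remastered state has been received, and the recovery is treated
 * as complete.
 */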