/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmaster.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node,
                              int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node,
                            int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res,
                                void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
                                struct dlm_master_list_entry *mle,
                                const char *name,
                                unsigned int namelen)
{
        if (dlm != mle->dlm)
                return 0;

        if (namelen != mle->mnamelen ||
            memcmp(name, mle->mname, namelen) != 0)
                return 0;

        return 1;
}
static struct kmem_cache *dlm_lockres_cache = NULL;
static struct kmem_cache *dlm_lockname_cache = NULL;
static struct kmem_cache *dlm_mle_cache = NULL;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
                         enum dlm_mle_type type,
                         struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res,
                         const char *name,
                         unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle, int to);


static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res,
                                    struct dlm_master_list_entry *mle,
                                    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
                                 struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle,
                                 struct dlm_master_list_entry **oldmle,
                                 const char *name, unsigned int namelen,
                                 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res);
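/* Added note: classifies an errno from an o2net message send.  A
 * nonzero return means the error indicates the remote node is dead or
 * unreachable, so callers treat it as node death rather than a local
 * failure. */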
int dlm_is_host_down(int errno)
{
        switch (errno) {
        case -EBADF:
        case -ECONNREFUSED:
        case -ENOTCONN:
        case -ECONNRESET:
        case -EPIPE:
        case -EHOSTDOWN:
        case -EHOSTUNREACH:
        case -ETIMEDOUT:
        case -ECONNABORTED:
        case -ENETDOWN:
        case -ENETUNREACH:
        case -ENETRESET:
        case -ESHUTDOWN:
        case -ENOPROTOOPT:
        case -EINVAL:   /* if returned from our tcp code,
                           this means there is no socket */
                return 1;
        }
        return 0;
}
/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        assert_spin_locked(&dlm->spinlock);

        list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        if (!list_empty(&mle->hb_events))
                list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                            struct dlm_master_list_entry *mle)
{
        spin_lock(&dlm->spinlock);
        __dlm_mle_detach_hb_events(dlm, mle);
        spin_unlock(&dlm->spinlock);
}
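
/* Added note on mle reference rules: mle_refs is the kref that frees
 * the mle, while "inuse" is an extra pin (taken along with a kref)
 * for code that must keep the mle alive across dropping and retaking
 * the dlm locks.  dlm_get_mle_inuse() requires both dlm->spinlock and
 * dlm->master_lock to be held; dlm_put_mle_inuse() takes them itself. */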
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        mle->inuse++;
        kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        mle->inuse--;
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
}

/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        if (!atomic_read(&mle->mle_refs.refcount)) {
                /* this may or may not crash, but who cares.
                 * it's a BUG. */
                mlog(ML_ERROR, "bad mle: %p\n", mle);
                dlm_print_one_mle(mle);
                BUG();
        } else
                kref_put(&mle->mle_refs, dlm_mle_release);
}


/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
        kref_get(&mle->mle_refs);
}
static void dlm_init_mle(struct dlm_master_list_entry *mle,
                         enum dlm_mle_type type,
                         struct dlm_ctxt *dlm,
                         struct dlm_lock_resource *res,
                         const char *name,
                         unsigned int namelen)
{
        assert_spin_locked(&dlm->spinlock);

        mle->dlm = dlm;
        mle->type = type;
        INIT_HLIST_NODE(&mle->master_hash_node);
        INIT_LIST_HEAD(&mle->hb_events);
        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
        spin_lock_init(&mle->spinlock);
        init_waitqueue_head(&mle->wq);
        atomic_set(&mle->woken, 0);
        kref_init(&mle->mle_refs);
        memset(mle->response_map, 0, sizeof(mle->response_map));
        mle->master = O2NM_MAX_NODES;
        mle->new_master = O2NM_MAX_NODES;
        mle->inuse = 0;

        BUG_ON(mle->type != DLM_MLE_BLOCK &&
               mle->type != DLM_MLE_MASTER &&
               mle->type != DLM_MLE_MIGRATION);

        if (mle->type == DLM_MLE_MASTER) {
                BUG_ON(!res);
                mle->mleres = res;
                memcpy(mle->mname, res->lockname.name, res->lockname.len);
                mle->mnamelen = res->lockname.len;
                mle->mnamehash = res->lockname.hash;
        } else {
                BUG_ON(!name);
                mle->mleres = NULL;
                memcpy(mle->mname, name, namelen);
                mle->mnamelen = namelen;
                mle->mnamehash = dlm_lockid_hash(name, namelen);
        }

        atomic_inc(&dlm->mle_tot_count[mle->type]);
        atomic_inc(&dlm->mle_cur_count[mle->type]);

        /* copy off the node_map and register hb callbacks on our copy */
        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
        clear_bit(dlm->node_num, mle->vote_map);
        clear_bit(dlm->node_num, mle->node_map);

        /* attach the mle to the domain node up/down events */
        __dlm_mle_attach_hb_events(dlm, mle);
}
void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);

        if (!hlist_unhashed(&mle->master_hash_node))
                hlist_del_init(&mle->master_hash_node);
}

void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
        struct hlist_head *bucket;

        assert_spin_locked(&dlm->master_lock);

        bucket = dlm_master_hash(dlm, mle->mnamehash);
        hlist_add_head(&mle->master_hash_node, bucket);
}

/* returns 1 if found, 0 if not */
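/* Added note: on success a reference is taken on the returned mle via
 * dlm_get_mle(); the caller is responsible for dropping it with
 * dlm_put_mle().  dlm->master_lock must be held. */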
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen)
{
        struct dlm_master_list_entry *tmpmle;
        struct hlist_head *bucket;
        struct hlist_node *list;
        unsigned int hash;

        assert_spin_locked(&dlm->master_lock);

        hash = dlm_lockid_hash(name, namelen);
        bucket = dlm_master_hash(dlm, hash);
        hlist_for_each(list, bucket) {
                tmpmle = hlist_entry(list, struct dlm_master_list_entry,
                                     master_hash_node);
                if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
                        continue;
                dlm_get_mle(tmpmle);
                *mle = tmpmle;
                return 1;
        }
        return 0;
}
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
        struct dlm_master_list_entry *mle;

        assert_spin_locked(&dlm->spinlock);

        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
                if (node_up)
                        dlm_mle_node_up(dlm, mle, NULL, idx);
                else
                        dlm_mle_node_down(dlm, mle, NULL, idx);
        }
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (!test_bit(idx, mle->node_map))
                mlog(0, "node %u already removed from nodemap!\n", idx);
        else
                clear_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (test_bit(idx, mle->node_map))
                mlog(0, "node %u already in node map!\n", idx);
        else
                set_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}
int dlm_init_mle_cache(void)
{
        dlm_mle_cache = kmem_cache_create("o2dlm_mle",
                                          sizeof(struct dlm_master_list_entry),
                                          0, SLAB_HWCACHE_ALIGN,
                                          NULL);
        if (dlm_mle_cache == NULL)
                return -ENOMEM;
        return 0;
}

void dlm_destroy_mle_cache(void)
{
        if (dlm_mle_cache)
                kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
        struct dlm_master_list_entry *mle;
        struct dlm_ctxt *dlm;

        mlog_entry_void();

        mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);

        mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
             mle->type);

        /* remove from list if not already */
        __dlm_unlink_mle(dlm, mle);

        /* detach the mle from the domain node up/down events */
        __dlm_mle_detach_hb_events(dlm, mle);

        atomic_dec(&dlm->mle_cur_count[mle->type]);

        /* NOTE: kfree under spinlock here.
         * if this is bad, we can move this to a freelist. */
        kmem_cache_free(dlm_mle_cache, mle);
}
/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
        dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
                                              sizeof(struct dlm_lock_resource),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        if (!dlm_lockres_cache)
                goto bail;

        dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
                                               DLM_LOCKID_NAME_MAX, 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!dlm_lockname_cache)
                goto bail;

        return 0;
bail:
        dlm_destroy_master_caches();
        return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
        if (dlm_lockname_cache)
                kmem_cache_destroy(dlm_lockname_cache);

        if (dlm_lockres_cache)
                kmem_cache_destroy(dlm_lockres_cache);
}
static void dlm_lockres_release(struct kref *kref)
{
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm;

        res = container_of(kref, struct dlm_lock_resource, refs);
        dlm = res->dlm;

        /* This should not happen -- all lockres' have a name
         * associated with them at init time. */
        BUG_ON(!res->lockname.name);

        mlog(0, "destroying lockres %.*s\n", res->lockname.len,
             res->lockname.name);

        spin_lock(&dlm->track_lock);
        if (!list_empty(&res->tracking))
                list_del_init(&res->tracking);
        else {
                mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
                     res->lockname.len, res->lockname.name);
                dlm_print_one_lock_resource(res);
        }
        spin_unlock(&dlm->track_lock);

        atomic_dec(&dlm->res_cur_count);

        if (!hlist_unhashed(&res->hash_node) ||
            !list_empty(&res->granted) ||
            !list_empty(&res->converting) ||
            !list_empty(&res->blocked) ||
            !list_empty(&res->dirty) ||
            !list_empty(&res->recovering) ||
            !list_empty(&res->purge)) {
                mlog(ML_ERROR,
                     "Going to BUG for resource %.*s."
                     "  We're on a list! [%c%c%c%c%c%c%c]\n",
                     res->lockname.len, res->lockname.name,
                     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
                     !list_empty(&res->granted) ? 'G' : ' ',
                     !list_empty(&res->converting) ? 'C' : ' ',
                     !list_empty(&res->blocked) ? 'B' : ' ',
                     !list_empty(&res->dirty) ? 'D' : ' ',
                     !list_empty(&res->recovering) ? 'R' : ' ',
                     !list_empty(&res->purge) ? 'P' : ' ');

                dlm_print_one_lock_resource(res);
        }

        /* By the time we're ready to blow this guy away, we shouldn't
         * be on any lists. */
        BUG_ON(!hlist_unhashed(&res->hash_node));
        BUG_ON(!list_empty(&res->granted));
        BUG_ON(!list_empty(&res->converting));
        BUG_ON(!list_empty(&res->blocked));
        BUG_ON(!list_empty(&res->dirty));
        BUG_ON(!list_empty(&res->recovering));
        BUG_ON(!list_empty(&res->purge));

        kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

        kmem_cache_free(dlm_lockres_cache, res);
}
void dlm_lockres_put(struct dlm_lock_resource *res)
{
        kref_put(&res->refs, dlm_lockres_release);
}

static void dlm_init_lockres(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res,
                             const char *name, unsigned int namelen)
{
        char *qname;

        /* If we memset here, we lose our reference to the kmalloc'd
         * res->lockname.name, so be sure to init every field
         * correctly! */

        qname = (char *) res->lockname.name;
        memcpy(qname, name, namelen);

        res->lockname.len = namelen;
        res->lockname.hash = dlm_lockid_hash(name, namelen);

        init_waitqueue_head(&res->wq);
        spin_lock_init(&res->spinlock);
        INIT_HLIST_NODE(&res->hash_node);
        INIT_LIST_HEAD(&res->granted);
        INIT_LIST_HEAD(&res->converting);
        INIT_LIST_HEAD(&res->blocked);
        INIT_LIST_HEAD(&res->dirty);
        INIT_LIST_HEAD(&res->recovering);
        INIT_LIST_HEAD(&res->purge);
        INIT_LIST_HEAD(&res->tracking);
        atomic_set(&res->asts_reserved, 0);
        res->migration_pending = 0;
        res->inflight_locks = 0;

        res->dlm = dlm;

        kref_init(&res->refs);

        atomic_inc(&dlm->res_tot_count);
        atomic_inc(&dlm->res_cur_count);

        /* just for consistency */
        spin_lock(&res->spinlock);
        dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
        spin_unlock(&res->spinlock);

        res->state = DLM_LOCK_RES_IN_PROGRESS;

        res->last_used = 0;

        spin_lock(&dlm->spinlock);
        list_add_tail(&res->tracking, &dlm->tracking_list);
        spin_unlock(&dlm->spinlock);

        memset(res->lvb, 0, DLM_LVB_LEN);
        memset(res->refmap, 0, sizeof(res->refmap));
}
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
                                          const char *name,
                                          unsigned int namelen)
{
        struct dlm_lock_resource *res = NULL;

        res = (struct dlm_lock_resource *)
                kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
        if (!res)
                goto error;

        res->lockname.name = (char *)
                kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
        if (!res->lockname.name)
                goto error;

        dlm_init_lockres(dlm, res, name, namelen);
        return res;

error:
        if (res && res->lockname.name)
                kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

        if (res)
                kmem_cache_free(dlm_lockres_cache, res);
        return NULL;
}
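
/* Added note on inflight references: inflight_locks counts locks that
 * are being created or are otherwise in transit on this node.  The
 * first inflight ref also sets this node's bit in the lockres refmap;
 * when the count drops back to zero the bit is cleared again, which
 * eventually lets the master drop its reference and allows the
 * lockres to be purged. */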
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     int new_lockres,
                                     const char *file,
                                     int line)
{
        if (!new_lockres)
                assert_spin_locked(&res->spinlock);

        if (!test_bit(dlm->node_num, res->refmap)) {
                BUG_ON(res->inflight_locks != 0);
                dlm_lockres_set_refmap_bit(dlm->node_num, res);
        }
        res->inflight_locks++;
        mlog(0, "%s:%.*s: inflight++: now %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             res->inflight_locks);
}

void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     const char *file,
                                     int line)
{
        assert_spin_locked(&res->spinlock);

        BUG_ON(res->inflight_locks == 0);
        res->inflight_locks--;
        mlog(0, "%s:%.*s: inflight--: now %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             res->inflight_locks);
        if (res->inflight_locks == 0)
                dlm_lockres_clear_refmap_bit(dlm->node_num, res);
        wake_up(&res->wq);
}
/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.  need to wait around for that node
 * to assert_master (or die).
 *
 */
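/* Added summary of the flow below: look up (or allocate) the lockres
 * and an mle, send DLM_MASTER_REQUEST_MSG to every node in the vote
 * map, wait in dlm_wait_for_lock_mastery() until an owner is known,
 * and assert mastery if this node wins.  Any node map change restarts
 * the process. */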
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
                                                 const char *lockid,
                                                 int namelen,
                                                 int flags)
{
        struct dlm_lock_resource *tmpres = NULL, *res = NULL;
        struct dlm_master_list_entry *mle = NULL;
        struct dlm_master_list_entry *alloc_mle = NULL;
        int blocked = 0;
        int ret, nodenum;
        struct dlm_node_iter iter;
        unsigned int hash;
        int tries = 0;
        int bit, wait_on_recovery = 0;
        int drop_inflight_if_nonlocal = 0;

        BUG_ON(!lockid);

        hash = dlm_lockid_hash(lockid, namelen);

        mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
        spin_lock(&dlm->spinlock);
        tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
        if (tmpres) {
                int dropping_ref = 0;

                spin_unlock(&dlm->spinlock);

                spin_lock(&tmpres->spinlock);
                /* We wait for the other thread that is mastering the resource */
                if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        __dlm_wait_on_lockres(tmpres);
                        BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
                }

                if (tmpres->owner == dlm->node_num) {
                        BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
                        dlm_lockres_grab_inflight_ref(dlm, tmpres);
                } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
                        dropping_ref = 1;
                spin_unlock(&tmpres->spinlock);

                /* wait until done messaging the master, drop our ref to allow
                 * the lockres to be purged, start over. */
                if (dropping_ref) {
                        spin_lock(&tmpres->spinlock);
                        __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
                        spin_unlock(&tmpres->spinlock);
                        dlm_lockres_put(tmpres);
                        tmpres = NULL;
                        goto lookup;
                }

                mlog(0, "found in hash!\n");
                if (res)
                        dlm_lockres_put(res);
                res = tmpres;
                goto leave;
        }
        if (!res) {
                spin_unlock(&dlm->spinlock);
                mlog(0, "allocating a new resource\n");
                /* nothing found and we need to allocate one. */
                alloc_mle = (struct dlm_master_list_entry *)
                        kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
                if (!alloc_mle)
                        goto leave;
                res = dlm_new_lockres(dlm, lockid, namelen);
                if (!res)
                        goto leave;
                goto lookup;
        }

        mlog(0, "no lockres found, allocated our own: %p\n", res);

        if (flags & LKM_LOCAL) {
                /* caller knows it's safe to assume it's not mastered elsewhere
                 * DONE!  return right away */
                spin_lock(&res->spinlock);
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
                __dlm_insert_lockres(dlm, res);
                dlm_lockres_grab_inflight_ref(dlm, res);
                spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);
                /* lockres still marked IN_PROGRESS */
                goto wake_waiters;
        }
        /* check master list to see if another node has started mastering it */
        spin_lock(&dlm->master_lock);

        /* if we found a block, wait for lock to be mastered by another node */
        blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
        if (blocked) {
                int mig;
                if (mle->type == DLM_MLE_MASTER) {
                        mlog(ML_ERROR, "master entry for nonexistent lock!\n");
                        BUG();
                }
                mig = (mle->type == DLM_MLE_MIGRATION);
                /* if there is a migration in progress, let the migration
                 * finish before continuing.  we can wait for the absence
                 * of the MIGRATION mle: either the migrate finished or
                 * one of the nodes died and the mle was cleaned up.
                 * if there is a BLOCK here, but it already has a master
                 * set, we are too late.  the master does not have a ref
                 * for us in the refmap.  detach the mle and drop it.
                 * either way, go back to the top and start over. */
                if (mig || mle->master != O2NM_MAX_NODES) {
                        BUG_ON(mig && mle->master == dlm->node_num);
                        /* we arrived too late.  the master does not
                         * have a ref for us. retry. */
                        mlog(0, "%s:%.*s: late on %s\n",
                             dlm->name, namelen, lockid,
                             mig ?  "MIGRATION" : "BLOCK");
                        spin_unlock(&dlm->master_lock);
                        spin_unlock(&dlm->spinlock);

                        /* master is known, detach */
                        if (!mig)
                                dlm_mle_detach_hb_events(dlm, mle);
                        dlm_put_mle(mle);
                        mle = NULL;
                        /* this is lame, but we can't wait on either
                         * the mle or lockres waitqueue here */
                        if (mig)
                                msleep(100);
                        goto lookup;
                }
        } else {
                /* go ahead and try to master lock on this node */
                mle = alloc_mle;
                /* make sure this does not get freed below */
                alloc_mle = NULL;
                dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
                set_bit(dlm->node_num, mle->maybe_map);
                __dlm_insert_mle(dlm, mle);

                /* still holding the dlm spinlock, check the recovery map
                 * to see if there are any nodes that still need to be
                 * considered.  these will not appear in the mle nodemap
                 * but they might own this lockres.  wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
                             "recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                }
        }

        /* at this point there is either a DLM_MLE_BLOCK or a
         * DLM_MLE_MASTER on the master list, so it's safe to add the
         * lockres to the hashtable.  anyone who finds the lock will
         * still have to wait on the IN_PROGRESS. */

        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);
        /* since this lockres is new it does not require the spinlock */
        dlm_lockres_grab_inflight_ref_new(dlm, res);

        /* if this node does not become the master make sure to drop
         * this inflight reference below */
        drop_inflight_if_nonlocal = 1;

        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
         * ref at this time in the assert master handler, so we
         * need an extra one to keep from a bad ptr deref. */
        dlm_get_mle_inuse(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
redo_request:
        while (wait_on_recovery) {
                /* any cluster changes that occurred after dropping the
                 * dlm spinlock would be detectable by a change on the mle,
                 * so we only need to clear out the recovery map once. */
                if (dlm_is_recovery_lock(lockid, namelen)) {
                        mlog(ML_NOTICE, "%s: recovery map is not empty, but "
                             "must master $RECOVERY lock now\n", dlm->name);
                        if (!dlm_pre_master_reco_lockres(dlm, res))
                                wait_on_recovery = 0;
                        else {
                                mlog(0, "%s: waiting 500ms for heartbeat state "
                                     "change\n", dlm->name);
                                msleep(500);
                        }
                        continue;
                }

                dlm_kick_recovery_thread(dlm);
                msleep(1000);
                dlm_wait_for_recovery(dlm);

                spin_lock(&dlm->spinlock);
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
                             "recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                } else
                        wait_on_recovery = 0;
                spin_unlock(&dlm->spinlock);

                if (wait_on_recovery)
                        dlm_wait_for_node_recovery(dlm, bit, 10000);
        }
        /* must wait for lock to be mastered elsewhere */
        if (blocked)
                goto wait;

        ret = -EINVAL;
        dlm_node_iter_init(mle->vote_map, &iter);
        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
                ret = dlm_do_master_request(res, mle, nodenum);
                if (ret < 0)
                        mlog_errno(ret);
                if (mle->master != O2NM_MAX_NODES) {
                        /* found a master ! */
                        if (mle->master <= nodenum)
                                break;
                        /* if our master request has not reached the master
                         * yet, keep going until it does.  this is how the
                         * master will know that asserts are needed back to
                         * the lower nodes. */
                        mlog(0, "%s:%.*s: requests only up to %u but master "
                             "is %u, keep going\n", dlm->name, namelen,
                             lockid, nodenum, mle->master);
                }
        }

wait:
        /* keep going until the response map includes all nodes */
        ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
        if (ret < 0) {
                wait_on_recovery = 1;
                mlog(0, "%s:%.*s: node map changed, redo the "
                     "master request now, blocked=%d\n",
                     dlm->name, res->lockname.len,
                     res->lockname.name, blocked);
                if (++tries > 20) {
                        mlog(ML_ERROR, "%s:%.*s: spinning on "
                             "dlm_wait_for_lock_mastery, blocked=%d\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
                        dlm_print_one_mle(mle);
                        tries = 0;
                }
                goto redo_request;
        }

        mlog(0, "lockres mastered by %u\n", res->owner);
        /* make sure we never continue without this */
        BUG_ON(res->owner == O2NM_MAX_NODES);

        /* master is known, detach if not already detached */
        dlm_mle_detach_hb_events(dlm, mle);
        dlm_put_mle(mle);
        /* put the extra ref */
        dlm_put_mle_inuse(mle);

wake_waiters:
        spin_lock(&res->spinlock);
        if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
                dlm_lockres_drop_inflight_ref(dlm, res);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

leave:
        /* need to free the unused mle */
        if (alloc_mle)
                kmem_cache_free(dlm_mle_cache, alloc_mle);

        return res;
}
#define DLM_MASTERY_TIMEOUT_MS   5000

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked)
{
        u8 m;
        int ret, bit;
        int map_changed, voting_done;
        int assert, sleep;

recheck:
        ret = 0;
        assert = 0;

        /* check if another node has already become the owner */
        spin_lock(&res->spinlock);
        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
                     res->lockname.len, res->lockname.name, res->owner);
                spin_unlock(&res->spinlock);
                /* this will cause the master to re-assert across
                 * the whole cluster, freeing up mles */
                if (res->owner != dlm->node_num) {
                        ret = dlm_do_master_request(res, mle, res->owner);
                        if (ret < 0) {
                                /* give recovery a chance to run */
                                mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
                                msleep(500);
                                goto recheck;
                        }
                }
                ret = 0;
                goto leave;
        }
        spin_unlock(&res->spinlock);

        spin_lock(&mle->spinlock);
        m = mle->master;
        map_changed = (memcmp(mle->vote_map, mle->node_map,
                              sizeof(mle->vote_map)) != 0);
        voting_done = (memcmp(mle->vote_map, mle->response_map,
                              sizeof(mle->vote_map)) == 0);

        /* restart if we hit any errors */
        if (map_changed) {
                int b;
                mlog(0, "%s: %.*s: node map changed, restarting\n",
                     dlm->name, res->lockname.len, res->lockname.name);
                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
                b = (mle->type == DLM_MLE_BLOCK);
                if ((*blocked && !b) || (!*blocked && b)) {
                        mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
                             dlm->name, res->lockname.len, res->lockname.name,
                             *blocked, b);
                        *blocked = b;
                }
                spin_unlock(&mle->spinlock);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto leave;
                }
                mlog(0, "%s:%.*s: restart lock mastery succeeded, "
                     "rechecking now\n", dlm->name, res->lockname.len,
                     res->lockname.name);
                goto recheck;
        } else {
                if (!voting_done) {
                        mlog(0, "map not changed and voting not done "
                             "for %s:%.*s\n", dlm->name, res->lockname.len,
                             res->lockname.name);
                }
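
                /* Added note on the election rule: once every node in
                 * the vote map has responded (voting_done) and this
                 * node is not blocked, the lowest node number set in
                 * the maybe_map wins and must assert mastery to the
                 * other nodes. */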
                if (m != O2NM_MAX_NODES) {
                        /* another node has done an assert!
                         * all done! */
                        sleep = 0;
                } else {
                        sleep = 1;
                        /* have all nodes responded? */
                        if (voting_done && !*blocked) {
                                bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
                                if (dlm->node_num <= bit) {
                                        /* my node number is lowest.
                                         * now tell other nodes that I am
                                         * mastering this. */
                                        mle->master = dlm->node_num;
                                        /* ref was grabbed in get_lock_resource
                                         * will be dropped in dlmlock_master */
                                        assert = 1;
                                        sleep = 0;
                                }
                                /* if voting is done, but we have not received
                                 * an assert master yet, we must sleep */
                        }
                }
        }
        spin_unlock(&mle->spinlock);

        /* sleep if we haven't finished voting yet */
        if (sleep) {
                unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

                /*
                if (atomic_read(&mle->mle_refs.refcount) < 2)
                        mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
                        atomic_read(&mle->mle_refs.refcount),
                        res->lockname.len, res->lockname.name);
                */
                atomic_set(&mle->woken, 0);
                (void)wait_event_timeout(mle->wq,
                                         (atomic_read(&mle->woken) == 1),
                                         timeo);
                if (res->owner == O2NM_MAX_NODES) {
                        mlog(0, "%s:%.*s: waiting again\n", dlm->name,
                             res->lockname.len, res->lockname.name);
                        goto recheck;
                }
                mlog(0, "done waiting, master is %u\n", res->owner);
                ret = 0;
                goto leave;
        }

        ret = 0;   /* done */
        if (assert) {
                m = dlm->node_num;
                mlog(0, "about to master %.*s here, this=%u\n",
                     res->lockname.len, res->lockname.name, m);
                ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
                if (ret) {
                        /* This is a failure in the network path,
                         * not in the response to the assert_master
                         * (any nonzero response is a BUG on this node).
                         * Most likely a socket just got disconnected
                         * due to node death. */
                        mlog_errno(ret);
                }
                /* no longer need to restart lock mastery.
                 * all living nodes have been contacted. */
                ret = 0;
        }

        /* set the lockres owner */
        spin_lock(&res->spinlock);
        /* mastery reference obtained either during
         * assert_master_handler or in get_lock_resource */
        dlm_change_lockres_owner(dlm, res, m);
        spin_unlock(&res->spinlock);

leave:
        return ret;
}
struct dlm_bitmap_diff_iter
{
        int curnode;
        unsigned long *orig_bm;
        unsigned long *cur_bm;
        unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
        NODE_DOWN = -1,
        NODE_NO_CHANGE = 0,
        NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
                                      unsigned long *orig_bm,
                                      unsigned long *cur_bm)
{
        unsigned long p1, p2;
        int i;

        iter->curnode = -1;
        iter->orig_bm = orig_bm;
        iter->cur_bm = cur_bm;
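
        /* Added note: diff_bm is the symmetric difference (XOR) of
         * the two maps; a set bit means that node's state differs
         * between the original and current snapshots. */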
        for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
                p1 = *(iter->orig_bm + i);
                p2 = *(iter->cur_bm + i);
                iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
        }
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
                                     enum dlm_node_state_change *state)
{
        int bit;

        if (iter->curnode >= O2NM_MAX_NODES)
                return -ENOENT;

        bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
                            iter->curnode+1);
        if (bit >= O2NM_MAX_NODES) {
                iter->curnode = O2NM_MAX_NODES;
                return -ENOENT;
        }

        /* if it was there in the original then this node died */
        if (test_bit(bit, iter->orig_bm))
                *state = NODE_DOWN;
        else
                *state = NODE_UP;

        iter->curnode = bit;
        return bit;
}
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res,
                                    struct dlm_master_list_entry *mle,
                                    int blocked)
{
        struct dlm_bitmap_diff_iter bdi;
        enum dlm_node_state_change sc;
        int node;
        int ret = 0;

        mlog(0, "something happened such that the "
             "master process may need to be restarted!\n");

        assert_spin_locked(&mle->spinlock);

        dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
        node = dlm_bitmap_diff_iter_next(&bdi, &sc);
        while (node >= 0) {
                if (sc == NODE_UP) {
                        /* a node came up.  clear any old vote from
                         * the response map and set it in the vote map
                         * then restart the mastery. */
                        mlog(ML_NOTICE, "node %d up while restarting\n", node);

                        /* redo the master request, but only for the new node */
                        mlog(0, "sending request to new node\n");
                        clear_bit(node, mle->response_map);
                        set_bit(node, mle->vote_map);
                } else {
                        mlog(ML_ERROR, "node down! %d\n", node);
                        if (blocked) {
                                int lowest = find_next_bit(mle->maybe_map,
                                                           O2NM_MAX_NODES, 0);

                                /* act like it was never there */
                                clear_bit(node, mle->maybe_map);

                                if (node == lowest) {
                                        mlog(0, "expected master %u died"
                                             " while this node was blocked "
                                             "waiting on it!\n", node);
                                        lowest = find_next_bit(mle->maybe_map,
                                                               O2NM_MAX_NODES,
                                                               lowest+1);
                                        if (lowest < O2NM_MAX_NODES) {
                                                mlog(0, "%s:%.*s:still "
                                                     "blocked. waiting on %u "
                                                     "now\n", dlm->name,
                                                     res->lockname.len,
                                                     res->lockname.name,
                                                     lowest);
                                        } else {
                                                /* mle is an MLE_BLOCK, but
                                                 * there is now nothing left to
                                                 * block on.  we need to return
                                                 * all the way back out and try
                                                 * again with an MLE_MASTER.
                                                 * dlm_do_local_recovery_cleanup
                                                 * has already run, so the mle
                                                 * refcount is ok */
                                                mlog(0, "%s:%.*s: no "
                                                     "longer blocking. try to "
                                                     "master this here\n",
                                                     dlm->name,
                                                     res->lockname.len,
                                                     res->lockname.name);
                                                mle->type = DLM_MLE_MASTER;
                                                mle->mleres = res;
                                        }
                                }
                        }

                        /* now blank out everything, as if we had never
                         * contacted anyone */
                        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
                        memset(mle->response_map, 0, sizeof(mle->response_map));
                        /* reset the vote_map to the current node_map */
                        memcpy(mle->vote_map, mle->node_map,
                               sizeof(mle->node_map));
                        /* put myself into the maybe map */
                        if (mle->type != DLM_MLE_BLOCK)
                                set_bit(dlm->node_num, mle->maybe_map);
                }
                ret = -EAGAIN;
                node = dlm_bitmap_diff_iter_next(&bdi, &sc);
        }
        return ret;
}
/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */
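/* Added summary of the responses handled below, recorded under
 * mle->spinlock:
 *   DLM_MASTER_RESP_YES   - "to" is the master (and now holds a
 *                           refmap reference for this node)
 *   DLM_MASTER_RESP_NO    - "to" is not the master
 *   DLM_MASTER_RESP_MAYBE - "to" is also trying to master the lock
 *   DLM_MASTER_RESP_ERROR - transient error on "to"; resend
 */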
static int dlm_do_master_request(struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle, int to)
{
        struct dlm_ctxt *dlm = mle->dlm;
        struct dlm_master_request request;
        int ret, response = 0, resend;

        memset(&request, 0, sizeof(request));
        request.node_idx = dlm->node_num;

        BUG_ON(mle->type == DLM_MLE_MIGRATION);

        request.namelen = (u8)mle->mnamelen;
        memcpy(request.name, mle->mname, request.namelen);

again:
        ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
                                 sizeof(request), to, &response);
        if (ret < 0)  {
                if (ret == -ESRCH) {
                        /* should never happen */
                        mlog(ML_ERROR, "TCP stack not ready!\n");
                        BUG();
                } else if (ret == -EINVAL) {
                        mlog(ML_ERROR, "bad args passed to o2net!\n");
                        BUG();
                } else if (ret == -ENOMEM) {
                        mlog(ML_ERROR, "out of memory while trying to send "
                             "network message!  retrying\n");
                        /* this is totally crude */
                        msleep(50);
                        goto again;
                } else if (!dlm_is_host_down(ret)) {
                        /* not a network error. bad. */
                        mlog_errno(ret);
                        mlog(ML_ERROR, "unhandled error!");
                        BUG();
                }
                /* all other errors should be network errors,
                 * and likely indicate node death */
                mlog(ML_ERROR, "link to %d went down!\n", to);
                goto out;
        }

        ret = 0;
        resend = 0;
        spin_lock(&mle->spinlock);
        switch (response) {
        case DLM_MASTER_RESP_YES:
                set_bit(to, mle->response_map);
                mlog(0, "node %u is the master, response=YES\n", to);
                mlog(0, "%s:%.*s: master node %u now knows I have a "
                     "reference\n", dlm->name, res->lockname.len,
                     res->lockname.name, to);
                mle->master = to;
                break;
        case DLM_MASTER_RESP_NO:
                mlog(0, "node %u not master, response=NO\n", to);
                set_bit(to, mle->response_map);
                break;
        case DLM_MASTER_RESP_MAYBE:
                mlog(0, "node %u not master, response=MAYBE\n", to);
                set_bit(to, mle->response_map);
                set_bit(to, mle->maybe_map);
                break;
        case DLM_MASTER_RESP_ERROR:
                mlog(0, "node %u hit an error, resending\n", to);
                resend = 1;
                response = 0;
                break;
        default:
                mlog(ML_ERROR, "bad response! %u\n", response);
                BUG();
        }
        spin_unlock(&mle->spinlock);
        if (resend) {
                /* this is also totally crude */
                msleep(50);
                goto again;
        }

out:
        return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        u8 response = DLM_MASTER_RESP_MAYBE;
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_resource *res = NULL;
        struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
        struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
        char *name;
        unsigned int namelen, hash;
        int found, ret;
        int set_maybe;
        int dispatch_assert = 0;

        if (!dlm_grab(dlm))
                return DLM_MASTER_RESP_NO;

        if (!dlm_domain_fully_joined(dlm)) {
                response = DLM_MASTER_RESP_NO;
                goto send_response;
        }

        name = request->name;
        namelen = request->namelen;
        hash = dlm_lockid_hash(name, namelen);

        if (namelen > DLM_LOCKID_NAME_MAX) {
                response = DLM_IVBUFLEN;
                goto send_response;
        }
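
        /* Added note: this handler may need an mle of its own (to
         * record a BLOCK).  Allocating under dlm->spinlock is not
         * safe, so the first pass through may drop all locks,
         * allocate, and jump back to way_up_top to retry. */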
way_up_top:
        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
        if (res) {
                spin_unlock(&dlm->spinlock);

                /* take care of the easy cases up front */
                spin_lock(&res->spinlock);
                if (res->state & (DLM_LOCK_RES_RECOVERING|
                                  DLM_LOCK_RES_MIGRATING)) {
                        spin_unlock(&res->spinlock);
                        mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
                             "being recovered/migrated\n");
                        response = DLM_MASTER_RESP_ERROR;
                        if (mle)
                                kmem_cache_free(dlm_mle_cache, mle);
                        goto send_response;
                }

                if (res->owner == dlm->node_num) {
                        mlog(0, "%s:%.*s: setting bit %u in refmap\n",
                             dlm->name, namelen, name, request->node_idx);
                        dlm_lockres_set_refmap_bit(request->node_idx, res);
                        spin_unlock(&res->spinlock);
                        response = DLM_MASTER_RESP_YES;
                        if (mle)
                                kmem_cache_free(dlm_mle_cache, mle);

                        /* this node is the owner.
                         * there is some extra work that needs to
                         * happen now.  the requesting node has
                         * caused all nodes up to this one to
                         * create mles.  this node now needs to
                         * go back and clean those up. */
                        dispatch_assert = 1;
                        goto send_response;
                } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        spin_unlock(&res->spinlock);
                        // mlog(0, "node %u is the master\n", res->owner);
                        response = DLM_MASTER_RESP_NO;
                        if (mle)
                                kmem_cache_free(dlm_mle_cache, mle);
                        goto send_response;
                }

                /* ok, there is no owner.  either this node is
                 * being blocked, or it is actively trying to
                 * master this lock. */
                if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
                        mlog(ML_ERROR, "lock with no owner should be "
                             "in-progress!\n");
                        BUG();
                }

                // mlog(0, "lockres is in progress...\n");
                spin_lock(&dlm->master_lock);
                found = dlm_find_mle(dlm, &tmpmle, name, namelen);
                if (!found) {
                        mlog(ML_ERROR, "no mle found for this lock!\n");
                        BUG();
                }
                set_maybe = 1;
                spin_lock(&tmpmle->spinlock);
                if (tmpmle->type == DLM_MLE_BLOCK) {
                        // mlog(0, "this node is waiting for "
                        // "lockres to be mastered\n");
                        response = DLM_MASTER_RESP_NO;
                } else if (tmpmle->type == DLM_MLE_MIGRATION) {
                        mlog(0, "node %u is master, but trying to migrate to "
                             "node %u.\n", tmpmle->master, tmpmle->new_master);
                        if (tmpmle->master == dlm->node_num) {
                                mlog(ML_ERROR, "no owner on lockres, but this "
                                     "node is trying to migrate it to %u?!\n",
                                     tmpmle->new_master);
                                BUG();
                        } else {
                                /* the real master can respond on its own */
                                response = DLM_MASTER_RESP_NO;
                        }
                } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        set_maybe = 0;
                        if (tmpmle->master == dlm->node_num) {
                                response = DLM_MASTER_RESP_YES;
                                /* this node will be the owner.
                                 * go back and clean the mles on any
                                 * other nodes */
                                dispatch_assert = 1;
                                dlm_lockres_set_refmap_bit(request->node_idx, res);
                                mlog(0, "%s:%.*s: setting bit %u in refmap\n",
                                     dlm->name, namelen, name,
                                     request->node_idx);
                        } else
                                response = DLM_MASTER_RESP_NO;
                } else {
                        // mlog(0, "this node is attempting to "
                        // "master lockres\n");
                        response = DLM_MASTER_RESP_MAYBE;
                }
                if (set_maybe)
                        set_bit(request->node_idx, tmpmle->maybe_map);
                spin_unlock(&tmpmle->spinlock);

                spin_unlock(&dlm->master_lock);
                spin_unlock(&res->spinlock);

                /* keep the mle attached to heartbeat events */
                dlm_put_mle(tmpmle);
                if (mle)
                        kmem_cache_free(dlm_mle_cache, mle);
                goto send_response;
        }

        /*
         * lockres doesn't exist on this node
         * if there is an MLE_BLOCK, return NO
         * if there is an MLE_MASTER, return MAYBE
         * otherwise, add an MLE_BLOCK, return NO
         */
        spin_lock(&dlm->master_lock);
        found = dlm_find_mle(dlm, &tmpmle, name, namelen);
        if (!found) {
                /* this lockid has never been seen on this node yet */
                // mlog(0, "no mle found\n");
                if (!mle) {
                        spin_unlock(&dlm->master_lock);
                        spin_unlock(&dlm->spinlock);

                        mle = (struct dlm_master_list_entry *)
                                kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
                        if (!mle) {
                                response = DLM_MASTER_RESP_ERROR;
                                mlog_errno(-ENOMEM);
                                goto send_response;
                        }
                        goto way_up_top;
                }

                // mlog(0, "this is second time thru, already allocated, "
                // "add the block.\n");
                dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
                set_bit(request->node_idx, mle->maybe_map);
                __dlm_insert_mle(dlm, mle);
                response = DLM_MASTER_RESP_NO;
        } else {
                // mlog(0, "mle was found\n");
                set_maybe = 1;
                spin_lock(&tmpmle->spinlock);
                if (tmpmle->master == dlm->node_num) {
                        mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
                        BUG();
                }
                if (tmpmle->type == DLM_MLE_BLOCK)
                        response = DLM_MASTER_RESP_NO;
                else if (tmpmle->type == DLM_MLE_MIGRATION) {
                        mlog(0, "migration mle was found (%u->%u)\n",
                             tmpmle->master, tmpmle->new_master);
                        /* real master can respond on its own */
                        response = DLM_MASTER_RESP_NO;
                } else
                        response = DLM_MASTER_RESP_MAYBE;
                if (set_maybe)
                        set_bit(request->node_idx, tmpmle->maybe_map);
                spin_unlock(&tmpmle->spinlock);
        }
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);

        if (found) {
                /* keep the mle attached to heartbeat events */
                dlm_put_mle(tmpmle);
        }
send_response:
        /*
         * __dlm_lookup_lockres() grabbed a reference to this lockres.
         * The reference is released by dlm_assert_master_worker() under
         * the call to dlm_dispatch_assert_master().  If
         * dlm_assert_master_worker() isn't called, we drop it here.
         */
        if (dispatch_assert) {
                if (response != DLM_MASTER_RESP_YES)
                        mlog(ML_ERROR, "invalid response %d\n", response);
                if (!res) {
                        mlog(ML_ERROR, "bad lockres while trying to assert!\n");
                        BUG();
                }
                mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
                     dlm->node_num, res->lockname.len, res->lockname.name);
                ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
                                                 DLM_ASSERT_MASTER_MLE_CLEANUP);
                if (ret < 0) {
                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
                        response = DLM_MASTER_RESP_ERROR;
                        dlm_lockres_put(res);
                }
        } else {
                if (res)
                        dlm_lockres_put(res);
        }

        dlm_put(dlm);
        return response;
}
/*
 * DLM_ASSERT_MASTER_MSG
 */


/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
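/* Added note on the assert reply: the status (r) is a bitmask.
 * DLM_ASSERT_RESPONSE_REASSERT asks this node to assert again (the
 * target contacted other nodes that also need cleaning up), and
 * DLM_ASSERT_RESPONSE_MASTERY_REF means the target holds a reference
 * to this lockres, so its bit must be set in the refmap.  A negative
 * reply is fatal (BUG) on this node. */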
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res,
                                void *nodemap, u32 flags)
{
        struct dlm_assert_master assert;
        int to, tmpret;
        struct dlm_node_iter iter;
        int ret = 0;
        int reassert;
        const char *lockname = res->lockname.name;
        unsigned int namelen = res->lockname.len;

        BUG_ON(namelen > O2NM_MAX_NAME_LEN);

        spin_lock(&res->spinlock);
        res->state |= DLM_LOCK_RES_SETREF_INPROG;
        spin_unlock(&res->spinlock);

again:
        reassert = 0;

        /* note that if this nodemap is empty, it returns 0 */
        dlm_node_iter_init(nodemap, &iter);
        while ((to = dlm_node_iter_next(&iter)) >= 0) {
                int r = 0;
                struct dlm_master_list_entry *mle = NULL;

                mlog(0, "sending assert master to %d (%.*s)\n", to,
                     namelen, lockname);
                memset(&assert, 0, sizeof(assert));
                assert.node_idx = dlm->node_num;
                assert.namelen = namelen;
                memcpy(assert.name, lockname, namelen);
                assert.flags = cpu_to_be32(flags);

                tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
                                            &assert, sizeof(assert), to, &r);
                if (tmpret < 0) {
                        mlog(0, "assert_master returned %d!\n", tmpret);
                        if (!dlm_is_host_down(tmpret)) {
                                mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
                                BUG();
                        }
                        /* a node died.  finish out the rest of the nodes. */
                        mlog(0, "link to %d went down!\n", to);
                        /* any nonzero status return will do */
                        ret = tmpret;
                        r = 0;
                } else if (r < 0) {
                        /* ok, something is horribly messed up.  kill thyself. */
                        mlog(ML_ERROR,"during assert master of %.*s to %u, "
                             "got %d.\n", namelen, lockname, to, r);
                        spin_lock(&dlm->spinlock);
                        spin_lock(&dlm->master_lock);
                        if (dlm_find_mle(dlm, &mle, (char *)lockname,
                                         namelen)) {
                                dlm_print_one_mle(mle);
                                __dlm_put_mle(mle);
                        }
                        spin_unlock(&dlm->master_lock);
                        spin_unlock(&dlm->spinlock);
                        BUG();
                }

                if (r & DLM_ASSERT_RESPONSE_REASSERT &&
                    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
                        mlog(ML_ERROR, "%.*s: very strange, "
                             "master MLE but no lockres on %u\n",
                             namelen, lockname, to);
                }

                if (r & DLM_ASSERT_RESPONSE_REASSERT) {
                        mlog(0, "%.*s: node %u created mles on other "
                             "nodes and requests a re-assert\n",
                             namelen, lockname, to);
                        reassert = 1;
                }
                if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
                        mlog(0, "%.*s: node %u has a reference to this "
                             "lockres, set the bit in the refmap\n",
                             namelen, lockname, to);
                        spin_lock(&res->spinlock);
                        dlm_lockres_set_refmap_bit(to, res);
                        spin_unlock(&res->spinlock);
                }
        }

        if (reassert)
                goto again;

        spin_lock(&res->spinlock);
        res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

        return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
                              void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_master_list_entry *mle = NULL;
        struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
        struct dlm_lock_resource *res = NULL;
        char *name;
        unsigned int namelen, hash;
        u32 flags;
        int master_request = 0, have_lockres_ref = 0;
        int ret = 0;

        if (!dlm_grab(dlm))
                return 0;

        name = assert->name;
        namelen = assert->namelen;
        hash = dlm_lockid_hash(name, namelen);
        flags = be32_to_cpu(assert->flags);

        if (namelen > DLM_LOCKID_NAME_MAX) {
                mlog(ML_ERROR, "Invalid name length!");
                goto done;
        }

        spin_lock(&dlm->spinlock);

        if (flags)
                mlog(0, "assert_master with flags: %u\n", flags);

        /* find the MLE */
        spin_lock(&dlm->master_lock);
        if (!dlm_find_mle(dlm, &mle, name, namelen)) {
                /* not an error, could be master just re-asserting */
                mlog(0, "just got an assert_master from %u, but no "
                     "MLE for it! (%.*s)\n", assert->node_idx,
                     namelen, name);
        } else {
                int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES) {
                        /* not necessarily an error, though less likely.
                         * could be master just re-asserting. */
                        mlog(0, "no bits set in the maybe_map, but %u "
                             "is asserting! (%.*s)\n", assert->node_idx,
                             namelen, name);
                } else if (bit != assert->node_idx) {
                        if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
                                mlog(0, "master %u was found, %u should "
                                     "back off\n", assert->node_idx, bit);
                        } else {
                                /* with the fix for bug 569, a higher node
                                 * number winning the mastery will respond
                                 * YES to mastery requests, but this node
                                 * had no way of knowing.  let it pass. */
                                mlog(0, "%u is the lowest node, "
                                     "%u is asserting. (%.*s)  %u must "
                                     "have begun after %u won.\n", bit,
                                     assert->node_idx, namelen, name, bit,
                                     assert->node_idx);
                        }
                }
                if (mle->type == DLM_MLE_MIGRATION) {
                        if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
                                mlog(0, "%s:%.*s: got cleanup assert"
                                     " from %u for migration\n",
                                     dlm->name, namelen, name,
                                     assert->node_idx);
                        } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
                                mlog(0, "%s:%.*s: got unrelated assert"
                                     " from %u for migration, ignoring\n",
                                     dlm->name, namelen, name,
                                     assert->node_idx);
                                __dlm_put_mle(mle);
                                spin_unlock(&dlm->master_lock);
                                spin_unlock(&dlm->spinlock);
                                goto done;
                        }
                }
        }
        spin_unlock(&dlm->master_lock);

        /* ok everything checks out with the MLE
         * now check to see if there is a lockres */
        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
        if (res) {
                spin_lock(&res->spinlock);
                if (res->state & DLM_LOCK_RES_RECOVERING)  {
                        mlog(ML_ERROR, "%u asserting but %.*s is "
                             "RECOVERING!\n", assert->node_idx, namelen, name);
                        goto kill;
                }
                if (!mle) {
                        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
                            res->owner != assert->node_idx) {
                                mlog(ML_ERROR, "DIE! Mastery assert from %u, "
                                     "but current owner is %u! (%.*s)\n",
                                     assert->node_idx, res->owner, namelen,
                                     name);
                                __dlm_print_one_lock_resource(res);
                                BUG();
                        }
                } else if (mle->type != DLM_MLE_MIGRATION) {
                        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                                /* owner is just re-asserting */
                                if (res->owner == assert->node_idx) {
                                        mlog(0, "owner %u re-asserting on "
                                             "lock %.*s\n", assert->node_idx,
                                             namelen, name);
                                        goto ok;
                                }
                                mlog(ML_ERROR, "got assert_master from "
                                     "node %u, but %u is the owner! "
                                     "(%.*s)\n", assert->node_idx,
                                     res->owner, namelen, name);
                                goto kill;
                        }
                        if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
                                mlog(ML_ERROR, "got assert from %u, but lock "
                                     "with no owner should be "
                                     "in-progress! (%.*s)\n",
                                     assert->node_idx,
                                     namelen, name);
                                goto kill;
                        }
                } else /* mle->type == DLM_MLE_MIGRATION */ {
                        /* should only be getting an assert from new master */
                        if (assert->node_idx != mle->new_master) {
                                mlog(ML_ERROR, "got assert from %u, but "
                                     "new master is %u, and old master "
                                     "was %u (%.*s)\n",
                                     assert->node_idx, mle->new_master,
                                     mle->master, namelen, name);
                                goto kill;
                        }
                }
ok:
                spin_unlock(&res->spinlock);
        }
        spin_unlock(&dlm->spinlock);

        // mlog(0, "woo!  got an assert_master from node %u!\n",
        //           assert->node_idx);
        if (mle) {
                int extra_ref = 0;
                int nn = -1;
                int rr, err = 0;

                spin_lock(&mle->spinlock);
                if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
                        extra_ref = 1;
                else {
                        /* MASTER mle: if any bits set in the response map
                         * then the calling node needs to re-assert to clear
                         * up nodes that this node contacted */
                        while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
                                                   nn+1)) < O2NM_MAX_NODES) {
                                if (nn != dlm->node_num && nn != assert->node_idx)
                                        master_request = 1;
                        }
                }
                mle->master = assert->node_idx;
                atomic_set(&mle->woken, 1);
                wake_up(&mle->wq);
                spin_unlock(&mle->spinlock);

                if (res) {
                        int wake = 0;
                        spin_lock(&res->spinlock);
                        if (mle->type == DLM_MLE_MIGRATION) {
                                mlog(0, "finishing off migration of lockres %.*s, "
                                     "from %u to %u\n",
                                     res->lockname.len, res->lockname.name,
                                     dlm->node_num, mle->new_master);
                                res->state &= ~DLM_LOCK_RES_MIGRATING;
                                wake = 1;
                                dlm_change_lockres_owner(dlm, res, mle->new_master);
                                BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
                        } else {
                                dlm_change_lockres_owner(dlm, res, mle->master);
                        }
                        spin_unlock(&res->spinlock);
                        have_lockres_ref = 1;
                        if (wake)
                                wake_up(&res->wq);
                }

                /* master is known, detach if not already detached.
                 * ensures that only one assert_master call will happen
                 * on this mle. */
                spin_lock(&dlm->spinlock);
                spin_lock(&dlm->master_lock);
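
                /* Added note: sanity-check the refcount before the puts
                 * below.  Given whether the mle is pinned ("inuse") and
                 * whether it carries the extra BLOCK/MIGRATION ref that
                 * this assert balances, the count must not be below the
                 * minimum expected, or the puts would free it early. */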
1928 rr = atomic_read(&mle->mle_refs.refcount);
1929 if (mle->inuse > 0) {
1930 if (extra_ref && rr < 3)
1931 err = 1;
1932 else if (!extra_ref && rr < 2)
1933 err = 1;
1934 } else {
1935 if (extra_ref && rr < 2)
1936 err = 1;
1937 else if (!extra_ref && rr < 1)
1938 err = 1;
1940 if (err) {
1941 mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1942 "that will mess up this node, refs=%d, extra=%d, "
1943 "inuse=%d\n", dlm->name, namelen, name,
1944 assert->node_idx, rr, extra_ref, mle->inuse);
1945 dlm_print_one_mle(mle);
1947 __dlm_unlink_mle(dlm, mle);
1948 __dlm_mle_detach_hb_events(dlm, mle);
1949 __dlm_put_mle(mle);
1950 if (extra_ref) {
1951 /* the assert master message now balances the extra
1952 * ref given by the master / migration request message.
1953 * if this is the last put, it will be removed
1954 * from the list. */
1955 __dlm_put_mle(mle);
1957 spin_unlock(&dlm->master_lock);
1958 spin_unlock(&dlm->spinlock);
1959 } else if (res) {
1960 if (res->owner != assert->node_idx) {
1961 mlog(0, "assert_master from %u, but current "
1962 "owner is %u (%.*s), no mle\n", assert->node_idx,
1963 res->owner, namelen, name);
1967 done:
1968 ret = 0;
1969 if (res) {
1970 spin_lock(&res->spinlock);
1971 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1972 spin_unlock(&res->spinlock);
1973 *ret_data = (void *)res;
1975 dlm_put(dlm);
1976 if (master_request) {
1977 mlog(0, "need to tell master to reassert\n");
1978 /* positive. negative would shoot down the node. */
1979 ret |= DLM_ASSERT_RESPONSE_REASSERT;
1980 if (!have_lockres_ref) {
1981 mlog(ML_ERROR, "strange, got assert from %u, MASTER "
1982 "mle present here for %s:%.*s, but no lockres!\n",
1983 assert->node_idx, dlm->name, namelen, name);
1984 }
1985 }
1986 if (have_lockres_ref) {
1987 /* let the master know we have a reference to the lockres */
1988 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
1989 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
1990 dlm->name, namelen, name, assert->node_idx);
1991 }
1992 return ret;
1994 kill:
1995 /* kill the caller! */
1996 mlog(ML_ERROR, "Bad message received from another node. Dumping state "
1997 "and killing the other node now! This node is OK and can continue.\n");
1998 __dlm_print_one_lock_resource(res);
1999 spin_unlock(&res->spinlock);
2000 spin_unlock(&dlm->spinlock);
2001 *ret_data = (void *)res;
2002 dlm_put(dlm);
2003 return -EINVAL;
2004 }
2006 void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2007 {
2008 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2010 if (ret_data) {
2011 spin_lock(&res->spinlock);
2012 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2013 spin_unlock(&res->spinlock);
2014 wake_up(&res->wq);
2015 dlm_lockres_put(res);
2016 }
2017 return;
2018 }
2020 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2021 struct dlm_lock_resource *res,
2022 int ignore_higher, u8 request_from, u32 flags)
2023 {
2024 struct dlm_work_item *item;
2025 item = kzalloc(sizeof(*item), GFP_NOFS);
2026 if (!item)
2027 return -ENOMEM;
2030 /* queue up work for dlm_assert_master_worker */
2031 dlm_grab(dlm); /* get an extra ref for the work item */
2032 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2033 item->u.am.lockres = res; /* already have a ref */
2034 /* can optionally ignore node numbers higher than this node */
2035 item->u.am.ignore_higher = ignore_higher;
2036 item->u.am.request_from = request_from;
2037 item->u.am.flags = flags;
2039 if (ignore_higher)
2040 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2041 res->lockname.name);
2043 spin_lock(&dlm->work_lock);
2044 list_add_tail(&item->list, &dlm->work_list);
2045 spin_unlock(&dlm->work_lock);
2047 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2048 return 0;
2049 }
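/*
 * Usage sketch (illustrative, not from the original file): the caller
 * must already hold a reference on "res", which the work item takes
 * over.  On -ENOMEM nothing was queued, so the caller still owns it:
 *
 *	ret = dlm_dispatch_assert_master(dlm, res, 0, request_from, flags);
 *	if (ret < 0) {
 *		mlog_errno(ret);
 *		dlm_lockres_put(res);	(hypothetical caller-side cleanup)
 *	}
 */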
2051 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2052 {
2053 struct dlm_ctxt *dlm = data;
2054 int ret = 0;
2055 struct dlm_lock_resource *res;
2056 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2057 int ignore_higher;
2058 int bit;
2059 u8 request_from;
2060 u32 flags;
2062 dlm = item->dlm;
2063 res = item->u.am.lockres;
2064 ignore_higher = item->u.am.ignore_higher;
2065 request_from = item->u.am.request_from;
2066 flags = item->u.am.flags;
2068 spin_lock(&dlm->spinlock);
2069 memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2070 spin_unlock(&dlm->spinlock);
2072 clear_bit(dlm->node_num, nodemap);
2073 if (ignore_higher) {
2074 /* if this is just to clear up mles for nodes below
2075 * this node, do not send the message to the original
2076 * caller or any node number higher than this */
2077 clear_bit(request_from, nodemap);
2078 bit = dlm->node_num;
2079 while (1) {
2080 bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2081 bit+1);
2082 if (bit >= O2NM_MAX_NODES)
2083 break;
2084 clear_bit(bit, nodemap);
2085 }
2086 }
2088 /*
2089 * If we're migrating this lock to someone else, we are no
2090 * longer allowed to assert our own mastery. OTOH, we need to
2091 * prevent migration from starting while we're still asserting
2092 * our dominance. The reserved ast delays migration.
2093 */
2094 spin_lock(&res->spinlock);
2095 if (res->state & DLM_LOCK_RES_MIGRATING) {
2096 mlog(0, "Someone asked us to assert mastery, but we're "
2097 "in the middle of migration. Skipping assert, "
2098 "the new master will handle that.\n");
2099 spin_unlock(&res->spinlock);
2100 goto put;
2101 } else
2102 __dlm_lockres_reserve_ast(res);
2103 spin_unlock(&res->spinlock);
2105 /* this call now finishes out the nodemap
2106 * even if one or more nodes die */
2107 mlog(0, "worker about to master %.*s here, this=%u\n",
2108 res->lockname.len, res->lockname.name, dlm->node_num);
2109 ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2110 if (ret < 0) {
2111 /* no need to restart, we are done */
2112 if (!dlm_is_host_down(ret))
2113 mlog_errno(ret);
2114 }
2116 /* Ok, we've asserted ourselves. Let's let migration start. */
2117 dlm_lockres_release_ast(dlm, res);
2119 put:
2120 dlm_lockres_put(res);
2122 mlog(0, "finished with dlm_assert_master_worker\n");
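/*
 * NOTE (editorial): the worker asserts against a snapshot of
 * dlm->domain_map taken under dlm->spinlock, with this node (and, in
 * the ignore_higher case, the requester and all higher node numbers)
 * cleared first.  dlm_do_assert_master() then walks whatever bits
 * remain, so nodes that die mid-walk are simply skipped.
 */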
2125 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2126 * We cannot wait for node recovery to complete to begin mastering this
2127 * lockres because this lockres is used to kick off recovery! ;-)
2128 * So, do a pre-check on all living nodes to see if any of those nodes
2129 * think that $RECOVERY is currently mastered by a dead node. If so,
2130 * we wait a short time to allow that node to get notified by its own
2131 * heartbeat stack, then check again. All $RECOVERY lock resources
2132 * mastered by dead nodes are purged when the heartbeat callback is
2133 * fired, so we can know for sure that it is safe to continue once
2134 * the node returns a live node or no node. */
2135 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2136 struct dlm_lock_resource *res)
2137 {
2138 struct dlm_node_iter iter;
2139 int nodenum;
2140 int ret = 0;
2141 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2143 spin_lock(&dlm->spinlock);
2144 dlm_node_iter_init(dlm->domain_map, &iter);
2145 spin_unlock(&dlm->spinlock);
2147 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2148 /* do not send to self */
2149 if (nodenum == dlm->node_num)
2150 continue;
2151 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2152 if (ret < 0) {
2153 mlog_errno(ret);
2154 if (!dlm_is_host_down(ret))
2155 BUG();
2156 /* host is down, so answer for that node would be
2157 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
2158 ret = 0;
2159 }
2161 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2162 /* check to see if this master is in the recovery map */
2163 spin_lock(&dlm->spinlock);
2164 if (test_bit(master, dlm->recovery_map)) {
2165 mlog(ML_NOTICE, "%s: node %u has not seen "
2166 "node %u go down yet, and thinks the "
2167 "dead node is mastering the recovery "
2168 "lock. must wait.\n", dlm->name,
2169 nodenum, master);
2170 ret = -EAGAIN;
2171 }
2172 spin_unlock(&dlm->spinlock);
2173 mlog(0, "%s: reco lock master is %u\n", dlm->name,
2174 master);
2175 break;
2176 }
2177 }
2178 return ret;
2179 }
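/*
 * Caller-side sketch (an assumption about usage, not code from this
 * file): the recovery-lock mastery path is expected to poll until no
 * live node still believes a dead node masters $RECOVERY:
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN)
 *		msleep(100);	(delay value illustrative)
 */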
2181 /*
2182 * DLM_DEREF_LOCKRES_MSG
2183 */
2185 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2186 {
2187 struct dlm_deref_lockres deref;
2188 int ret = 0, r;
2189 const char *lockname;
2190 unsigned int namelen;
2192 lockname = res->lockname.name;
2193 namelen = res->lockname.len;
2194 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2196 mlog(0, "%s:%.*s: sending deref to %d\n",
2197 dlm->name, namelen, lockname, res->owner);
2198 memset(&deref, 0, sizeof(deref));
2199 deref.node_idx = dlm->node_num;
2200 deref.namelen = namelen;
2201 memcpy(deref.name, lockname, namelen);
2203 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2204 &deref, sizeof(deref), res->owner, &r);
2205 if (ret < 0)
2206 mlog_errno(ret);
2207 else if (r < 0) {
2208 /* BAD. other node says I did not have a ref. */
2209 mlog(ML_ERROR,"while dropping ref on %s:%.*s "
2210 "(master=%u) got %d.\n", dlm->name, namelen,
2211 lockname, res->owner, r);
2212 dlm_print_one_lock_resource(res);
2213 BUG();
2214 }
2215 return ret;
2216 }
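/*
 * NOTE (editorial): a negative status from the master means it has no
 * record of this node in its refmap.  The two nodes then disagree
 * about whether the lockres may be freed, so the code above treats
 * that as fatal (BUG) rather than trying to limp along.
 */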
2218 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2219 void **ret_data)
2220 {
2221 struct dlm_ctxt *dlm = data;
2222 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2223 struct dlm_lock_resource *res = NULL;
2224 char *name;
2225 unsigned int namelen;
2226 int ret = -EINVAL;
2227 u8 node;
2228 unsigned int hash;
2229 struct dlm_work_item *item;
2230 int cleared = 0;
2231 int dispatch = 0;
2233 if (!dlm_grab(dlm))
2234 return 0;
2236 name = deref->name;
2237 namelen = deref->namelen;
2238 node = deref->node_idx;
2240 if (namelen > DLM_LOCKID_NAME_MAX) {
2241 mlog(ML_ERROR, "Invalid name length!");
2242 goto done;
2243 }
2244 if (deref->node_idx >= O2NM_MAX_NODES) {
2245 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2246 goto done;
2247 }
2249 hash = dlm_lockid_hash(name, namelen);
2251 spin_lock(&dlm->spinlock);
2252 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2253 if (!res) {
2254 spin_unlock(&dlm->spinlock);
2255 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2256 dlm->name, namelen, name);
2257 goto done;
2258 }
2259 spin_unlock(&dlm->spinlock);
2261 spin_lock(&res->spinlock);
2262 if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2263 dispatch = 1;
2264 else {
2265 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2266 if (test_bit(node, res->refmap)) {
2267 dlm_lockres_clear_refmap_bit(node, res);
2268 cleared = 1;
2269 }
2270 }
2271 spin_unlock(&res->spinlock);
2273 if (!dispatch) {
2274 if (cleared)
2275 dlm_lockres_calc_usage(dlm, res);
2276 else {
2277 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2278 "but it is already dropped!\n", dlm->name,
2279 res->lockname.len, res->lockname.name, node);
2280 dlm_print_one_lock_resource(res);
2281 }
2282 ret = 0;
2283 goto done;
2284 }
2286 item = kzalloc(sizeof(*item), GFP_NOFS);
2287 if (!item) {
2288 ret = -ENOMEM;
2289 mlog_errno(ret);
2290 goto done;
2291 }
2293 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2294 item->u.dl.deref_res = res;
2295 item->u.dl.deref_node = node;
2297 spin_lock(&dlm->work_lock);
2298 list_add_tail(&item->list, &dlm->work_list);
2299 spin_unlock(&dlm->work_lock);
2301 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2302 return 0;
2304 done:
2305 if (res)
2306 dlm_lockres_put(res);
2307 dlm_put(dlm);
2309 return ret;
2310 }
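/*
 * NOTE (editorial): if DLM_LOCK_RES_SETREF_INPROG is set, an
 * assert_master response is still in flight and may yet set our node's
 * refmap bit, so clearing it inline could race.  The handler instead
 * queues dlm_deref_lockres_worker(), which waits for the flag to clear
 * (via __dlm_wait_on_lockres_flags) before touching the refmap.
 */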
2312 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2313 {
2314 struct dlm_ctxt *dlm;
2315 struct dlm_lock_resource *res;
2316 u8 node;
2317 u8 cleared = 0;
2319 dlm = item->dlm;
2320 res = item->u.dl.deref_res;
2321 node = item->u.dl.deref_node;
2323 spin_lock(&res->spinlock);
2324 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2325 if (test_bit(node, res->refmap)) {
2326 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2327 dlm_lockres_clear_refmap_bit(node, res);
2328 cleared = 1;
2329 }
2330 spin_unlock(&res->spinlock);
2332 if (cleared) {
2333 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2334 dlm->name, res->lockname.len, res->lockname.name, node);
2335 dlm_lockres_calc_usage(dlm, res);
2336 } else {
2337 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2338 "but it is already dropped!\n", dlm->name,
2339 res->lockname.len, res->lockname.name, node);
2340 dlm_print_one_lock_resource(res);
2341 }
2343 dlm_lockres_put(res);
2344 }
2346 /* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
2347 * if not. If 0, numlocks is set to the number of locks in the lockres.
2348 */
2349 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2350 struct dlm_lock_resource *res,
2351 int *numlocks)
2352 {
2353 int ret;
2354 int i;
2355 int count = 0;
2356 struct list_head *queue;
2357 struct dlm_lock *lock;
2359 assert_spin_locked(&res->spinlock);
2361 ret = -EINVAL;
2362 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2363 mlog(0, "cannot migrate lockres with unknown owner!\n");
2364 goto leave;
2365 }
2367 if (res->owner != dlm->node_num) {
2368 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2369 goto leave;
2370 }
2372 ret = 0;
2373 queue = &res->granted;
2374 for (i = 0; i < 3; i++) {
2375 list_for_each_entry(lock, queue, list) {
2376 ++count;
2377 if (lock->ml.node == dlm->node_num) {
2378 mlog(0, "found a lock owned by this node still "
2379 "on the %s queue! will not migrate this "
2380 "lockres\n", (i == 0 ? "granted" :
2381 (i == 1 ? "converting" :
2382 "blocked")));
2383 ret = -ENOTEMPTY;
2384 goto leave;
2385 }
2386 }
2387 queue++;
2388 }
2390 *numlocks = count;
2391 mlog(0, "migrateable lockres having %d locks\n", *numlocks);
2393 leave:
2394 return ret;
2395 }
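/*
 * Usage sketch (illustrative): callers hold res->spinlock, and a zero
 * return with numlocks == 0 means there is nothing worth migrating:
 *
 *	spin_lock(&res->spinlock);
 *	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
 *	spin_unlock(&res->spinlock);
 *	if (ret >= 0 && numlocks == 0)
 *		goto leave;	(nothing to do)
 */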
2397 /*
2398 * DLM_MIGRATE_LOCKRES
2399 */
2402 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2403 struct dlm_lock_resource *res,
2404 u8 target)
2405 {
2406 struct dlm_master_list_entry *mle = NULL;
2407 struct dlm_master_list_entry *oldmle = NULL;
2408 struct dlm_migratable_lockres *mres = NULL;
2409 int ret = 0;
2410 const char *name;
2411 unsigned int namelen;
2412 int mle_added = 0;
2413 int numlocks;
2414 int wake = 0;
2416 if (!dlm_grab(dlm))
2417 return -EINVAL;
2419 name = res->lockname.name;
2420 namelen = res->lockname.len;
2422 mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2424 /*
2425 * ensure this lockres is a proper candidate for migration
2426 */
2427 spin_lock(&res->spinlock);
2428 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2429 if (ret < 0) {
2430 spin_unlock(&res->spinlock);
2431 goto leave;
2432 }
2433 spin_unlock(&res->spinlock);
2435 /* no work to do */
2436 if (numlocks == 0) {
2437 mlog(0, "no locks were found on this lockres! done!\n");
2438 goto leave;
2439 }
2441 /*
2442 * preallocate up front
2443 * if this fails, abort
2444 */
2446 ret = -ENOMEM;
2447 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2448 if (!mres) {
2449 mlog_errno(ret);
2450 goto leave;
2451 }
2453 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2454 GFP_NOFS);
2455 if (!mle) {
2456 mlog_errno(ret);
2457 goto leave;
2458 }
2459 ret = 0;
2461 /*
2462 * find a node to migrate the lockres to
2463 */
2465 mlog(0, "picking a migration node\n");
2466 spin_lock(&dlm->spinlock);
2467 /* pick a new node */
2468 if (!test_bit(target, dlm->domain_map) ||
2469 target >= O2NM_MAX_NODES) {
2470 target = dlm_pick_migration_target(dlm, res);
2471 }
2472 mlog(0, "node %u chosen for migration\n", target);
2474 if (target >= O2NM_MAX_NODES ||
2475 !test_bit(target, dlm->domain_map)) {
2476 /* target chosen is not alive */
2477 ret = -EINVAL;
2478 }
2480 if (ret) {
2481 spin_unlock(&dlm->spinlock);
2482 goto fail;
2483 }
2485 mlog(0, "continuing with target = %u\n", target);
2487 /*
2488 * clear any existing master requests and
2489 * add the migration mle to the list
2490 */
2491 spin_lock(&dlm->master_lock);
2492 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2493 namelen, target, dlm->node_num);
2494 spin_unlock(&dlm->master_lock);
2495 spin_unlock(&dlm->spinlock);
2497 if (ret == -EEXIST) {
2498 mlog(0, "another process is already migrating it\n");
2499 goto fail;
2500 }
2501 mle_added = 1;
2503 /*
2504 * set the MIGRATING flag and flush asts
2505 * if we fail after this we need to re-dirty the lockres
2506 */
2507 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2508 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2509 "the target went down.\n", res->lockname.len,
2510 res->lockname.name, target);
2511 spin_lock(&res->spinlock);
2512 res->state &= ~DLM_LOCK_RES_MIGRATING;
2513 wake = 1;
2514 spin_unlock(&res->spinlock);
2515 ret = -EINVAL;
2516 }
2518 fail:
2519 if (oldmle) {
2520 /* master is known, detach if not already detached */
2521 dlm_mle_detach_hb_events(dlm, oldmle);
2522 dlm_put_mle(oldmle);
2523 }
2525 if (ret < 0) {
2526 if (mle_added) {
2527 dlm_mle_detach_hb_events(dlm, mle);
2528 dlm_put_mle(mle);
2529 } else if (mle) {
2530 kmem_cache_free(dlm_mle_cache, mle);
2531 }
2532 goto leave;
2533 }
2535 /*
2536 * at this point, we have a migration target, an mle
2537 * in the master list, and the MIGRATING flag set on
2538 * the lockres
2539 */
2541 /* now that remote nodes are spinning on the MIGRATING flag,
2542 * ensure that all assert_master work is flushed. */
2543 flush_workqueue(dlm->dlm_worker);
2545 /* get an extra reference on the mle.
2546 * otherwise the assert_master from the new
2547 * master will destroy this.
2548 * also, make sure that all callers of dlm_get_mle
2549 * take both dlm->spinlock and dlm->master_lock */
2550 spin_lock(&dlm->spinlock);
2551 spin_lock(&dlm->master_lock);
2552 dlm_get_mle_inuse(mle);
2553 spin_unlock(&dlm->master_lock);
2554 spin_unlock(&dlm->spinlock);
2556 /* notify new node and send all lock state */
2557 /* call send_one_lockres with migration flag.
2558 * this serves as notice to the target node that a
2559 * migration is starting. */
2560 ret = dlm_send_one_lockres(dlm, res, mres, target,
2561 DLM_MRES_MIGRATION);
2563 if (ret < 0) {
2564 mlog(0, "migration to node %u failed with %d\n",
2565 target, ret);
2566 /* migration failed, detach and clean up mle */
2567 dlm_mle_detach_hb_events(dlm, mle);
2568 dlm_put_mle(mle);
2569 dlm_put_mle_inuse(mle);
2570 spin_lock(&res->spinlock);
2571 res->state &= ~DLM_LOCK_RES_MIGRATING;
2572 wake = 1;
2573 spin_unlock(&res->spinlock);
2574 goto leave;
2575 }
2577 /* at this point, the target sends a message to all nodes,
2578 * (using dlm_do_migrate_request). this node is skipped since
2579 * we had to put an mle in the list to begin the process. this
2580 * node now waits for target to do an assert master. this node
2581 * will be the last one notified, ensuring that the migration
2582 * is complete everywhere. if the target dies while this is
2583 * going on, some nodes could potentially see the target as the
2584 * master, so it is important that my recovery finds the migration
2585 * mle and sets the master to UNKNOWN. */
2588 /* wait for new node to assert master */
2589 while (1) {
2590 ret = wait_event_interruptible_timeout(mle->wq,
2591 (atomic_read(&mle->woken) == 1),
2592 msecs_to_jiffies(5000));
2594 if (ret >= 0) {
2595 if (atomic_read(&mle->woken) == 1 ||
2596 res->owner == target)
2597 break;
2599 mlog(0, "%s:%.*s: timed out during migration\n",
2600 dlm->name, res->lockname.len, res->lockname.name);
2601 /* avoid hang during shutdown when migrating lockres
2602 * to a node which also goes down */
2603 if (dlm_is_node_dead(dlm, target)) {
2604 mlog(0, "%s:%.*s: expected migration "
2605 "target %u is no longer up, restarting\n",
2606 dlm->name, res->lockname.len,
2607 res->lockname.name, target);
2608 ret = -EINVAL;
2609 /* migration failed, detach and clean up mle */
2610 dlm_mle_detach_hb_events(dlm, mle);
2611 dlm_put_mle(mle);
2612 dlm_put_mle_inuse(mle);
2613 spin_lock(&res->spinlock);
2614 res->state &= ~DLM_LOCK_RES_MIGRATING;
2615 wake = 1;
2616 spin_unlock(&res->spinlock);
2617 goto leave;
2618 }
2619 } else
2620 mlog(0, "%s:%.*s: caught signal during migration\n",
2621 dlm->name, res->lockname.len, res->lockname.name);
2622 }
2624 /* all done, set the owner, clear the flag */
2625 spin_lock(&res->spinlock);
2626 dlm_set_lockres_owner(dlm, res, target);
2627 res->state &= ~DLM_LOCK_RES_MIGRATING;
2628 dlm_remove_nonlocal_locks(dlm, res);
2629 spin_unlock(&res->spinlock);
2630 wake_up(&res->wq);
2632 /* master is known, detach if not already detached */
2633 dlm_mle_detach_hb_events(dlm, mle);
2634 dlm_put_mle_inuse(mle);
2635 ret = 0;
2637 dlm_lockres_calc_usage(dlm, res);
2639 leave:
2640 /* re-dirty the lockres if we failed */
2641 if (ret < 0)
2642 dlm_kick_thread(dlm, res);
2644 /* wake up waiters if the MIGRATING flag got set
2645 * but migration failed */
2646 if (wake)
2647 wake_up(&res->wq);
2649 /* TODO: cleanup */
2650 if (mres)
2651 free_page((unsigned long)mres);
2653 dlm_put(dlm);
2655 mlog(0, "returning %d\n", ret);
2656 return ret;
2657 }
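/*
 * Editorial summary of the migration protocol implemented above:
 *  1. check migrateability and preallocate mres + mle
 *  2. publish a MIGRATION mle (dlm_add_migration_mle)
 *  3. set DLM_LOCK_RES_MIGRATING once all asts are flushed
 *  4. send the lock state to the target (DLM_MRES_MIGRATION)
 *  5. wait for the target's assert_master, then hand over ownership
 * Any failure after step 3 must clear MIGRATING and wake waiters,
 * which is what the "wake" flag tracks.
 */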
2659 #define DLM_MIGRATION_RETRY_MS 100
2661 /* Should be called only after beginning the domain leave process.
2662 * There should not be any remaining locks on nonlocal lock resources,
2663 * and there should be no local locks left on locally mastered resources.
2665 * Called with the dlm spinlock held, may drop it to do migration, but
2666 * will re-acquire before exit.
2668 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2669 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2670 {
2671 int ret;
2672 int lock_dropped = 0;
2673 int numlocks;
2675 spin_lock(&res->spinlock);
2676 if (res->owner != dlm->node_num) {
2677 if (!__dlm_lockres_unused(res)) {
2678 mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2679 "trying to free this but locks remain\n",
2680 dlm->name, res->lockname.len, res->lockname.name);
2681 }
2682 spin_unlock(&res->spinlock);
2683 goto leave;
2684 }
2686 /* No need to migrate a lockres having no locks */
2687 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2688 if (ret >= 0 && numlocks == 0) {
2689 spin_unlock(&res->spinlock);
2690 goto leave;
2691 }
2692 spin_unlock(&res->spinlock);
2694 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2695 spin_unlock(&dlm->spinlock);
2696 lock_dropped = 1;
2697 while (1) {
2698 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2699 if (ret >= 0)
2700 break;
2701 if (ret == -ENOTEMPTY) {
2702 mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2703 res->lockname.len, res->lockname.name);
2704 BUG();
2705 }
2707 mlog(0, "lockres %.*s: migrate failed, "
2708 "retrying\n", res->lockname.len,
2709 res->lockname.name);
2710 msleep(DLM_MIGRATION_RETRY_MS);
2711 }
2712 spin_lock(&dlm->spinlock);
2713 leave:
2714 return lock_dropped;
2715 }
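/*
 * NOTE (editorial): the return value only says whether dlm->spinlock
 * was dropped.  A caller iterating the lockres hash under that lock
 * must treat a return of 1 as invalidating its iterator and restart
 * or revalidate its scan.
 */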
2717 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2718 {
2719 int ret;
2720 spin_lock(&dlm->ast_lock);
2721 spin_lock(&lock->spinlock);
2722 ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2723 spin_unlock(&lock->spinlock);
2724 spin_unlock(&dlm->ast_lock);
2725 return ret;
2726 }
2728 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2729 struct dlm_lock_resource *res,
2730 u8 mig_target)
2731 {
2732 int can_proceed;
2733 spin_lock(&res->spinlock);
2734 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2735 spin_unlock(&res->spinlock);
2737 /* target has died, so make the caller break out of the
2738 * wait_event, but caller must recheck the domain_map */
2739 spin_lock(&dlm->spinlock);
2740 if (!test_bit(mig_target, dlm->domain_map))
2741 can_proceed = 1;
2742 spin_unlock(&dlm->spinlock);
2743 return can_proceed;
2744 }
2746 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2747 struct dlm_lock_resource *res)
2748 {
2749 int ret;
2750 spin_lock(&res->spinlock);
2751 ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2752 spin_unlock(&res->spinlock);
2753 return ret;
2754 }
2757 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2758 struct dlm_lock_resource *res,
2759 u8 target)
2760 {
2761 int ret = 0;
2763 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2764 res->lockname.len, res->lockname.name, dlm->node_num,
2765 target);
2766 /* need to set MIGRATING flag on lockres. this is done by
2767 * ensuring that all asts have been flushed for this lockres. */
2768 spin_lock(&res->spinlock);
2769 BUG_ON(res->migration_pending);
2770 res->migration_pending = 1;
2771 /* strategy is to reserve an extra ast then release
2772 * it below, letting the release do all of the work */
2773 __dlm_lockres_reserve_ast(res);
2774 spin_unlock(&res->spinlock);
2776 /* now flush all the pending asts */
2777 dlm_kick_thread(dlm, res);
2778 /* before waiting on DIRTY, block processes which may
2779 * try to dirty the lockres before MIGRATING is set */
2780 spin_lock(&res->spinlock);
2781 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2782 res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2783 spin_unlock(&res->spinlock);
2784 /* now wait on any pending asts and the DIRTY state */
2785 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2786 dlm_lockres_release_ast(dlm, res);
2788 mlog(0, "about to wait on migration_wq, dirty=%s\n",
2789 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2790 /* if the extra ref we just put was the final one, this
2791 * will pass thru immediately. otherwise, we need to wait
2792 * for the last ast to finish. */
2793 again:
2794 ret = wait_event_interruptible_timeout(dlm->migration_wq,
2795 dlm_migration_can_proceed(dlm, res, target),
2796 msecs_to_jiffies(1000));
2797 if (ret < 0) {
2798 mlog(0, "woken again: migrating? %s, dead? %s\n",
2799 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2800 test_bit(target, dlm->domain_map) ? "no":"yes");
2801 } else {
2802 mlog(0, "all is well: migrating? %s, dead? %s\n",
2803 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2804 test_bit(target, dlm->domain_map) ? "no":"yes");
2805 }
2806 if (!dlm_migration_can_proceed(dlm, res, target)) {
2807 mlog(0, "trying again...\n");
2808 goto again;
2809 }
2810 /* now that we are sure the MIGRATING state is there, drop
2811 * the unneeded state which blocked threads trying to DIRTY */
2812 spin_lock(&res->spinlock);
2813 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2814 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2815 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2816 spin_unlock(&res->spinlock);
2818 /* did the target go down or die? */
2819 spin_lock(&dlm->spinlock);
2820 if (!test_bit(target, dlm->domain_map)) {
2821 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2822 target);
2823 ret = -EHOSTDOWN;
2824 }
2825 spin_unlock(&dlm->spinlock);
2827 /*
2828 * at this point:
2829 *
2830 * o the DLM_LOCK_RES_MIGRATING flag is set
2831 * o there are no pending asts on this lockres
2832 * o all processes trying to reserve an ast on this
2833 * lockres must wait for the MIGRATING flag to clear
2834 */
2835 return ret;
2836 }
2838 /* last step in the migration process.
2839 * original master calls this to free all of the dlm_lock
2840 * structures that used to be for other nodes. */
2841 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2842 struct dlm_lock_resource *res)
2843 {
2844 struct list_head *queue = &res->granted;
2845 int i, bit;
2846 struct dlm_lock *lock, *next;
2848 assert_spin_locked(&res->spinlock);
2850 BUG_ON(res->owner == dlm->node_num);
2852 for (i=0; i<3; i++) {
2853 list_for_each_entry_safe(lock, next, queue, list) {
2854 if (lock->ml.node != dlm->node_num) {
2855 mlog(0, "putting lock for node %u\n",
2856 lock->ml.node);
2857 /* be extra careful */
2858 BUG_ON(!list_empty(&lock->ast_list));
2859 BUG_ON(!list_empty(&lock->bast_list));
2860 BUG_ON(lock->ast_pending);
2861 BUG_ON(lock->bast_pending);
2862 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
2863 list_del_init(&lock->list);
2864 dlm_lock_put(lock);
2865 /* In a normal unlock, we would have added a
2866 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2867 dlm_lock_put(lock);
2868 }
2869 }
2870 queue++;
2871 }
2872 bit = 0;
2873 while (1) {
2874 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2875 if (bit >= O2NM_MAX_NODES)
2876 break;
2877 /* do not clear the local node reference, if there is a
2878 * process holding this, let it drop the ref itself */
2879 if (bit != dlm->node_num) {
2880 mlog(0, "%s:%.*s: node %u had a ref to this "
2881 "migrating lockres, clearing\n", dlm->name,
2882 res->lockname.len, res->lockname.name, bit);
2883 dlm_lockres_clear_refmap_bit(bit, res);
2884 }
2885 bit++;
2886 }
2887 }
2889 /* for now this is not too intelligent. we will
2890 * need stats to make this do the right thing.
2891 * this just finds the first lock on one of the
2892 * queues and uses that node as the target. */
2893 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2894 struct dlm_lock_resource *res)
2895 {
2896 int i;
2897 struct list_head *queue = &res->granted;
2898 struct dlm_lock *lock;
2899 int nodenum;
2901 assert_spin_locked(&dlm->spinlock);
2903 spin_lock(&res->spinlock);
2904 for (i=0; i<3; i++) {
2905 list_for_each_entry(lock, queue, list) {
2906 /* up to the caller to make sure this node
2907 * is alive */
2908 if (lock->ml.node != dlm->node_num) {
2909 spin_unlock(&res->spinlock);
2910 return lock->ml.node;
2911 }
2912 }
2913 queue++;
2914 }
2915 spin_unlock(&res->spinlock);
2916 mlog(0, "have not found a suitable target yet! checking domain map\n");
2918 /* ok now we're getting desperate. pick anyone alive. */
2919 nodenum = -1;
2920 while (1) {
2921 nodenum = find_next_bit(dlm->domain_map,
2922 O2NM_MAX_NODES, nodenum+1);
2923 mlog(0, "found %d in domain map\n", nodenum);
2924 if (nodenum >= O2NM_MAX_NODES)
2925 break;
2926 if (nodenum != dlm->node_num) {
2927 mlog(0, "picking %d\n", nodenum);
2928 return nodenum;
2929 }
2930 }
2932 mlog(0, "giving up. no master to migrate to\n");
2933 return DLM_LOCK_RES_OWNER_UNKNOWN;
2934 }
2938 /* this is called by the new master once all lockres
2939 * data has been received */
2940 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2941 struct dlm_lock_resource *res,
2942 u8 master, u8 new_master,
2943 struct dlm_node_iter *iter)
2944 {
2945 struct dlm_migrate_request migrate;
2946 int ret, skip, status = 0;
2947 int nodenum;
2949 memset(&migrate, 0, sizeof(migrate));
2950 migrate.namelen = res->lockname.len;
2951 memcpy(migrate.name, res->lockname.name, migrate.namelen);
2952 migrate.new_master = new_master;
2953 migrate.master = master;
2955 ret = 0;
2957 /* send message to all nodes, except the master and myself */
2958 while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2959 if (nodenum == master ||
2960 nodenum == new_master)
2961 continue;
2963 /* We could race exit domain. If exited, skip. */
2964 spin_lock(&dlm->spinlock);
2965 skip = (!test_bit(nodenum, dlm->domain_map));
2966 spin_unlock(&dlm->spinlock);
2967 if (skip) {
2968 clear_bit(nodenum, iter->node_map);
2969 continue;
2970 }
2972 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2973 &migrate, sizeof(migrate), nodenum,
2974 &status);
2975 if (ret < 0) {
2976 mlog(0, "migrate_request returned %d!\n", ret);
2977 if (!dlm_is_host_down(ret)) {
2978 mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2979 BUG();
2980 }
2981 clear_bit(nodenum, iter->node_map);
2982 ret = 0;
2983 } else if (status < 0) {
2984 mlog(0, "migrate request (node %u) returned %d!\n",
2985 nodenum, status);
2986 ret = status;
2987 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
2988 /* during the migration request we short-circuited
2989 * the mastery of the lockres. make sure we have
2990 * a mastery ref for nodenum */
2991 mlog(0, "%s:%.*s: need ref for node %u\n",
2992 dlm->name, res->lockname.len, res->lockname.name,
2993 nodenum);
2994 spin_lock(&res->spinlock);
2995 dlm_lockres_set_refmap_bit(nodenum, res);
2996 spin_unlock(&res->spinlock);
2997 }
2998 }
3000 if (ret < 0)
3001 mlog_errno(ret);
3003 mlog(0, "returning ret=%d\n", ret);
3004 return ret;
3005 }
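/*
 * NOTE (editorial): DLM_MIGRATE_RESPONSE_MASTERY_REF means our migrate
 * request short-circuited that node's in-flight mastery attempt, so
 * the new master records the node in the refmap immediately; waiting
 * for a later assert_master would lose the reference.
 */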
3008 /* if there is an existing mle for this lockres, we now know who the master is.
3009 * (the one who sent us *this* message) we can clear it up right away.
3010 * since the process that put the mle on the list still has a reference to it,
3011 * we can unhash it now, set the master and wake the process. as a result,
3012 * we will have no mle in the list to start with. now we can add an mle for
3013 * the migration and this should be the only one found for those scanning the
3014 * list. */
3015 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3016 void **ret_data)
3017 {
3018 struct dlm_ctxt *dlm = data;
3019 struct dlm_lock_resource *res = NULL;
3020 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3021 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3022 const char *name;
3023 unsigned int namelen, hash;
3024 int ret = 0;
3026 if (!dlm_grab(dlm))
3027 return -EINVAL;
3029 name = migrate->name;
3030 namelen = migrate->namelen;
3031 hash = dlm_lockid_hash(name, namelen);
3033 /* preallocate.. if this fails, abort */
3034 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
3035 GFP_NOFS);
3037 if (!mle) {
3038 ret = -ENOMEM;
3039 goto leave;
3040 }
3042 /* check for pre-existing lock */
3043 spin_lock(&dlm->spinlock);
3044 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3045 if (res) {
3046 spin_lock(&res->spinlock);
3047 if (res->state & DLM_LOCK_RES_RECOVERING) {
3048 /* if all is working ok, this can only mean that we got
3049 * a migrate request from a node that we now see as
3050 * dead. what can we do here? drop it to the floor? */
3051 spin_unlock(&res->spinlock);
3052 mlog(ML_ERROR, "Got a migrate request, but the "
3053 "lockres is marked as recovering!");
3054 kmem_cache_free(dlm_mle_cache, mle);
3055 ret = -EINVAL; /* need a better solution */
3056 goto unlock;
3058 res->state |= DLM_LOCK_RES_MIGRATING;
3059 spin_unlock(&res->spinlock);
3060 }
3062 spin_lock(&dlm->master_lock);
3063 /* ignore status. only nonzero status would BUG. */
3064 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3065 name, namelen,
3066 migrate->new_master,
3067 migrate->master);
3069 spin_unlock(&dlm->master_lock);
3070 unlock:
3071 spin_unlock(&dlm->spinlock);
3073 if (oldmle) {
3074 /* master is known, detach if not already detached */
3075 dlm_mle_detach_hb_events(dlm, oldmle);
3076 dlm_put_mle(oldmle);
3077 }
3079 if (res)
3080 dlm_lockres_put(res);
3081 leave:
3082 dlm_put(dlm);
3083 return ret;
3084 }
3086 /* must be holding dlm->spinlock and dlm->master_lock
3087 * when adding a migration mle, we can clear any other mles
3088 * in the master list because we know with certainty that
3089 * the master is "master". so we remove any old mle from
3090 * the list after setting its master field, and then add
3091 * the new migration mle. this way we can hold with the rule
3092 * of having only one mle for a given lock name at all times. */
3093 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3094 struct dlm_lock_resource *res,
3095 struct dlm_master_list_entry *mle,
3096 struct dlm_master_list_entry **oldmle,
3097 const char *name, unsigned int namelen,
3098 u8 new_master, u8 master)
3099 {
3100 int found;
3101 int ret = 0;
3103 *oldmle = NULL;
3105 mlog_entry_void();
3107 assert_spin_locked(&dlm->spinlock);
3108 assert_spin_locked(&dlm->master_lock);
3110 /* caller is responsible for any ref taken here on oldmle */
3111 found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3112 if (found) {
3113 struct dlm_master_list_entry *tmp = *oldmle;
3114 spin_lock(&tmp->spinlock);
3115 if (tmp->type == DLM_MLE_MIGRATION) {
3116 if (master == dlm->node_num) {
3117 /* ah another process raced me to it */
3118 mlog(0, "tried to migrate %.*s, but some "
3119 "process beat me to it\n",
3120 namelen, name);
3121 ret = -EEXIST;
3122 } else {
3123 /* bad. 2 NODES are trying to migrate! */
3124 mlog(ML_ERROR, "migration error mle: "
3125 "master=%u new_master=%u // request: "
3126 "master=%u new_master=%u // "
3127 "lockres=%.*s\n",
3128 tmp->master, tmp->new_master,
3129 master, new_master,
3130 namelen, name);
3131 BUG();
3132 }
3133 } else {
3134 /* this is essentially what assert_master does */
3135 tmp->master = master;
3136 atomic_set(&tmp->woken, 1);
3137 wake_up(&tmp->wq);
3138 /* remove it so that only one mle will be found */
3139 __dlm_unlink_mle(dlm, tmp);
3140 __dlm_mle_detach_hb_events(dlm, tmp);
3141 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3142 mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3143 "telling master to get ref for cleared out mle "
3144 "during migration\n", dlm->name, namelen, name,
3145 master, new_master);
3146 }
3147 spin_unlock(&tmp->spinlock);
3148 }
3150 /* now add a migration mle to the tail of the list */
3151 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3152 mle->new_master = new_master;
3153 /* the new master will be sending an assert master for this.
3154 * at that point we will get the refmap reference */
3155 mle->master = master;
3156 /* do this for consistency with other mle types */
3157 set_bit(new_master, mle->maybe_map);
3158 __dlm_insert_mle(dlm, mle);
3160 return ret;
3161 }
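/*
 * NOTE (editorial): this is what preserves the one-mle-per-name rule
 * stated above.  An existing non-MIGRATION mle is resolved in place
 * (its master is now known), unlinked, and only then is the MIGRATION
 * mle inserted, so a concurrent dlm_find_mle() sees exactly one entry.
 */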
3163 /*
3164 * Sets the owner of the lockres, associated to the mle, to UNKNOWN
3165 */
3166 static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
3167 struct dlm_master_list_entry *mle)
3168 {
3169 struct dlm_lock_resource *res;
3171 /* Find the lockres associated to the mle and set its owner to UNK */
3172 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3173 mle->mnamehash);
3174 if (res) {
3175 spin_unlock(&dlm->master_lock);
3177 /* move lockres onto recovery list */
3178 spin_lock(&res->spinlock);
3179 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3180 dlm_move_lockres_to_recovery_list(dlm, res);
3181 spin_unlock(&res->spinlock);
3182 dlm_lockres_put(res);
3184 /* about to get rid of mle, detach from heartbeat */
3185 __dlm_mle_detach_hb_events(dlm, mle);
3187 /* dump the mle */
3188 spin_lock(&dlm->master_lock);
3189 __dlm_put_mle(mle);
3190 spin_unlock(&dlm->master_lock);
3191 }
3193 return res;
3194 }
3196 static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
3197 struct dlm_master_list_entry *mle)
3198 {
3199 __dlm_mle_detach_hb_events(dlm, mle);
3201 spin_lock(&mle->spinlock);
3202 __dlm_unlink_mle(dlm, mle);
3203 atomic_set(&mle->woken, 1);
3204 spin_unlock(&mle->spinlock);
3206 wake_up(&mle->wq);
3207 }
3209 static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
3210 struct dlm_master_list_entry *mle, u8 dead_node)
3211 {
3212 int bit;
3214 BUG_ON(mle->type != DLM_MLE_BLOCK);
3216 spin_lock(&mle->spinlock);
3217 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3218 if (bit != dead_node) {
3219 mlog(0, "mle found, but dead node %u would not have been "
3220 "master\n", dead_node);
3221 spin_unlock(&mle->spinlock);
3222 } else {
3223 /* Must drop the refcount by one since the assert_master will
3224 * never arrive. This may result in the mle being unlinked and
3225 * freed, but there may still be a process waiting in the
3226 * dlmlock path which is fine. */
3227 mlog(0, "node %u was expected master\n", dead_node);
3228 atomic_set(&mle->woken, 1);
3229 spin_unlock(&mle->spinlock);
3230 wake_up(&mle->wq);
3231 }
3232 /* Do not need events any longer, so detach from heartbeat */
3233 __dlm_mle_detach_hb_events(dlm, mle);
3234 __dlm_put_mle(mle);
3235 }
3238 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3239 {
3240 struct dlm_master_list_entry *mle;
3241 struct dlm_lock_resource *res;
3242 struct hlist_head *bucket;
3243 struct hlist_node *list;
3244 unsigned int i;
3246 mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
3247 top:
3248 assert_spin_locked(&dlm->spinlock);
3250 /* clean the master list */
3251 spin_lock(&dlm->master_lock);
3252 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3253 bucket = dlm_master_hash(dlm, i);
3254 hlist_for_each(list, bucket) {
3255 mle = hlist_entry(list, struct dlm_master_list_entry,
3256 master_hash_node);
3258 BUG_ON(mle->type != DLM_MLE_BLOCK &&
3259 mle->type != DLM_MLE_MASTER &&
3260 mle->type != DLM_MLE_MIGRATION);
3262 /* MASTER mles are initiated locally. The waiting
3263 * process will notice the node map change shortly.
3264 * Let that happen as normal. */
3265 if (mle->type == DLM_MLE_MASTER)
3266 continue;
3268 /* BLOCK mles are initiated by other nodes. Need to
3269 * clean up if the dead node would have been the
3270 * master. */
3271 if (mle->type == DLM_MLE_BLOCK) {
3272 dlm_clean_block_mle(dlm, mle, dead_node);
3273 continue;
3274 }
3276 /* Everything else is a MIGRATION mle */
3278 /* The rule for MIGRATION mles is that the master
3279 * becomes UNKNOWN if *either* the original or the new
3280 * master dies. All UNKNOWN lockres' are sent to
3281 * whichever node becomes the recovery master. The new
3282 * master is responsible for determining if there is
3283 * still a master for this lockres, or if he needs to
3284 * take over mastery. Either way, this node should
3285 * expect another message to resolve this. */
3287 if (mle->master != dead_node &&
3288 mle->new_master != dead_node)
3289 continue;
3291 /* If we have reached this point, this mle needs to be
3292 * removed from the list and freed. */
3293 dlm_clean_migration_mle(dlm, mle);
3295 mlog(0, "%s: node %u died during migration from "
3296 "%u to %u!\n", dlm->name, dead_node, mle->master,
3297 mle->new_master);
3299 /* If we find a lockres associated with the mle, we've
3300 * hit this rare case that messes up our lock ordering.
3301 * If so, we need to drop the master lock so that we can
3302 * take the lockres lock, meaning that we will have to
3303 * restart from the head of list. */
3304 res = dlm_reset_mleres_owner(dlm, mle);
3305 if (res)
3306 /* restart */
3307 goto top;
3309 /* This may be the last reference */
3310 __dlm_put_mle(mle);
3311 }
3312 }
3313 spin_unlock(&dlm->master_lock);
3314 }
3316 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3317 u8 old_master)
3318 {
3319 struct dlm_node_iter iter;
3320 int ret = 0;
3322 spin_lock(&dlm->spinlock);
3323 dlm_node_iter_init(dlm->domain_map, &iter);
3324 clear_bit(old_master, iter.node_map);
3325 clear_bit(dlm->node_num, iter.node_map);
3326 spin_unlock(&dlm->spinlock);
3328 /* ownership of the lockres is changing. account for the
3329 * mastery reference here since old_master will briefly have
3330 * a reference after the migration completes */
3331 spin_lock(&res->spinlock);
3332 dlm_lockres_set_refmap_bit(old_master, res);
3333 spin_unlock(&res->spinlock);
3335 mlog(0, "now time to do a migrate request to other nodes\n");
3336 ret = dlm_do_migrate_request(dlm, res, old_master,
3337 dlm->node_num, &iter);
3338 if (ret < 0) {
3339 mlog_errno(ret);
3340 goto leave;
3341 }
3343 mlog(0, "doing assert master of %.*s to all except the original node\n",
3344 res->lockname.len, res->lockname.name);
3345 /* this call now finishes out the nodemap
3346 * even if one or more nodes die */
3347 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3348 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3349 if (ret < 0) {
3350 /* no longer need to retry. all living nodes contacted. */
3351 mlog_errno(ret);
3352 ret = 0;
3353 }
3355 memset(iter.node_map, 0, sizeof(iter.node_map));
3356 set_bit(old_master, iter.node_map);
3357 mlog(0, "doing assert master of %.*s back to %u\n",
3358 res->lockname.len, res->lockname.name, old_master);
3359 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3360 DLM_ASSERT_MASTER_FINISH_MIGRATION);
3361 if (ret < 0) {
3362 mlog(0, "assert master to original master failed "
3363 "with %d.\n", ret);
3364 /* the only nonzero status here would be because of
3365 * a dead original node. we're done. */
3366 ret = 0;
3367 }
3369 /* all done, set the owner, clear the flag */
3370 spin_lock(&res->spinlock);
3371 dlm_set_lockres_owner(dlm, res, dlm->node_num);
3372 res->state &= ~DLM_LOCK_RES_MIGRATING;
3373 spin_unlock(&res->spinlock);
3374 /* re-dirty it on the new master */
3375 dlm_kick_thread(dlm, res);
3376 wake_up(&res->wq);
3377 leave:
3378 return ret;
3379 }
3381 /*
3382 * LOCKRES AST REFCOUNT
3383 * this is integral to migration
3384 */
3386 /* for future intent to call an ast, reserve one ahead of time.
3387 * this should be called only after waiting on the lockres
3388 * with dlm_wait_on_lockres, and while still holding the
3389 * spinlock after the call. */
3390 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3391 {
3392 assert_spin_locked(&res->spinlock);
3393 if (res->state & DLM_LOCK_RES_MIGRATING) {
3394 __dlm_print_one_lock_resource(res);
3395 }
3396 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3398 atomic_inc(&res->asts_reserved);
3399 }
3401 /*
3402 * used to drop the reserved ast, either because it went unused,
3403 * or because the ast/bast was actually called.
3405 * also, if there is a pending migration on this lockres,
3406 * and this was the last pending ast on the lockres,
3407 * atomically set the MIGRATING flag before we drop the lock.
3408 * this is how we ensure that migration can proceed with no
3409 * asts in progress. note that it is ok if the state of the
3410 * queues is such that a lock should be granted in the future
3411 * or that a bast should be fired, because the new master will
3412 * shuffle the lists on this lockres as soon as it is migrated.
3413 */
3414 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3415 struct dlm_lock_resource *res)
3416 {
3417 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3418 return;
3420 if (!res->migration_pending) {
3421 spin_unlock(&res->spinlock);
3422 return;
3423 }
3425 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3426 res->migration_pending = 0;
3427 res->state |= DLM_LOCK_RES_MIGRATING;
3428 spin_unlock(&res->spinlock);
3429 wake_up(&res->wq);
3430 wake_up(&dlm->migration_wq);
3431 }
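/*
 * Pairing sketch (illustrative): every reservation is matched by one
 * release, and releasing the final reservation is what flips a
 * pending migration into DLM_LOCK_RES_MIGRATING:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	... deliver the ast/bast ...
 *	dlm_lockres_release_ast(dlm, res);
 */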