fs/ocfs2/dlm/dlmmaster.c
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmaster.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	if (dlm != mle->dlm)
		return 0;

	if (namelen != mle->mnamelen ||
	    memcmp(name, mle->mname, namelen) != 0)
		return 0;

	return 1;
}
static struct kmem_cache *dlm_lockres_cache = NULL;
static struct kmem_cache *dlm_lockname_cache = NULL;
static struct kmem_cache *dlm_mle_cache = NULL;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to);

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);
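
/* Classify an errno from o2net: returns nonzero for the errno values
 * that mean the remote node is dead or unreachable at the network
 * level, so callers can treat the failure as node death rather than
 * as a message-level error. */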
int dlm_is_host_down(int errno)
{
	switch (errno) {
	case -EBADF:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -ECONNRESET:
	case -EPIPE:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ETIMEDOUT:
	case -ECONNABORTED:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -ENETRESET:
	case -ESHUTDOWN:
	case -ENOPROTOOPT:
	case -EINVAL:	/* if returned from our tcp code,
			   this means there is no socket */
		return 1;
	}
	return 0;
}
/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}
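
/* mle->inuse pins an mle across a message round-trip: dlm_get_mle_inuse()
 * bumps both the inuse count and the kref, and dlm_put_mle_inuse() drops
 * both.  Note that taking the inuse reference requires both dlm->spinlock
 * and dlm->master_lock to be held. */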
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}


/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_HLIST_NODE(&mle->master_hash_node);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	BUG_ON(mle->type != DLM_MLE_BLOCK &&
	       mle->type != DLM_MLE_MASTER &&
	       mle->type != DLM_MLE_MIGRATION);

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->mleres = res;
		memcpy(mle->mname, res->lockname.name, res->lockname.len);
		mle->mnamelen = res->lockname.len;
		mle->mnamehash = res->lockname.hash;
	} else {
		BUG_ON(!name);
		mle->mleres = NULL;
		memcpy(mle->mname, name, namelen);
		mle->mnamelen = namelen;
		mle->mnamehash = dlm_lockid_hash(name, namelen);
	}

	atomic_inc(&dlm->mle_tot_count[mle->type]);
	atomic_inc(&dlm->mle_cur_count[mle->type]);

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);
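	/* after the copies above, node_map tracks which nodes are up while
	 * vote_map tracks which of those still owe us a response; our own
	 * bit is cleared from both since we never message ourselves */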
	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}
void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	if (!hlist_unhashed(&mle->master_hash_node))
		hlist_del_init(&mle->master_hash_node);
}

void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	struct hlist_head *bucket;

	assert_spin_locked(&dlm->master_lock);

	bucket = dlm_master_hash(dlm, mle->mnamehash);
	hlist_add_head(&mle->master_hash_node, bucket);
}
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;
	struct hlist_head *bucket;
	struct hlist_node *list;
	unsigned int hash;

	assert_spin_locked(&dlm->master_lock);

	hash = dlm_lockid_hash(name, namelen);
	bucket = dlm_master_hash(dlm, hash);
	hlist_for_each(list, bucket) {
		tmpmle = hlist_entry(list, struct dlm_master_list_entry,
				     master_hash_node);
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}
int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mlog_entry_void();

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
	     mle->type);

	/* remove from list if not already */
	__dlm_unlink_mle(dlm, mle);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	atomic_dec(&dlm->mle_cur_count[mle->type]);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}
/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
					       DLM_LOCKID_NAME_MAX, 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	dlm_destroy_master_caches();
	return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
	if (dlm_lockname_cache)
		kmem_cache_destroy(dlm_lockname_cache);

	if (dlm_lockres_cache)
		kmem_cache_destroy(dlm_lockres_cache);
}
static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;

	res = container_of(kref, struct dlm_lock_resource, refs);
	dlm = res->dlm;

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	spin_lock(&dlm->track_lock);
	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
		dlm_print_one_lock_resource(res);
	}
	spin_unlock(&dlm->track_lock);

	atomic_dec(&dlm->res_cur_count);

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     "  We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');

		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	kmem_cache_free(dlm_lockres_cache, res);
}
void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}

static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	INIT_LIST_HEAD(&res->tracking);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;
	res->inflight_locks = 0;

	res->dlm = dlm;

	kref_init(&res->refs);

	atomic_inc(&dlm->res_tot_count);
	atomic_inc(&dlm->res_cur_count);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	res->last_used = 0;

	spin_lock(&dlm->spinlock);
	list_add_tail(&res->tracking, &dlm->tracking_list);
	spin_unlock(&dlm->spinlock);

	memset(res->lvb, 0, DLM_LVB_LEN);
	memset(res->refmap, 0, sizeof(res->refmap));
}
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen)
{
	struct dlm_lock_resource *res = NULL;

	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		goto error;

	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
	if (!res->lockname.name)
		goto error;

	dlm_init_lockres(dlm, res, name, namelen);
	return res;

error:
	if (res && res->lockname.name)
		kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	if (res)
		kmem_cache_free(dlm_lockres_cache, res);
	return NULL;
}
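
/* An "inflight" reference keeps dlm->node_num's bit set in the lockres
 * refmap while local lock activity is outstanding: the first grab sets
 * the refmap bit, and the last drop clears it and wakes anyone waiting
 * on res->wq.  While the bit is set, the lockres cannot be purged out
 * from under the in-flight activity. */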
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     int new_lockres,
				     const char *file,
				     int line)
{
	if (!new_lockres)
		assert_spin_locked(&res->spinlock);

	if (!test_bit(dlm->node_num, res->refmap)) {
		BUG_ON(res->inflight_locks != 0);
		dlm_lockres_set_refmap_bit(dlm->node_num, res);
	}
	res->inflight_locks++;
	mlog(0, "%s:%.*s: inflight++: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
}

void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     const char *file,
				     int line)
{
	assert_spin_locked(&res->spinlock);

	BUG_ON(res->inflight_locks == 0);
	res->inflight_locks--;
	mlog(0, "%s:%.*s: inflight--: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
	if (res->inflight_locks == 0)
		dlm_lockres_clear_refmap_bit(dlm->node_num, res);
	wake_up(&res->wq);
}
/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.  need to wait around for that node
 * to assert_master (or die).
 *
 */
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags)
{
	struct dlm_lock_resource *tmpres = NULL, *res = NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;
	int drop_inflight_if_nonlocal = 0;

	BUG_ON(!lockid);

	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
	if (tmpres) {
		int dropping_ref = 0;

		spin_unlock(&dlm->spinlock);

		spin_lock(&tmpres->spinlock);
		/* We wait for the other thread that is mastering the resource */
		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			__dlm_wait_on_lockres(tmpres);
			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
		}

		if (tmpres->owner == dlm->node_num) {
			BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
			dlm_lockres_grab_inflight_ref(dlm, tmpres);
		} else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
			dropping_ref = 1;
		spin_unlock(&tmpres->spinlock);

		/* wait until done messaging the master, drop our ref to allow
		 * the lockres to be purged, start over. */
		if (dropping_ref) {
			spin_lock(&tmpres->spinlock);
			__dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		mlog(0, "found in hash!\n");
		if (res)
			dlm_lockres_put(res);
		res = tmpres;
		goto leave;
	}
	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);

	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE!  return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		dlm_lockres_grab_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}
	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		int mig;
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		}
		mig = (mle->type == DLM_MLE_MIGRATION);
		/* if there is a migration in progress, let the migration
		 * finish before continuing.  we can wait for the absence
		 * of the MIGRATION mle: either the migrate finished or
		 * one of the nodes died and the mle was cleaned up.
		 * if there is a BLOCK here, but it already has a master
		 * set, we are too late.  the master does not have a ref
		 * for us in the refmap.  detach the mle and drop it.
		 * either way, go back to the top and start over. */
		if (mig || mle->master != O2NM_MAX_NODES) {
			BUG_ON(mig && mle->master == dlm->node_num);
			/* we arrived too late.  the master does not
			 * have a ref for us.  retry. */
			mlog(0, "%s:%.*s: late on %s\n",
			     dlm->name, namelen, lockid,
			     mig ? "MIGRATION" : "BLOCK");
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			if (!mig)
				dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			/* this is lame, but we can't wait on either
			 * the mle or lockres waitqueue here */
			if (mig)
				msleep(100);
			goto lookup;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);

		/* still holding the dlm spinlock, check the recovery map
		 * to see if there are any nodes that still need to be
		 * considered.  these will not appear in the mle nodemap
		 * but they might own this lockres.  wait on them. */
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		}
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);
	/* since this lockres is new it doesn't require the spinlock */
	dlm_lockres_grab_inflight_ref_new(dlm, res);

	/* if this node does not become the master make sure to drop
	 * this inflight reference below */
	drop_inflight_if_nonlocal = 1;

	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
redo_request:
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
			     "must master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				     "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);

		if (wait_on_recovery)
			dlm_wait_for_node_recovery(dlm, bit, 10000);
	}
	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s:%.*s: requests only up to %u but master "
			     "is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		wait_on_recovery = 1;
		mlog(0, "%s:%.*s: node map changed, redo the "
		     "master request now, blocked=%d\n",
		     dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s:%.*s: spinning on "
			     "dlm_wait_for_lock_mastery, blocked=%d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			dlm_print_one_mle(mle);
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "lockres mastered by %u\n", res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
		dlm_lockres_drop_inflight_ref(dlm, res);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}
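
/* how long dlm_wait_for_lock_mastery() sleeps between rechecks while
 * waiting for the remaining votes or for an assert_master to arrive */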
#define DLM_MASTERY_TIMEOUT_MS   5000

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		if (res->owner != dlm->node_num) {
			ret = dlm_do_master_request(res, mle, res->owner);
			if (ret < 0) {
				/* give recovery a chance to run */
				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
				msleep(500);
				goto recheck;
			}
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	} else {
		if (!voting_done) {
			mlog(0, "map not changed and voting not done "
			     "for %s:%.*s\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		}
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				/* ref was grabbed in get_lock_resource
				 * will be dropped in dlmlock_master */
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			     atomic_read(&mle->mle_refs.refcount),
			     res->lockname.len, res->lockname.name);

		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
			     res->lockname.len, res->lockname.name);
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;   /* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
		ret = 0;
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	/* mastery reference obtained either during
	 * assert_master_handler or in get_lock_resource */
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}
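
/* iterator over the XOR of two node bitmaps: each bit reported by
 * dlm_bitmap_diff_iter_next() is a node whose state changed, flagged
 * NODE_DOWN if it was set in the original map or NODE_UP if it is
 * newly set in the current map */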
struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
							   O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node == lowest) {
					mlog(0, "expected master %u died"
					     " while this node was blocked "
					     "waiting on it!\n", node);
					lowest = find_next_bit(mle->maybe_map,
							       O2NM_MAX_NODES,
							       lowest+1);
					if (lowest < O2NM_MAX_NODES) {
						mlog(0, "%s:%.*s:still "
						     "blocked. waiting on %u "
						     "now\n", dlm->name,
						     res->lockname.len,
						     res->lockname.name,
						     lowest);
					} else {
						/* mle is an MLE_BLOCK, but
						 * there is now nothing left to
						 * block on.  we need to return
						 * all the way back out and try
						 * again with an MLE_MASTER.
						 * dlm_do_local_recovery_cleanup
						 * has already run, so the mle
						 * refcount is ok */
						mlog(0, "%s:%.*s: no "
						     "longer blocking. try to "
						     "master this here\n",
						     dlm->name,
						     res->lockname.len,
						     res->lockname.name);
						mle->type = DLM_MLE_MASTER;
						mle->mleres = res;
					}
				}
			}

			/* now blank out everything, as if we had never
			 * contacted anyone */
			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
			memset(mle->response_map, 0, sizeof(mle->response_map));
			/* reset the vote_map to the current node_map */
			memcpy(mle->vote_map, mle->node_map,
			       sizeof(mle->node_map));
			/* put myself into the maybe map */
			if (mle->type != DLM_MLE_BLOCK)
				set_bit(dlm->node_num, mle->maybe_map);
		}
		ret = -EAGAIN;
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}
/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */
static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response = 0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	request.namelen = (u8)mle->mnamelen;
	memcpy(request.name, mle->mname, request.namelen);

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0)  {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}

	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
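	/* record the answer in the mle under its spinlock: YES means 'to'
	 * is the master; NO and MAYBE just mark the response map (MAYBE
	 * also marks maybe_map, since 'to' may still win mastery); ERROR
	 * triggers a resend after a short sleep */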
	switch (response) {
	case DLM_MASTER_RESP_YES:
		set_bit(to, mle->response_map);
		mlog(0, "node %u is the master, response=YES\n", to);
		mlog(0, "%s:%.*s: master node %u now knows I have a "
		     "reference\n", dlm->name, res->lockname.len,
		     res->lockname.name, to);
		mle->master = to;
		break;
	case DLM_MASTER_RESP_NO:
		mlog(0, "node %u not master, response=NO\n", to);
		set_bit(to, mle->response_map);
		break;
	case DLM_MASTER_RESP_MAYBE:
		mlog(0, "node %u not master, response=MAYBE\n", to);
		set_bit(to, mle->response_map);
		set_bit(to, mle->maybe_map);
		break;
	case DLM_MASTER_RESP_ERROR:
		mlog(0, "node %u hit an error, resending\n", to);
		resend = 1;
		response = 0;
		break;
	default:
		mlog(ML_ERROR, "bad response! %u\n", response);
		BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);
		if (res->state & (DLM_LOCK_RES_RECOVERING|
				  DLM_LOCK_RES_MIGRATING)) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered/migrated\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			mlog(0, "%s:%.*s: setting bit %u in refmap\n",
			     dlm->name, namelen, name, request->node_idx);
			dlm_lockres_set_refmap_bit(request->node_idx, res);
			spin_unlock(&res->spinlock);
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
				dlm_lockres_set_refmap_bit(request->node_idx, res);
				mlog(0, "%s:%.*s: setting bit %u in refmap\n",
				     dlm->name, namelen, name,
				     request->node_idx);
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}

	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:
	/*
	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
	 * The reference is released by dlm_assert_master_worker() under
	 * the call to dlm_dispatch_assert_master().  If
	 * dlm_assert_master_worker() isn't called, we drop it here.
	 */
	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
		     dlm->node_num, res->lockname.len, res->lockname.name);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
			dlm_lockres_put(res);
		}
	} else {
		if (res)
			dlm_lockres_put(res);
	}

	dlm_put(dlm);
	return response;
}
/*
 * DLM_ASSERT_MASTER_MSG
 */


/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
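
/* the per-node response 'r' below is a bitmask: REASSERT asks us to run
 * the assert again because the peer created mles on other nodes, and
 * MASTERY_REF tells us the peer holds a reference that must be recorded
 * in our refmap.  a negative 'r' is a fatal protocol error. */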
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;
	const char *lockname = res->lockname.name;
	unsigned int namelen = res->lockname.len;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	spin_lock(&res->spinlock);
	res->state |= DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);

again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", tmpret,
			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(0, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
			r = 0;
		} else if (r < 0) {
			/* ok, something horribly messed.  kill thyself. */
			mlog(ML_ERROR, "during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
			mlog(ML_ERROR, "%.*s: very strange, "
			     "master MLE but no lockres on %u\n",
			     namelen, lockname, to);
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
			mlog(0, "%.*s: node %u created mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
			mlog(0, "%.*s: node %u has a reference to this "
			     "lockres, set the bit in the refmap\n",
			     namelen, lockname, to);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(to, res);
			spin_unlock(&res->spinlock);
		}
	}

	if (reassert)
		goto again;

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen, hash;
	u32 flags;
	int master_request = 0, have_lockres_ref = 0;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = assert->name;
	namelen = assert->namelen;
	hash = dlm_lockid_hash(name, namelen);
	flags = be32_to_cpu(assert->flags);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}

	spin_lock(&dlm->spinlock);

	if (flags)
		mlog(0, "assert_master with flags: %u\n", flags);

	/* find the MLE */
	spin_lock(&dlm->master_lock);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		/* not an error, could be master just re-asserting */
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
		     namelen, name);
	} else {
		int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* not necessarily an error, though less likely.
			 * could be master just re-asserting. */
			mlog(0, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
			     namelen, name);
		} else if (bit != assert->node_idx) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
			} else {
				/* with the fix for bug 569, a higher node
				 * number winning the mastery will respond
				 * YES to mastery requests, but this node
				 * had no way of knowing.  let it pass. */
				mlog(0, "%u is the lowest node, "
				     "%u is asserting. (%.*s)  %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				     assert->node_idx);
			}
		}
		if (mle->type == DLM_MLE_MIGRATION) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
				__dlm_put_mle(mle);
				spin_unlock(&dlm->master_lock);
				spin_unlock(&dlm->spinlock);
				goto done;
			}
		}
	}
	spin_unlock(&dlm->master_lock);

	/* ok everything checks out with the MLE
	 * now check to see if there is a lockres */
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING)  {
			mlog(ML_ERROR, "%u asserting but %.*s is "
			     "RECOVERING!\n", assert->node_idx, namelen, name);
			goto kill;
		}
		if (!mle) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
			    res->owner != assert->node_idx) {
				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
				     "but current owner is %u! (%.*s)\n",
				     assert->node_idx, res->owner, namelen,
				     name);
				__dlm_print_one_lock_resource(res);
				BUG();
			}
		} else if (mle->type != DLM_MLE_MIGRATION) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
				/* owner is just re-asserting */
				if (res->owner == assert->node_idx) {
					mlog(0, "owner %u re-asserting on "
					     "lock %.*s\n", assert->node_idx,
					     namelen, name);
					goto ok;
				}
				mlog(ML_ERROR, "got assert_master from "
				     "node %u, but %u is the owner! "
				     "(%.*s)\n", assert->node_idx,
				     res->owner, namelen, name);
				goto kill;
			}
			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
				mlog(ML_ERROR, "got assert from %u, but lock "
				     "with no owner should be "
				     "in-progress! (%.*s)\n",
				     assert->node_idx,
				     namelen, name);
				goto kill;
			}
		} else /* mle->type == DLM_MLE_MIGRATION */ {
			/* should only be getting an assert from new master */
			if (assert->node_idx != mle->new_master) {
				mlog(ML_ERROR, "got assert from %u, but "
				     "new master is %u, and old master "
				     "was %u (%.*s)\n",
				     assert->node_idx, mle->new_master,
				     mle->master, namelen, name);
				goto kill;
			}
		}

ok:
		spin_unlock(&res->spinlock);
	}

	// mlog(0, "woo!  got an assert_master from node %u!\n",
	//	     assert->node_idx);
	if (mle) {
		int extra_ref = 0;
		int nn = -1;
		int rr, err = 0;

		spin_lock(&mle->spinlock);
		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
			extra_ref = 1;
		else {
			/* MASTER mle: if any bits set in the response map
			 * then the calling node needs to re-assert to clear
			 * up nodes that this node contacted */
			while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
						   nn+1)) < O2NM_MAX_NODES) {
				if (nn != dlm->node_num && nn != assert->node_idx)
					master_request = 1;
			}
		}
		mle->master = assert->node_idx;
		atomic_set(&mle->woken, 1);
		wake_up(&mle->wq);
		spin_unlock(&mle->spinlock);

		if (res) {
			int wake = 0;
			spin_lock(&res->spinlock);
			if (mle->type == DLM_MLE_MIGRATION) {
				mlog(0, "finishing off migration of lockres %.*s, "
				     "from %u to %u\n",
				     res->lockname.len, res->lockname.name,
				     dlm->node_num, mle->new_master);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				dlm_change_lockres_owner(dlm, res, mle->new_master);
				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
			} else {
				dlm_change_lockres_owner(dlm, res, mle->master);
			}
			spin_unlock(&res->spinlock);
			have_lockres_ref = 1;
			if (wake)
				wake_up(&res->wq);
		}

		/* master is known, detach if not already detached.
		 * ensures that only one assert_master call will happen
		 * on this mle. */
		spin_lock(&dlm->master_lock);
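		/* sanity-check the refcount before tearing the mle down:
		 * the minimum expected value depends on whether an inuse
		 * (message round-trip) reference and/or the extra ref that
		 * comes with a BLOCK or MIGRATION mle is outstanding */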
		rr = atomic_read(&mle->mle_refs.refcount);
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
				err = 1;
			else if (!extra_ref && rr < 2)
				err = 1;
		} else {
			if (extra_ref && rr < 2)
				err = 1;
			else if (!extra_ref && rr < 1)
				err = 1;
		}
		if (err) {
			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
			     assert->node_idx, rr, extra_ref, mle->inuse);
			dlm_print_one_mle(mle);
		}

		__dlm_unlink_mle(dlm, mle);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
		if (extra_ref) {
			/* the assert master message now balances the extra
			 * ref given by the master / migration request message.
			 * if this is the last put, it will be removed
			 * from the list. */
			__dlm_put_mle(mle);
		}
		spin_unlock(&dlm->master_lock);
	} else if (res) {
		if (res->owner != assert->node_idx) {
			mlog(0, "assert_master from %u, but current "
			     "owner is %u (%.*s), no mle\n", assert->node_idx,
			     res->owner, namelen, name);
		}
	}
	spin_unlock(&dlm->spinlock);
1963 done:
1964 ret = 0;
1965 if (res) {
1966 spin_lock(&res->spinlock);
1967 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1968 spin_unlock(&res->spinlock);
1969 *ret_data = (void *)res;
1971 dlm_put(dlm);
1972 if (master_request) {
1973 mlog(0, "need to tell master to reassert\n");
1974 /* positive. negative would shoot down the node. */
1975 ret |= DLM_ASSERT_RESPONSE_REASSERT;
1976 if (!have_lockres_ref) {
1977 mlog(ML_ERROR, "strange, got assert from %u, MASTER "
1978 "mle present here for %s:%.*s, but no lockres!\n",
1979 assert->node_idx, dlm->name, namelen, name);
1982 if (have_lockres_ref) {
1983 /* let the master know we have a reference to the lockres */
1984 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
1985 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
1986 dlm->name, namelen, name, assert->node_idx);
1988 return ret;
1990 kill:
1991 /* kill the caller! */
1992 mlog(ML_ERROR, "Bad message received from another node. Dumping state "
1993 "and killing the other node now! This node is OK and can continue.\n");
1994 __dlm_print_one_lock_resource(res);
1995 spin_unlock(&res->spinlock);
1996 spin_unlock(&dlm->spinlock);
1997 *ret_data = (void *)res;
1998 dlm_put(dlm);
1999 return -EINVAL;
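/* The o2net post-handler for the assert_master message, run after the
 * reply has gone out on the wire.  It clears the SETREF_INPROG flag that
 * the handler set above, wakes any waiters, and drops the lockres
 * reference that the handler stashed in ret_data. */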
void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
{
	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;

	if (ret_data) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
		dlm_lockres_put(res);
	}
	return;
}
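/* Queue an assert_master broadcast for this lockres on the dlm worker
 * thread instead of sending it inline.  The work item takes its own
 * reference on the dlm context and reuses the caller's lockres ref. */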
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher, u8 request_from, u32 flags)
{
	struct dlm_work_item *item;
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item)
		return -ENOMEM;

	/* queue up work for dlm_assert_master_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res; /* already have a ref */
	/* can optionally ignore node numbers higher than this node */
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	item->u.am.flags = flags;

	if (ignore_higher)
		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
		     res->lockname.name);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;
}
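/* Worker side of the dispatch above: snapshot the domain map, prune the
 * nodes that must not be contacted, then run dlm_do_assert_master()
 * against the remainder.  A reserved ast keeps migration from starting
 * while the assert is still in flight. */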
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	int ret = 0;
	struct dlm_lock_resource *res;
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int ignore_higher;
	int bit;
	u8 request_from;
	u32 flags;

	dlm = item->dlm;
	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;

	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);

	clear_bit(dlm->node_num, nodemap);
	if (ignore_higher) {
		/* if this is just to clear up mles for nodes below
		 * this node, do not send the message to the original
		 * caller or any node number higher than this */
		clear_bit(request_from, nodemap);
		bit = dlm->node_num;
		while (1) {
			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
					    bit+1);
			if (bit >= O2NM_MAX_NODES)
				break;
			clear_bit(bit, nodemap);
		}
	}

	/*
	 * If we're migrating this lock to someone else, we are no
	 * longer allowed to assert our own mastery.  OTOH, we need to
	 * prevent migration from starting while we're still asserting
	 * our dominance.  The reserved ast delays migration.
	 */
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Someone asked us to assert mastery, but we're "
		     "in the middle of migration.  Skipping assert, "
		     "the new master will handle that.\n");
		spin_unlock(&res->spinlock);
		goto put;
	} else
		__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	mlog(0, "worker about to master %.*s here, this=%u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num);
	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
	if (ret < 0) {
		/* no need to restart, we are done */
		if (!dlm_is_host_down(ret))
			mlog_errno(ret);
	}

	/* Ok, we've asserted ourselves.  Let's let migration start. */
	dlm_lockres_release_ast(dlm, res);

put:
	dlm_lockres_put(res);

	mlog(0, "finished with dlm_assert_master_worker\n");
}
/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node.  */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;
	u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, &master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
			ret = 0;
		}

		if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			/* check to see if this master is in the recovery map */
			spin_lock(&dlm->spinlock);
			if (test_bit(master, dlm->recovery_map)) {
				mlog(ML_NOTICE, "%s: node %u has not seen "
				     "node %u go down yet, and thinks the "
				     "dead node is mastering the recovery "
				     "lock.  must wait.\n", dlm->name,
				     nodenum, master);
				ret = -EAGAIN;
			}
			spin_unlock(&dlm->spinlock);
			mlog(0, "%s: reco lock master is %u\n", dlm->name,
			     master);
			break;
		}
	}
	return ret;
}
/*
 * DLM_DEREF_LOCKRES_MSG
 */
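/* Tell the master of this lockres that we no longer hold any locks on
 * it, so it can clear our bit in its refmap.  A negative reply means the
 * master thinks we never held a ref at all; that indicates corrupted
 * refcounting and is treated as fatal. */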
int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	struct dlm_deref_lockres deref;
	int ret = 0, r;
	const char *lockname;
	unsigned int namelen;

	lockname = res->lockname.name;
	namelen = res->lockname.len;
	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	mlog(0, "%s:%.*s: sending deref to %d\n",
	     dlm->name, namelen, lockname, res->owner);
	memset(&deref, 0, sizeof(deref));
	deref.node_idx = dlm->node_num;
	deref.namelen = namelen;
	memcpy(deref.name, lockname, namelen);

	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
				 &deref, sizeof(deref), res->owner, &r);
	if (ret < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
		     res->owner);
	else if (r < 0) {
		/* BAD.  other node says I did not have a ref. */
		mlog(ML_ERROR, "while dropping ref on %s:%.*s "
		     "(master=%u) got %d.\n", dlm->name, namelen,
		     lockname, res->owner, r);
		dlm_print_one_lock_resource(res);
		BUG();
	}
	return ret;
}
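/* Master-side handler for the deref message above.  If a reference set
 * is still in progress (SETREF_INPROG), clearing the refmap bit is
 * punted to dlm_deref_lockres_worker() so it can block; otherwise the
 * sender's bit is cleared inline. */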
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen;
	int ret = -EINVAL;
	u8 node;
	unsigned int hash;
	struct dlm_work_item *item;
	int cleared = 0;
	int dispatch = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = deref->name;
	namelen = deref->namelen;
	node = deref->node_idx;

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}
	if (deref->node_idx >= O2NM_MAX_NODES) {
		mlog(ML_ERROR, "Invalid node number: %u\n", node);
		goto done;
	}

	hash = dlm_lockid_hash(name, namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
		     dlm->name, namelen, name);
		goto done;
	}
	spin_unlock(&dlm->spinlock);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
		dispatch = 1;
	else {
		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
		if (test_bit(node, res->refmap)) {
			dlm_lockres_clear_refmap_bit(node, res);
			cleared = 1;
		}
	}
	spin_unlock(&res->spinlock);

	if (!dispatch) {
		if (cleared)
			dlm_lockres_calc_usage(dlm, res);
		else {
			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
			     "but it is already dropped!\n", dlm->name,
			     res->lockname.len, res->lockname.name, node);
			dlm_print_one_lock_resource(res);
		}
		ret = 0;
		goto done;
	}

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto done;
	}

	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
	item->u.dl.deref_res = res;
	item->u.dl.deref_node = node;

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;

done:
	if (res)
		dlm_lockres_put(res);
	dlm_put(dlm);

	return ret;
}
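/* Deferred half of the deref handler: wait for SETREF_INPROG to clear
 * before dropping the sender's refmap bit, then recalculate whether the
 * lockres can be purged. */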
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_lock_resource *res;
	u8 node;
	u8 cleared = 0;

	dlm = item->dlm;
	res = item->u.dl.deref_res;
	node = item->u.dl.deref_node;

	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
	if (test_bit(node, res->refmap)) {
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		dlm_lockres_clear_refmap_bit(node, res);
		cleared = 1;
	}
	spin_unlock(&res->spinlock);

	if (cleared) {
		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
		     dlm->name, res->lockname.len, res->lockname.name, node);
		dlm_lockres_calc_usage(dlm, res);
	} else {
		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
		     "but it is already dropped!\n", dlm->name,
		     res->lockname.len, res->lockname.name, node);
		dlm_print_one_lock_resource(res);
	}

	dlm_lockres_put(res);
}
/* Checks whether the lockres can be migrated.  Returns 0 if yes, < 0
 * if not.  If 0, numlocks is set to the number of locks in the lockres
 * and hasrefs to whether any other node still holds a refmap reference.
 */
static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      int *numlocks,
				      int *hasrefs)
{
	int ret;
	int i;
	int count = 0;
	struct list_head *queue;
	struct dlm_lock *lock;

	assert_spin_locked(&res->spinlock);

	*numlocks = 0;
	*hasrefs = 0;

	ret = -EINVAL;
	if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "cannot migrate lockres with unknown owner!\n");
		goto leave;
	}

	if (res->owner != dlm->node_num) {
		mlog(0, "cannot migrate lockres this node doesn't own!\n");
		goto leave;
	}

	ret = 0;
	queue = &res->granted;
	for (i = 0; i < 3; i++) {
		list_for_each_entry(lock, queue, list) {
			++count;
			if (lock->ml.node == dlm->node_num) {
				mlog(0, "found a lock owned by this node still "
				     "on the %s queue!  will not migrate this "
				     "lockres\n", (i == 0 ? "granted" :
						   (i == 1 ? "converting" :
						    "blocked")));
				ret = -ENOTEMPTY;
				goto leave;
			}
		}
		queue++;
	}

	*numlocks = count;

	count = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
	if (count < O2NM_MAX_NODES)
		*hasrefs = 1;

	mlog(0, "%s: res %.*s, Migrateable, locks %d, refs %d\n", dlm->name,
	     res->lockname.len, res->lockname.name, *numlocks, *hasrefs);

leave:
	return ret;
}
/*
 * DLM_MIGRATE_LOCKRES
 */
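/* Move mastery of a lockres from this node to target.  In outline:
 * verify the lockres is migrateable, preallocate what is needed, place a
 * MIGRATION mle so other nodes spin on the MIGRATING flag, ship the lock
 * state via dlm_send_one_lockres(), then wait for the target's
 * assert_master before handing over ownership.  Any failure re-dirties
 * the lockres and wakes waiters. */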
static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       u8 target)
{
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *oldmle = NULL;
	struct dlm_migratable_lockres *mres = NULL;
	int ret = 0;
	const char *name;
	unsigned int namelen;
	int mle_added = 0;
	int numlocks, hasrefs;
	int wake = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = res->lockname.name;
	namelen = res->lockname.len;

	mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target);

	/*
	 * ensure this lockres is a proper candidate for migration
	 */
	spin_lock(&res->spinlock);
	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
	if (ret < 0) {
		spin_unlock(&res->spinlock);
		goto leave;
	}
	spin_unlock(&res->spinlock);

	/* no work to do */
	if (numlocks == 0 && !hasrefs)
		goto leave;

	/*
	 * preallocate up front
	 * if this fails, abort
	 */
	ret = -ENOMEM;
	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
	if (!mres) {
		mlog_errno(ret);
		goto leave;
	}

	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
	if (!mle) {
		mlog_errno(ret);
		goto leave;
	}
	ret = 0;

	/*
	 * find a node to migrate the lockres to
	 */
	spin_lock(&dlm->spinlock);
	/* pick a new node */
	if (!test_bit(target, dlm->domain_map) ||
	    target >= O2NM_MAX_NODES) {
		target = dlm_pick_migration_target(dlm, res);
	}
	mlog(0, "%s: res %.*s, Node %u chosen for migration\n", dlm->name,
	     namelen, name, target);

	if (target >= O2NM_MAX_NODES ||
	    !test_bit(target, dlm->domain_map)) {
		/* target chosen is not alive */
		ret = -EINVAL;
	}

	if (ret) {
		spin_unlock(&dlm->spinlock);
		goto fail;
	}

	mlog(0, "continuing with target = %u\n", target);

	/*
	 * clear any existing master requests and
	 * add the migration mle to the list
	 */
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (ret == -EEXIST) {
		mlog(0, "another process is already migrating it\n");
		goto fail;
	}
	mle_added = 1;

	/*
	 * set the MIGRATING flag and flush asts
	 * if we fail after this we need to re-dirty the lockres
	 */
	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
		     "the target went down.\n", res->lockname.len,
		     res->lockname.name, target);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		ret = -EINVAL;
	}

fail:
	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (ret < 0) {
		if (mle_added) {
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
		} else if (mle) {
			kmem_cache_free(dlm_mle_cache, mle);
		}
		goto leave;
	}

	/*
	 * at this point, we have a migration target, an mle
	 * in the master list, and the MIGRATING flag set on
	 * the lockres
	 */

	/* now that remote nodes are spinning on the MIGRATING flag,
	 * ensure that all assert_master work is flushed. */
	flush_workqueue(dlm->dlm_worker);

	/* get an extra reference on the mle.
	 * otherwise the assert_master from the new
	 * master will destroy this.
	 * also, make sure that all callers of dlm_get_mle
	 * take both dlm->spinlock and dlm->master_lock */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	/* notify new node and send all lock state */
	/* call send_one_lockres with migration flag.
	 * this serves as notice to the target node that a
	 * migration is starting. */
	ret = dlm_send_one_lockres(dlm, res, mres, target,
				   DLM_MRES_MIGRATION);

	if (ret < 0) {
		mlog(0, "migration to node %u failed with %d\n",
		     target, ret);
		/* migration failed, detach and clean up mle */
		dlm_mle_detach_hb_events(dlm, mle);
		dlm_put_mle(mle);
		dlm_put_mle_inuse(mle);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		goto leave;
	}

	/* at this point, the target sends a message to all nodes,
	 * (using dlm_do_migrate_request).  this node is skipped since
	 * we had to put an mle in the list to begin the process.  this
	 * node now waits for target to do an assert master.  this node
	 * will be the last one notified, ensuring that the migration
	 * is complete everywhere.  if the target dies while this is
	 * going on, some nodes could potentially see the target as the
	 * master, so it is important that my recovery finds the migration
	 * mle and sets the master to UNKNOWN. */

	/* wait for new node to assert master */
	while (1) {
		ret = wait_event_interruptible_timeout(mle->wq,
					(atomic_read(&mle->woken) == 1),
					msecs_to_jiffies(5000));

		if (ret >= 0) {
			if (atomic_read(&mle->woken) == 1 ||
			    res->owner == target)
				break;

			mlog(0, "%s:%.*s: timed out during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
			/* avoid hang during shutdown when migrating lockres
			 * to a node which also goes down */
			if (dlm_is_node_dead(dlm, target)) {
				mlog(0, "%s:%.*s: expected migration "
				     "target %u is no longer up, restarting\n",
				     dlm->name, res->lockname.len,
				     res->lockname.name, target);
				ret = -EINVAL;
				/* migration failed, detach and clean up mle */
				dlm_mle_detach_hb_events(dlm, mle);
				dlm_put_mle(mle);
				dlm_put_mle_inuse(mle);
				spin_lock(&res->spinlock);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				spin_unlock(&res->spinlock);
				goto leave;
			}
		} else
			mlog(0, "%s:%.*s: caught signal during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, target);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	dlm_remove_nonlocal_locks(dlm, res);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle_inuse(mle);
	ret = 0;

	dlm_lockres_calc_usage(dlm, res);

leave:
	/* re-dirty the lockres if we failed */
	if (ret < 0)
		dlm_kick_thread(dlm, res);

	/* wake up waiters if the MIGRATING flag got set
	 * but migration failed */
	if (wake)
		wake_up(&res->wq);

	/* TODO: cleanup */
	if (mres)
		free_page((unsigned long)mres);

	dlm_put(dlm);

	mlog(0, "returning %d\n", ret);
	return ret;
}
#define DLM_MIGRATION_RETRY_MS  100

/* Should be called only after beginning the domain leave process.
 * There should not be any remaining locks on nonlocal lock resources,
 * and there should be no local locks left on locally mastered resources.
 *
 * Called with the dlm spinlock held, may drop it to do migration, but
 * will re-acquire before exit.
 *
 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	int ret;
	int lock_dropped = 0;
	int numlocks, hasrefs;

	spin_lock(&res->spinlock);
	if (res->owner != dlm->node_num) {
		if (!__dlm_lockres_unused(res)) {
			mlog(ML_ERROR, "%s:%.*s: this node is not master, "
			     "trying to free this but locks remain\n",
			     dlm->name, res->lockname.len, res->lockname.name);
		}
		spin_unlock(&res->spinlock);
		goto leave;
	}

	/* No need to migrate a lockres having no locks */
	ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
	if (ret >= 0 && numlocks == 0 && !hasrefs) {
		spin_unlock(&res->spinlock);
		goto leave;
	}
	spin_unlock(&res->spinlock);

	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
	spin_unlock(&dlm->spinlock);
	lock_dropped = 1;
	while (1) {
		ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
		if (ret >= 0)
			break;
		if (ret == -ENOTEMPTY) {
			mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
			     res->lockname.len, res->lockname.name);
			BUG();
		}

		mlog(0, "lockres %.*s: migrate failed, "
		     "retrying\n", res->lockname.len,
		     res->lockname.name);
		msleep(DLM_MIGRATION_RETRY_MS);
	}
	spin_lock(&dlm->spinlock);
leave:
	return lock_dropped;
}
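/* Returns nonzero once this lock has no queued or pending bast, taking
 * the ast and lock spinlocks to get a stable answer. */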
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	int ret;
	spin_lock(&dlm->ast_lock);
	spin_lock(&lock->spinlock);
	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
	spin_unlock(&lock->spinlock);
	spin_unlock(&dlm->ast_lock);
	return ret;
}
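/* Wait-condition helper for dlm_mark_lockres_migrating(): true once the
 * MIGRATING flag is set, or once the target has left the domain (in
 * which case the caller must recheck the domain map). */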
static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     u8 mig_target)
{
	int can_proceed;
	spin_lock(&res->spinlock);
	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
	spin_unlock(&res->spinlock);

	/* target has died, so make the caller break out of the
	 * wait_event, but caller must recheck the domain_map */
	spin_lock(&dlm->spinlock);
	if (!test_bit(mig_target, dlm->domain_map))
		can_proceed = 1;
	spin_unlock(&dlm->spinlock);
	return can_proceed;
}
static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res)
{
	int ret;
	spin_lock(&res->spinlock);
	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
	spin_unlock(&res->spinlock);
	return ret;
}
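/* Drive the lockres into the MIGRATING state: flush pending asts, block
 * further dirtying with BLOCK_DIRTY, then wait until the last reserved
 * ast is released (which is what actually sets MIGRATING).  Returns
 * -EHOSTDOWN if the target leaves the domain while we wait. */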
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target)
{
	int ret = 0;

	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num,
	     target);
	/* need to set MIGRATING flag on lockres.  this is done by
	 * ensuring that all asts have been flushed for this lockres. */
	spin_lock(&res->spinlock);
	BUG_ON(res->migration_pending);
	res->migration_pending = 1;
	/* strategy is to reserve an extra ast then release
	 * it below, letting the release do all of the work */
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* now flush all the pending asts */
	dlm_kick_thread(dlm, res);
	/* before waiting on DIRTY, block processes which may
	 * try to dirty the lockres before MIGRATING is set */
	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
	spin_unlock(&res->spinlock);
	/* now wait on any pending asts and the DIRTY state */
	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
	dlm_lockres_release_ast(dlm, res);

	mlog(0, "about to wait on migration_wq, dirty=%s\n",
	     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
	/* if the extra ref we just put was the final one, this
	 * will pass thru immediately.  otherwise, we need to wait
	 * for the last ast to finish. */
again:
	ret = wait_event_interruptible_timeout(dlm->migration_wq,
		   dlm_migration_can_proceed(dlm, res, target),
		   msecs_to_jiffies(1000));
	if (ret < 0) {
		mlog(0, "woken again: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	} else {
		mlog(0, "all is well: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	}
	if (!dlm_migration_can_proceed(dlm, res, target)) {
		mlog(0, "trying again...\n");
		goto again;
	}

	ret = 0;
	/* did the target go down or die? */
	spin_lock(&dlm->spinlock);
	if (!test_bit(target, dlm->domain_map)) {
		mlog(ML_ERROR, "aha. migration target %u just went down\n",
		     target);
		ret = -EHOSTDOWN;
	}
	spin_unlock(&dlm->spinlock);

	/*
	 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
	 * another try; otherwise, we are sure the MIGRATING state is there,
	 * drop the unneeded state which blocked threads trying to DIRTY
	 */
	spin_lock(&res->spinlock);
	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
	if (!ret)
		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
	spin_unlock(&res->spinlock);

	/*
	 * at this point:
	 *
	 *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
	 *   o there are no pending asts on this lockres
	 *   o all processes trying to reserve an ast on this
	 *     lockres must wait for the MIGRATING flag to clear
	 */
	return ret;
}
/* last step in the migration process.
 * original master calls this to free all of the dlm_lock
 * structures that used to be for other nodes. */
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res)
{
	struct list_head *queue = &res->granted;
	int i, bit;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&res->spinlock);

	BUG_ON(res->owner == dlm->node_num);

	for (i=0; i<3; i++) {
		list_for_each_entry_safe(lock, next, queue, list) {
			if (lock->ml.node != dlm->node_num) {
				mlog(0, "putting lock for node %u\n",
				     lock->ml.node);
				/* be extra careful */
				BUG_ON(!list_empty(&lock->ast_list));
				BUG_ON(!list_empty(&lock->bast_list));
				BUG_ON(lock->ast_pending);
				BUG_ON(lock->bast_pending);
				dlm_lockres_clear_refmap_bit(lock->ml.node, res);
				list_del_init(&lock->list);
				dlm_lock_put(lock);
				/* In a normal unlock, we would have added a
				 * DLM_UNLOCK_FREE_LOCK action.  Force it. */
				dlm_lock_put(lock);
			}
		}
		queue++;
	}
	bit = 0;
	while (1) {
		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
		if (bit >= O2NM_MAX_NODES)
			break;
		/* do not clear the local node reference, if there is a
		 * process holding this, let it drop the ref itself */
		if (bit != dlm->node_num) {
			mlog(0, "%s:%.*s: node %u had a ref to this "
			     "migrating lockres, clearing\n", dlm->name,
			     res->lockname.len, res->lockname.name, bit);
			dlm_lockres_clear_refmap_bit(bit, res);
		}
		bit++;
	}
}
/* for now this is not too intelligent.  we will
 * need stats to make this do the right thing.
 * this just finds the first lock on one of the
 * queues and uses that node as the target. */
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue = &res->granted;
	struct dlm_lock *lock;
	int nodenum;

	assert_spin_locked(&dlm->spinlock);

	spin_lock(&res->spinlock);
	for (i=0; i<3; i++) {
		list_for_each_entry(lock, queue, list) {
			/* up to the caller to make sure this node
			 * is alive */
			if (lock->ml.node != dlm->node_num) {
				spin_unlock(&res->spinlock);
				return lock->ml.node;
			}
		}
		queue++;
	}

	nodenum = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
	if (nodenum < O2NM_MAX_NODES) {
		spin_unlock(&res->spinlock);
		return nodenum;
	}
	spin_unlock(&res->spinlock);
	mlog(0, "have not found a suitable target yet! checking domain map\n");

	/* ok now we're getting desperate.  pick anyone alive. */
	nodenum = -1;
	while (1) {
		nodenum = find_next_bit(dlm->domain_map,
					O2NM_MAX_NODES, nodenum+1);
		mlog(0, "found %d in domain map\n", nodenum);
		if (nodenum >= O2NM_MAX_NODES)
			break;
		if (nodenum != dlm->node_num) {
			mlog(0, "picking %d\n", nodenum);
			return nodenum;
		}
	}

	mlog(0, "giving up.  no master to migrate to\n");
	return DLM_LOCK_RES_OWNER_UNKNOWN;
}
/* this is called by the new master once all lockres
 * data has been received */
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 master, u8 new_master,
				  struct dlm_node_iter *iter)
{
	struct dlm_migrate_request migrate;
	int ret, skip, status = 0;
	int nodenum;

	memset(&migrate, 0, sizeof(migrate));
	migrate.namelen = res->lockname.len;
	memcpy(migrate.name, res->lockname.name, migrate.namelen);
	migrate.new_master = new_master;
	migrate.master = master;

	ret = 0;

	/* send message to all nodes, except the master and myself */
	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
		if (nodenum == master ||
		    nodenum == new_master)
			continue;

		/* We could race exit domain. If exited, skip. */
		spin_lock(&dlm->spinlock);
		skip = (!test_bit(nodenum, dlm->domain_map));
		spin_unlock(&dlm->spinlock);
		if (skip) {
			clear_bit(nodenum, iter->node_map);
			continue;
		}

		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
					 &migrate, sizeof(migrate), nodenum,
					 &status);
		if (ret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
			     dlm->key, nodenum);
			if (!dlm_is_host_down(ret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
				BUG();
			}
			clear_bit(nodenum, iter->node_map);
			ret = 0;
		} else if (status < 0) {
			mlog(0, "migrate request (node %u) returned %d!\n",
			     nodenum, status);
			ret = status;
		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
			/* during the migration request we short-circuited
			 * the mastery of the lockres.  make sure we have
			 * a mastery ref for nodenum */
			mlog(0, "%s:%.*s: need ref for node %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     nodenum);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(nodenum, res);
			spin_unlock(&res->spinlock);
		}
	}

	if (ret < 0)
		mlog_errno(ret);

	mlog(0, "returning ret=%d\n", ret);
	return ret;
}
/* if there is an existing mle for this lockres, we now know who the master is.
 * (the one who sent us *this* message) we can clear it up right away.
 * since the process that put the mle on the list still has a reference to it,
 * we can unhash it now, set the master and wake the process.  as a result,
 * we will have no mle in the list to start with.  now we can add an mle for
 * the migration and this should be the only one found for those scanning the
 * list.  */
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
	const char *name;
	unsigned int namelen, hash;
	int ret = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	name = migrate->name;
	namelen = migrate->namelen;
	hash = dlm_lockid_hash(name, namelen);

	/* preallocate.. if this fails, abort */
	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);

	if (!mle) {
		ret = -ENOMEM;
		goto leave;
	}

	/* check for pre-existing lock */
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			/* if all is working ok, this can only mean that we got
			 * a migrate request from a node that we now see as
			 * dead.  what can we do here?  drop it to the floor? */
			spin_unlock(&res->spinlock);
			mlog(ML_ERROR, "Got a migrate request, but the "
			     "lockres is marked as recovering!");
			kmem_cache_free(dlm_mle_cache, mle);
			ret = -EINVAL; /* need a better solution */
			goto unlock;
		}
		res->state |= DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
	}

	spin_lock(&dlm->master_lock);
	/* ignore status.  only nonzero status would BUG. */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
				    name, namelen,
				    migrate->new_master,
				    migrate->master);

	spin_unlock(&dlm->master_lock);
unlock:
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (res)
		dlm_lockres_put(res);
leave:
	dlm_put(dlm);
	return ret;
}
/* must be holding dlm->spinlock and dlm->master_lock
 * when adding a migration mle, we can clear any other mles
 * in the master list because we know with certainty that
 * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
 * the new migration mle.  this way we can hold with the rule
 * of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
{
	int found;
	int ret = 0;

	*oldmle = NULL;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* caller is responsible for any ref taken here on oldmle */
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
	if (found) {
		struct dlm_master_list_entry *tmp = *oldmle;
		spin_lock(&tmp->spinlock);
		if (tmp->type == DLM_MLE_MIGRATION) {
			if (master == dlm->node_num) {
				/* ah another process raced me to it */
				mlog(0, "tried to migrate %.*s, but some "
				     "process beat me to it\n",
				     namelen, name);
				ret = -EEXIST;
			} else {
				/* bad.  2 NODES are trying to migrate! */
				mlog(ML_ERROR, "migration error mle: "
				     "master=%u new_master=%u // request: "
				     "master=%u new_master=%u // "
				     "lockres=%.*s\n",
				     tmp->master, tmp->new_master,
				     master, new_master,
				     namelen, name);
				BUG();
			}
		} else {
			/* this is essentially what assert_master does */
			tmp->master = master;
			atomic_set(&tmp->woken, 1);
			wake_up(&tmp->wq);
			/* remove it so that only one mle will be found */
			__dlm_unlink_mle(dlm, tmp);
			__dlm_mle_detach_hb_events(dlm, tmp);
			ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
			mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
			     "telling master to get ref for cleared out mle "
			     "during migration\n", dlm->name, namelen, name,
			     master, new_master);
		}
		spin_unlock(&tmp->spinlock);
	}

	/* now add a migration mle to the tail of the list */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	/* the new master will be sending an assert master for this.
	 * at that point we will get the refmap reference */
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	__dlm_insert_mle(dlm, mle);

	return ret;
}
/*
 * Sets the owner of the lockres associated with the mle to UNKNOWN
 */
static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
					struct dlm_master_list_entry *mle)
{
	struct dlm_lock_resource *res;

	/* Find the lockres associated to the mle and set its owner to UNK */
	res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
				   mle->mnamehash);
	if (res) {
		spin_unlock(&dlm->master_lock);

		/* move lockres onto recovery list */
		spin_lock(&res->spinlock);
		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
		dlm_move_lockres_to_recovery_list(dlm, res);
		spin_unlock(&res->spinlock);
		dlm_lockres_put(res);

		/* about to get rid of mle, detach from heartbeat */
		__dlm_mle_detach_hb_events(dlm, mle);

		/* dump the mle */
		spin_lock(&dlm->master_lock);
		__dlm_put_mle(mle);
		spin_unlock(&dlm->master_lock);
	}

	return res;
}
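/* Unlink a MIGRATION mle whose master or new master has died, detach it
 * from heartbeat events, and wake anyone blocked on it. */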
static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
				    struct dlm_master_list_entry *mle)
{
	__dlm_mle_detach_hb_events(dlm, mle);

	spin_lock(&mle->spinlock);
	__dlm_unlink_mle(dlm, mle);
	atomic_set(&mle->woken, 1);
	spin_unlock(&mle->spinlock);

	wake_up(&mle->wq);
}
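/* Handle a BLOCK mle when a node dies: if the dead node was the expected
 * master, drop the ref that its assert_master would have balanced and
 * wake the waiter; otherwise leave the mle alone. */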
static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle, u8 dead_node)
{
	int bit;

	BUG_ON(mle->type != DLM_MLE_BLOCK);

	spin_lock(&mle->spinlock);
	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
	if (bit != dead_node) {
		mlog(0, "mle found, but dead node %u would not have been "
		     "master\n", dead_node);
		spin_unlock(&mle->spinlock);
	} else {
		/* Must drop the refcount by one since the assert_master will
		 * never arrive. This may result in the mle being unlinked and
		 * freed, but there may still be a process waiting in the
		 * dlmlock path which is fine. */
		mlog(0, "node %u was expected master\n", dead_node);
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);

		/* Do not need events any longer, so detach from heartbeat */
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
	}
}
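/* Walk the whole master hash on node death and fix up every mle the
 * dead node touches: BLOCK mles are cleaned if the dead node would have
 * been master, and MIGRATION mles are torn down if either endpoint of
 * the migration died, with the lockres owner reset to UNKNOWN. */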
void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_master_list_entry *mle;
	struct dlm_lock_resource *res;
	struct hlist_head *bucket;
	struct hlist_node *list;
	unsigned int i;

	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
	assert_spin_locked(&dlm->spinlock);

	/* clean the master list */
	spin_lock(&dlm->master_lock);
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_master_hash(dlm, i);
		hlist_for_each(list, bucket) {
			mle = hlist_entry(list, struct dlm_master_list_entry,
					  master_hash_node);

			BUG_ON(mle->type != DLM_MLE_BLOCK &&
			       mle->type != DLM_MLE_MASTER &&
			       mle->type != DLM_MLE_MIGRATION);

			/* MASTER mles are initiated locally.  The waiting
			 * process will notice the node map change shortly.
			 * Let that happen as normal. */
			if (mle->type == DLM_MLE_MASTER)
				continue;

			/* BLOCK mles are initiated by other nodes.  Need to
			 * clean up if the dead node would have been the
			 * master. */
			if (mle->type == DLM_MLE_BLOCK) {
				dlm_clean_block_mle(dlm, mle, dead_node);
				continue;
			}

			/* Everything else is a MIGRATION mle */

			/* The rule for MIGRATION mles is that the master
			 * becomes UNKNOWN if *either* the original or the new
			 * master dies.  All UNKNOWN lockres' are sent to
			 * whichever node becomes the recovery master.  The new
			 * master is responsible for determining if there is
			 * still a master for this lockres, or if he needs to
			 * take over mastery.  Either way, this node should
			 * expect another message to resolve this. */

			if (mle->master != dead_node &&
			    mle->new_master != dead_node)
				continue;

			/* If we have reached this point, this mle needs to be
			 * removed from the list and freed. */
			dlm_clean_migration_mle(dlm, mle);

			mlog(0, "%s: node %u died during migration from "
			     "%u to %u!\n", dlm->name, dead_node, mle->master,
			     mle->new_master);

			/* If we find a lockres associated with the mle, we've
			 * hit this rare case that messes up our lock ordering.
			 * If so, we need to drop the master lock so that we can
			 * take the lockres lock, meaning that we will have to
			 * restart from the head of list. */
			res = dlm_reset_mleres_owner(dlm, mle);
			if (res)
				/* restart */
				goto top;

			/* This may be the last reference */
			__dlm_put_mle(mle);
		}
	}
	spin_unlock(&dlm->master_lock);
}
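/* Run on the new master once all migrated lock state has arrived:
 * broadcast the ownership change with dlm_do_migrate_request(), assert
 * mastery to every other live node and then to the old master, and
 * finally take ownership and clear the MIGRATING flag locally. */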
int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 u8 old_master)
{
	struct dlm_node_iter iter;
	int ret = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(old_master, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	/* ownership of the lockres is changing.  account for the
	 * mastery reference here since old_master will briefly have
	 * a reference after the migration completes */
	spin_lock(&res->spinlock);
	dlm_lockres_set_refmap_bit(old_master, res);
	spin_unlock(&res->spinlock);

	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
				     dlm->node_num, &iter);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	mlog(0, "doing assert master of %.*s to all except the original node\n",
	     res->lockname.len, res->lockname.name);
	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		/* no longer need to retry.  all living nodes contacted. */
		mlog_errno(ret);
		ret = 0;
	}

	memset(iter.node_map, 0, sizeof(iter.node_map));
	set_bit(old_master, iter.node_map);
	mlog(0, "doing assert master of %.*s back to %u\n",
	     res->lockname.len, res->lockname.name, old_master);
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		mlog(0, "assert master to original master failed "
		     "with %d.\n", ret);
		/* the only nonzero status here would be because of
		 * a dead original node.  we're done. */
		ret = 0;
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	/* re-dirty it on the new master */
	dlm_kick_thread(dlm, res);
	wake_up(&res->wq);
leave:
	return ret;
}
/*
 * LOCKRES AST REFCOUNT
 * this is integral to migration
 */

/* for future intent to call an ast, reserve one ahead of time.
 * this should be called only after waiting on the lockres
 * with dlm_wait_on_lockres, and while still holding the
 * spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		__dlm_print_one_lock_resource(res);
	}
	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);

	atomic_inc(&res->asts_reserved);
}

/*
 * used to drop the reserved ast, either because it went unused,
 * or because the ast/bast was actually called.
 *
 * also, if there is a pending migration on this lockres,
 * and this was the last pending ast on the lockres,
 * atomically set the MIGRATING flag before we drop the lock.
 * this is how we ensure that migration can proceed with no
 * asts in progress.  note that it is ok if the state of the
 * queues is such that a lock should be granted in the future
 * or that a bast should be fired, because the new master will
 * shuffle the lists on this lockres as soon as it is migrated.
 */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
		return;

	if (!res->migration_pending) {
		spin_unlock(&res->spinlock);
		return;
	}

	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
	res->migration_pending = 0;
	res->state |= DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
}
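/* Called during domain teardown, after every other node has been told we
 * are leaving: any mle still hashed at this point can never be resolved
 * normally, so unlink and free them all, waking any waiters. */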
void dlm_force_free_mles(struct dlm_ctxt *dlm)
{
	int i;
	struct hlist_head *bucket;
	struct dlm_master_list_entry *mle;
	struct hlist_node *tmp, *list;

	/*
	 * We notified all other nodes that we are exiting the domain and
	 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
	 * around we force free them and wake any processes that are waiting
	 * on the mles
	 */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);

	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));

	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_master_hash(dlm, i);
		hlist_for_each_safe(list, tmp, bucket) {
			mle = hlist_entry(list, struct dlm_master_list_entry,
					  master_hash_node);
			if (mle->type != DLM_MLE_BLOCK) {
				mlog(ML_ERROR, "bad mle: %p\n", mle);
				dlm_print_one_mle(mle);
			}
			atomic_set(&mle->woken, 1);
			wake_up(&mle->wq);

			__dlm_unlink_mle(dlm, mle);
			__dlm_mle_detach_hb_events(dlm, mle);
			__dlm_put_mle(mle);
		}
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}