/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	if (dlm != mle->dlm)
		return 0;

	if (namelen != mle->mnamelen ||
	    memcmp(name, mle->mname, namelen) != 0)
		return 0;

	return 1;
}
static struct kmem_cache *dlm_lockres_cache;
static struct kmem_cache *dlm_lockname_cache;
static struct kmem_cache *dlm_mle_cache;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			 enum dlm_mle_type type,
			 struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 const char *name,
			 unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to);
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);
int dlm_is_host_down(int errno)
{
	switch (errno) {
		case -EBADF:
		case -ECONNREFUSED:
		case -ENOTCONN:
		case -ECONNRESET:
		case -EPIPE:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ETIMEDOUT:
		case -ECONNABORTED:
		case -ENETDOWN:
		case -ENETUNREACH:
		case -ENETRESET:
		case -ESHUTDOWN:
		case -ENOPROTOOPT:
		case -EINVAL:	/* if returned from our tcp code,
				   this means there is no socket */
			return 1;
	}
	return 0;
}
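/*
 * Usage sketch (how the rest of this file treats the helper above):
 * any -errno coming back from o2net_send_message() is classified with
 * dlm_is_host_down().  A "host down" errno means the target node
 * should be treated as dead and mastery can press on with the
 * remaining nodes; anything else is considered a local bug.
 */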
/*
 * MASTER LIST FUNCTIONS
 */

/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
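/*
 * Rough lifecycle implied by the comment above (illustrative only):
 *
 *	spin_lock(&dlm->spinlock);
 *	dlm_init_mle(mle, ...);              // attaches hb events
 *	... master request / assert exchange ...
 *	dlm_mle_detach_hb_events(dlm, mle);  // "answer" received
 *	dlm_put_mle(mle);                    // final ref frees the mle
 */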
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}

static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}

static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}
/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}
/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}
static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			 enum dlm_mle_type type,
			 struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 const char *name,
			 unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_HLIST_NODE(&mle->master_hash_node);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	BUG_ON(mle->type != DLM_MLE_BLOCK &&
	       mle->type != DLM_MLE_MASTER &&
	       mle->type != DLM_MLE_MIGRATION);

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->mleres = res;
		memcpy(mle->mname, res->lockname.name, res->lockname.len);
		mle->mnamelen = res->lockname.len;
		mle->mnamehash = res->lockname.hash;
	} else {
		BUG_ON(!name);
		mle->mleres = NULL;
		memcpy(mle->mname, name, namelen);
		mle->mnamelen = namelen;
		mle->mnamehash = dlm_lockid_hash(name, namelen);
	}

	atomic_inc(&dlm->mle_tot_count[mle->type]);
	atomic_inc(&dlm->mle_cur_count[mle->type]);

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}
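/*
 * Note: after dlm_init_mle() the entry holds a single kref (from
 * kref_init), its node_map/vote_map mirror dlm->domain_map with the
 * local node's bit cleared, and it is already visible to heartbeat
 * callbacks via __dlm_mle_attach_hb_events() above.
 */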
void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	if (!hlist_unhashed(&mle->master_hash_node))
		hlist_del_init(&mle->master_hash_node);
}
void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
	struct hlist_head *bucket;

	assert_spin_locked(&dlm->master_lock);

	bucket = dlm_master_hash(dlm, mle->mnamehash);
	hlist_add_head(&mle->master_hash_node, bucket);
}
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;
	struct hlist_head *bucket;
	unsigned int hash;

	assert_spin_locked(&dlm->master_lock);

	hash = dlm_lockid_hash(name, namelen);
	bucket = dlm_master_hash(dlm, hash);
	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}
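/*
 * dlm_find_mle() returns with an extra reference on *mle (taken via
 * dlm_get_mle() while dlm->master_lock is held); callers are expected
 * to drop it with dlm_put_mle() when done, as the handlers below do.
 */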
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}
int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}
static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
	     mle->type);

	/* remove from list if not already */
	__dlm_unlink_mle(dlm, mle);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	atomic_dec(&dlm->mle_cur_count[mle->type]);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}
/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
					       DLM_LOCKID_NAME_MAX, 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	dlm_destroy_master_caches();
	return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
	if (dlm_lockname_cache) {
		kmem_cache_destroy(dlm_lockname_cache);
		dlm_lockname_cache = NULL;
	}

	if (dlm_lockres_cache) {
		kmem_cache_destroy(dlm_lockres_cache);
		dlm_lockres_cache = NULL;
	}
}
static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;

	res = container_of(kref, struct dlm_lock_resource, refs);
	dlm = res->dlm;

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	atomic_dec(&dlm->res_cur_count);

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     " We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');

		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	kmem_cache_free(dlm_lockres_cache, res);
}
void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}
static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	INIT_LIST_HEAD(&res->tracking);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;
	res->inflight_locks = 0;
	res->inflight_assert_workers = 0;

	res->dlm = dlm;

	kref_init(&res->refs);

	atomic_inc(&dlm->res_tot_count);
	atomic_inc(&dlm->res_cur_count);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	res->last_used = 0;

	spin_lock(&dlm->spinlock);
	list_add_tail(&res->tracking, &dlm->tracking_list);
	spin_unlock(&dlm->spinlock);

	memset(res->lvb, 0, DLM_LVB_LEN);
	memset(res->refmap, 0, sizeof(res->refmap));
}
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen)
{
	struct dlm_lock_resource *res = NULL;

	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		goto error;

	res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
	if (!res->lockname.name)
		goto error;

	dlm_init_lockres(dlm, res, name, namelen);
	return res;

error:
	if (res)
		kmem_cache_free(dlm_lockres_cache, res);
	return NULL;
}
void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, int bit)
{
	assert_spin_locked(&res->spinlock);

	mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));

	set_bit(bit, res->refmap);
}

void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res, int bit)
{
	assert_spin_locked(&res->spinlock);

	mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));

	clear_bit(bit, res->refmap);
}
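/*
 * Roughly, the refmap is the master's view of which nodes hold a
 * reference on this lock resource: a bit is set when a node learns
 * who the master is (master request / assert responses below) and
 * cleared again by the DEREF message handler.  While refmap bits are
 * set, the master must not purge the lockres.
 */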
static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res)
{
	res->inflight_locks++;

	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
	     res->lockname.len, res->lockname.name, res->inflight_locks,
	     __builtin_return_address(0));
}

void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	__dlm_lockres_grab_inflight_ref(dlm, res);
}

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				   struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);

	BUG_ON(res->inflight_locks == 0);

	res->inflight_locks--;

	mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
	     res->lockname.len, res->lockname.name, res->inflight_locks,
	     __builtin_return_address(0));

	wake_up(&res->wq);
}
void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	res->inflight_assert_workers++;
	mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_assert_workers);
}

static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
					       struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	BUG_ON(res->inflight_assert_workers == 0);
	res->inflight_assert_workers--;
	mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_assert_workers);
}

static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
					     struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_drop_inflight_worker(dlm, res);
	spin_unlock(&res->spinlock);
}
/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here. need to wait around for that node
 * to assert_master (or die).
 */
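/*
 * Rough outline of dlm_get_lock_resource() below:
 *
 *   1. look up the lockres; if found, wait out mastery/purge races,
 *      pin it with an inflight ref and return it.
 *   2. otherwise allocate a lockres plus an mle and retry the lookup.
 *   3. if a BLOCK/MIGRATION mle already exists, back off and retry;
 *      else insert a MASTER mle and the new lockres.
 *   4. send master requests to all nodes in the vote map, then wait
 *      in dlm_wait_for_lock_mastery() until an owner is established.
 */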
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags)
{
	struct dlm_lock_resource *tmpres = NULL, *res = NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;

	BUG_ON(!lockid);

	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
	if (tmpres) {
		spin_unlock(&dlm->spinlock);
		spin_lock(&tmpres->spinlock);

		/*
		 * Right after dlm spinlock was released, dlm_thread could have
		 * purged the lockres. Check if lockres got unhashed. If so
		 * start over.
		 */
		if (hlist_unhashed(&tmpres->hash_node)) {
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Wait on the thread that is mastering the resource */
		if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			__dlm_wait_on_lockres(tmpres);
			BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Wait on the resource purge to complete before continuing */
		if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
			BUG_ON(tmpres->owner == dlm->node_num);
			__dlm_wait_on_lockres_flags(tmpres,
						    DLM_LOCK_RES_DROPPING_REF);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		/* Grab inflight ref to pin the resource */
		dlm_lockres_grab_inflight_ref(dlm, tmpres);

		spin_unlock(&tmpres->spinlock);
		if (res) {
			spin_lock(&dlm->track_lock);
			if (!list_empty(&res->tracking))
				list_del_init(&res->tracking);
			else
				mlog(ML_ERROR, "Resource %.*s not "
				     "on the Tracking list\n",
				     res->lockname.len,
				     res->lockname.name);
			spin_unlock(&dlm->track_lock);
			dlm_lockres_put(res);
		}
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);

	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE!  return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		dlm_lockres_grab_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}

	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		int mig;
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		}
		mig = (mle->type == DLM_MLE_MIGRATION);
		/* if there is a migration in progress, let the migration
		 * finish before continuing.  we can wait for the absence
		 * of the MIGRATION mle: either the migrate finished or
		 * one of the nodes died and the mle was cleaned up.
		 * if there is a BLOCK here, but it already has a master
		 * set, we are too late.  the master does not have a ref
		 * for us in the refmap.  detach the mle and drop it.
		 * either way, go back to the top and start over. */
		if (mig || mle->master != O2NM_MAX_NODES) {
			BUG_ON(mig && mle->master == dlm->node_num);
			/* we arrived too late.  the master does not
			 * have a ref for us. retry. */
			mlog(0, "%s:%.*s: late on %s\n",
			     dlm->name, namelen, lockid,
			     mig ?  "MIGRATION" : "BLOCK");
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			if (!mig)
				dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			/* this is lame, but we can't wait on either
			 * the mle or lockres waitqueue here */
			msleep(100);
			goto lookup;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);
	}

	/* still holding the dlm spinlock, check the recovery map
	 * to see if there are any nodes that still need to be
	 * considered.  these will not appear in the mle nodemap
	 * but they might own this lockres.  wait on them. */
	bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
	if (bit < O2NM_MAX_NODES) {
		mlog(0, "%s: res %.*s, At least one node (%d) "
		     "to recover before lock mastery can begin\n",
		     dlm->name, namelen, (char *)lockid, bit);
		wait_on_recovery = 1;
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);

	/* since this lockres is new it doesn't require the spinlock */
	__dlm_lockres_grab_inflight_ref(dlm, res);

	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

redo_request:
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(0, "%s: Recovery map is not empty, but must "
			     "master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				     "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(0, "%s: res %.*s, At least one node (%d) "
			     "to recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);

		if (wait_on_recovery)
			dlm_wait_for_node_recovery(dlm, bit, 10000);
	}

	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s: res %.*s, Requests only up to %u but "
			     "master is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		wait_on_recovery = 1;
		mlog(0, "%s: res %.*s, Node map changed, redo the master "
		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
			     "dlm_wait_for_lock_mastery, blocked = %d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			dlm_print_one_mle(mle);
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
	     res->lockname.name, res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}
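/*
 * On successful return the lockres is pinned: the caller holds both a
 * kref (from the lookup or allocation) and an inflight reference, so
 * dlm_thread cannot purge it while the caller's lock request is in
 * flight.  (Summary of the function above.)
 */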
#define DLM_MASTERY_TIMEOUT_MS   5000
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		if (res->owner != dlm->node_num) {
			ret = dlm_do_master_request(res, mle, res->owner);
			if (ret < 0) {
				/* give recovery a chance to run */
				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
				msleep(500);
				goto recheck;
			}
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			      sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	} else {
		if (!voting_done) {
			mlog(0, "map not changed and voting not done "
			     "for %s:%.*s\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		}
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				/* ref was grabbed in get_lock_resource
				 * will be dropped in dlmlock_master */
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		/*
		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			atomic_read(&mle->mle_refs.refcount),
			res->lockname.len, res->lockname.name);
		*/
		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
			     res->lockname.len, res->lockname.name);
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;   /* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
		ret = 0;
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	/* mastery reference obtained either during
	 * assert_master_handler or in get_lock_resource */
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}
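/*
 * In short: mastery is decided by voting.  Every live node answers
 * YES/NO/MAYBE to the master request; once all responses are in and
 * nothing is blocked, the lowest node number set in maybe_map wins
 * and asserts itself across the cluster.  Any change in the node map
 * restarts the vote via dlm_restart_lock_mastery() below.
 */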
struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}
static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
						       O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node == lowest) {
					mlog(0, "expected master %u died"
					    " while this node was blocked "
					    "waiting on it!\n", node);
					lowest = find_next_bit(mle->maybe_map,
							O2NM_MAX_NODES,
							lowest+1);
					if (lowest < O2NM_MAX_NODES) {
						mlog(0, "%s:%.*s:still "
						     "blocked. waiting on %u "
						     "now\n", dlm->name,
						     res->lockname.len,
						     res->lockname.name,
						     lowest);
					} else {
						/* mle is an MLE_BLOCK, but
						 * there is now nothing left to
						 * block on.  we need to return
						 * all the way back out and try
						 * again with an MLE_MASTER.
						 * dlm_do_local_recovery_cleanup
						 * has already run, so the mle
						 * refcount is ok */
						mlog(0, "%s:%.*s: no "
						     "longer blocking. try to "
						     "master this here\n",
						     dlm->name,
						     res->lockname.len,
						     res->lockname.name);
						mle->type = DLM_MLE_MASTER;
						mle->mleres = res;
					}
				}
			}

			/* now blank out everything, as if we had never
			 * contacted anyone */
			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
			memset(mle->response_map, 0, sizeof(mle->response_map));
			/* reset the vote_map to the current node_map */
			memcpy(mle->vote_map, mle->node_map,
			       sizeof(mle->node_map));
			/* put myself into the maybe map */
			if (mle->type != DLM_MLE_BLOCK)
				set_bit(dlm->node_num, mle->maybe_map);
		}
		ret = -EAGAIN;
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}
/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 */
static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response=0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	request.namelen = (u8)mle->mnamelen;
	memcpy(request.name, mle->mname, request.namelen);

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0)  {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}

	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
	switch (response) {
		case DLM_MASTER_RESP_YES:
			set_bit(to, mle->response_map);
			mlog(0, "node %u is the master, response=YES\n", to);
			mlog(0, "%s:%.*s: master node %u now knows I have a "
			     "reference\n", dlm->name, res->lockname.len,
			     res->lockname.name, to);
			mle->master = to;
			break;
		case DLM_MASTER_RESP_NO:
			mlog(0, "node %u not master, response=NO\n", to);
			set_bit(to, mle->response_map);
			break;
		case DLM_MASTER_RESP_MAYBE:
			mlog(0, "node %u not master, response=MAYBE\n", to);
			set_bit(to, mle->response_map);
			set_bit(to, mle->maybe_map);
			break;
		case DLM_MASTER_RESP_ERROR:
			mlog(0, "node %u hit an error, resending\n", to);
			resend = 1;
			response = 0;
			break;
		default:
			mlog(ML_ERROR, "bad response! %u\n", response);
			BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}
1411 /* this is also totally crude */
1421 * locks that can be taken here:
1427 * if possible, TRIM THIS DOWN!!!
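/*
 * The nesting order exercised below is, outermost to innermost:
 *	dlm->spinlock > res->spinlock > dlm->master_lock > mle->spinlock
 * which matches the ordering used in the rest of this file.
 */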
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);

		/*
		 * Right after dlm spinlock was released, dlm_thread could have
		 * purged the lockres. Check if lockres got unhashed. If so
		 * start over.
		 */
		if (hlist_unhashed(&res->hash_node)) {
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);
			goto way_up_top;
		}

		if (res->state & (DLM_LOCK_RES_RECOVERING|
				  DLM_LOCK_RES_MIGRATING)) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered/migrated\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
			spin_unlock(&res->spinlock);
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
				dlm_lockres_set_refmap_bit(dlm, res,
							   request->node_idx);
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}

	/*
	 * lockres doesn't exist on this node
	 * if there is an MLE_BLOCK, return NO
	 * if there is an MLE_MASTER, return MAYBE
	 * otherwise, add an MLE_BLOCK, return NO
	 */
	spin_lock(&dlm->master_lock);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
	if (!found) {
		/* this lockid has never been seen on this node yet */
		// mlog(0, "no mle found\n");
		if (!mle) {
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
			if (!mle) {
				response = DLM_MASTER_RESP_ERROR;
				mlog_errno(-ENOMEM);
				goto send_response;
			}
			goto way_up_top;
		}

		// mlog(0, "this is second time thru, already allocated, "
		// "add the block.\n");
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);
		response = DLM_MASTER_RESP_NO;
	} else {
		// mlog(0, "mle was found\n");
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->master == dlm->node_num) {
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			BUG();
		}
		if (tmpmle->type == DLM_MLE_BLOCK)
			response = DLM_MASTER_RESP_NO;
		else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "migration mle was found (%u->%u)\n",
			     tmpmle->master, tmpmle->new_master);
			/* real master can respond on its own */
			response = DLM_MASTER_RESP_NO;
		} else
			response = DLM_MASTER_RESP_MAYBE;
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (found) {
		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
	}
send_response:
	/*
	 * __dlm_lookup_lockres() grabbed a reference to this lockres.
	 * The reference is released by dlm_assert_master_worker() under
	 * the call to dlm_dispatch_assert_master().  If
	 * dlm_assert_master_worker() isn't called, we drop it here.
	 */
	if (dispatch_assert) {
		if (response != DLM_MASTER_RESP_YES)
			mlog(ML_ERROR, "invalid response %d\n", response);
		if (!res) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
			BUG();
		}
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
		     dlm->node_num, res->lockname.len, res->lockname.name);
		spin_lock(&res->spinlock);
		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
						 DLM_ASSERT_MASTER_MLE_CLEANUP);
		if (ret < 0) {
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
			response = DLM_MASTER_RESP_ERROR;
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);
		} else {
			__dlm_lockres_grab_inflight_worker(dlm, res);
			spin_unlock(&res->spinlock);
		}
	} else {
		if (res)
			dlm_lockres_put(res);
	}

	dlm_put(dlm);
	return response;
}
/*
 * DLM_ASSERT_MASTER_MSG
 */

/*
 * NOTE: this can be used for debugging
 * can periodically run all locks owned by this node
 * and re-assert across the cluster...
 */
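/*
 * As implemented below, the reply to an assert is a small bitmask
 * rather than a plain errno: DLM_ASSERT_RESPONSE_REASSERT asks the
 * master to assert again (stray mles were created on other nodes),
 * and DLM_ASSERT_RESPONSE_MASTERY_REF tells the master to set the
 * sender's bit in the refmap.  A negative reply is fatal.
 */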
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags)
{
	struct dlm_assert_master assert;
	int to, tmpret;
	struct dlm_node_iter iter;
	int ret = 0;
	int reassert;
	const char *lockname = res->lockname.name;
	unsigned int namelen = res->lockname.len;

	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	spin_lock(&res->spinlock);
	res->state |= DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);

again:
	reassert = 0;

	/* note that if this nodemap is empty, it returns 0 */
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		int r = 0;
		struct dlm_master_list_entry *mle = NULL;

		mlog(0, "sending assert master to %d (%.*s)\n", to,
		     namelen, lockname);
		memset(&assert, 0, sizeof(assert));
		assert.node_idx = dlm->node_num;
		assert.namelen = namelen;
		memcpy(assert.name, lockname, namelen);
		assert.flags = cpu_to_be32(flags);

		tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
					    &assert, sizeof(assert), to, &r);
		if (tmpret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", tmpret,
			     DLM_ASSERT_MASTER_MSG, dlm->key, to);
			if (!dlm_is_host_down(tmpret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
				BUG();
			}
			/* a node died.  finish out the rest of the nodes. */
			mlog(0, "link to %d went down!\n", to);
			/* any nonzero status return will do */
			ret = tmpret;
			r = 0;
		} else if (r < 0) {
			/* ok, something horribly messed.  kill thyself. */
			mlog(ML_ERROR,"during assert master of %.*s to %u, "
			     "got %d.\n", namelen, lockname, to, r);
			spin_lock(&dlm->spinlock);
			spin_lock(&dlm->master_lock);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
					 namelen)) {
				dlm_print_one_mle(mle);
				__dlm_put_mle(mle);
			}
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);
			BUG();
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT &&
		    !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
			mlog(ML_ERROR, "%.*s: very strange, "
			     "master MLE but no lockres on %u\n",
			     namelen, lockname, to);
		}

		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
			mlog(0, "%.*s: node %u create mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
			reassert = 1;
		}
		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
			mlog(0, "%.*s: node %u has a reference to this "
			     "lockres, set the bit in the refmap\n",
			     namelen, lockname, to);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, to);
			spin_unlock(&res->spinlock);
		}
	}

	if (reassert)
		goto again;

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	return ret;
}
/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen, hash;
	u32 flags;
	int master_request = 0, have_lockres_ref = 0;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = assert->name;
	namelen = assert->namelen;
	hash = dlm_lockid_hash(name, namelen);
	flags = be32_to_cpu(assert->flags);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}

	spin_lock(&dlm->spinlock);

	if (flags)
		mlog(0, "assert_master with flags: %u\n", flags);

	/* find the MLE */
	spin_lock(&dlm->master_lock);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		/* not an error, could be master just re-asserting */
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
		     namelen, name);
	} else {
		int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* not necessarily an error, though less likely.
			 * could be master just re-asserting. */
			mlog(0, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
			     namelen, name);
		} else if (bit != assert->node_idx) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
			} else {
				/* with the fix for bug 569, a higher node
				 * number winning the mastery will respond
				 * YES to mastery requests, but this node
				 * had no way of knowing.  let it pass. */
				mlog(0, "%u is the lowest node, "
				     "%u is asserting. (%.*s)  %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				     assert->node_idx);
			}
		}
		if (mle->type == DLM_MLE_MIGRATION) {
			if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
			} else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
				     assert->node_idx);
				__dlm_put_mle(mle);
				spin_unlock(&dlm->master_lock);
				spin_unlock(&dlm->spinlock);
				goto done;
			}
		}
	}
	spin_unlock(&dlm->master_lock);

	/* ok everything checks out with the MLE
	 * now check to see if there is a lockres */
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING)  {
			mlog(ML_ERROR, "%u asserting but %.*s is "
			     "RECOVERING!\n", assert->node_idx, namelen, name);
			goto kill;
		}
		if (!mle) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
			    res->owner != assert->node_idx) {
				mlog(ML_ERROR, "DIE! Mastery assert from %u, "
				     "but current owner is %u! (%.*s)\n",
				     assert->node_idx, res->owner, namelen,
				     name);
				__dlm_print_one_lock_resource(res);
				BUG();
			}
		} else if (mle->type != DLM_MLE_MIGRATION) {
			if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
				/* owner is just re-asserting */
				if (res->owner == assert->node_idx) {
					mlog(0, "owner %u re-asserting on "
					     "lock %.*s\n", assert->node_idx,
					     namelen, name);
					goto ok;
				}
				mlog(ML_ERROR, "got assert_master from "
				     "node %u, but %u is the owner! "
				     "(%.*s)\n", assert->node_idx,
				     res->owner, namelen, name);
				goto kill;
			}
			if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
				mlog(ML_ERROR, "got assert from %u, but lock "
				     "with no owner should be "
				     "in-progress! (%.*s)\n",
				     assert->node_idx,
				     namelen, name);
				goto kill;
			}
		} else /* mle->type == DLM_MLE_MIGRATION */ {
			/* should only be getting an assert from new master */
			if (assert->node_idx != mle->new_master) {
				mlog(ML_ERROR, "got assert from %u, but "
				     "new master is %u, and old master "
				     "was %u (%.*s)\n",
				     assert->node_idx, mle->new_master,
				     mle->master, namelen, name);
				goto kill;
			}
		}
ok:
		spin_unlock(&res->spinlock);
	}

	// mlog(0, "woo!  got an assert_master from node %u!\n",
	// 	     assert->node_idx);
	if (mle) {
		int extra_ref = 0;
		int nn = -1;
		int rr, err = 0;

		spin_lock(&mle->spinlock);
		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
			extra_ref = 1;
		else {
			/* MASTER mle: if any bits set in the response map
			 * then the calling node needs to re-assert to clear
			 * up nodes that this node contacted */
			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
						    nn+1)) < O2NM_MAX_NODES) {
				if (nn != dlm->node_num && nn != assert->node_idx) {
					master_request = 1;
					break;
				}
			}
		}
		mle->master = assert->node_idx;
		atomic_set(&mle->woken, 1);
		wake_up(&mle->wq);
		spin_unlock(&mle->spinlock);

		if (res) {
			int wake = 0;
			spin_lock(&res->spinlock);
			if (mle->type == DLM_MLE_MIGRATION) {
				mlog(0, "finishing off migration of lockres %.*s, "
				     "from %u to %u\n",
				     res->lockname.len, res->lockname.name,
				     dlm->node_num, mle->new_master);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				dlm_change_lockres_owner(dlm, res, mle->new_master);
				BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
			} else
				dlm_change_lockres_owner(dlm, res, mle->master);
			spin_unlock(&res->spinlock);
			have_lockres_ref = 1;
			if (wake)
				wake_up(&res->wq);
		}

		/* master is known, detach if not already detached.
		 * ensures that only one assert_master call will happen
		 * on this mle. */
		spin_lock(&dlm->master_lock);

		rr = atomic_read(&mle->mle_refs.refcount);
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
				err = 1;
			else if (!extra_ref && rr < 2)
				err = 1;
		} else {
			if (extra_ref && rr < 2)
				err = 1;
			else if (!extra_ref && rr < 1)
				err = 1;
		}
		if (err) {
			mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
			     assert->node_idx, rr, extra_ref, mle->inuse);
			dlm_print_one_mle(mle);
		}
		__dlm_unlink_mle(dlm, mle);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
		if (extra_ref) {
			/* the assert master message now balances the extra
			 * ref given by the master / migration request message.
			 * if this is the last put, it will be removed
			 * from the list. */
			__dlm_put_mle(mle);
		}
		spin_unlock(&dlm->master_lock);
	} else if (res) {
		if (res->owner != assert->node_idx) {
			mlog(0, "assert_master from %u, but current "
			     "owner is %u (%.*s), no mle\n", assert->node_idx,
			     res->owner, namelen, name);
		}
	}
	spin_unlock(&dlm->spinlock);

done:
	ret = 0;
	if (res) {
		spin_lock(&res->spinlock);
		res->state |= DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		*ret_data = (void *)res;
	}
	dlm_put(dlm);
	if (master_request) {
		mlog(0, "need to tell master to reassert\n");
		/* positive. negative would shoot down the node. */
		ret |= DLM_ASSERT_RESPONSE_REASSERT;
		if (!have_lockres_ref) {
			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
			     "mle present here for %s:%.*s, but no lockres!\n",
			     assert->node_idx, dlm->name, namelen, name);
		}
	}
	if (have_lockres_ref) {
		/* let the master know we have a reference to the lockres */
		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
		     dlm->name, namelen, name, assert->node_idx);
	}
	return ret;

kill:
	/* kill the caller! */
	mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
	     "and killing the other node now!  This node is OK and can continue.\n");
	__dlm_print_one_lock_resource(res);
	spin_unlock(&res->spinlock);
	spin_lock(&dlm->master_lock);
	if (mle)
		__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
	*ret_data = (void *)res;
	dlm_put(dlm);
	return -EINVAL;
}
void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
{
	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;

	if (ret_data) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
		dlm_lockres_put(res);
	}
	return;
}
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher, u8 request_from, u32 flags)
{
	struct dlm_work_item *item;
	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return -ENOMEM;

	/* queue up work for dlm_assert_master_worker */
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res; /* already have a ref */
	/* can optionally ignore node numbers higher than this node */
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	item->u.am.flags = flags;

	if (ignore_higher)
		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
		     res->lockname.name);

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return 0;
}
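/*
 * The assert is deferred to dlm->dlm_worker because the caller may be
 * holding spinlocks (hence the GFP_ATOMIC allocation above);
 * dlm_assert_master_worker() then performs the blocking network I/O
 * from process context.
 */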
static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	int ret = 0;
	struct dlm_lock_resource *res;
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int ignore_higher;
	int bit;
	u8 request_from;
	u32 flags;

	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;

	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);

	clear_bit(dlm->node_num, nodemap);
	if (ignore_higher) {
		/* if this is just to clear up mles for nodes below
		 * this node, do not send the message to the original
		 * caller or any node number higher than this */
		clear_bit(request_from, nodemap);
		bit = dlm->node_num;
		while (1) {
			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
					    bit+1);
			if (bit >= O2NM_MAX_NODES)
				break;
			clear_bit(bit, nodemap);
		}
	}

	/*
	 * If we're migrating this lock to someone else, we are no
	 * longer allowed to assert our own mastery.  OTOH, we need to
	 * prevent migration from starting while we're still asserting
	 * our dominance.  The reserved ast delays migration.
	 */
	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "Someone asked us to assert mastery, but we're "
		     "in the middle of migration.  Skipping assert, "
		     "the new master will handle that.\n");
		spin_unlock(&res->spinlock);
		goto put;
	} else
		__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	mlog(0, "worker about to master %.*s here, this=%u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num);
	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
	if (ret < 0) {
		/* no need to restart, we are done */
		if (!dlm_is_host_down(ret))
			mlog_errno(ret);
	}

	/* Ok, we've asserted ourselves.  Let's let migration start. */
	dlm_lockres_release_ast(dlm, res);

put:
	dlm_lockres_drop_inflight_worker(dlm, res);

	dlm_lockres_put(res);

	mlog(0, "finished with dlm_assert_master_worker\n");
}
/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
 * We cannot wait for node recovery to complete to begin mastering this
 * lockres because this lockres is used to kick off recovery! ;-)
 * So, do a pre-check on all living nodes to see if any of those nodes
 * think that $RECOVERY is currently mastered by a dead node.  If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again.  All $RECOVERY lock resources
 * mastered by dead nodes are purged when the heartbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node.  */
2203 static int dlm_pre_master_reco_lockres(struct dlm_ctxt
*dlm
,
2204 struct dlm_lock_resource
*res
)
2206 struct dlm_node_iter iter
;
2209 u8 master
= DLM_LOCK_RES_OWNER_UNKNOWN
;
2211 spin_lock(&dlm
->spinlock
);
2212 dlm_node_iter_init(dlm
->domain_map
, &iter
);
2213 spin_unlock(&dlm
->spinlock
);
2215 while ((nodenum
= dlm_node_iter_next(&iter
)) >= 0) {
2216 /* do not send to self */
2217 if (nodenum
== dlm
->node_num
)
2219 ret
= dlm_do_master_requery(dlm
, res
, nodenum
, &master
);
2222 if (!dlm_is_host_down(ret
))
2224 /* host is down, so answer for that node would be
2225 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
2229 if (master
!= DLM_LOCK_RES_OWNER_UNKNOWN
) {
2230 /* check to see if this master is in the recovery map */
2231 spin_lock(&dlm
->spinlock
);
2232 if (test_bit(master
, dlm
->recovery_map
)) {
2233 mlog(ML_NOTICE
, "%s: node %u has not seen "
2234 "node %u go down yet, and thinks the "
2235 "dead node is mastering the recovery "
2236 "lock. must wait.\n", dlm
->name
,
2240 spin_unlock(&dlm
->spinlock
);
2241 mlog(0, "%s: reco lock master is %u\n", dlm
->name
,
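/*
 * Illustrative sketch only: a caller that must master $RECOVERY is
 * expected to poll the pre-check above until no live node still
 * believes a dead node owns the lock; the 100ms interval here is
 * hypothetical:
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN)
 *		msleep(100);
 */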
/*
 * DLM_DEREF_LOCKRES_MSG
 */

int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	struct dlm_deref_lockres deref;
	int ret = 0, r;
	const char *lockname;
	unsigned int namelen;

	lockname = res->lockname.name;
	namelen = res->lockname.len;
	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	memset(&deref, 0, sizeof(deref));
	deref.node_idx = dlm->node_num;
	deref.namelen = namelen;
	memcpy(deref.name, lockname, namelen);

	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
				 &deref, sizeof(deref), res->owner, &r);
	if (ret < 0)
		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
		     dlm->name, namelen, lockname, ret, res->owner);
	else if (r < 0) {
		/* BAD.  other node says I did not have a ref. */
		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
		     dlm->name, namelen, lockname, res->owner, r);
		dlm_print_one_lock_resource(res);
		BUG();
	} else
		ret = r;

	return ret;
}
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen;
	int ret = -EINVAL;
	u8 node;
	unsigned int hash;
	struct dlm_work_item *item;
	int cleared = 0;
	int dispatch = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = deref->name;
	namelen = deref->namelen;
	node = deref->node_idx;

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}
	if (deref->node_idx >= O2NM_MAX_NODES) {
		mlog(ML_ERROR, "Invalid node number: %u\n", node);
		goto done;
	}

	hash = dlm_lockid_hash(name, namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
		     dlm->name, namelen, name);
		goto done;
	}
	spin_unlock(&dlm->spinlock);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_SETREF_INPROG)
		dispatch = 1;
	else {
		BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
		if (test_bit(node, res->refmap)) {
			dlm_lockres_clear_refmap_bit(dlm, res, node);
			cleared = 1;
		}
	}
	spin_unlock(&res->spinlock);

	if (!dispatch) {
		if (cleared)
			dlm_lockres_calc_usage(dlm, res);
		else {
			mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
			     "but it is already dropped!\n", dlm->name,
			     res->lockname.len, res->lockname.name, node);
			dlm_print_one_lock_resource(res);
		}
		ret = DLM_DEREF_RESPONSE_DONE;
		goto done;
	}

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto done;
	}

	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
	item->u.dl.deref_res = res;
	item->u.dl.deref_node = node;

	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);

	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
	return DLM_DEREF_RESPONSE_INPROG;

done:
	if (res)
		dlm_lockres_put(res);
	dlm_put(dlm);

	return ret;
}
int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_deref_lockres_done *deref
			= (struct dlm_deref_lockres_done *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	char *name;
	unsigned int namelen;
	int ret = -EINVAL;
	u8 node;
	unsigned int hash;

	if (!dlm_grab(dlm))
		return 0;

	name = deref->name;
	namelen = deref->namelen;
	node = deref->node_idx;

	if (namelen > DLM_LOCKID_NAME_MAX) {
		mlog(ML_ERROR, "Invalid name length!");
		goto done;
	}
	if (deref->node_idx >= O2NM_MAX_NODES) {
		mlog(ML_ERROR, "Invalid node number: %u\n", node);
		goto done;
	}

	hash = dlm_lockid_hash(name, namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
		     dlm->name, namelen, name);
		goto done;
	}

	spin_lock(&res->spinlock);
	if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) {
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done "
		     "but it is already derefed!\n", dlm->name,
		     res->lockname.len, res->lockname.name, node);
		ret = 0;
		goto done;
	}

	__dlm_do_purge_lockres(dlm, res);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	spin_unlock(&dlm->spinlock);

	ret = 0;
done:
	if (res)
		dlm_lockres_put(res);
	dlm_put(dlm);
	return ret;
}
static void dlm_drop_lockres_ref_done(struct dlm_ctxt *dlm,
		struct dlm_lock_resource *res, u8 node)
{
	struct dlm_deref_lockres_done deref;
	int ret = 0, r;
	const char *lockname;
	unsigned int namelen;

	lockname = res->lockname.name;
	namelen = res->lockname.len;
	BUG_ON(namelen > O2NM_MAX_NAME_LEN);

	memset(&deref, 0, sizeof(deref));
	deref.node_idx = dlm->node_num;
	deref.namelen = namelen;
	memcpy(deref.name, lockname, namelen);

	ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key,
				 &deref, sizeof(deref), node, &r);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE "
		     "to node %u\n", dlm->name, namelen,
		     lockname, ret, node);
	} else if (r < 0) {
		/* ignore the error */
		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
		     dlm->name, namelen, lockname, node, r);
		dlm_print_one_lock_resource(res);
	}
}
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_lock_resource *res;
	u8 node;
	u8 cleared = 0;

	dlm = item->dlm;
	res = item->u.dl.deref_res;
	node = item->u.dl.deref_node;

	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
	if (test_bit(node, res->refmap)) {
		dlm_lockres_clear_refmap_bit(dlm, res, node);
		cleared = 1;
	}
	spin_unlock(&res->spinlock);

	dlm_drop_lockres_ref_done(dlm, res, node);

	if (cleared) {
		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
		     dlm->name, res->lockname.len, res->lockname.name, node);
		dlm_lockres_calc_usage(dlm, res);
	} else {
		mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
		     "but it is already dropped!\n", dlm->name,
		     res->lockname.len, res->lockname.name, node);
		dlm_print_one_lock_resource(res);
	}

	dlm_lockres_put(res);
}
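/*
 * Sketch of the two-stage deref handshake implemented above
 * (illustrative only; arrows are o2net messages):
 *
 *	last-ref node                       master
 *	-------------                       ------
 *	dlm_drop_lockres_ref()  --DEREF-->  dlm_deref_lockres_handler()
 *	                                    SETREF still in progress, so
 *	                       <--INPROG--  queue dlm_deref_lockres_worker()
 *	                                    worker clears the refmap bit and
 *	                  <--DEREF DONE--   calls dlm_drop_lockres_ref_done()
 *	dlm_deref_lockres_done_handler()
 *	purges the now-unused lockres
 */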
/*
 * A migrateable resource is one that is:
 * 1. locally mastered, and,
 * 2. zero local locks, and,
 * 3. one or more non-local locks, or, one or more references
 * Returns 1 if yes, 0 if not.
 */
static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res)
{
	enum dlm_lockres_list idx;
	int nonlocal = 0, node_ref;
	struct list_head *queue;
	struct dlm_lock *lock;
	u64 cookie;

	assert_spin_locked(&res->spinlock);

	/* delay migration when the lockres is in MIGRATING state */
	if (res->state & DLM_LOCK_RES_MIGRATING)
		return 0;

	/* delay migration when the lockres is in RECOVERING state */
	if (res->state & (DLM_LOCK_RES_RECOVERING|
			DLM_LOCK_RES_RECOVERY_WAITING))
		return 0;

	if (res->owner != dlm->node_num)
		return 0;

	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
		queue = dlm_list_idx_to_ptr(res, idx);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node != dlm->node_num) {
				nonlocal++;
				continue;
			}
			cookie = be64_to_cpu(lock->ml.cookie);
			mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
			     "%s list\n", dlm->name, res->lockname.len,
			     res->lockname.name,
			     dlm_get_lock_cookie_node(cookie),
			     dlm_get_lock_cookie_seq(cookie),
			     dlm_list_in_text(idx));
			return 0;
		}
	}

	if (!nonlocal) {
		node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
		if (node_ref >= O2NM_MAX_NODES)
			return 0;
	}

	mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
	     res->lockname.name);

	return 1;
}
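/*
 * Illustrative only: the predicate above must run under the lockres
 * spinlock, paired with target selection, e.g.
 *
 *	spin_lock(&res->spinlock);
 *	if (dlm_is_lockres_migrateable(dlm, res))
 *		target = dlm_pick_migration_target(dlm, res);
 *	spin_unlock(&res->spinlock);
 *
 * which is exactly the pattern dlm_empty_lockres() uses below.
 */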
/*
 * DLM_MIGRATE_LOCKRES
 */

static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 target)
{
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *oldmle = NULL;
	struct dlm_migratable_lockres *mres = NULL;
	int ret = 0;
	const char *name;
	unsigned int namelen;
	int mle_added = 0;
	int wake = 0;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(target == O2NM_MAX_NODES);

	name = res->lockname.name;
	namelen = res->lockname.len;

	mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
	     target);

	/* preallocate up front. if this fails, abort */
	ret = -ENOMEM;
	mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
	if (!mres) {
		mlog_errno(ret);
		goto leave;
	}

	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
	if (!mle) {
		mlog_errno(ret);
		goto leave;
	}
	ret = 0;

	/*
	 * clear any existing master requests and
	 * add the migration mle to the list
	 */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);
	/* get an extra reference on the mle.
	 * otherwise the assert_master from the new
	 * master will destroy this.
	 */
	if (ret != -EEXIST)
		dlm_get_mle_inuse(mle);

	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (ret == -EEXIST) {
		mlog(0, "another process is already migrating it\n");
		goto fail;
	}
	mle_added = 1;

	/*
	 * set the MIGRATING flag and flush asts
	 * if we fail after this we need to re-dirty the lockres
	 */
	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
		mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
		     "the target went down.\n", res->lockname.len,
		     res->lockname.name, target);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		ret = -EINVAL;
	}

fail:
	if (ret != -EEXIST && oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (ret < 0) {
		if (mle_added) {
			dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			dlm_put_mle_inuse(mle);
		} else if (mle) {
			kmem_cache_free(dlm_mle_cache, mle);
			mle = NULL;
		}
		goto leave;
	}

	/*
	 * at this point, we have a migration target, an mle
	 * in the master list, and the MIGRATING flag set on
	 * the lockres
	 */

	/* now that remote nodes are spinning on the MIGRATING flag,
	 * ensure that all assert_master work is flushed. */
	flush_workqueue(dlm->dlm_worker);

	/* notify new node and send all lock state */
	/* call send_one_lockres with migration flag.
	 * this serves as notice to the target node that a
	 * migration is starting. */
	ret = dlm_send_one_lockres(dlm, res, mres, target,
				   DLM_MRES_MIGRATION);

	if (ret < 0) {
		mlog(0, "migration to node %u failed with %d\n",
		     target, ret);
		/* migration failed, detach and clean up mle */
		dlm_mle_detach_hb_events(dlm, mle);
		dlm_put_mle(mle);
		dlm_put_mle_inuse(mle);
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_MIGRATING;
		wake = 1;
		spin_unlock(&res->spinlock);
		if (dlm_is_host_down(ret))
			dlm_wait_for_node_death(dlm, target,
						DLM_NODE_DEATH_WAIT_MAX);
		goto leave;
	}

	/* at this point, the target sends a message to all nodes,
	 * (using dlm_do_migrate_request).  this node is skipped since
	 * we had to put an mle in the list to begin the process.  this
	 * node now waits for target to do an assert master.  this node
	 * will be the last one notified, ensuring that the migration
	 * is complete everywhere.  if the target dies while this is
	 * going on, some nodes could potentially see the target as the
	 * master, so it is important that my recovery finds the migration
	 * mle and sets the master to UNKNOWN. */

	/* wait for new node to assert master */
	while (1) {
		ret = wait_event_interruptible_timeout(mle->wq,
					(atomic_read(&mle->woken) == 1),
					msecs_to_jiffies(5000));

		if (ret >= 0) {
			if (atomic_read(&mle->woken) == 1 ||
			    res->owner == target)
				break;

			mlog(0, "%s:%.*s: timed out during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
			/* avoid hang during shutdown when migrating lockres
			 * to a node which also goes down */
			if (dlm_is_node_dead(dlm, target)) {
				mlog(0, "%s:%.*s: expected migration "
				     "target %u is no longer up, restarting\n",
				     dlm->name, res->lockname.len,
				     res->lockname.name, target);
				ret = -EINVAL;
				/* migration failed, detach and clean up mle */
				dlm_mle_detach_hb_events(dlm, mle);
				dlm_put_mle(mle);
				dlm_put_mle_inuse(mle);
				spin_lock(&res->spinlock);
				res->state &= ~DLM_LOCK_RES_MIGRATING;
				wake = 1;
				spin_unlock(&res->spinlock);
				goto leave;
			}
		} else
			mlog(0, "%s:%.*s: caught signal during migration\n",
			     dlm->name, res->lockname.len, res->lockname.name);
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, target);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	dlm_remove_nonlocal_locks(dlm, res);
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle_inuse(mle);
	ret = 0;

	dlm_lockres_calc_usage(dlm, res);

leave:
	/* re-dirty the lockres if we failed */
	if (ret < 0)
		dlm_kick_thread(dlm, res);

	/* wake up waiters if the MIGRATING flag got set
	 * but migration failed */
	if (wake)
		wake_up(&res->wq);

	if (mres)
		free_page((unsigned long)mres);

	dlm_put(dlm);

	mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
	     name, target, ret);
	return ret;
}
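/*
 * Timeline sketch of the migration above (illustrative only):
 *
 *	old master                      target               other nodes
 *	----------                      ------               -----------
 *	add MIGRATION mle,
 *	set MIGRATING, flush asts
 *	dlm_send_one_lockres() ------>  receive lock state
 *	                                dlm_do_migrate_request() ---> all
 *	                                assert_master --------------> all
 *	<---- assert_master (last)
 *	set owner = target,
 *	dlm_remove_nonlocal_locks()
 */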
#define DLM_MIGRATION_RETRY_MS  100

/*
 * Should be called only after beginning the domain leave process.
 * There should not be any remaining locks on nonlocal lock resources,
 * and there should be no local locks left on locally mastered resources.
 *
 * Called with the dlm spinlock held, may drop it to do migration, but
 * will re-acquire before exit.
 *
 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
 */
int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	int ret;
	int lock_dropped = 0;
	u8 target = O2NM_MAX_NODES;

	assert_spin_locked(&dlm->spinlock);

	spin_lock(&res->spinlock);
	if (dlm_is_lockres_migrateable(dlm, res))
		target = dlm_pick_migration_target(dlm, res);
	spin_unlock(&res->spinlock);

	if (target == O2NM_MAX_NODES)
		goto leave;

	/* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
	spin_unlock(&dlm->spinlock);
	lock_dropped = 1;
	ret = dlm_migrate_lockres(dlm, res, target);
	if (ret)
		mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     target, ret);
	spin_lock(&dlm->spinlock);
leave:
	return lock_dropped;
}
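/*
 * Illustrative caller sketch (hypothetical; error handling elided):
 * the domain-leave path scans the lockres hash under dlm->spinlock
 * and must restart its scan whenever this function reports that the
 * spinlock was dropped:
 *
 *	spin_lock(&dlm->spinlock);
 *	...
 *	if (dlm_empty_lockres(dlm, res))
 *		goto restart_scan;	(hash may have changed meanwhile)
 *	...
 *	spin_unlock(&dlm->spinlock);
 */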
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	int ret;

	spin_lock(&dlm->ast_lock);
	spin_lock(&lock->spinlock);
	ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
	spin_unlock(&lock->spinlock);
	spin_unlock(&dlm->ast_lock);
	return ret;
}
static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     u8 mig_target)
{
	int can_proceed;

	spin_lock(&res->spinlock);
	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
	spin_unlock(&res->spinlock);

	/* target has died, so make the caller break out of the
	 * wait_event, but caller must recheck the domain_map */
	spin_lock(&dlm->spinlock);
	if (!test_bit(mig_target, dlm->domain_map))
		can_proceed = 1;
	spin_unlock(&dlm->spinlock);
	return can_proceed;
}
static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res)
{
	int ret;

	spin_lock(&res->spinlock);
	ret = !!(res->state & DLM_LOCK_RES_DIRTY);
	spin_unlock(&res->spinlock);
	return ret;
}
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target)
{
	int ret = 0;

	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
	     res->lockname.len, res->lockname.name, dlm->node_num,
	     target);
	/* need to set MIGRATING flag on lockres.  this is done by
	 * ensuring that all asts have been flushed for this lockres. */
	spin_lock(&res->spinlock);
	BUG_ON(res->migration_pending);
	res->migration_pending = 1;
	/* strategy is to reserve an extra ast then release
	 * it below, letting the release do all of the work */
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* now flush all the pending asts */
	dlm_kick_thread(dlm, res);
	/* before waiting on DIRTY, block processes which may
	 * try to dirty the lockres before MIGRATING is set */
	spin_lock(&res->spinlock);
	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
	spin_unlock(&res->spinlock);
	/* now wait on any pending asts and the DIRTY state */
	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
	dlm_lockres_release_ast(dlm, res);

	mlog(0, "about to wait on migration_wq, dirty=%s\n",
	     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
	/* if the extra ref we just put was the final one, this
	 * will pass thru immediately.  otherwise, we need to wait
	 * for the last ast to finish. */
again:
	ret = wait_event_interruptible_timeout(dlm->migration_wq,
		   dlm_migration_can_proceed(dlm, res, target),
		   msecs_to_jiffies(1000));
	if (ret < 0) {
		mlog(0, "woken again: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	} else {
		mlog(0, "all is well: migrating? %s, dead? %s\n",
		     res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
		     test_bit(target, dlm->domain_map) ? "no":"yes");
	}
	if (!dlm_migration_can_proceed(dlm, res, target)) {
		mlog(0, "trying again...\n");
		goto again;
	}

	ret = 0;
	/* did the target go down or die? */
	spin_lock(&dlm->spinlock);
	if (!test_bit(target, dlm->domain_map)) {
		mlog(ML_ERROR, "aha. migration target %u just went down\n",
		     target);
		ret = -EHOSTDOWN;
	}
	spin_unlock(&dlm->spinlock);

	/*
	 * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
	 * another try; otherwise, we are sure the MIGRATING state is there,
	 * drop the unneeded state which blocked threads trying to DIRTY
	 */
	spin_lock(&res->spinlock);
	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
	if (!ret)
		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
	else
		res->migration_pending = 0;
	spin_unlock(&res->spinlock);

	/*
	 * at this point:
	 *
	 *   o the DLM_LOCK_RES_MIGRATING flag is set if target not down
	 *   o there are no pending asts on this lockres
	 *   o all processes trying to reserve an ast on this
	 *     lockres must wait for the MIGRATING flag to clear
	 */
	return ret;
}
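/*
 * Condensed view of the gating sequence above (illustrative only):
 * the MIGRATING flag is never set directly here; it is set by the
 * final dlm_lockres_release_ast() once all reserved asts are gone:
 *
 *	res->migration_pending = 1;
 *	__dlm_lockres_reserve_ast(res);     (hold migration off)
 *	dlm_kick_thread(dlm, res);          (flush queued asts/basts)
 *	wait_event(..., !dlm_lockres_is_dirty(dlm, res));
 *	dlm_lockres_release_ast(dlm, res);  (last ref sets MIGRATING)
 */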
/* last step in the migration process.
 * original master calls this to free all of the dlm_lock
 * structures that used to be for other nodes. */
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res)
{
	struct list_head *queue = &res->granted;
	int i, bit;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&res->spinlock);

	BUG_ON(res->owner == dlm->node_num);

	for (i = 0; i < 3; i++) {
		list_for_each_entry_safe(lock, next, queue, list) {
			if (lock->ml.node != dlm->node_num) {
				mlog(0, "putting lock for node %u\n",
				     lock->ml.node);
				/* be extra careful */
				BUG_ON(!list_empty(&lock->ast_list));
				BUG_ON(!list_empty(&lock->bast_list));
				BUG_ON(lock->ast_pending);
				BUG_ON(lock->bast_pending);
				dlm_lockres_clear_refmap_bit(dlm, res,
							     lock->ml.node);
				list_del_init(&lock->list);
				dlm_lock_put(lock);
				/* In a normal unlock, we would have added a
				 * DLM_UNLOCK_FREE_LOCK action. Force it. */
				dlm_lock_put(lock);
			}
		}
		queue++;
	}
	bit = 0;
	while (1) {
		bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
		if (bit >= O2NM_MAX_NODES)
			break;
		/* do not clear the local node reference, if there is a
		 * process holding this, let it drop the ref itself */
		if (bit != dlm->node_num) {
			mlog(0, "%s:%.*s: node %u had a ref to this "
			     "migrating lockres, clearing\n", dlm->name,
			     res->lockname.len, res->lockname.name, bit);
			dlm_lockres_clear_refmap_bit(dlm, res, bit);
		}
		bit++;
	}
}
/*
 * Pick a node to migrate the lock resource to. This function selects a
 * potential target based first on the locks and then on refmap. It skips
 * nodes that are in the process of exiting the domain.
 */
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res)
{
	enum dlm_lockres_list idx;
	struct list_head *queue = &res->granted;
	struct dlm_lock *lock;
	int noderef;
	u8 nodenum = O2NM_MAX_NODES;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* Go through all the locks */
	for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
		queue = dlm_list_idx_to_ptr(res, idx);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node == dlm->node_num)
				continue;
			if (test_bit(lock->ml.node, dlm->exit_domain_map))
				continue;
			nodenum = lock->ml.node;
			goto bail;
		}
	}

	/* Go thru the refmap */
	noderef = -1;
	while (1) {
		noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
					noderef + 1);
		if (noderef >= O2NM_MAX_NODES)
			break;
		if (noderef == dlm->node_num)
			continue;
		if (test_bit(noderef, dlm->exit_domain_map))
			continue;
		nodenum = noderef;
		goto bail;
	}

bail:
	return nodenum;
}
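/*
 * Worked example (illustrative only): with a lock from node 3 on the
 * converting queue and refmap bits {1, 3} set, the lock scan returns
 * node 3 and the refmap is never consulted.  If every lock is local
 * or its owner is leaving the domain, the first usable refmap bit
 * (node 1 here) is chosen instead; O2NM_MAX_NODES means "no target".
 */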
/* this is called by the new master once all lockres
 * data has been received */
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 master, u8 new_master,
				  struct dlm_node_iter *iter)
{
	struct dlm_migrate_request migrate;
	int ret, skip, status = 0;
	int nodenum;

	memset(&migrate, 0, sizeof(migrate));
	migrate.namelen = res->lockname.len;
	memcpy(migrate.name, res->lockname.name, migrate.namelen);
	migrate.new_master = new_master;
	migrate.master = master;

	ret = 0;

	/* send message to all nodes, except the master and myself */
	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
		if (nodenum == master ||
		    nodenum == new_master)
			continue;

		/* We could race exit domain. If exited, skip. */
		spin_lock(&dlm->spinlock);
		skip = (!test_bit(nodenum, dlm->domain_map));
		spin_unlock(&dlm->spinlock);
		if (skip) {
			clear_bit(nodenum, iter->node_map);
			continue;
		}

		ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
					 &migrate, sizeof(migrate), nodenum,
					 &status);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: res %.*s, Error %d send "
			     "MIGRATE_REQUEST to node %u\n", dlm->name,
			     migrate.namelen, migrate.name, ret, nodenum);
			if (!dlm_is_host_down(ret)) {
				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
				BUG();
			}
			clear_bit(nodenum, iter->node_map);
			ret = 0;
		} else if (status < 0) {
			mlog(0, "migrate request (node %u) returned %d!\n",
			     nodenum, status);
			ret = status;
		} else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
			/* during the migration request we short-circuited
			 * the mastery of the lockres.  make sure we have
			 * a mastery ref for nodenum */
			mlog(0, "%s:%.*s: need ref for node %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     nodenum);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, nodenum);
			spin_unlock(&res->spinlock);
		}
	}

	if (ret < 0)
		mlog_errno(ret);

	mlog(0, "returning ret=%d\n", ret);
	return ret;
}
/* if there is an existing mle for this lockres, we now know who the master is.
 * (the one who sent us *this* message) we can clear it up right away.
 * since the process that put the mle on the list still has a reference to it,
 * we can unhash it now, set the master and wake the process.  as a result,
 * we will have no mle in the list to start with.  now we can add an mle for
 * the migration and this should be the only one found for those scanning the
 * master list. */
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
				void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
	const char *name;
	unsigned int namelen, hash;
	int ret = 0;

	if (!dlm_grab(dlm))
		return 0;

	name = migrate->name;
	namelen = migrate->namelen;
	hash = dlm_lockid_hash(name, namelen);

	/* preallocate.. if this fails, abort */
	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
	if (!mle) {
		ret = -ENOMEM;
		goto leave;
	}

	/* check for pre-existing lock */
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_RECOVERING) {
			/* if all is working ok, this can only mean that we got
			 * a migrate request from a node that we now see as
			 * dead.  what can we do here?  drop it to the floor? */
			spin_unlock(&res->spinlock);
			mlog(ML_ERROR, "Got a migrate request, but the "
			     "lockres is marked as recovering!");
			kmem_cache_free(dlm_mle_cache, mle);
			ret = -EINVAL; /* need a better solution */
			goto unlock;
		}
		res->state |= DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
	}

	spin_lock(&dlm->master_lock);
	/* ignore status.  only nonzero status would BUG. */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
				    name, namelen,
				    migrate->new_master,
				    migrate->master);

	if (ret < 0)
		kmem_cache_free(dlm_mle_cache, mle);

	spin_unlock(&dlm->master_lock);
unlock:
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (res)
		dlm_lockres_put(res);
leave:
	dlm_put(dlm);
	return ret;
}
/* must be holding dlm->spinlock and dlm->master_lock
 * when adding a migration mle, we can clear any other mles
 * in the master list because we know with certainty that
 * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
 * the new migration mle.  this way we can hold with the rule
 * of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
{
	int found, ret = 0;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* caller is responsible for any ref taken here on oldmle */
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
	if (found) {
		struct dlm_master_list_entry *tmp = *oldmle;
		spin_lock(&tmp->spinlock);
		if (tmp->type == DLM_MLE_MIGRATION) {
			if (master == dlm->node_num) {
				/* ah another process raced me to it */
				mlog(0, "tried to migrate %.*s, but some "
				     "process beat me to it\n",
				     namelen, name);
				spin_unlock(&tmp->spinlock);
				return -EEXIST;
			} else {
				/* bad.  2 NODES are trying to migrate! */
				mlog(ML_ERROR, "migration error mle: "
				     "master=%u new_master=%u // request: "
				     "master=%u new_master=%u // "
				     "lockres=%.*s\n",
				     tmp->master, tmp->new_master,
				     master, new_master,
				     namelen, name);
				BUG();
			}
		} else {
			/* this is essentially what assert_master does */
			tmp->master = master;
			atomic_set(&tmp->woken, 1);
			wake_up(&tmp->wq);
			/* remove it so that only one mle will be found */
			__dlm_unlink_mle(dlm, tmp);
			__dlm_mle_detach_hb_events(dlm, tmp);
			if (tmp->type == DLM_MLE_MASTER) {
				ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
				mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
				     "telling master to get ref "
				     "for cleared out mle during "
				     "migration\n", dlm->name,
				     namelen, name, master,
				     new_master);
			}
		}
		spin_unlock(&tmp->spinlock);
	}

	/* now add a migration mle to the tail of the list */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	/* the new master will be sending an assert master for this.
	 * at that point we will get the refmap reference */
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	__dlm_insert_mle(dlm, mle);

	return ret;
}
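/*
 * Both call sites follow the same locking pattern (sketch only):
 *
 *	spin_lock(&dlm->spinlock);
 *	spin_lock(&dlm->master_lock);
 *	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
 *				    namelen, new_master, master);
 *	spin_unlock(&dlm->master_lock);
 *	spin_unlock(&dlm->spinlock);
 *	if (oldmle)
 *		dlm_put_mle(oldmle);	(caller owns the ref on oldmle)
 */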
/*
 * Sets the owner of the lockres, associated to the mle, to UNKNOWN
 */
static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
					struct dlm_master_list_entry *mle)
{
	struct dlm_lock_resource *res;

	/* Find the lockres associated to the mle and set its owner to UNK */
	res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
				   mle->mnamehash);
	if (res) {
		spin_unlock(&dlm->master_lock);

		/* move lockres onto recovery list */
		spin_lock(&res->spinlock);
		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
		dlm_move_lockres_to_recovery_list(dlm, res);
		spin_unlock(&res->spinlock);
		dlm_lockres_put(res);

		/* about to get rid of mle, detach from heartbeat */
		__dlm_mle_detach_hb_events(dlm, mle);

		/* dump the mle */
		spin_lock(&dlm->master_lock);
		__dlm_put_mle(mle);
		spin_unlock(&dlm->master_lock);
	}

	return res;
}
static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
				    struct dlm_master_list_entry *mle)
{
	__dlm_mle_detach_hb_events(dlm, mle);

	spin_lock(&mle->spinlock);
	__dlm_unlink_mle(dlm, mle);
	atomic_set(&mle->woken, 1);
	spin_unlock(&mle->spinlock);

	wake_up(&mle->wq);
}
static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle, u8 dead_node)
{
	int bit;

	BUG_ON(mle->type != DLM_MLE_BLOCK);

	spin_lock(&mle->spinlock);
	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
	if (bit != dead_node) {
		mlog(0, "mle found, but dead node %u would not have been "
		     "master\n", dead_node);
		spin_unlock(&mle->spinlock);
	} else {
		/* Must drop the refcount by one since the assert_master will
		 * never arrive. This may result in the mle being unlinked and
		 * freed, but there may still be a process waiting in the
		 * dlmlock path which is fine. */
		mlog(0, "node %u was expected master\n", dead_node);
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);

		/* Do not need events any longer, so detach from heartbeat */
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
	}
}
void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_master_list_entry *mle;
	struct dlm_lock_resource *res;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	unsigned int i;

	mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
	assert_spin_locked(&dlm->spinlock);

	/* clean the master list */
	spin_lock(&dlm->master_lock);
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_master_hash(dlm, i);
		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
			BUG_ON(mle->type != DLM_MLE_BLOCK &&
			       mle->type != DLM_MLE_MASTER &&
			       mle->type != DLM_MLE_MIGRATION);

			/* MASTER mles are initiated locally. The waiting
			 * process will notice the node map change shortly.
			 * Let that happen as normal. */
			if (mle->type == DLM_MLE_MASTER)
				continue;

			/* BLOCK mles are initiated by other nodes. Need to
			 * clean up if the dead node would have been the
			 * master. */
			if (mle->type == DLM_MLE_BLOCK) {
				dlm_clean_block_mle(dlm, mle, dead_node);
				continue;
			}

			/* Everything else is a MIGRATION mle */

			/* The rule for MIGRATION mles is that the master
			 * becomes UNKNOWN if *either* the original or the new
			 * master dies. All UNKNOWN lockres' are sent to
			 * whichever node becomes the recovery master. The new
			 * master is responsible for determining if there is
			 * still a master for this lockres, or if he needs to
			 * take over mastery. Either way, this node should
			 * expect another message to resolve this. */

			if (mle->master != dead_node &&
			    mle->new_master != dead_node)
				continue;

			if (mle->new_master == dead_node && mle->inuse) {
				mlog(ML_NOTICE, "%s: target %u died during "
				     "migration from %u, the MLE is "
				     "still in use, ignore it!\n",
				     dlm->name, dead_node,
				     mle->master);
				continue;
			}

			/* If we have reached this point, this mle needs to be
			 * removed from the list and freed. */
			dlm_clean_migration_mle(dlm, mle);

			mlog(0, "%s: node %u died during migration from "
			     "%u to %u!\n", dlm->name, dead_node, mle->master,
			     mle->new_master);

			/* If we find a lockres associated with the mle, we've
			 * hit this rare case that messes up our lock ordering.
			 * If so, we need to drop the master lock so that we can
			 * take the lockres lock, meaning that we will have to
			 * restart from the head of list. */
			res = dlm_reset_mleres_owner(dlm, mle);
			if (res)
				/* restart */
				goto top;

			/* This may be the last reference */
			__dlm_put_mle(mle);
		}
	}
	spin_unlock(&dlm->master_lock);
}
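/*
 * Summary of the per-type cleanup above (illustrative only):
 *
 *	DLM_MLE_MASTER:     left alone; the local waiter notices the
 *	                    node map change on its own.
 *	DLM_MLE_BLOCK:      dlm_clean_block_mle() wakes waiters if the
 *	                    dead node was the expected master.
 *	DLM_MLE_MIGRATION:  unlinked if either endpoint died; the
 *	                    lockres owner is reset to UNKNOWN so that
 *	                    recovery can resolve mastery.
 */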
int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 u8 old_master)
{
	struct dlm_node_iter iter;
	int ret = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(old_master, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	/* ownership of the lockres is changing.  account for the
	 * mastery reference here since old_master will briefly have
	 * a reference after the migration completes */
	spin_lock(&res->spinlock);
	dlm_lockres_set_refmap_bit(dlm, res, old_master);
	spin_unlock(&res->spinlock);

	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
				     dlm->node_num, &iter);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	mlog(0, "doing assert master of %.*s to all except the original node\n",
	     res->lockname.len, res->lockname.name);
	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		/* no longer need to retry.  all living nodes contacted. */
		mlog_errno(ret);
		ret = 0;
	}

	memset(iter.node_map, 0, sizeof(iter.node_map));
	set_bit(old_master, iter.node_map);
	mlog(0, "doing assert master of %.*s back to %u\n",
	     res->lockname.len, res->lockname.name, old_master);
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		mlog(0, "assert master to original master failed "
		     "with %d.\n", ret);
		/* the only nonzero status here would be because of
		 * a dead original node.  we're done. */
		ret = 0;
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	/* re-dirty it on the new master */
	dlm_kick_thread(dlm, res);
	wake_up(&res->wq);
leave:
	return ret;
}
/*
 * LOCKRES AST REFCOUNT
 * this is integral to migration
 */

/* for future intent to call an ast, reserve one ahead of time.
 * this should be called only after waiting on the lockres
 * with dlm_wait_on_lockres, and while still holding the
 * spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING)
		__dlm_print_one_lock_resource(res);
	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);

	atomic_inc(&res->asts_reserved);
}

/*
 * used to drop the reserved ast, either because it went unused,
 * or because the ast/bast was actually called.
 *
 * also, if there is a pending migration on this lockres,
 * and this was the last pending ast on the lockres,
 * atomically set the MIGRATING flag before we drop the lock.
 * this is how we ensure that migration can proceed with no
 * asts in progress.  note that it is ok if the state of the
 * queues is such that a lock should be granted in the future
 * or that a bast should be fired, because the new master will
 * shuffle the lists on this lockres as soon as it is migrated.
 */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
		return;

	if (!res->migration_pending) {
		spin_unlock(&res->spinlock);
		return;
	}

	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
	res->migration_pending = 0;
	res->state |= DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
}
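/*
 * Typical reserve/release pairing (illustrative only): a path that may
 * later fire an ast reserves under the lockres spinlock and releases
 * when the ast has fired or is known to be unneeded:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	...				(queue and fire the ast)
 *	dlm_lockres_release_ast(dlm, res);
 *
 * The final release while migration_pending is set is what flips the
 * lockres into the MIGRATING state.
 */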
void dlm_force_free_mles(struct dlm_ctxt *dlm)
{
	int i;
	struct hlist_head *bucket;
	struct dlm_master_list_entry *mle;
	struct hlist_node *tmp;

	/*
	 * We notified all other nodes that we are exiting the domain and
	 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
	 * around we force free them and wake any processes that are waiting
	 * on the mles
	 */
	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);

	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));

	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_master_hash(dlm, i);
		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
			if (mle->type != DLM_MLE_BLOCK) {
				mlog(ML_ERROR, "bad mle: %p\n", mle);
				dlm_print_one_mle(mle);
			}
			atomic_set(&mle->woken, 1);
			wake_up(&mle->wq);

			__dlm_unlink_mle(dlm, mle);
			__dlm_mle_detach_hb_events(dlm, mle);
			__dlm_put_mle(mle);
		}
	}
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}