Linux 2.6.21
[linux/fpc-iii.git] / fs / ocfs2 / dlm / dlmdomain.c
blobc558442a0b447fa449a1ba1e3118bc17d6b30ab9
1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
4 * dlmdomain.c
6 * defines domain join / leave apis
8 * Copyright (C) 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 021110-1307, USA.
27 #include <linux/module.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/utsname.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/delay.h>
35 #include <linux/err.h>
37 #include "cluster/heartbeat.h"
38 #include "cluster/nodemanager.h"
39 #include "cluster/tcp.h"
41 #include "dlmapi.h"
42 #include "dlmcommon.h"
44 #include "dlmdomain.h"
46 #include "dlmver.h"
48 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
49 #include "cluster/masklog.h"
52 * ocfs2 node maps are array of long int, which limits to send them freely
53 * across the wire due to endianness issues. To workaround this, we convert
54 * long ints to byte arrays. Following 3 routines are helper functions to
55 * set/test/copy bits within those array of bytes
57 static inline void byte_set_bit(u8 nr, u8 map[])
59 map[nr >> 3] |= (1UL << (nr & 7));
62 static inline int byte_test_bit(u8 nr, u8 map[])
64 return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0;
67 static inline void byte_copymap(u8 dmap[], unsigned long smap[],
68 unsigned int sz)
70 unsigned int nn;
72 if (!sz)
73 return;
75 memset(dmap, 0, ((sz + 7) >> 3));
76 for (nn = 0 ; nn < sz; nn++)
77 if (test_bit(nn, smap))
78 byte_set_bit(nn, dmap);
81 static void dlm_free_pagevec(void **vec, int pages)
83 while (pages--)
84 free_page((unsigned long)vec[pages]);
85 kfree(vec);
88 static void **dlm_alloc_pagevec(int pages)
90 void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
91 int i;
93 if (!vec)
94 return NULL;
96 for (i = 0; i < pages; i++)
97 if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
98 goto out_free;
100 mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
101 pages, (unsigned long)DLM_HASH_PAGES,
102 (unsigned long)DLM_BUCKETS_PER_PAGE);
103 return vec;
104 out_free:
105 dlm_free_pagevec(vec, i);
106 return NULL;
111 * spinlock lock ordering: if multiple locks are needed, obey this ordering:
112 * dlm_domain_lock
113 * struct dlm_ctxt->spinlock
114 * struct dlm_lock_resource->spinlock
115 * struct dlm_ctxt->master_lock
116 * struct dlm_ctxt->ast_lock
117 * dlm_master_list_entry->spinlock
118 * dlm_lock->spinlock
122 DEFINE_SPINLOCK(dlm_domain_lock);
123 LIST_HEAD(dlm_domains);
124 static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
126 #define DLM_DOMAIN_BACKOFF_MS 200
128 static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
129 void **ret_data);
130 static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
131 void **ret_data);
132 static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
133 void **ret_data);
134 static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
135 void **ret_data);
137 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
139 void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
141 if (!hlist_unhashed(&lockres->hash_node)) {
142 hlist_del_init(&lockres->hash_node);
143 dlm_lockres_put(lockres);
147 void __dlm_insert_lockres(struct dlm_ctxt *dlm,
148 struct dlm_lock_resource *res)
150 struct hlist_head *bucket;
151 struct qstr *q;
153 assert_spin_locked(&dlm->spinlock);
155 q = &res->lockname;
156 bucket = dlm_lockres_hash(dlm, q->hash);
158 /* get a reference for our hashtable */
159 dlm_lockres_get(res);
161 hlist_add_head(&res->hash_node, bucket);
164 struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
165 const char *name,
166 unsigned int len,
167 unsigned int hash)
169 struct hlist_head *bucket;
170 struct hlist_node *list;
172 mlog_entry("%.*s\n", len, name);
174 assert_spin_locked(&dlm->spinlock);
176 bucket = dlm_lockres_hash(dlm, hash);
178 hlist_for_each(list, bucket) {
179 struct dlm_lock_resource *res = hlist_entry(list,
180 struct dlm_lock_resource, hash_node);
181 if (res->lockname.name[0] != name[0])
182 continue;
183 if (unlikely(res->lockname.len != len))
184 continue;
185 if (memcmp(res->lockname.name + 1, name + 1, len - 1))
186 continue;
187 dlm_lockres_get(res);
188 return res;
190 return NULL;
193 /* intended to be called by functions which do not care about lock
194 * resources which are being purged (most net _handler functions).
195 * this will return NULL for any lock resource which is found but
196 * currently in the process of dropping its mastery reference.
197 * use __dlm_lookup_lockres_full when you need the lock resource
198 * regardless (e.g. dlm_get_lock_resource) */
199 struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
200 const char *name,
201 unsigned int len,
202 unsigned int hash)
204 struct dlm_lock_resource *res = NULL;
206 mlog_entry("%.*s\n", len, name);
208 assert_spin_locked(&dlm->spinlock);
210 res = __dlm_lookup_lockres_full(dlm, name, len, hash);
211 if (res) {
212 spin_lock(&res->spinlock);
213 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
214 spin_unlock(&res->spinlock);
215 dlm_lockres_put(res);
216 return NULL;
218 spin_unlock(&res->spinlock);
221 return res;
224 struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
225 const char *name,
226 unsigned int len)
228 struct dlm_lock_resource *res;
229 unsigned int hash = dlm_lockid_hash(name, len);
231 spin_lock(&dlm->spinlock);
232 res = __dlm_lookup_lockres(dlm, name, len, hash);
233 spin_unlock(&dlm->spinlock);
234 return res;
237 static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
239 struct dlm_ctxt *tmp = NULL;
240 struct list_head *iter;
242 assert_spin_locked(&dlm_domain_lock);
244 /* tmp->name here is always NULL terminated,
245 * but domain may not be! */
246 list_for_each(iter, &dlm_domains) {
247 tmp = list_entry (iter, struct dlm_ctxt, list);
248 if (strlen(tmp->name) == len &&
249 memcmp(tmp->name, domain, len)==0)
250 break;
251 tmp = NULL;
254 return tmp;
257 /* For null terminated domain strings ONLY */
258 static struct dlm_ctxt * __dlm_lookup_domain(const char *domain)
260 assert_spin_locked(&dlm_domain_lock);
262 return __dlm_lookup_domain_full(domain, strlen(domain));
266 /* returns true on one of two conditions:
267 * 1) the domain does not exist
268 * 2) the domain exists and it's state is "joined" */
269 static int dlm_wait_on_domain_helper(const char *domain)
271 int ret = 0;
272 struct dlm_ctxt *tmp = NULL;
274 spin_lock(&dlm_domain_lock);
276 tmp = __dlm_lookup_domain(domain);
277 if (!tmp)
278 ret = 1;
279 else if (tmp->dlm_state == DLM_CTXT_JOINED)
280 ret = 1;
282 spin_unlock(&dlm_domain_lock);
283 return ret;
286 static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
288 if (dlm->lockres_hash)
289 dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
291 if (dlm->name)
292 kfree(dlm->name);
294 kfree(dlm);
297 /* A little strange - this function will be called while holding
298 * dlm_domain_lock and is expected to be holding it on the way out. We
299 * will however drop and reacquire it multiple times */
300 static void dlm_ctxt_release(struct kref *kref)
302 struct dlm_ctxt *dlm;
304 dlm = container_of(kref, struct dlm_ctxt, dlm_refs);
306 BUG_ON(dlm->num_joins);
307 BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);
309 /* we may still be in the list if we hit an error during join. */
310 list_del_init(&dlm->list);
312 spin_unlock(&dlm_domain_lock);
314 mlog(0, "freeing memory from domain %s\n", dlm->name);
316 wake_up(&dlm_domain_events);
318 dlm_free_ctxt_mem(dlm);
320 spin_lock(&dlm_domain_lock);
323 void dlm_put(struct dlm_ctxt *dlm)
325 spin_lock(&dlm_domain_lock);
326 kref_put(&dlm->dlm_refs, dlm_ctxt_release);
327 spin_unlock(&dlm_domain_lock);
330 static void __dlm_get(struct dlm_ctxt *dlm)
332 kref_get(&dlm->dlm_refs);
335 /* given a questionable reference to a dlm object, gets a reference if
336 * it can find it in the list, otherwise returns NULL in which case
337 * you shouldn't trust your pointer. */
338 struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
340 struct list_head *iter;
341 struct dlm_ctxt *target = NULL;
343 spin_lock(&dlm_domain_lock);
345 list_for_each(iter, &dlm_domains) {
346 target = list_entry (iter, struct dlm_ctxt, list);
348 if (target == dlm) {
349 __dlm_get(target);
350 break;
353 target = NULL;
356 spin_unlock(&dlm_domain_lock);
358 return target;
361 int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
363 int ret;
365 spin_lock(&dlm_domain_lock);
366 ret = (dlm->dlm_state == DLM_CTXT_JOINED) ||
367 (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN);
368 spin_unlock(&dlm_domain_lock);
370 return ret;
373 static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
375 if (dlm->dlm_worker) {
376 flush_workqueue(dlm->dlm_worker);
377 destroy_workqueue(dlm->dlm_worker);
378 dlm->dlm_worker = NULL;
382 static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
384 dlm_unregister_domain_handlers(dlm);
385 dlm_complete_thread(dlm);
386 dlm_complete_recovery_thread(dlm);
387 dlm_destroy_dlm_worker(dlm);
389 /* We've left the domain. Now we can take ourselves out of the
390 * list and allow the kref stuff to help us free the
391 * memory. */
392 spin_lock(&dlm_domain_lock);
393 list_del_init(&dlm->list);
394 spin_unlock(&dlm_domain_lock);
396 /* Wake up anyone waiting for us to remove this domain */
397 wake_up(&dlm_domain_events);
400 static int dlm_migrate_all_locks(struct dlm_ctxt *dlm)
402 int i, num, n, ret = 0;
403 struct dlm_lock_resource *res;
404 struct hlist_node *iter;
405 struct hlist_head *bucket;
406 int dropped;
408 mlog(0, "Migrating locks from domain %s\n", dlm->name);
410 num = 0;
411 spin_lock(&dlm->spinlock);
412 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
413 redo_bucket:
414 n = 0;
415 bucket = dlm_lockres_hash(dlm, i);
416 iter = bucket->first;
417 while (iter) {
418 n++;
419 res = hlist_entry(iter, struct dlm_lock_resource,
420 hash_node);
421 dlm_lockres_get(res);
422 /* migrate, if necessary. this will drop the dlm
423 * spinlock and retake it if it does migration. */
424 dropped = dlm_empty_lockres(dlm, res);
426 spin_lock(&res->spinlock);
427 __dlm_lockres_calc_usage(dlm, res);
428 iter = res->hash_node.next;
429 spin_unlock(&res->spinlock);
431 dlm_lockres_put(res);
433 cond_resched_lock(&dlm->spinlock);
435 if (dropped)
436 goto redo_bucket;
438 num += n;
439 mlog(0, "%s: touched %d lockreses in bucket %d "
440 "(tot=%d)\n", dlm->name, n, i, num);
442 spin_unlock(&dlm->spinlock);
443 wake_up(&dlm->dlm_thread_wq);
445 /* let the dlm thread take care of purging, keep scanning until
446 * nothing remains in the hash */
447 if (num) {
448 mlog(0, "%s: %d lock resources in hash last pass\n",
449 dlm->name, num);
450 ret = -EAGAIN;
452 mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
453 return ret;
456 static int dlm_no_joining_node(struct dlm_ctxt *dlm)
458 int ret;
460 spin_lock(&dlm->spinlock);
461 ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN;
462 spin_unlock(&dlm->spinlock);
464 return ret;
467 static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
469 /* Yikes, a double spinlock! I need domain_lock for the dlm
470 * state and the dlm spinlock for join state... Sorry! */
471 again:
472 spin_lock(&dlm_domain_lock);
473 spin_lock(&dlm->spinlock);
475 if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
476 mlog(0, "Node %d is joining, we wait on it.\n",
477 dlm->joining_node);
478 spin_unlock(&dlm->spinlock);
479 spin_unlock(&dlm_domain_lock);
481 wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm));
482 goto again;
485 dlm->dlm_state = DLM_CTXT_LEAVING;
486 spin_unlock(&dlm->spinlock);
487 spin_unlock(&dlm_domain_lock);
490 static void __dlm_print_nodes(struct dlm_ctxt *dlm)
492 int node = -1;
494 assert_spin_locked(&dlm->spinlock);
496 printk(KERN_INFO "ocfs2_dlm: Nodes in domain (\"%s\"): ", dlm->name);
498 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
499 node + 1)) < O2NM_MAX_NODES) {
500 printk("%d ", node);
502 printk("\n");
505 static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
506 void **ret_data)
508 struct dlm_ctxt *dlm = data;
509 unsigned int node;
510 struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
512 mlog_entry("%p %u %p", msg, len, data);
514 if (!dlm_grab(dlm))
515 return 0;
517 node = exit_msg->node_idx;
519 printk(KERN_INFO "ocfs2_dlm: Node %u leaves domain %s\n", node, dlm->name);
521 spin_lock(&dlm->spinlock);
522 clear_bit(node, dlm->domain_map);
523 __dlm_print_nodes(dlm);
525 /* notify anything attached to the heartbeat events */
526 dlm_hb_event_notify_attached(dlm, node, 0);
528 spin_unlock(&dlm->spinlock);
530 dlm_put(dlm);
532 return 0;
535 static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm,
536 unsigned int node)
538 int status;
539 struct dlm_exit_domain leave_msg;
541 mlog(0, "Asking node %u if we can leave the domain %s me = %u\n",
542 node, dlm->name, dlm->node_num);
544 memset(&leave_msg, 0, sizeof(leave_msg));
545 leave_msg.node_idx = dlm->node_num;
547 status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key,
548 &leave_msg, sizeof(leave_msg), node,
549 NULL);
551 mlog(0, "status return %d from o2net_send_message\n", status);
553 return status;
557 static void dlm_leave_domain(struct dlm_ctxt *dlm)
559 int node, clear_node, status;
561 /* At this point we've migrated away all our locks and won't
562 * accept mastership of new ones. The dlm is responsible for
563 * almost nothing now. We make sure not to confuse any joining
564 * nodes and then commence shutdown procedure. */
566 spin_lock(&dlm->spinlock);
567 /* Clear ourselves from the domain map */
568 clear_bit(dlm->node_num, dlm->domain_map);
569 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
570 0)) < O2NM_MAX_NODES) {
571 /* Drop the dlm spinlock. This is safe wrt the domain_map.
572 * -nodes cannot be added now as the
573 * query_join_handlers knows to respond with OK_NO_MAP
574 * -we catch the right network errors if a node is
575 * removed from the map while we're sending him the
576 * exit message. */
577 spin_unlock(&dlm->spinlock);
579 clear_node = 1;
581 status = dlm_send_one_domain_exit(dlm, node);
582 if (status < 0 &&
583 status != -ENOPROTOOPT &&
584 status != -ENOTCONN) {
585 mlog(ML_NOTICE, "Error %d sending domain exit message "
586 "to node %d\n", status, node);
588 /* Not sure what to do here but lets sleep for
589 * a bit in case this was a transient
590 * error... */
591 msleep(DLM_DOMAIN_BACKOFF_MS);
592 clear_node = 0;
595 spin_lock(&dlm->spinlock);
596 /* If we're not clearing the node bit then we intend
597 * to loop back around to try again. */
598 if (clear_node)
599 clear_bit(node, dlm->domain_map);
601 spin_unlock(&dlm->spinlock);
604 int dlm_joined(struct dlm_ctxt *dlm)
606 int ret = 0;
608 spin_lock(&dlm_domain_lock);
610 if (dlm->dlm_state == DLM_CTXT_JOINED)
611 ret = 1;
613 spin_unlock(&dlm_domain_lock);
615 return ret;
618 int dlm_shutting_down(struct dlm_ctxt *dlm)
620 int ret = 0;
622 spin_lock(&dlm_domain_lock);
624 if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
625 ret = 1;
627 spin_unlock(&dlm_domain_lock);
629 return ret;
632 void dlm_unregister_domain(struct dlm_ctxt *dlm)
634 int leave = 0;
636 spin_lock(&dlm_domain_lock);
637 BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
638 BUG_ON(!dlm->num_joins);
640 dlm->num_joins--;
641 if (!dlm->num_joins) {
642 /* We mark it "in shutdown" now so new register
643 * requests wait until we've completely left the
644 * domain. Don't use DLM_CTXT_LEAVING yet as we still
645 * want new domain joins to communicate with us at
646 * least until we've completed migration of our
647 * resources. */
648 dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;
649 leave = 1;
651 spin_unlock(&dlm_domain_lock);
653 if (leave) {
654 mlog(0, "shutting down domain %s\n", dlm->name);
656 /* We changed dlm state, notify the thread */
657 dlm_kick_thread(dlm, NULL);
659 while (dlm_migrate_all_locks(dlm)) {
660 /* Give dlm_thread time to purge the lockres' */
661 msleep(500);
662 mlog(0, "%s: more migration to do\n", dlm->name);
664 dlm_mark_domain_leaving(dlm);
665 dlm_leave_domain(dlm);
666 dlm_complete_dlm_shutdown(dlm);
668 dlm_put(dlm);
670 EXPORT_SYMBOL_GPL(dlm_unregister_domain);
672 static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
673 void **ret_data)
675 struct dlm_query_join_request *query;
676 enum dlm_query_join_response response;
677 struct dlm_ctxt *dlm = NULL;
678 u8 nodenum;
680 query = (struct dlm_query_join_request *) msg->buf;
682 mlog(0, "node %u wants to join domain %s\n", query->node_idx,
683 query->domain);
686 * If heartbeat doesn't consider the node live, tell it
687 * to back off and try again. This gives heartbeat a chance
688 * to catch up.
690 if (!o2hb_check_node_heartbeating(query->node_idx)) {
691 mlog(0, "node %u is not in our live map yet\n",
692 query->node_idx);
694 response = JOIN_DISALLOW;
695 goto respond;
698 response = JOIN_OK_NO_MAP;
700 spin_lock(&dlm_domain_lock);
701 dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
702 if (!dlm)
703 goto unlock_respond;
706 * There is a small window where the joining node may not see the
707 * node(s) that just left but still part of the cluster. DISALLOW
708 * join request if joining node has different node map.
710 nodenum=0;
711 while (nodenum < O2NM_MAX_NODES) {
712 if (test_bit(nodenum, dlm->domain_map)) {
713 if (!byte_test_bit(nodenum, query->node_map)) {
714 mlog(0, "disallow join as node %u does not "
715 "have node %u in its nodemap\n",
716 query->node_idx, nodenum);
717 response = JOIN_DISALLOW;
718 goto unlock_respond;
721 nodenum++;
724 /* Once the dlm ctxt is marked as leaving then we don't want
725 * to be put in someone's domain map.
726 * Also, explicitly disallow joining at certain troublesome
727 * times (ie. during recovery). */
728 if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
729 int bit = query->node_idx;
730 spin_lock(&dlm->spinlock);
732 if (dlm->dlm_state == DLM_CTXT_NEW &&
733 dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) {
734 /*If this is a brand new context and we
735 * haven't started our join process yet, then
736 * the other node won the race. */
737 response = JOIN_OK_NO_MAP;
738 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
739 /* Disallow parallel joins. */
740 response = JOIN_DISALLOW;
741 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
742 mlog(0, "node %u trying to join, but recovery "
743 "is ongoing.\n", bit);
744 response = JOIN_DISALLOW;
745 } else if (test_bit(bit, dlm->recovery_map)) {
746 mlog(0, "node %u trying to join, but it "
747 "still needs recovery.\n", bit);
748 response = JOIN_DISALLOW;
749 } else if (test_bit(bit, dlm->domain_map)) {
750 mlog(0, "node %u trying to join, but it "
751 "is still in the domain! needs recovery?\n",
752 bit);
753 response = JOIN_DISALLOW;
754 } else {
755 /* Alright we're fully a part of this domain
756 * so we keep some state as to who's joining
757 * and indicate to him that needs to be fixed
758 * up. */
759 response = JOIN_OK;
760 __dlm_set_joining_node(dlm, query->node_idx);
763 spin_unlock(&dlm->spinlock);
765 unlock_respond:
766 spin_unlock(&dlm_domain_lock);
768 respond:
769 mlog(0, "We respond with %u\n", response);
771 return response;
774 static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
775 void **ret_data)
777 struct dlm_assert_joined *assert;
778 struct dlm_ctxt *dlm = NULL;
780 assert = (struct dlm_assert_joined *) msg->buf;
782 mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
783 assert->domain);
785 spin_lock(&dlm_domain_lock);
786 dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len);
787 /* XXX should we consider no dlm ctxt an error? */
788 if (dlm) {
789 spin_lock(&dlm->spinlock);
791 /* Alright, this node has officially joined our
792 * domain. Set him in the map and clean up our
793 * leftover join state. */
794 BUG_ON(dlm->joining_node != assert->node_idx);
795 set_bit(assert->node_idx, dlm->domain_map);
796 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
798 printk(KERN_INFO "ocfs2_dlm: Node %u joins domain %s\n",
799 assert->node_idx, dlm->name);
800 __dlm_print_nodes(dlm);
802 /* notify anything attached to the heartbeat events */
803 dlm_hb_event_notify_attached(dlm, assert->node_idx, 1);
805 spin_unlock(&dlm->spinlock);
807 spin_unlock(&dlm_domain_lock);
809 return 0;
812 static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
813 void **ret_data)
815 struct dlm_cancel_join *cancel;
816 struct dlm_ctxt *dlm = NULL;
818 cancel = (struct dlm_cancel_join *) msg->buf;
820 mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
821 cancel->domain);
823 spin_lock(&dlm_domain_lock);
824 dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len);
826 if (dlm) {
827 spin_lock(&dlm->spinlock);
829 /* Yikes, this guy wants to cancel his join. No
830 * problem, we simply cleanup our join state. */
831 BUG_ON(dlm->joining_node != cancel->node_idx);
832 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
834 spin_unlock(&dlm->spinlock);
836 spin_unlock(&dlm_domain_lock);
838 return 0;
841 static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
842 unsigned int node)
844 int status;
845 struct dlm_cancel_join cancel_msg;
847 memset(&cancel_msg, 0, sizeof(cancel_msg));
848 cancel_msg.node_idx = dlm->node_num;
849 cancel_msg.name_len = strlen(dlm->name);
850 memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
852 status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
853 &cancel_msg, sizeof(cancel_msg), node,
854 NULL);
855 if (status < 0) {
856 mlog_errno(status);
857 goto bail;
860 bail:
861 return status;
864 /* map_size should be in bytes. */
865 static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
866 unsigned long *node_map,
867 unsigned int map_size)
869 int status, tmpstat;
870 unsigned int node;
872 if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) *
873 sizeof(unsigned long))) {
874 mlog(ML_ERROR,
875 "map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n",
876 map_size, BITS_TO_LONGS(O2NM_MAX_NODES));
877 return -EINVAL;
880 status = 0;
881 node = -1;
882 while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
883 node + 1)) < O2NM_MAX_NODES) {
884 if (node == dlm->node_num)
885 continue;
887 tmpstat = dlm_send_one_join_cancel(dlm, node);
888 if (tmpstat) {
889 mlog(ML_ERROR, "Error return %d cancelling join on "
890 "node %d\n", tmpstat, node);
891 if (!status)
892 status = tmpstat;
896 if (status)
897 mlog_errno(status);
898 return status;
901 static int dlm_request_join(struct dlm_ctxt *dlm,
902 int node,
903 enum dlm_query_join_response *response)
905 int status, retval;
906 struct dlm_query_join_request join_msg;
908 mlog(0, "querying node %d\n", node);
910 memset(&join_msg, 0, sizeof(join_msg));
911 join_msg.node_idx = dlm->node_num;
912 join_msg.name_len = strlen(dlm->name);
913 memcpy(join_msg.domain, dlm->name, join_msg.name_len);
915 /* copy live node map to join message */
916 byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
918 status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
919 sizeof(join_msg), node, &retval);
920 if (status < 0 && status != -ENOPROTOOPT) {
921 mlog_errno(status);
922 goto bail;
925 /* -ENOPROTOOPT from the net code means the other side isn't
926 listening for our message type -- that's fine, it means
927 his dlm isn't up, so we can consider him a 'yes' but not
928 joined into the domain. */
929 if (status == -ENOPROTOOPT) {
930 status = 0;
931 *response = JOIN_OK_NO_MAP;
932 } else if (retval == JOIN_DISALLOW ||
933 retval == JOIN_OK ||
934 retval == JOIN_OK_NO_MAP) {
935 *response = retval;
936 } else {
937 status = -EINVAL;
938 mlog(ML_ERROR, "invalid response %d from node %u\n", retval,
939 node);
942 mlog(0, "status %d, node %d response is %d\n", status, node,
943 *response);
945 bail:
946 return status;
949 static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
950 unsigned int node)
952 int status;
953 struct dlm_assert_joined assert_msg;
955 mlog(0, "Sending join assert to node %u\n", node);
957 memset(&assert_msg, 0, sizeof(assert_msg));
958 assert_msg.node_idx = dlm->node_num;
959 assert_msg.name_len = strlen(dlm->name);
960 memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);
962 status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
963 &assert_msg, sizeof(assert_msg), node,
964 NULL);
965 if (status < 0)
966 mlog_errno(status);
968 return status;
971 static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
972 unsigned long *node_map)
974 int status, node, live;
976 status = 0;
977 node = -1;
978 while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
979 node + 1)) < O2NM_MAX_NODES) {
980 if (node == dlm->node_num)
981 continue;
983 do {
984 /* It is very important that this message be
985 * received so we spin until either the node
986 * has died or it gets the message. */
987 status = dlm_send_one_join_assert(dlm, node);
989 spin_lock(&dlm->spinlock);
990 live = test_bit(node, dlm->live_nodes_map);
991 spin_unlock(&dlm->spinlock);
993 if (status) {
994 mlog(ML_ERROR, "Error return %d asserting "
995 "join on node %d\n", status, node);
997 /* give us some time between errors... */
998 if (live)
999 msleep(DLM_DOMAIN_BACKOFF_MS);
1001 } while (status && live);
1005 struct domain_join_ctxt {
1006 unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
1007 unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
1010 static int dlm_should_restart_join(struct dlm_ctxt *dlm,
1011 struct domain_join_ctxt *ctxt,
1012 enum dlm_query_join_response response)
1014 int ret;
1016 if (response == JOIN_DISALLOW) {
1017 mlog(0, "Latest response of disallow -- should restart\n");
1018 return 1;
1021 spin_lock(&dlm->spinlock);
1022 /* For now, we restart the process if the node maps have
1023 * changed at all */
1024 ret = memcmp(ctxt->live_map, dlm->live_nodes_map,
1025 sizeof(dlm->live_nodes_map));
1026 spin_unlock(&dlm->spinlock);
1028 if (ret)
1029 mlog(0, "Node maps changed -- should restart\n");
1031 return ret;
1034 static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1036 int status = 0, tmpstat, node;
1037 struct domain_join_ctxt *ctxt;
1038 enum dlm_query_join_response response;
1040 mlog_entry("%p", dlm);
1042 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1043 if (!ctxt) {
1044 status = -ENOMEM;
1045 mlog_errno(status);
1046 goto bail;
1049 /* group sem locking should work for us here -- we're already
1050 * registered for heartbeat events so filling this should be
1051 * atomic wrt getting those handlers called. */
1052 o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map));
1054 spin_lock(&dlm->spinlock);
1055 memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map));
1057 __dlm_set_joining_node(dlm, dlm->node_num);
1059 spin_unlock(&dlm->spinlock);
1061 node = -1;
1062 while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
1063 node + 1)) < O2NM_MAX_NODES) {
1064 if (node == dlm->node_num)
1065 continue;
1067 status = dlm_request_join(dlm, node, &response);
1068 if (status < 0) {
1069 mlog_errno(status);
1070 goto bail;
1073 /* Ok, either we got a response or the node doesn't have a
1074 * dlm up. */
1075 if (response == JOIN_OK)
1076 set_bit(node, ctxt->yes_resp_map);
1078 if (dlm_should_restart_join(dlm, ctxt, response)) {
1079 status = -EAGAIN;
1080 goto bail;
1084 mlog(0, "Yay, done querying nodes!\n");
1086 /* Yay, everyone agree's we can join the domain. My domain is
1087 * comprised of all nodes who were put in the
1088 * yes_resp_map. Copy that into our domain map and send a join
1089 * assert message to clean up everyone elses state. */
1090 spin_lock(&dlm->spinlock);
1091 memcpy(dlm->domain_map, ctxt->yes_resp_map,
1092 sizeof(ctxt->yes_resp_map));
1093 set_bit(dlm->node_num, dlm->domain_map);
1094 spin_unlock(&dlm->spinlock);
1096 dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
1098 /* Joined state *must* be set before the joining node
1099 * information, otherwise the query_join handler may read no
1100 * current joiner but a state of NEW and tell joining nodes
1101 * we're not in the domain. */
1102 spin_lock(&dlm_domain_lock);
1103 dlm->dlm_state = DLM_CTXT_JOINED;
1104 dlm->num_joins++;
1105 spin_unlock(&dlm_domain_lock);
1107 bail:
1108 spin_lock(&dlm->spinlock);
1109 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1110 if (!status)
1111 __dlm_print_nodes(dlm);
1112 spin_unlock(&dlm->spinlock);
1114 if (ctxt) {
1115 /* Do we need to send a cancel message to any nodes? */
1116 if (status < 0) {
1117 tmpstat = dlm_send_join_cancels(dlm,
1118 ctxt->yes_resp_map,
1119 sizeof(ctxt->yes_resp_map));
1120 if (tmpstat < 0)
1121 mlog_errno(tmpstat);
1123 kfree(ctxt);
1126 mlog(0, "returning %d\n", status);
1127 return status;
1130 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
1132 o2hb_unregister_callback(&dlm->dlm_hb_up);
1133 o2hb_unregister_callback(&dlm->dlm_hb_down);
1134 o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
1137 static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
1139 int status;
1141 mlog(0, "registering handlers.\n");
1143 o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
1144 dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
1145 status = o2hb_register_callback(&dlm->dlm_hb_down);
1146 if (status)
1147 goto bail;
1149 o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
1150 dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
1151 status = o2hb_register_callback(&dlm->dlm_hb_up);
1152 if (status)
1153 goto bail;
1155 status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
1156 sizeof(struct dlm_master_request),
1157 dlm_master_request_handler,
1158 dlm, NULL, &dlm->dlm_domain_handlers);
1159 if (status)
1160 goto bail;
1162 status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
1163 sizeof(struct dlm_assert_master),
1164 dlm_assert_master_handler,
1165 dlm, dlm_assert_master_post_handler,
1166 &dlm->dlm_domain_handlers);
1167 if (status)
1168 goto bail;
1170 status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
1171 sizeof(struct dlm_create_lock),
1172 dlm_create_lock_handler,
1173 dlm, NULL, &dlm->dlm_domain_handlers);
1174 if (status)
1175 goto bail;
1177 status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
1178 DLM_CONVERT_LOCK_MAX_LEN,
1179 dlm_convert_lock_handler,
1180 dlm, NULL, &dlm->dlm_domain_handlers);
1181 if (status)
1182 goto bail;
1184 status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
1185 DLM_UNLOCK_LOCK_MAX_LEN,
1186 dlm_unlock_lock_handler,
1187 dlm, NULL, &dlm->dlm_domain_handlers);
1188 if (status)
1189 goto bail;
1191 status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
1192 DLM_PROXY_AST_MAX_LEN,
1193 dlm_proxy_ast_handler,
1194 dlm, NULL, &dlm->dlm_domain_handlers);
1195 if (status)
1196 goto bail;
1198 status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
1199 sizeof(struct dlm_exit_domain),
1200 dlm_exit_domain_handler,
1201 dlm, NULL, &dlm->dlm_domain_handlers);
1202 if (status)
1203 goto bail;
1205 status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key,
1206 sizeof(struct dlm_deref_lockres),
1207 dlm_deref_lockres_handler,
1208 dlm, NULL, &dlm->dlm_domain_handlers);
1209 if (status)
1210 goto bail;
1212 status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
1213 sizeof(struct dlm_migrate_request),
1214 dlm_migrate_request_handler,
1215 dlm, NULL, &dlm->dlm_domain_handlers);
1216 if (status)
1217 goto bail;
1219 status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
1220 DLM_MIG_LOCKRES_MAX_LEN,
1221 dlm_mig_lockres_handler,
1222 dlm, NULL, &dlm->dlm_domain_handlers);
1223 if (status)
1224 goto bail;
1226 status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
1227 sizeof(struct dlm_master_requery),
1228 dlm_master_requery_handler,
1229 dlm, NULL, &dlm->dlm_domain_handlers);
1230 if (status)
1231 goto bail;
1233 status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
1234 sizeof(struct dlm_lock_request),
1235 dlm_request_all_locks_handler,
1236 dlm, NULL, &dlm->dlm_domain_handlers);
1237 if (status)
1238 goto bail;
1240 status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
1241 sizeof(struct dlm_reco_data_done),
1242 dlm_reco_data_done_handler,
1243 dlm, NULL, &dlm->dlm_domain_handlers);
1244 if (status)
1245 goto bail;
1247 status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
1248 sizeof(struct dlm_begin_reco),
1249 dlm_begin_reco_handler,
1250 dlm, NULL, &dlm->dlm_domain_handlers);
1251 if (status)
1252 goto bail;
1254 status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
1255 sizeof(struct dlm_finalize_reco),
1256 dlm_finalize_reco_handler,
1257 dlm, NULL, &dlm->dlm_domain_handlers);
1258 if (status)
1259 goto bail;
1261 bail:
1262 if (status)
1263 dlm_unregister_domain_handlers(dlm);
1265 return status;
1268 static int dlm_join_domain(struct dlm_ctxt *dlm)
1270 int status;
1271 unsigned int backoff;
1272 unsigned int total_backoff = 0;
1274 BUG_ON(!dlm);
1276 mlog(0, "Join domain %s\n", dlm->name);
1278 status = dlm_register_domain_handlers(dlm);
1279 if (status) {
1280 mlog_errno(status);
1281 goto bail;
1284 status = dlm_launch_thread(dlm);
1285 if (status < 0) {
1286 mlog_errno(status);
1287 goto bail;
1290 status = dlm_launch_recovery_thread(dlm);
1291 if (status < 0) {
1292 mlog_errno(status);
1293 goto bail;
1296 dlm->dlm_worker = create_singlethread_workqueue("dlm_wq");
1297 if (!dlm->dlm_worker) {
1298 status = -ENOMEM;
1299 mlog_errno(status);
1300 goto bail;
1303 do {
1304 status = dlm_try_to_join_domain(dlm);
1306 /* If we're racing another node to the join, then we
1307 * need to back off temporarily and let them
1308 * complete. */
1309 #define DLM_JOIN_TIMEOUT_MSECS 90000
1310 if (status == -EAGAIN) {
1311 if (signal_pending(current)) {
1312 status = -ERESTARTSYS;
1313 goto bail;
1316 if (total_backoff >
1317 msecs_to_jiffies(DLM_JOIN_TIMEOUT_MSECS)) {
1318 status = -ERESTARTSYS;
1319 mlog(ML_NOTICE, "Timed out joining dlm domain "
1320 "%s after %u msecs\n", dlm->name,
1321 jiffies_to_msecs(total_backoff));
1322 goto bail;
1326 * <chip> After you!
1327 * <dale> No, after you!
1328 * <chip> I insist!
1329 * <dale> But you first!
1330 * ...
1332 backoff = (unsigned int)(jiffies & 0x3);
1333 backoff *= DLM_DOMAIN_BACKOFF_MS;
1334 total_backoff += backoff;
1335 mlog(0, "backoff %d\n", backoff);
1336 msleep(backoff);
1338 } while (status == -EAGAIN);
1340 if (status < 0) {
1341 mlog_errno(status);
1342 goto bail;
1345 status = 0;
1346 bail:
1347 wake_up(&dlm_domain_events);
1349 if (status) {
1350 dlm_unregister_domain_handlers(dlm);
1351 dlm_complete_thread(dlm);
1352 dlm_complete_recovery_thread(dlm);
1353 dlm_destroy_dlm_worker(dlm);
1356 return status;
1359 static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
1360 u32 key)
1362 int i;
1363 struct dlm_ctxt *dlm = NULL;
1365 dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
1366 if (!dlm) {
1367 mlog_errno(-ENOMEM);
1368 goto leave;
1371 dlm->name = kmalloc(strlen(domain) + 1, GFP_KERNEL);
1372 if (dlm->name == NULL) {
1373 mlog_errno(-ENOMEM);
1374 kfree(dlm);
1375 dlm = NULL;
1376 goto leave;
1379 dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
1380 if (!dlm->lockres_hash) {
1381 mlog_errno(-ENOMEM);
1382 kfree(dlm->name);
1383 kfree(dlm);
1384 dlm = NULL;
1385 goto leave;
1388 for (i = 0; i < DLM_HASH_BUCKETS; i++)
1389 INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
1391 strcpy(dlm->name, domain);
1392 dlm->key = key;
1393 dlm->node_num = o2nm_this_node();
1395 spin_lock_init(&dlm->spinlock);
1396 spin_lock_init(&dlm->master_lock);
1397 spin_lock_init(&dlm->ast_lock);
1398 INIT_LIST_HEAD(&dlm->list);
1399 INIT_LIST_HEAD(&dlm->dirty_list);
1400 INIT_LIST_HEAD(&dlm->reco.resources);
1401 INIT_LIST_HEAD(&dlm->reco.received);
1402 INIT_LIST_HEAD(&dlm->reco.node_data);
1403 INIT_LIST_HEAD(&dlm->purge_list);
1404 INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
1405 dlm->reco.state = 0;
1407 INIT_LIST_HEAD(&dlm->pending_asts);
1408 INIT_LIST_HEAD(&dlm->pending_basts);
1410 mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
1411 dlm->recovery_map, &(dlm->recovery_map[0]));
1413 memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map));
1414 memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map));
1415 memset(dlm->domain_map, 0, sizeof(dlm->domain_map));
1417 dlm->dlm_thread_task = NULL;
1418 dlm->dlm_reco_thread_task = NULL;
1419 dlm->dlm_worker = NULL;
1420 init_waitqueue_head(&dlm->dlm_thread_wq);
1421 init_waitqueue_head(&dlm->dlm_reco_thread_wq);
1422 init_waitqueue_head(&dlm->reco.event);
1423 init_waitqueue_head(&dlm->ast_wq);
1424 init_waitqueue_head(&dlm->migration_wq);
1425 INIT_LIST_HEAD(&dlm->master_list);
1426 INIT_LIST_HEAD(&dlm->mle_hb_events);
1428 dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
1429 init_waitqueue_head(&dlm->dlm_join_events);
1431 dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
1432 dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
1433 atomic_set(&dlm->local_resources, 0);
1434 atomic_set(&dlm->remote_resources, 0);
1435 atomic_set(&dlm->unknown_resources, 0);
1437 spin_lock_init(&dlm->work_lock);
1438 INIT_LIST_HEAD(&dlm->work_list);
1439 INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
1441 kref_init(&dlm->dlm_refs);
1442 dlm->dlm_state = DLM_CTXT_NEW;
1444 INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
1446 mlog(0, "context init: refcount %u\n",
1447 atomic_read(&dlm->dlm_refs.refcount));
1449 leave:
1450 return dlm;
1454 * dlm_register_domain: one-time setup per "domain"
1456 struct dlm_ctxt * dlm_register_domain(const char *domain,
1457 u32 key)
1459 int ret;
1460 struct dlm_ctxt *dlm = NULL;
1461 struct dlm_ctxt *new_ctxt = NULL;
1463 if (strlen(domain) > O2NM_MAX_NAME_LEN) {
1464 ret = -ENAMETOOLONG;
1465 mlog(ML_ERROR, "domain name length too long\n");
1466 goto leave;
1469 if (!o2hb_check_local_node_heartbeating()) {
1470 mlog(ML_ERROR, "the local node has not been configured, or is "
1471 "not heartbeating\n");
1472 ret = -EPROTO;
1473 goto leave;
1476 mlog(0, "register called for domain \"%s\"\n", domain);
1478 retry:
1479 dlm = NULL;
1480 if (signal_pending(current)) {
1481 ret = -ERESTARTSYS;
1482 mlog_errno(ret);
1483 goto leave;
1486 spin_lock(&dlm_domain_lock);
1488 dlm = __dlm_lookup_domain(domain);
1489 if (dlm) {
1490 if (dlm->dlm_state != DLM_CTXT_JOINED) {
1491 spin_unlock(&dlm_domain_lock);
1493 mlog(0, "This ctxt is not joined yet!\n");
1494 wait_event_interruptible(dlm_domain_events,
1495 dlm_wait_on_domain_helper(
1496 domain));
1497 goto retry;
1500 __dlm_get(dlm);
1501 dlm->num_joins++;
1503 spin_unlock(&dlm_domain_lock);
1505 ret = 0;
1506 goto leave;
1509 /* doesn't exist */
1510 if (!new_ctxt) {
1511 spin_unlock(&dlm_domain_lock);
1513 new_ctxt = dlm_alloc_ctxt(domain, key);
1514 if (new_ctxt)
1515 goto retry;
1517 ret = -ENOMEM;
1518 mlog_errno(ret);
1519 goto leave;
1522 /* a little variable switch-a-roo here... */
1523 dlm = new_ctxt;
1524 new_ctxt = NULL;
1526 /* add the new domain */
1527 list_add_tail(&dlm->list, &dlm_domains);
1528 spin_unlock(&dlm_domain_lock);
1530 ret = dlm_join_domain(dlm);
1531 if (ret) {
1532 mlog_errno(ret);
1533 dlm_put(dlm);
1534 goto leave;
1537 ret = 0;
1538 leave:
1539 if (new_ctxt)
1540 dlm_free_ctxt_mem(new_ctxt);
1542 if (ret < 0)
1543 dlm = ERR_PTR(ret);
1545 return dlm;
1547 EXPORT_SYMBOL_GPL(dlm_register_domain);
1549 static LIST_HEAD(dlm_join_handlers);
1551 static void dlm_unregister_net_handlers(void)
1553 o2net_unregister_handler_list(&dlm_join_handlers);
1556 static int dlm_register_net_handlers(void)
1558 int status = 0;
1560 status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
1561 sizeof(struct dlm_query_join_request),
1562 dlm_query_join_handler,
1563 NULL, NULL, &dlm_join_handlers);
1564 if (status)
1565 goto bail;
1567 status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
1568 sizeof(struct dlm_assert_joined),
1569 dlm_assert_joined_handler,
1570 NULL, NULL, &dlm_join_handlers);
1571 if (status)
1572 goto bail;
1574 status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
1575 sizeof(struct dlm_cancel_join),
1576 dlm_cancel_join_handler,
1577 NULL, NULL, &dlm_join_handlers);
1579 bail:
1580 if (status < 0)
1581 dlm_unregister_net_handlers();
1583 return status;
1586 /* Domain eviction callback handling.
1588 * The file system requires notification of node death *before* the
1589 * dlm completes it's recovery work, otherwise it may be able to
1590 * acquire locks on resources requiring recovery. Since the dlm can
1591 * evict a node from it's domain *before* heartbeat fires, a similar
1592 * mechanism is required. */
1594 /* Eviction is not expected to happen often, so a per-domain lock is
1595 * not necessary. Eviction callbacks are allowed to sleep for short
1596 * periods of time. */
1597 static DECLARE_RWSEM(dlm_callback_sem);
1599 void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
1600 int node_num)
1602 struct list_head *iter;
1603 struct dlm_eviction_cb *cb;
1605 down_read(&dlm_callback_sem);
1606 list_for_each(iter, &dlm->dlm_eviction_callbacks) {
1607 cb = list_entry(iter, struct dlm_eviction_cb, ec_item);
1609 cb->ec_func(node_num, cb->ec_data);
1611 up_read(&dlm_callback_sem);
1614 void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
1615 dlm_eviction_func *f,
1616 void *data)
1618 INIT_LIST_HEAD(&cb->ec_item);
1619 cb->ec_func = f;
1620 cb->ec_data = data;
1622 EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb);
1624 void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
1625 struct dlm_eviction_cb *cb)
1627 down_write(&dlm_callback_sem);
1628 list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
1629 up_write(&dlm_callback_sem);
1631 EXPORT_SYMBOL_GPL(dlm_register_eviction_cb);
1633 void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)
1635 down_write(&dlm_callback_sem);
1636 list_del_init(&cb->ec_item);
1637 up_write(&dlm_callback_sem);
1639 EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb);
1641 static int __init dlm_init(void)
1643 int status;
1645 dlm_print_version();
1647 status = dlm_init_mle_cache();
1648 if (status)
1649 return -1;
1651 status = dlm_register_net_handlers();
1652 if (status) {
1653 dlm_destroy_mle_cache();
1654 return -1;
1657 return 0;
1660 static void __exit dlm_exit (void)
1662 dlm_unregister_net_handlers();
1663 dlm_destroy_mle_cache();
1666 MODULE_AUTHOR("Oracle");
1667 MODULE_LICENSE("GPL");
1669 module_init(dlm_init);
1670 module_exit(dlm_exit);