/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/configfs.h>

#include "tcp.h"
#include "nodemanager.h"
#include "heartbeat.h"
#include "masklog.h"
#include "sys.h"
#include "ver.h"

/* for now we operate under the assertion that there can be only one
 * cluster active at a time.  Changing this will require trickling
 * cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;

#define OCFS2_MAX_HB_CTL_PATH	256
static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";

static ctl_table ocfs2_nm_table[] = {
	{
		.ctl_name	= 1,
		.procname	= "hb_ctl_path",
		.data		= ocfs2_hb_ctl_path,
		.maxlen		= OCFS2_MAX_HB_CTL_PATH,
		.mode		= 0644,
		.proc_handler	= &proc_dostring,
		.strategy	= &sysctl_string,
	},
	{ .ctl_name = 0 }
};

static ctl_table ocfs2_mod_table[] = {
	{
		.ctl_name	= FS_OCFS2_NM,
		.procname	= "nm",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0555,
		.child		= ocfs2_nm_table
	},
	{ .ctl_name = 0 }
};

static ctl_table ocfs2_kern_table[] = {
	{
		.ctl_name	= FS_OCFS2,
		.procname	= "ocfs2",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0555,
		.child		= ocfs2_mod_table
	},
	{ .ctl_name = 0 }
};

static ctl_table ocfs2_root_table[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.data		= NULL,
		.maxlen		= 0,
		.mode		= 0555,
		.child		= ocfs2_kern_table
	},
	{ .ctl_name = 0 }
};

static struct ctl_table_header *ocfs2_table_header = NULL;

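/* With the fs -> ocfs2 -> nm nesting above, the heartbeat control binary
 * path shows up as /proc/sys/fs/ocfs2/nm/hb_ctl_path (assuming the usual
 * procfs sysctl mount); userspace may override the default
 * /sbin/ocfs2_hb_ctl location by writing to that file. */
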
const char *o2nm_get_hb_ctl_path(void)
{
	return ocfs2_hb_ctl_path;
}
EXPORT_SYMBOL_GPL(o2nm_get_hb_ctl_path);

struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
	struct o2nm_node *node = NULL;

	if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
		goto out;

	read_lock(&o2nm_single_cluster->cl_nodes_lock);
	node = o2nm_single_cluster->cl_nodes[node_num];
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&o2nm_single_cluster->cl_nodes_lock);
out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);

int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
{
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));

	if (cluster == NULL)
		return -EINVAL;

	read_lock(&cluster->cl_nodes_lock);
	memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
	read_unlock(&cluster->cl_nodes_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(o2nm_configured_node_map);

static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
						  __be32 ip_needle,
						  struct rb_node ***ret_p,
						  struct rb_node **ret_parent)
{
	struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
	struct rb_node *parent = NULL;
	struct o2nm_node *node, *ret = NULL;

	while (*p) {
		int cmp;

		parent = *p;
		node = rb_entry(parent, struct o2nm_node, nd_ip_node);

		cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
			     sizeof(ip_needle));
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else {
			ret = node;
			break;
		}
	}

	if (ret_p != NULL)
		*ret_p = p;
	if (ret_parent != NULL)
		*ret_parent = parent;

	return ret;
}

struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
{
	struct o2nm_node *node = NULL;
	struct o2nm_cluster *cluster = o2nm_single_cluster;

	if (cluster == NULL)
		goto out;

	read_lock(&cluster->cl_nodes_lock);
	node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
	if (node)
		config_item_get(&node->nd_item);
	read_unlock(&cluster->cl_nodes_lock);

out:
	return node;
}
EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);

void o2nm_node_put(struct o2nm_node *node)
{
	config_item_put(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_put);

void o2nm_node_get(struct o2nm_node *node)
{
	config_item_get(&node->nd_item);
}
EXPORT_SYMBOL_GPL(o2nm_node_get);

u8 o2nm_this_node(void)
{
	u8 node_num = O2NM_MAX_NODES;

	if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
		node_num = o2nm_single_cluster->cl_local_node;

	return node_num;
}
EXPORT_SYMBOL_GPL(o2nm_this_node);

/* node configfs bits */

static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
{
	return item ?
		container_of(to_config_group(item), struct o2nm_cluster,
			     cl_group)
		: NULL;
}

static struct o2nm_node *to_o2nm_node(struct config_item *item)
{
	return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
}

static void o2nm_node_release(struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);

	kfree(node);
}

static ssize_t o2nm_node_num_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%d\n", node->nd_num);
}

static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
	/* through the first node_set .parent
	 * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
	return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
}

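/* Concretely, with configfs mounted at the conventional /sys/kernel/config,
 * a node item lives at /sys/kernel/config/cluster/<cluster>/node/<node>:
 * nd_item's parent is the "node" group registered below and that group's
 * parent is the cluster item, which is why walking ci_parent twice lands
 * on the cluster. */
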
enum {
	O2NM_NODE_ATTR_NUM = 0,
	O2NM_NODE_ATTR_PORT,
	O2NM_NODE_ATTR_ADDRESS,
	O2NM_NODE_ATTR_LOCAL,
};

static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
				   size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp >= O2NM_MAX_NODES)
		return -ERANGE;

	/* once we're in the cl_nodes tree networking can look us up by
	 * node number and try to use our address and port attributes
	 * to connect to this node.. make sure that they've been set
	 * before writing the node attribute? */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	write_lock(&cluster->cl_nodes_lock);
	if (cluster->cl_nodes[tmp])
		p = NULL;
	else {
		cluster->cl_nodes[tmp] = node;
		node->nd_num = tmp;
		set_bit(tmp, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);
	if (p == NULL)
		return -EEXIST;

	return count;
}

static ssize_t o2nm_node_ipv4_port_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
}

static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node,
					 const char *page, size_t count)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u16)-1)
		return -ERANGE;

	node->nd_ipv4_port = htons(tmp);

	return count;
}

static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%u.%u.%u.%u\n", NIPQUAD(node->nd_ipv4_address));
}

static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
					    const char *page,
					    size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	int ret, i;
	struct rb_node **p, *parent;
	unsigned int octets[4];
	__be32 ipv4_addr = 0;

	ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
		     &octets[1], &octets[0]);
	if (ret != 4)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(octets); i++) {
		if (octets[i] > 255)
			return -ERANGE;
		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
	}

	ret = 0;
	write_lock(&cluster->cl_nodes_lock);
	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
		ret = -EEXIST;
	else {
		rb_link_node(&node->nd_ip_node, parent, p);
		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
	}
	write_unlock(&cluster->cl_nodes_lock);
	if (ret)
		return ret;

	memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));

	return count;
}

static ssize_t o2nm_node_local_read(struct o2nm_node *node, char *page)
{
	return sprintf(page, "%d\n", node->nd_local);
}

static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
				     size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	unsigned long tmp;
	char *p = (char *)page;
	ssize_t ret;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	tmp = !!tmp; /* boolean of whether this node wants to be local */

	/* setting local turns on networking rx for now so we require having
	 * set everything else first */
	if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
	    !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
		return -EINVAL; /* XXX */

	/* the only failure case is trying to set a new local node
	 * when a different one is already set */
	if (tmp && tmp == cluster->cl_has_local &&
	    cluster->cl_local_node != node->nd_num)
		return -EBUSY;

	/* bring up the rx thread if we're setting the new local node. */
	if (tmp && !cluster->cl_has_local) {
		ret = o2net_start_listening(node);
		if (ret)
			return ret;
	}

	if (!tmp && cluster->cl_has_local &&
	    cluster->cl_local_node == node->nd_num) {
		o2net_stop_listening(node);
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
	}

	node->nd_local = tmp;
	if (node->nd_local) {
		cluster->cl_has_local = tmp;
		cluster->cl_local_node = node->nd_num;
	}

	return count;
}

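/* Taken together, the checks above mean userspace has to populate a node in
 * a fixed order: write ipv4_address, ipv4_port and num first, and only then
 * write local.  Writing 1 to local on the entry describing this machine is
 * what starts the o2net listener for the cluster. */
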
struct o2nm_node_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2nm_node *, char *);
	ssize_t (*store)(struct o2nm_node *, const char *, size_t);
};

static struct o2nm_node_attribute o2nm_node_attr_num = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "num",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_num_read,
	.store	= o2nm_node_num_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_port = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "ipv4_port",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_ipv4_port_read,
	.store	= o2nm_node_ipv4_port_write,
};

static struct o2nm_node_attribute o2nm_node_attr_ipv4_address = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "ipv4_address",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_ipv4_address_read,
	.store	= o2nm_node_ipv4_address_write,
};

static struct o2nm_node_attribute o2nm_node_attr_local = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "local",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_node_local_read,
	.store	= o2nm_node_local_write,
};

static struct configfs_attribute *o2nm_node_attrs[] = {
	[O2NM_NODE_ATTR_NUM] = &o2nm_node_attr_num.attr,
	[O2NM_NODE_ATTR_PORT] = &o2nm_node_attr_ipv4_port.attr,
	[O2NM_NODE_ATTR_ADDRESS] = &o2nm_node_attr_ipv4_address.attr,
	[O2NM_NODE_ATTR_LOCAL] = &o2nm_node_attr_local.attr,
	NULL,
};

static int o2nm_attr_index(struct configfs_attribute *attr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) {
		if (attr == o2nm_node_attrs[i])
			return i;
	}
	BUG();
	return 0;
}

static ssize_t o2nm_node_show(struct config_item *item,
			      struct configfs_attribute *attr,
			      char *page)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_node_attribute *o2nm_node_attr =
		container_of(attr, struct o2nm_node_attribute, attr);
	ssize_t ret = 0;

	if (o2nm_node_attr->show)
		ret = o2nm_node_attr->show(node, page);
	return ret;
}

static ssize_t o2nm_node_store(struct config_item *item,
			       struct configfs_attribute *attr,
			       const char *page, size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_node_attribute *o2nm_node_attr =
		container_of(attr, struct o2nm_node_attribute, attr);
	ssize_t ret;
	int attr_index = o2nm_attr_index(attr);

	if (o2nm_node_attr->store == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* each node attribute may only be written once */
	if (test_bit(attr_index, &node->nd_set_attributes))
		return -EBUSY;

	ret = o2nm_node_attr->store(node, page, count);
	if (ret < count)
		goto out;

	set_bit(attr_index, &node->nd_set_attributes);
out:
	return ret;
}

static struct configfs_item_operations o2nm_node_item_ops = {
	.release		= o2nm_node_release,
	.show_attribute		= o2nm_node_show,
	.store_attribute	= o2nm_node_store,
};

static struct config_item_type o2nm_node_type = {
	.ct_item_ops	= &o2nm_node_item_ops,
	.ct_attrs	= o2nm_node_attrs,
	.ct_owner	= THIS_MODULE,
};

struct o2nm_node_group {
	struct config_group ns_group;
};

#if 0
static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
{
	return group ?
		container_of(group, struct o2nm_node_group, ns_group)
		: NULL;
}
#endif

struct o2nm_cluster_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct o2nm_cluster *, char *);
	ssize_t (*store)(struct o2nm_cluster *, const char *, size_t);
};

static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count,
				       unsigned int *val)
{
	unsigned long tmp;
	char *p = (char *)page;

	tmp = simple_strtoul(p, &p, 0);
	if (!p || (*p && (*p != '\n')))
		return -EINVAL;

	if (tmp == 0)
		return -EINVAL;
	if (tmp >= (u32)-1)
		return -ERANGE;

	*val = tmp;

	return count;
}

static ssize_t o2nm_cluster_attr_idle_timeout_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
}

static ssize_t o2nm_cluster_attr_idle_timeout_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	ssize_t ret;
	unsigned int val;

	ret = o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_idle_timeout_ms != val
		    && o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change idle timeout after "
			     "the first peer has agreed to it."
			     "  %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val <= cluster->cl_keepalive_delay_ms) {
			mlog(ML_NOTICE, "o2net: idle timeout must be larger "
			     "than keepalive delay\n");
			ret = -EINVAL;
		} else {
			cluster->cl_idle_timeout_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_attr_keepalive_delay_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
}

static ssize_t o2nm_cluster_attr_keepalive_delay_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	ssize_t ret;
	unsigned int val;

	ret = o2nm_cluster_attr_write(page, count, &val);

	if (ret > 0) {
		if (cluster->cl_keepalive_delay_ms != val
		    && o2net_num_connected_peers()) {
			mlog(ML_NOTICE,
			     "o2net: cannot change keepalive delay after"
			     " the first peer has agreed to it."
			     "  %d connected peers\n",
			     o2net_num_connected_peers());
			ret = -EINVAL;
		} else if (val >= cluster->cl_idle_timeout_ms) {
			mlog(ML_NOTICE, "o2net: keepalive delay must be "
			     "smaller than idle timeout\n");
			ret = -EINVAL;
		} else {
			cluster->cl_keepalive_delay_ms = val;
		}
	}

	return ret;
}

static ssize_t o2nm_cluster_attr_reconnect_delay_ms_read(
	struct o2nm_cluster *cluster, char *page)
{
	return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
}

static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write(
	struct o2nm_cluster *cluster, const char *page, size_t count)
{
	return o2nm_cluster_attr_write(page, count,
				       &cluster->cl_reconnect_delay_ms);
}

static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "idle_timeout_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_idle_timeout_ms_read,
	.store	= o2nm_cluster_attr_idle_timeout_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_keepalive_delay_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "keepalive_delay_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_keepalive_delay_ms_read,
	.store	= o2nm_cluster_attr_keepalive_delay_ms_write,
};

static struct o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "reconnect_delay_ms",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= o2nm_cluster_attr_reconnect_delay_ms_read,
	.store	= o2nm_cluster_attr_reconnect_delay_ms_write,
};

static struct configfs_attribute *o2nm_cluster_attrs[] = {
	&o2nm_cluster_attr_idle_timeout_ms.attr,
	&o2nm_cluster_attr_keepalive_delay_ms.attr,
	&o2nm_cluster_attr_reconnect_delay_ms.attr,
	NULL,
};

static ssize_t o2nm_cluster_show(struct config_item *item,
				 struct configfs_attribute *attr,
				 char *page)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	struct o2nm_cluster_attribute *o2nm_cluster_attr =
		container_of(attr, struct o2nm_cluster_attribute, attr);
	ssize_t ret = 0;

	if (o2nm_cluster_attr->show)
		ret = o2nm_cluster_attr->show(cluster, page);
	return ret;
}

static ssize_t o2nm_cluster_store(struct config_item *item,
				  struct configfs_attribute *attr,
				  const char *page, size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	struct o2nm_cluster_attribute *o2nm_cluster_attr =
		container_of(attr, struct o2nm_cluster_attribute, attr);
	ssize_t ret;

	if (o2nm_cluster_attr->store == NULL) {
		ret = -EINVAL;
		goto out;
	}

	ret = o2nm_cluster_attr->store(cluster, page, count);
	if (ret < count)
		goto out;
out:
	return ret;
}

static struct config_item *o2nm_node_group_make_item(struct config_group *group,
						     const char *name)
{
	struct o2nm_node *node = NULL;
	struct config_item *ret = NULL;

	if (strlen(name) > O2NM_MAX_NAME_LEN)
		goto out; /* ENAMETOOLONG */

	node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL);
	if (node == NULL)
		goto out; /* ENOMEM */

	strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
	config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
	spin_lock_init(&node->nd_lock);

	ret = &node->nd_item;

out:
	if (ret == NULL)
		kfree(node);

	return ret;
}

static void o2nm_node_group_drop_item(struct config_group *group,
				      struct config_item *item)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);

	o2net_disconnect_node(node);

	if (cluster->cl_has_local &&
	    (cluster->cl_local_node == node->nd_num)) {
		cluster->cl_has_local = 0;
		cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
		o2net_stop_listening(node);
	}

	/* XXX call into net to stop this node from trading messages */

	write_lock(&cluster->cl_nodes_lock);

	if (node->nd_ipv4_address)
		rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);

	/* nd_num might be 0 if the node number hasn't been set.. */
	if (cluster->cl_nodes[node->nd_num] == node) {
		cluster->cl_nodes[node->nd_num] = NULL;
		clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
	}
	write_unlock(&cluster->cl_nodes_lock);

	config_item_put(item);
}

static struct configfs_group_operations o2nm_node_group_group_ops = {
	.make_item	= o2nm_node_group_make_item,
	.drop_item	= o2nm_node_group_drop_item,
};

static struct config_item_type o2nm_node_group_type = {
	.ct_group_ops	= &o2nm_node_group_group_ops,
	.ct_owner	= THIS_MODULE,
};

static void o2nm_cluster_release(struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);

	kfree(cluster->cl_group.default_groups);
	kfree(cluster);
}

static struct configfs_item_operations o2nm_cluster_item_ops = {
	.release		= o2nm_cluster_release,
	.show_attribute		= o2nm_cluster_show,
	.store_attribute	= o2nm_cluster_store,
};

static struct config_item_type o2nm_cluster_type = {
	.ct_item_ops	= &o2nm_cluster_item_ops,
	.ct_attrs	= o2nm_cluster_attrs,
	.ct_owner	= THIS_MODULE,
};

struct o2nm_cluster_group {
	struct configfs_subsystem cs_subsys;
};

#if 0
static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
{
	return group ?
		container_of(to_configfs_subsystem(group),
			     struct o2nm_cluster_group, cs_subsys)
		: NULL;
}
#endif

static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
							  const char *name)
{
	struct o2nm_cluster *cluster = NULL;
	struct o2nm_node_group *ns = NULL;
	struct config_group *o2hb_group = NULL, *ret = NULL;
	void *defs = NULL;

	/* this runs under the parent dir's i_mutex; there can be only
	 * one caller in here at a time */
	if (o2nm_single_cluster)
		goto out; /* ENOSPC */

	cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
	ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL);
	defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
	o2hb_group = o2hb_alloc_hb_set();
	if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL)
		goto out;

	config_group_init_type_name(&cluster->cl_group, name,
				    &o2nm_cluster_type);
	config_group_init_type_name(&ns->ns_group, "node",
				    &o2nm_node_group_type);

	cluster->cl_group.default_groups = defs;
	cluster->cl_group.default_groups[0] = &ns->ns_group;
	cluster->cl_group.default_groups[1] = o2hb_group;
	cluster->cl_group.default_groups[2] = NULL;
	rwlock_init(&cluster->cl_nodes_lock);
	cluster->cl_node_ip_tree = RB_ROOT;
	cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
	cluster->cl_idle_timeout_ms    = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
	cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;

	ret = &cluster->cl_group;
	o2nm_single_cluster = cluster;

out:
	if (ret == NULL) {
		kfree(cluster);
		kfree(ns);
		o2hb_free_hb_set(o2hb_group);
		kfree(defs);
	}

	return ret;
}

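/* The three-slot default_groups array built above holds the "node" group,
 * the heartbeat set group returned by o2hb_alloc_hb_set(), and a NULL
 * terminator, so configfs creates those two subdirectories automatically
 * whenever a cluster directory is made. */
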
static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster(item);
	int i;
	struct config_item *killme;

	BUG_ON(o2nm_single_cluster != cluster);
	o2nm_single_cluster = NULL;

	for (i = 0; cluster->cl_group.default_groups[i]; i++) {
		killme = &cluster->cl_group.default_groups[i]->cg_item;
		cluster->cl_group.default_groups[i] = NULL;
		config_item_put(killme);
	}

	config_item_put(item);
}

static struct configfs_group_operations o2nm_cluster_group_group_ops = {
	.make_group	= o2nm_cluster_group_make_group,
	.drop_item	= o2nm_cluster_group_drop_item,
};

static struct config_item_type o2nm_cluster_group_type = {
	.ct_group_ops	= &o2nm_cluster_group_group_ops,
	.ct_owner	= THIS_MODULE,
};

static struct o2nm_cluster_group o2nm_cluster_group = {
	.cs_subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf = "cluster",
				.ci_type = &o2nm_cluster_group_type,
			},
		},
	},
};

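/* configfs maps directory operations onto the callbacks registered above:
 * mkdir of a directory under the "cluster" subsystem invokes
 * o2nm_cluster_group_make_group() and rmdir invokes
 * o2nm_cluster_group_drop_item(), while node directories are handled by
 * o2nm_node_group_make_item()/o2nm_node_group_drop_item(). */
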
static void __exit exit_o2nm(void)
{
	if (ocfs2_table_header)
		unregister_sysctl_table(ocfs2_table_header);

	/* XXX sync with hb callbacks and shut down hb? */
	o2net_unregister_hb_callbacks();
	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
	o2cb_sys_exit();
}

static int __init init_o2nm(void)
{
	int ret = -1;

	cluster_print_version();

	ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
	if (!ocfs2_table_header) {
		printk(KERN_ERR "nodemanager: unable to register sysctl\n");
		ret = -ENOMEM; /* or something. */
		goto out;
	}

	ret = o2net_register_hb_callbacks();
	if (ret)
		goto out_sysctl;

	config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
	init_MUTEX(&o2nm_cluster_group.cs_subsys.su_sem);
	ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
	if (ret) {
		printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
		goto out_callbacks;
	}

	ret = o2cb_sys_init();
	if (!ret)
		goto out;

	configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
out_callbacks:
	o2net_unregister_hb_callbacks();
out_sysctl:
	unregister_sysctl_table(ocfs2_table_header);
out:
	return ret;
}

MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");

module_init(init_o2nm)
module_exit(exit_o2nm)