2 * net/tipc/node.c: TIPC node management routines
4 * Copyright (c) 2000-2006, 2012-2014, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
40 #include "name_distr.h"
43 #define NODE_HTABLE_SIZE 512
45 static void node_lost_contact(struct tipc_node
*n_ptr
);
46 static void node_established_contact(struct tipc_node
*n_ptr
);
48 static struct hlist_head node_htable
[NODE_HTABLE_SIZE
];
49 LIST_HEAD(tipc_node_list
);
50 static u32 tipc_num_nodes
;
51 static u32 tipc_num_links
;
52 static DEFINE_SPINLOCK(node_list_lock
);
54 struct tipc_sock_conn
{
58 struct list_head list
;
61 static const struct nla_policy tipc_nl_node_policy
[TIPC_NLA_NODE_MAX
+ 1] = {
62 [TIPC_NLA_NODE_UNSPEC
] = { .type
= NLA_UNSPEC
},
63 [TIPC_NLA_NODE_ADDR
] = { .type
= NLA_U32
},
64 [TIPC_NLA_NODE_UP
] = { .type
= NLA_FLAG
}
68 * A trivial power-of-two bitmask technique is used for speed, since this
69 * operation is done for every incoming TIPC packet. The number of hash table
70 * entries has been chosen so that no hash chain exceeds 8 nodes and will
71 * usually be much smaller (typically only a single node).
73 static unsigned int tipc_hashfn(u32 addr
)
75 return addr
& (NODE_HTABLE_SIZE
- 1);
79 * tipc_node_find - locate specified node object, if it exists
81 struct tipc_node
*tipc_node_find(u32 addr
)
83 struct tipc_node
*node
;
85 if (unlikely(!in_own_cluster_exact(addr
)))
89 hlist_for_each_entry_rcu(node
, &node_htable
[tipc_hashfn(addr
)], hash
) {
90 if (node
->addr
== addr
) {
99 struct tipc_node
*tipc_node_create(u32 addr
)
101 struct tipc_node
*n_ptr
, *temp_node
;
103 spin_lock_bh(&node_list_lock
);
105 n_ptr
= kzalloc(sizeof(*n_ptr
), GFP_ATOMIC
);
107 spin_unlock_bh(&node_list_lock
);
108 pr_warn("Node creation failed, no memory\n");
113 spin_lock_init(&n_ptr
->lock
);
114 INIT_HLIST_NODE(&n_ptr
->hash
);
115 INIT_LIST_HEAD(&n_ptr
->list
);
116 INIT_LIST_HEAD(&n_ptr
->publ_list
);
117 INIT_LIST_HEAD(&n_ptr
->conn_sks
);
118 skb_queue_head_init(&n_ptr
->waiting_sks
);
119 __skb_queue_head_init(&n_ptr
->bclink
.deferred_queue
);
121 hlist_add_head_rcu(&n_ptr
->hash
, &node_htable
[tipc_hashfn(addr
)]);
123 list_for_each_entry_rcu(temp_node
, &tipc_node_list
, list
) {
124 if (n_ptr
->addr
< temp_node
->addr
)
127 list_add_tail_rcu(&n_ptr
->list
, &temp_node
->list
);
128 n_ptr
->action_flags
= TIPC_WAIT_PEER_LINKS_DOWN
;
129 n_ptr
->signature
= INVALID_NODE_SIG
;
133 spin_unlock_bh(&node_list_lock
);
137 static void tipc_node_delete(struct tipc_node
*n_ptr
)
139 list_del_rcu(&n_ptr
->list
);
140 hlist_del_rcu(&n_ptr
->hash
);
141 kfree_rcu(n_ptr
, rcu
);
146 void tipc_node_stop(void)
148 struct tipc_node
*node
, *t_node
;
150 spin_lock_bh(&node_list_lock
);
151 list_for_each_entry_safe(node
, t_node
, &tipc_node_list
, list
)
152 tipc_node_delete(node
);
153 spin_unlock_bh(&node_list_lock
);
156 int tipc_node_add_conn(u32 dnode
, u32 port
, u32 peer_port
)
158 struct tipc_node
*node
;
159 struct tipc_sock_conn
*conn
;
161 if (in_own_node(dnode
))
164 node
= tipc_node_find(dnode
);
166 pr_warn("Connecting sock to node 0x%x failed\n", dnode
);
167 return -EHOSTUNREACH
;
169 conn
= kmalloc(sizeof(*conn
), GFP_ATOMIC
);
171 return -EHOSTUNREACH
;
172 conn
->peer_node
= dnode
;
174 conn
->peer_port
= peer_port
;
176 tipc_node_lock(node
);
177 list_add_tail(&conn
->list
, &node
->conn_sks
);
178 tipc_node_unlock(node
);
182 void tipc_node_remove_conn(u32 dnode
, u32 port
)
184 struct tipc_node
*node
;
185 struct tipc_sock_conn
*conn
, *safe
;
187 if (in_own_node(dnode
))
190 node
= tipc_node_find(dnode
);
194 tipc_node_lock(node
);
195 list_for_each_entry_safe(conn
, safe
, &node
->conn_sks
, list
) {
196 if (port
!= conn
->port
)
198 list_del(&conn
->list
);
201 tipc_node_unlock(node
);
204 void tipc_node_abort_sock_conns(struct list_head
*conns
)
206 struct tipc_sock_conn
*conn
, *safe
;
209 list_for_each_entry_safe(conn
, safe
, conns
, list
) {
210 buf
= tipc_msg_create(TIPC_CRITICAL_IMPORTANCE
, TIPC_CONN_MSG
,
211 SHORT_H_SIZE
, 0, tipc_own_addr
,
212 conn
->peer_node
, conn
->port
,
213 conn
->peer_port
, TIPC_ERR_NO_NODE
);
216 list_del(&conn
->list
);
222 * tipc_node_link_up - handle addition of link
224 * Link becomes active (alone or shared) or standby, depending on its priority.
226 void tipc_node_link_up(struct tipc_node
*n_ptr
, struct tipc_link
*l_ptr
)
228 struct tipc_link
**active
= &n_ptr
->active_links
[0];
230 n_ptr
->working_links
++;
231 n_ptr
->action_flags
|= TIPC_NOTIFY_LINK_UP
;
232 n_ptr
->link_id
= l_ptr
->peer_bearer_id
<< 16 | l_ptr
->bearer_id
;
234 pr_info("Established link <%s> on network plane %c\n",
235 l_ptr
->name
, l_ptr
->net_plane
);
238 active
[0] = active
[1] = l_ptr
;
239 node_established_contact(n_ptr
);
242 if (l_ptr
->priority
< active
[0]->priority
) {
243 pr_info("New link <%s> becomes standby\n", l_ptr
->name
);
246 tipc_link_dup_queue_xmit(active
[0], l_ptr
);
247 if (l_ptr
->priority
== active
[0]->priority
) {
251 pr_info("Old link <%s> becomes standby\n", active
[0]->name
);
252 if (active
[1] != active
[0])
253 pr_info("Old link <%s> becomes standby\n", active
[1]->name
);
254 active
[0] = active
[1] = l_ptr
;
256 /* Leave room for changeover header when returning 'mtu' to users: */
257 n_ptr
->act_mtus
[0] = active
[0]->max_pkt
- INT_H_SIZE
;
258 n_ptr
->act_mtus
[1] = active
[1]->max_pkt
- INT_H_SIZE
;
262 * node_select_active_links - select active link
264 static void node_select_active_links(struct tipc_node
*n_ptr
)
266 struct tipc_link
**active
= &n_ptr
->active_links
[0];
268 u32 highest_prio
= 0;
270 active
[0] = active
[1] = NULL
;
272 for (i
= 0; i
< MAX_BEARERS
; i
++) {
273 struct tipc_link
*l_ptr
= n_ptr
->links
[i
];
275 if (!l_ptr
|| !tipc_link_is_up(l_ptr
) ||
276 (l_ptr
->priority
< highest_prio
))
279 if (l_ptr
->priority
> highest_prio
) {
280 highest_prio
= l_ptr
->priority
;
281 active
[0] = active
[1] = l_ptr
;
289 * tipc_node_link_down - handle loss of link
291 void tipc_node_link_down(struct tipc_node
*n_ptr
, struct tipc_link
*l_ptr
)
293 struct tipc_link
**active
;
295 n_ptr
->working_links
--;
296 n_ptr
->action_flags
|= TIPC_NOTIFY_LINK_DOWN
;
297 n_ptr
->link_id
= l_ptr
->peer_bearer_id
<< 16 | l_ptr
->bearer_id
;
299 if (!tipc_link_is_active(l_ptr
)) {
300 pr_info("Lost standby link <%s> on network plane %c\n",
301 l_ptr
->name
, l_ptr
->net_plane
);
304 pr_info("Lost link <%s> on network plane %c\n",
305 l_ptr
->name
, l_ptr
->net_plane
);
307 active
= &n_ptr
->active_links
[0];
308 if (active
[0] == l_ptr
)
309 active
[0] = active
[1];
310 if (active
[1] == l_ptr
)
311 active
[1] = active
[0];
312 if (active
[0] == l_ptr
)
313 node_select_active_links(n_ptr
);
314 if (tipc_node_is_up(n_ptr
))
315 tipc_link_failover_send_queue(l_ptr
);
317 node_lost_contact(n_ptr
);
319 /* Leave room for changeover header when returning 'mtu' to users: */
321 n_ptr
->act_mtus
[0] = active
[0]->max_pkt
- INT_H_SIZE
;
322 n_ptr
->act_mtus
[1] = active
[1]->max_pkt
- INT_H_SIZE
;
326 /* Loopback link went down? No fragmentation needed from now on. */
327 if (n_ptr
->addr
== tipc_own_addr
) {
328 n_ptr
->act_mtus
[0] = MAX_MSG_SIZE
;
329 n_ptr
->act_mtus
[1] = MAX_MSG_SIZE
;
333 int tipc_node_active_links(struct tipc_node
*n_ptr
)
335 return n_ptr
->active_links
[0] != NULL
;
/*
 * tipc_node_is_up - a node is "up" iff it has at least one active link
 */
int tipc_node_is_up(struct tipc_node *n_ptr)
{
	return tipc_node_active_links(n_ptr);
}
343 void tipc_node_attach_link(struct tipc_node
*n_ptr
, struct tipc_link
*l_ptr
)
345 n_ptr
->links
[l_ptr
->bearer_id
] = l_ptr
;
346 spin_lock_bh(&node_list_lock
);
348 spin_unlock_bh(&node_list_lock
);
352 void tipc_node_detach_link(struct tipc_node
*n_ptr
, struct tipc_link
*l_ptr
)
356 for (i
= 0; i
< MAX_BEARERS
; i
++) {
357 if (l_ptr
!= n_ptr
->links
[i
])
359 n_ptr
->links
[i
] = NULL
;
360 spin_lock_bh(&node_list_lock
);
362 spin_unlock_bh(&node_list_lock
);
367 static void node_established_contact(struct tipc_node
*n_ptr
)
369 n_ptr
->action_flags
|= TIPC_NOTIFY_NODE_UP
;
370 n_ptr
->bclink
.oos_state
= 0;
371 n_ptr
->bclink
.acked
= tipc_bclink_get_last_sent();
372 tipc_bclink_add_node(n_ptr
->addr
);
375 static void node_lost_contact(struct tipc_node
*n_ptr
)
377 char addr_string
[16];
380 pr_info("Lost contact with %s\n",
381 tipc_addr_string_fill(addr_string
, n_ptr
->addr
));
383 /* Flush broadcast link info associated with lost node */
384 if (n_ptr
->bclink
.recv_permitted
) {
385 __skb_queue_purge(&n_ptr
->bclink
.deferred_queue
);
387 if (n_ptr
->bclink
.reasm_buf
) {
388 kfree_skb(n_ptr
->bclink
.reasm_buf
);
389 n_ptr
->bclink
.reasm_buf
= NULL
;
392 tipc_bclink_remove_node(n_ptr
->addr
);
393 tipc_bclink_acknowledge(n_ptr
, INVALID_LINK_SEQ
);
395 n_ptr
->bclink
.recv_permitted
= false;
398 /* Abort link changeover */
399 for (i
= 0; i
< MAX_BEARERS
; i
++) {
400 struct tipc_link
*l_ptr
= n_ptr
->links
[i
];
403 l_ptr
->reset_checkpoint
= l_ptr
->next_in_no
;
404 l_ptr
->exp_msg_count
= 0;
405 tipc_link_reset_fragments(l_ptr
);
408 n_ptr
->action_flags
&= ~TIPC_WAIT_OWN_LINKS_DOWN
;
410 /* Notify subscribers and prevent re-contact with node until
413 n_ptr
->action_flags
|= TIPC_WAIT_PEER_LINKS_DOWN
|
414 TIPC_NOTIFY_NODE_DOWN
;
417 struct sk_buff
*tipc_node_get_nodes(const void *req_tlv_area
, int req_tlv_space
)
421 struct tipc_node
*n_ptr
;
422 struct tipc_node_info node_info
;
425 if (!TLV_CHECK(req_tlv_area
, req_tlv_space
, TIPC_TLV_NET_ADDR
))
426 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR
);
428 domain
= ntohl(*(__be32
*)TLV_DATA(req_tlv_area
));
429 if (!tipc_addr_domain_valid(domain
))
430 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
431 " (network address)");
433 spin_lock_bh(&node_list_lock
);
434 if (!tipc_num_nodes
) {
435 spin_unlock_bh(&node_list_lock
);
436 return tipc_cfg_reply_none();
439 /* For now, get space for all other nodes */
440 payload_size
= TLV_SPACE(sizeof(node_info
)) * tipc_num_nodes
;
441 if (payload_size
> 32768u) {
442 spin_unlock_bh(&node_list_lock
);
443 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
444 " (too many nodes)");
446 spin_unlock_bh(&node_list_lock
);
448 buf
= tipc_cfg_reply_alloc(payload_size
);
452 /* Add TLVs for all nodes in scope */
454 list_for_each_entry_rcu(n_ptr
, &tipc_node_list
, list
) {
455 if (!tipc_in_scope(domain
, n_ptr
->addr
))
457 node_info
.addr
= htonl(n_ptr
->addr
);
458 node_info
.up
= htonl(tipc_node_is_up(n_ptr
));
459 tipc_cfg_append_tlv(buf
, TIPC_TLV_NODE_INFO
,
460 &node_info
, sizeof(node_info
));
466 struct sk_buff
*tipc_node_get_links(const void *req_tlv_area
, int req_tlv_space
)
470 struct tipc_node
*n_ptr
;
471 struct tipc_link_info link_info
;
474 if (!TLV_CHECK(req_tlv_area
, req_tlv_space
, TIPC_TLV_NET_ADDR
))
475 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR
);
477 domain
= ntohl(*(__be32
*)TLV_DATA(req_tlv_area
));
478 if (!tipc_addr_domain_valid(domain
))
479 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
480 " (network address)");
483 return tipc_cfg_reply_none();
485 spin_lock_bh(&node_list_lock
);
486 /* Get space for all unicast links + broadcast link */
487 payload_size
= TLV_SPACE((sizeof(link_info
)) * (tipc_num_links
+ 1));
488 if (payload_size
> 32768u) {
489 spin_unlock_bh(&node_list_lock
);
490 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
491 " (too many links)");
493 spin_unlock_bh(&node_list_lock
);
495 buf
= tipc_cfg_reply_alloc(payload_size
);
499 /* Add TLV for broadcast link */
500 link_info
.dest
= htonl(tipc_cluster_mask(tipc_own_addr
));
501 link_info
.up
= htonl(1);
502 strlcpy(link_info
.str
, tipc_bclink_name
, TIPC_MAX_LINK_NAME
);
503 tipc_cfg_append_tlv(buf
, TIPC_TLV_LINK_INFO
, &link_info
, sizeof(link_info
));
505 /* Add TLVs for any other links in scope */
507 list_for_each_entry_rcu(n_ptr
, &tipc_node_list
, list
) {
510 if (!tipc_in_scope(domain
, n_ptr
->addr
))
512 tipc_node_lock(n_ptr
);
513 for (i
= 0; i
< MAX_BEARERS
; i
++) {
514 if (!n_ptr
->links
[i
])
516 link_info
.dest
= htonl(n_ptr
->addr
);
517 link_info
.up
= htonl(tipc_link_is_up(n_ptr
->links
[i
]));
518 strcpy(link_info
.str
, n_ptr
->links
[i
]->name
);
519 tipc_cfg_append_tlv(buf
, TIPC_TLV_LINK_INFO
,
520 &link_info
, sizeof(link_info
));
522 tipc_node_unlock(n_ptr
);
529 * tipc_node_get_linkname - get the name of a link
531 * @bearer_id: id of the bearer
532 * @node: peer node address
533 * @linkname: link name output buffer
535 * Returns 0 on success
537 int tipc_node_get_linkname(u32 bearer_id
, u32 addr
, char *linkname
, size_t len
)
539 struct tipc_link
*link
;
540 struct tipc_node
*node
= tipc_node_find(addr
);
542 if ((bearer_id
>= MAX_BEARERS
) || !node
)
544 tipc_node_lock(node
);
545 link
= node
->links
[bearer_id
];
547 strncpy(linkname
, link
->name
, len
);
548 tipc_node_unlock(node
);
551 tipc_node_unlock(node
);
555 void tipc_node_unlock(struct tipc_node
*node
)
557 LIST_HEAD(nsub_list
);
559 struct sk_buff_head waiting_sks
;
561 int flags
= node
->action_flags
;
564 if (likely(!flags
)) {
565 spin_unlock_bh(&node
->lock
);
570 link_id
= node
->link_id
;
571 __skb_queue_head_init(&waiting_sks
);
573 if (flags
& TIPC_WAKEUP_USERS
)
574 skb_queue_splice_init(&node
->waiting_sks
, &waiting_sks
);
576 if (flags
& TIPC_NOTIFY_NODE_DOWN
) {
577 list_replace_init(&node
->publ_list
, &nsub_list
);
578 list_replace_init(&node
->conn_sks
, &conn_sks
);
580 node
->action_flags
&= ~(TIPC_WAKEUP_USERS
| TIPC_NOTIFY_NODE_DOWN
|
581 TIPC_NOTIFY_NODE_UP
| TIPC_NOTIFY_LINK_UP
|
582 TIPC_NOTIFY_LINK_DOWN
|
583 TIPC_WAKEUP_BCAST_USERS
);
585 spin_unlock_bh(&node
->lock
);
587 while (!skb_queue_empty(&waiting_sks
))
588 tipc_sk_rcv(__skb_dequeue(&waiting_sks
));
590 if (!list_empty(&conn_sks
))
591 tipc_node_abort_sock_conns(&conn_sks
);
593 if (!list_empty(&nsub_list
))
594 tipc_publ_notify(&nsub_list
, addr
);
596 if (flags
& TIPC_WAKEUP_BCAST_USERS
)
597 tipc_bclink_wakeup_users();
599 if (flags
& TIPC_NOTIFY_NODE_UP
)
600 tipc_named_node_up(addr
);
602 if (flags
& TIPC_NOTIFY_LINK_UP
)
603 tipc_nametbl_publish(TIPC_LINK_STATE
, addr
, addr
,
604 TIPC_NODE_SCOPE
, link_id
, addr
);
606 if (flags
& TIPC_NOTIFY_LINK_DOWN
)
607 tipc_nametbl_withdraw(TIPC_LINK_STATE
, addr
,
611 /* Caller should hold node lock for the passed node */
612 static int __tipc_nl_add_node(struct tipc_nl_msg
*msg
, struct tipc_node
*node
)
615 struct nlattr
*attrs
;
617 hdr
= genlmsg_put(msg
->skb
, msg
->portid
, msg
->seq
, &tipc_genl_v2_family
,
618 NLM_F_MULTI
, TIPC_NL_NODE_GET
);
622 attrs
= nla_nest_start(msg
->skb
, TIPC_NLA_NODE
);
626 if (nla_put_u32(msg
->skb
, TIPC_NLA_NODE_ADDR
, node
->addr
))
628 if (tipc_node_is_up(node
))
629 if (nla_put_flag(msg
->skb
, TIPC_NLA_NODE_UP
))
632 nla_nest_end(msg
->skb
, attrs
);
633 genlmsg_end(msg
->skb
, hdr
);
638 nla_nest_cancel(msg
->skb
, attrs
);
640 genlmsg_cancel(msg
->skb
, hdr
);
645 int tipc_nl_node_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
648 int done
= cb
->args
[0];
649 int last_addr
= cb
->args
[1];
650 struct tipc_node
*node
;
651 struct tipc_nl_msg msg
;
657 msg
.portid
= NETLINK_CB(cb
->skb
).portid
;
658 msg
.seq
= cb
->nlh
->nlmsg_seq
;
662 if (last_addr
&& !tipc_node_find(last_addr
)) {
664 /* We never set seq or call nl_dump_check_consistent() this
665 * means that setting prev_seq here will cause the consistence
666 * check to fail in the netlink callback handler. Resulting in
667 * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if
668 * the node state changed while we released the lock.
674 list_for_each_entry_rcu(node
, &tipc_node_list
, list
) {
676 if (node
->addr
== last_addr
)
682 tipc_node_lock(node
);
683 err
= __tipc_nl_add_node(&msg
, node
);
685 last_addr
= node
->addr
;
686 tipc_node_unlock(node
);
690 tipc_node_unlock(node
);
695 cb
->args
[1] = last_addr
;