/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012 Ericsson AB
 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "name_distr.h"

#define NODE_HTABLE_SIZE 512

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);

static DEFINE_SPINLOCK(node_create_lock);

static struct hlist_head node_htable[NODE_HTABLE_SIZE];
LIST_HEAD(tipc_node_list);
static u32 tipc_num_nodes;

static atomic_t tipc_num_links = ATOMIC_INIT(0);

/*
 * A trivial power-of-two bitmask technique is used for speed, since this
 * operation is done for every incoming TIPC packet. The number of hash table
 * entries has been chosen so that no hash chain exceeds 8 nodes and will
 * usually be much smaller (typically only a single node).
 */
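/*
 * For example, node address <1.1.10> (0x0100100a) hashes to
 * 0x0100100a & (NODE_HTABLE_SIZE - 1) = bucket 10.
 */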
static unsigned int tipc_hashfn(u32 addr)
{
	return addr & (NODE_HTABLE_SIZE - 1);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
struct tipc_node *tipc_node_find(u32 addr)
{
	struct tipc_node *node;

	if (unlikely(!in_own_cluster_exact(addr)))
		return NULL;

	hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
		if (node->addr == addr)
			return node;
	}
	return NULL;
}

/*
 * tipc_node_create - create neighboring node
 *
 * Currently, this routine is called by neighbor discovery code, which holds
 * net_lock for reading only.  We must take node_create_lock to ensure a node
 * isn't created twice if two different bearers discover the node at the same
 * time.  (It would be preferable to switch to holding net_lock in write mode,
 * but this is a non-trivial change.)
 */
struct tipc_node *tipc_node_create(u32 addr)
{
	struct tipc_node *n_ptr, *temp_node;

	spin_lock_bh(&node_create_lock);

	n_ptr = tipc_node_find(addr);
	if (n_ptr) {
		spin_unlock_bh(&node_create_lock);
		return n_ptr;
	}

	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		spin_unlock_bh(&node_create_lock);
		pr_warn("Node creation failed, no memory\n");
		return NULL;
	}

	n_ptr->addr = addr;
	spin_lock_init(&n_ptr->lock);
	INIT_HLIST_NODE(&n_ptr->hash);
	INIT_LIST_HEAD(&n_ptr->list);
	INIT_LIST_HEAD(&n_ptr->nsub);

	hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);

	list_for_each_entry(temp_node, &tipc_node_list, list) {
		if (n_ptr->addr < temp_node->addr)
			break;
	}
	list_add_tail(&n_ptr->list, &temp_node->list);
	n_ptr->block_setup = WAIT_PEER_DOWN;
	n_ptr->signature = INVALID_NODE_SIG;

	tipc_num_nodes++;

	spin_unlock_bh(&node_create_lock);
	return n_ptr;
}

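/*
 * tipc_node_delete - unlink node object from hash table and node list,
 * then free it
 */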
void tipc_node_delete(struct tipc_node *n_ptr)
{
	list_del(&n_ptr->list);
	hlist_del(&n_ptr->hash);
	kfree(n_ptr);

	tipc_num_nodes--;
}

/*
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	struct tipc_link **active = &n_ptr->active_links[0];

	n_ptr->working_links++;

	pr_info("Established link <%s> on network plane %c\n",
		l_ptr->name, l_ptr->b_ptr->net_plane);

	if (!active[0]) {
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		return;
	}
	if (l_ptr->priority < active[0]->priority) {
		pr_info("New link <%s> becomes standby\n", l_ptr->name);
		return;
	}
	tipc_link_send_duplicate(active[0], l_ptr);
	if (l_ptr->priority == active[0]->priority) {
		active[0] = l_ptr;
		return;
	}
	pr_info("Old link <%s> becomes standby\n", active[0]->name);
	if (active[1] != active[0])
		pr_info("Old link <%s> becomes standby\n", active[1]->name);
	active[0] = active[1] = l_ptr;
}

/*
 * node_select_active_links - select active link
 */
static void node_select_active_links(struct tipc_node *n_ptr)
{
	struct tipc_link **active = &n_ptr->active_links[0];
	u32 i;
	u32 highest_prio = 0;

	active[0] = active[1] = NULL;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link *l_ptr = n_ptr->links[i];

		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
		    (l_ptr->priority < highest_prio))
			continue;

		if (l_ptr->priority > highest_prio) {
			highest_prio = l_ptr->priority;
			active[0] = active[1] = l_ptr;
		} else {
			active[1] = l_ptr;
		}
	}
}

/*
 * tipc_node_link_down - handle loss of link
 */
void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	struct tipc_link **active;

	n_ptr->working_links--;

	if (!tipc_link_is_active(l_ptr)) {
		pr_info("Lost standby link <%s> on network plane %c\n",
			l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	pr_info("Lost link <%s> on network plane %c\n",
		l_ptr->name, l_ptr->b_ptr->net_plane);

	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (tipc_node_is_up(n_ptr))
		tipc_link_changeover(l_ptr);
	else
		node_lost_contact(n_ptr);
}

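/*
 * Simple state queries: a node counts as "up" while it has at least one
 * active link, and as "redundant" while more than one link is working.
 */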
int tipc_node_active_links(struct tipc_node *n_ptr)
{
	return n_ptr->active_links[0] != NULL;
}

int tipc_node_redundant_links(struct tipc_node *n_ptr)
{
	return n_ptr->working_links > 1;
}

int tipc_node_is_up(struct tipc_node *n_ptr)
{
	return tipc_node_active_links(n_ptr);
}

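/*
 * tipc_node_attach_link - register link in the slot for its bearer and
 * bump the global link count
 */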
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
	atomic_inc(&tipc_num_links);
	n_ptr->link_cnt++;
}

void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
	atomic_dec(&tipc_num_links);
	n_ptr->link_cnt--;
}

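/*
 * node_established_contact - first link to the peer has come up
 *
 * Schedules name table distribution via tipc_k_signal() and initializes
 * the peer's broadcast link state from our current broadcast position.
 */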
static void node_established_contact(struct tipc_node *n_ptr)
{
	tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
	n_ptr->bclink.oos_state = 0;
	n_ptr->bclink.acked = tipc_bclink_get_last_sent();
	tipc_bclink_add_node(n_ptr->addr);
}

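/*
 * node_name_purge_complete - name table entries for the lost node are gone,
 * so clear WAIT_NAMES_GONE and permit the node to be rediscovered
 */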
static void node_name_purge_complete(unsigned long node_addr)
{
	struct tipc_node *n_ptr;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(node_addr);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		n_ptr->block_setup &= ~WAIT_NAMES_GONE;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
}

static void node_lost_contact(struct tipc_node *n_ptr)
{
	char addr_string[16];
	u32 i;

	pr_info("Lost contact with %s\n",
		tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Flush broadcast link info associated with lost node */
	if (n_ptr->bclink.recv_permitted) {
		while (n_ptr->bclink.deferred_head) {
			struct sk_buff *buf = n_ptr->bclink.deferred_head;
			n_ptr->bclink.deferred_head = buf->next;
			kfree_skb(buf);
		}
		n_ptr->bclink.deferred_size = 0;

		if (n_ptr->bclink.reasm_head) {
			kfree_skb(n_ptr->bclink.reasm_head);
			n_ptr->bclink.reasm_head = NULL;
			n_ptr->bclink.reasm_tail = NULL;
		}

		tipc_bclink_remove_node(n_ptr->addr);
		tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);

		n_ptr->bclink.recv_permitted = false;
	}

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		tipc_link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	tipc_nodesub_notify(n_ptr);

	/* Prevent re-contact with node until cleanup is done */
	n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
	tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
}

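/*
 * tipc_node_get_nodes - build configuration reply listing all nodes in the
 * requested domain, one TIPC_TLV_NODE_INFO TLV per node
 */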
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_node_info node_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	read_lock_bh(&tipc_net_lock);
	if (!tipc_num_nodes) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_none();
	}

	/* For now, get space for all other nodes */
	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many nodes)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLVs for all nodes in scope */
	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(tipc_node_is_up(n_ptr));
		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
				    &node_info, sizeof(node_info));
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}

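/*
 * tipc_node_get_links - build configuration reply listing the broadcast link
 * plus every unicast link to a node in the requested domain, one
 * TIPC_TLV_LINK_INFO TLV per link
 */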
struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct tipc_node *n_ptr;
	struct tipc_link_info link_info;
	u32 payload_size;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (!tipc_addr_domain_valid(domain))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network address)");

	if (!tipc_own_addr)
		return tipc_cfg_reply_none();

	read_lock_bh(&tipc_net_lock);

	/* Get space for all unicast links + broadcast link */
	payload_size = TLV_SPACE(sizeof(link_info)) *
		(atomic_read(&tipc_num_links) + 1);
	if (payload_size > 32768u) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (too many links)");
	}
	buf = tipc_cfg_reply_alloc(payload_size);
	if (!buf) {
		read_unlock_bh(&tipc_net_lock);
		return NULL;
	}

	/* Add TLV for broadcast link */
	link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
	link_info.up = htonl(1);
	strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

	/* Add TLVs for any other links in scope */
	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		u32 i;

		if (!tipc_in_scope(domain, n_ptr->addr))
			continue;
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			if (!n_ptr->links[i])
				continue;
			link_info.dest = htonl(n_ptr->addr);
			link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
			strcpy(link_info.str, n_ptr->links[i]->name);
			tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
					    &link_info, sizeof(link_info));
		}
		tipc_node_unlock(n_ptr);
	}

	read_unlock_bh(&tipc_net_lock);
	return buf;
}