1 /*
2 * net/tipc/link.c: TIPC link code
4 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include "core.h"
38 #include "link.h"
39 #include "port.h"
40 #include "socket.h"
41 #include "name_distr.h"
42 #include "discover.h"
43 #include "config.h"
45 #include <linux/pkt_sched.h>
48 * Error message prefixes
50 static const char *link_co_err = "Link changeover error, ";
51 static const char *link_rst_msg = "Resetting link ";
52 static const char *link_unk_evt = "Unknown link event ";
55 * Out-of-range value for link session numbers
57 #define INVALID_SESSION 0x10000
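/*
 * Illustration: the session number is written into the protocol header with
 * a 0xffff mask (see tipc_link_create() and tipc_link_reset() below), so
 * valid sessions appear to span 0..0xffff; 0x10000 is the first value
 * outside that range, which is why it can denote "no known session".
 */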
60 * Link state events:
62 #define STARTING_EVT 856384768 /* link processing trigger */
63 #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
64 #define TIMEOUT_EVT 560817u /* link timer expired */
67  * The following two 'message types' are really just implementation
68 * data conveniently stored in the message header.
69 * They must not be considered part of the protocol
71 #define OPEN_MSG 0
72 #define CLOSED_MSG 1
75 * State value stored in 'exp_msg_count'
77 #define START_CHANGEOVER 100000u
79 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
80 struct sk_buff *buf);
81 static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
82 static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
83 struct sk_buff **buf);
84 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
85 static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
86 struct iovec const *msg_sect,
87 unsigned int len, u32 destnode);
88 static void link_state_event(struct tipc_link *l_ptr, u32 event);
89 static void link_reset_statistics(struct tipc_link *l_ptr);
90 static void link_print(struct tipc_link *l_ptr, const char *str);
91 static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
92 static void tipc_link_sync_xmit(struct tipc_link *l);
93 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
96 * Simple link routines
98 static unsigned int align(unsigned int i)
100 return (i + 3) & ~3u;
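/*
 * Worked example: (i + 3) & ~3u rounds a length up to the next multiple of
 * four, e.g. align(0) == 0, align(5) == 8, align(8) == 8. Bundle offsets in
 * link_bundle_buf() below are kept 4-byte aligned this way.
 */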
103 static void link_init_max_pkt(struct tipc_link *l_ptr)
105 struct tipc_bearer *b_ptr;
106 u32 max_pkt;
108 rcu_read_lock();
109 b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
110 if (!b_ptr) {
111 rcu_read_unlock();
112 return;
114 max_pkt = (b_ptr->mtu & ~3);
115 rcu_read_unlock();
117 if (max_pkt > MAX_MSG_SIZE)
118 max_pkt = MAX_MSG_SIZE;
120 l_ptr->max_pkt_target = max_pkt;
121 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
122 l_ptr->max_pkt = l_ptr->max_pkt_target;
123 else
124 l_ptr->max_pkt = MAX_PKT_DEFAULT;
126 l_ptr->max_pkt_probes = 0;
129 static u32 link_next_sent(struct tipc_link *l_ptr)
131 if (l_ptr->next_out)
132 return buf_seqno(l_ptr->next_out);
133 return mod(l_ptr->next_out_no);
136 static u32 link_last_sent(struct tipc_link *l_ptr)
138 return mod(link_next_sent(l_ptr) - 1);
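/*
 * Note, assuming mod() masks a value into the 16-bit sequence number space
 * (as the 0xffff arithmetic elsewhere in this file suggests):
 * link_next_sent() yields the sequence number of the next packet to leave
 * the link, and link_last_sent() is that value minus one, modulo the
 * sequence space, i.e. the last packet already sent.
 */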
142 * Simple non-static link routines (i.e. referenced outside this file)
144 int tipc_link_is_up(struct tipc_link *l_ptr)
146 if (!l_ptr)
147 return 0;
148 return link_working_working(l_ptr) || link_working_unknown(l_ptr);
151 int tipc_link_is_active(struct tipc_link *l_ptr)
153 return (l_ptr->owner->active_links[0] == l_ptr) ||
154 (l_ptr->owner->active_links[1] == l_ptr);
158 * link_timeout - handle expiration of link timer
159 * @l_ptr: pointer to link
161 static void link_timeout(struct tipc_link *l_ptr)
163 tipc_node_lock(l_ptr->owner);
165 /* update counters used in statistical profiling of send traffic */
166 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
167 l_ptr->stats.queue_sz_counts++;
169 if (l_ptr->first_out) {
170 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
171 u32 length = msg_size(msg);
173 if ((msg_user(msg) == MSG_FRAGMENTER) &&
174 (msg_type(msg) == FIRST_FRAGMENT)) {
175 length = msg_size(msg_get_wrapped(msg));
177 if (length) {
178 l_ptr->stats.msg_lengths_total += length;
179 l_ptr->stats.msg_length_counts++;
180 if (length <= 64)
181 l_ptr->stats.msg_length_profile[0]++;
182 else if (length <= 256)
183 l_ptr->stats.msg_length_profile[1]++;
184 else if (length <= 1024)
185 l_ptr->stats.msg_length_profile[2]++;
186 else if (length <= 4096)
187 l_ptr->stats.msg_length_profile[3]++;
188 else if (length <= 16384)
189 l_ptr->stats.msg_length_profile[4]++;
190 else if (length <= 32768)
191 l_ptr->stats.msg_length_profile[5]++;
192 else
193 l_ptr->stats.msg_length_profile[6]++;
197 /* do all other link processing performed on a periodic basis */
199 link_state_event(l_ptr, TIMEOUT_EVT);
201 if (l_ptr->next_out)
202 tipc_link_push_queue(l_ptr);
204 tipc_node_unlock(l_ptr->owner);
207 static void link_set_timer(struct tipc_link *l_ptr, u32 time)
209 k_start_timer(&l_ptr->timer, time);
213 * tipc_link_create - create a new link
214 * @n_ptr: pointer to associated node
215 * @b_ptr: pointer to associated bearer
216 * @media_addr: media address to use when sending messages over link
218 * Returns pointer to link.
220 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
221 struct tipc_bearer *b_ptr,
222 const struct tipc_media_addr *media_addr)
224 struct tipc_link *l_ptr;
225 struct tipc_msg *msg;
226 char *if_name;
227 char addr_string[16];
228 u32 peer = n_ptr->addr;
230 if (n_ptr->link_cnt >= 2) {
231 tipc_addr_string_fill(addr_string, n_ptr->addr);
232 pr_err("Attempt to establish third link to %s\n", addr_string);
233 return NULL;
236 if (n_ptr->links[b_ptr->identity]) {
237 tipc_addr_string_fill(addr_string, n_ptr->addr);
238 pr_err("Attempt to establish second link on <%s> to %s\n",
239 b_ptr->name, addr_string);
240 return NULL;
243 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
244 if (!l_ptr) {
245 pr_warn("Link creation failed, no memory\n");
246 return NULL;
249 l_ptr->addr = peer;
250 if_name = strchr(b_ptr->name, ':') + 1;
251 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
252 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
253 tipc_node(tipc_own_addr),
254 if_name,
255 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
256 /* note: peer i/f name is updated by reset/activate message */
257 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
258 l_ptr->owner = n_ptr;
259 l_ptr->checkpoint = 1;
260 l_ptr->peer_session = INVALID_SESSION;
261 l_ptr->bearer_id = b_ptr->identity;
262 link_set_supervision_props(l_ptr, b_ptr->tolerance);
263 l_ptr->state = RESET_UNKNOWN;
265 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
266 msg = l_ptr->pmsg;
267 tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
268 msg_set_size(msg, sizeof(l_ptr->proto_msg));
269 msg_set_session(msg, (tipc_random & 0xffff));
270 msg_set_bearer_id(msg, b_ptr->identity);
271 strcpy((char *)msg_data(msg), if_name);
273 l_ptr->priority = b_ptr->priority;
274 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
276 l_ptr->net_plane = b_ptr->net_plane;
277 link_init_max_pkt(l_ptr);
279 l_ptr->next_out_no = 1;
280 INIT_LIST_HEAD(&l_ptr->waiting_ports);
282 link_reset_statistics(l_ptr);
284 tipc_node_attach_link(n_ptr, l_ptr);
286 k_init_timer(&l_ptr->timer, (Handler)link_timeout,
287 (unsigned long)l_ptr);
289 link_state_event(l_ptr, STARTING_EVT);
291 return l_ptr;
294 void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
296 struct tipc_link *l_ptr;
297 struct tipc_node *n_ptr;
299 rcu_read_lock();
300 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
301 tipc_node_lock(n_ptr);
302 l_ptr = n_ptr->links[bearer_id];
303 if (l_ptr) {
304 tipc_link_reset(l_ptr);
305 if (shutting_down || !tipc_node_is_up(n_ptr)) {
306 tipc_node_detach_link(l_ptr->owner, l_ptr);
307 tipc_link_reset_fragments(l_ptr);
308 tipc_node_unlock(n_ptr);
310 /* Nobody else can access this link now: */
311 del_timer_sync(&l_ptr->timer);
312 kfree(l_ptr);
313 } else {
314 /* Detach/delete when failover is finished: */
315 l_ptr->flags |= LINK_STOPPED;
316 tipc_node_unlock(n_ptr);
317 del_timer_sync(&l_ptr->timer);
319 continue;
321 tipc_node_unlock(n_ptr);
323 rcu_read_unlock();
327 * link_schedule_port - schedule port for deferred sending
328 * @l_ptr: pointer to link
329 * @origport: reference to sending port
330 * @sz: amount of data to be sent
332 * Schedules port for renewed sending of messages after link congestion
333 * has abated.
335 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
337 struct tipc_port *p_ptr;
339 spin_lock_bh(&tipc_port_list_lock);
340 p_ptr = tipc_port_lock(origport);
341 if (p_ptr) {
342 if (!list_empty(&p_ptr->wait_list))
343 goto exit;
344 p_ptr->congested = 1;
345 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
346 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
347 l_ptr->stats.link_congs++;
348 exit:
349 tipc_port_unlock(p_ptr);
351 spin_unlock_bh(&tipc_port_list_lock);
352 return -ELINKCONG;
355 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
357 struct tipc_port *p_ptr;
358 struct tipc_port *temp_p_ptr;
359 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
361 if (all)
362 win = 100000;
363 if (win <= 0)
364 return;
365 if (!spin_trylock_bh(&tipc_port_list_lock))
366 return;
367 if (link_congested(l_ptr))
368 goto exit;
369 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
370 wait_list) {
371 if (win <= 0)
372 break;
373 list_del_init(&p_ptr->wait_list);
374 spin_lock_bh(p_ptr->lock);
375 p_ptr->congested = 0;
376 tipc_port_wakeup(p_ptr);
377 win -= p_ptr->waiting_pkts;
378 spin_unlock_bh(p_ptr->lock);
381 exit:
382 spin_unlock_bh(&tipc_port_list_lock);
386 * link_release_outqueue - purge link's outbound message queue
387 * @l_ptr: pointer to link
389 static void link_release_outqueue(struct tipc_link *l_ptr)
391 kfree_skb_list(l_ptr->first_out);
392 l_ptr->first_out = NULL;
393 l_ptr->out_queue_size = 0;
397 * tipc_link_reset_fragments - purge link's inbound message fragments queue
398 * @l_ptr: pointer to link
400 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
402 kfree_skb(l_ptr->reasm_buf);
403 l_ptr->reasm_buf = NULL;
407 * tipc_link_purge_queues - purge all pkt queues associated with link
408 * @l_ptr: pointer to link
410 void tipc_link_purge_queues(struct tipc_link *l_ptr)
412 kfree_skb_list(l_ptr->oldest_deferred_in);
413 kfree_skb_list(l_ptr->first_out);
414 tipc_link_reset_fragments(l_ptr);
415 kfree_skb(l_ptr->proto_msg_queue);
416 l_ptr->proto_msg_queue = NULL;
419 void tipc_link_reset(struct tipc_link *l_ptr)
421 u32 prev_state = l_ptr->state;
422 u32 checkpoint = l_ptr->next_in_no;
423 int was_active_link = tipc_link_is_active(l_ptr);
425 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
427 /* Link is down, accept any session */
428 l_ptr->peer_session = INVALID_SESSION;
430 /* Prepare for max packet size negotiation */
431 link_init_max_pkt(l_ptr);
433 l_ptr->state = RESET_UNKNOWN;
435 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
436 return;
438 tipc_node_link_down(l_ptr->owner, l_ptr);
439 tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
441 if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
442 l_ptr->reset_checkpoint = checkpoint;
443 l_ptr->exp_msg_count = START_CHANGEOVER;
446 /* Clean up all queues: */
447 link_release_outqueue(l_ptr);
448 kfree_skb(l_ptr->proto_msg_queue);
449 l_ptr->proto_msg_queue = NULL;
450 kfree_skb_list(l_ptr->oldest_deferred_in);
451 if (!list_empty(&l_ptr->waiting_ports))
452 tipc_link_wakeup_ports(l_ptr, 1);
454 l_ptr->retransm_queue_head = 0;
455 l_ptr->retransm_queue_size = 0;
456 l_ptr->last_out = NULL;
457 l_ptr->first_out = NULL;
458 l_ptr->next_out = NULL;
459 l_ptr->unacked_window = 0;
460 l_ptr->checkpoint = 1;
461 l_ptr->next_out_no = 1;
462 l_ptr->deferred_inqueue_sz = 0;
463 l_ptr->oldest_deferred_in = NULL;
464 l_ptr->newest_deferred_in = NULL;
465 l_ptr->fsm_msg_cnt = 0;
466 l_ptr->stale_count = 0;
467 link_reset_statistics(l_ptr);
470 void tipc_link_reset_list(unsigned int bearer_id)
472 struct tipc_link *l_ptr;
473 struct tipc_node *n_ptr;
475 rcu_read_lock();
476 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
477 tipc_node_lock(n_ptr);
478 l_ptr = n_ptr->links[bearer_id];
479 if (l_ptr)
480 tipc_link_reset(l_ptr);
481 tipc_node_unlock(n_ptr);
483 rcu_read_unlock();
486 static void link_activate(struct tipc_link *l_ptr)
488 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
489 tipc_node_link_up(l_ptr->owner, l_ptr);
490 tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
494 * link_state_event - link finite state machine
495 * @l_ptr: pointer to link
496 * @event: state machine event to process
498 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
500 struct tipc_link *other;
501 u32 cont_intv = l_ptr->continuity_interval;
503 if (l_ptr->flags & LINK_STOPPED)
504 return;
506 if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
507 return; /* Not yet. */
509 /* Check whether changeover is going on */
510 if (l_ptr->exp_msg_count) {
511 if (event == TIMEOUT_EVT)
512 link_set_timer(l_ptr, cont_intv);
513 return;
516 switch (l_ptr->state) {
517 case WORKING_WORKING:
518 switch (event) {
519 case TRAFFIC_MSG_EVT:
520 case ACTIVATE_MSG:
521 break;
522 case TIMEOUT_EVT:
523 if (l_ptr->next_in_no != l_ptr->checkpoint) {
524 l_ptr->checkpoint = l_ptr->next_in_no;
525 if (tipc_bclink_acks_missing(l_ptr->owner)) {
526 tipc_link_proto_xmit(l_ptr, STATE_MSG,
527 0, 0, 0, 0, 0);
528 l_ptr->fsm_msg_cnt++;
529 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
530 tipc_link_proto_xmit(l_ptr, STATE_MSG,
531 1, 0, 0, 0, 0);
532 l_ptr->fsm_msg_cnt++;
534 link_set_timer(l_ptr, cont_intv);
535 break;
537 l_ptr->state = WORKING_UNKNOWN;
538 l_ptr->fsm_msg_cnt = 0;
539 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
540 l_ptr->fsm_msg_cnt++;
541 link_set_timer(l_ptr, cont_intv / 4);
542 break;
543 case RESET_MSG:
544 pr_info("%s<%s>, requested by peer\n", link_rst_msg,
545 l_ptr->name);
546 tipc_link_reset(l_ptr);
547 l_ptr->state = RESET_RESET;
548 l_ptr->fsm_msg_cnt = 0;
549 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
550 0, 0, 0, 0, 0);
551 l_ptr->fsm_msg_cnt++;
552 link_set_timer(l_ptr, cont_intv);
553 break;
554 default:
555 pr_err("%s%u in WW state\n", link_unk_evt, event);
557 break;
558 case WORKING_UNKNOWN:
559 switch (event) {
560 case TRAFFIC_MSG_EVT:
561 case ACTIVATE_MSG:
562 l_ptr->state = WORKING_WORKING;
563 l_ptr->fsm_msg_cnt = 0;
564 link_set_timer(l_ptr, cont_intv);
565 break;
566 case RESET_MSG:
567 pr_info("%s<%s>, requested by peer while probing\n",
568 link_rst_msg, l_ptr->name);
569 tipc_link_reset(l_ptr);
570 l_ptr->state = RESET_RESET;
571 l_ptr->fsm_msg_cnt = 0;
572 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
573 0, 0, 0, 0, 0);
574 l_ptr->fsm_msg_cnt++;
575 link_set_timer(l_ptr, cont_intv);
576 break;
577 case TIMEOUT_EVT:
578 if (l_ptr->next_in_no != l_ptr->checkpoint) {
579 l_ptr->state = WORKING_WORKING;
580 l_ptr->fsm_msg_cnt = 0;
581 l_ptr->checkpoint = l_ptr->next_in_no;
582 if (tipc_bclink_acks_missing(l_ptr->owner)) {
583 tipc_link_proto_xmit(l_ptr, STATE_MSG,
584 0, 0, 0, 0, 0);
585 l_ptr->fsm_msg_cnt++;
587 link_set_timer(l_ptr, cont_intv);
588 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
589 tipc_link_proto_xmit(l_ptr, STATE_MSG,
590 1, 0, 0, 0, 0);
591 l_ptr->fsm_msg_cnt++;
592 link_set_timer(l_ptr, cont_intv / 4);
593 } else { /* Link has failed */
594 pr_warn("%s<%s>, peer not responding\n",
595 link_rst_msg, l_ptr->name);
596 tipc_link_reset(l_ptr);
597 l_ptr->state = RESET_UNKNOWN;
598 l_ptr->fsm_msg_cnt = 0;
599 tipc_link_proto_xmit(l_ptr, RESET_MSG,
600 0, 0, 0, 0, 0);
601 l_ptr->fsm_msg_cnt++;
602 link_set_timer(l_ptr, cont_intv);
604 break;
605 default:
606 pr_err("%s%u in WU state\n", link_unk_evt, event);
608 break;
609 case RESET_UNKNOWN:
610 switch (event) {
611 case TRAFFIC_MSG_EVT:
612 break;
613 case ACTIVATE_MSG:
614 other = l_ptr->owner->active_links[0];
615 if (other && link_working_unknown(other))
616 break;
617 l_ptr->state = WORKING_WORKING;
618 l_ptr->fsm_msg_cnt = 0;
619 link_activate(l_ptr);
620 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
621 l_ptr->fsm_msg_cnt++;
622 if (l_ptr->owner->working_links == 1)
623 tipc_link_sync_xmit(l_ptr);
624 link_set_timer(l_ptr, cont_intv);
625 break;
626 case RESET_MSG:
627 l_ptr->state = RESET_RESET;
628 l_ptr->fsm_msg_cnt = 0;
629 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
630 1, 0, 0, 0, 0);
631 l_ptr->fsm_msg_cnt++;
632 link_set_timer(l_ptr, cont_intv);
633 break;
634 case STARTING_EVT:
635 l_ptr->flags |= LINK_STARTED;
636 /* fall through */
637 case TIMEOUT_EVT:
638 tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
639 l_ptr->fsm_msg_cnt++;
640 link_set_timer(l_ptr, cont_intv);
641 break;
642 default:
643 pr_err("%s%u in RU state\n", link_unk_evt, event);
645 break;
646 case RESET_RESET:
647 switch (event) {
648 case TRAFFIC_MSG_EVT:
649 case ACTIVATE_MSG:
650 other = l_ptr->owner->active_links[0];
651 if (other && link_working_unknown(other))
652 break;
653 l_ptr->state = WORKING_WORKING;
654 l_ptr->fsm_msg_cnt = 0;
655 link_activate(l_ptr);
656 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
657 l_ptr->fsm_msg_cnt++;
658 if (l_ptr->owner->working_links == 1)
659 tipc_link_sync_xmit(l_ptr);
660 link_set_timer(l_ptr, cont_intv);
661 break;
662 case RESET_MSG:
663 break;
664 case TIMEOUT_EVT:
665 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
666 0, 0, 0, 0, 0);
667 l_ptr->fsm_msg_cnt++;
668 link_set_timer(l_ptr, cont_intv);
669 break;
670 default:
671 pr_err("%s%u in RR state\n", link_unk_evt, event);
673 break;
674 default:
675 pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
680 * link_bundle_buf(): Append contents of a buffer to
681 * the tail of an existing one.
683 static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
684 struct sk_buff *buf)
686 struct tipc_msg *bundler_msg = buf_msg(bundler);
687 struct tipc_msg *msg = buf_msg(buf);
688 u32 size = msg_size(msg);
689 u32 bundle_size = msg_size(bundler_msg);
690 u32 to_pos = align(bundle_size);
691 u32 pad = to_pos - bundle_size;
693 if (msg_user(bundler_msg) != MSG_BUNDLER)
694 return 0;
695 if (msg_type(bundler_msg) != OPEN_MSG)
696 return 0;
697 if (skb_tailroom(bundler) < (pad + size))
698 return 0;
699 if (l_ptr->max_pkt < (to_pos + size))
700 return 0;
702 skb_put(bundler, pad + size);
703 skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
704 msg_set_size(bundler_msg, to_pos + size);
705 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
706 kfree_skb(buf);
707 l_ptr->stats.sent_bundled++;
708 return 1;
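/*
 * Worked example: with an existing bundle whose msg_size() is 70 bytes,
 * to_pos = align(70) = 72 and pad = 2, so a 40-byte message is copied to
 * offset 72, the bundle size becomes 112, its message counter is bumped,
 * and the original buffer is freed.
 */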
711 static void link_add_to_outqueue(struct tipc_link *l_ptr,
712 struct sk_buff *buf,
713 struct tipc_msg *msg)
715 u32 ack = mod(l_ptr->next_in_no - 1);
716 u32 seqno = mod(l_ptr->next_out_no++);
718 msg_set_word(msg, 2, ((ack << 16) | seqno));
719 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
720 buf->next = NULL;
721 if (l_ptr->first_out) {
722 l_ptr->last_out->next = buf;
723 l_ptr->last_out = buf;
724 } else
725 l_ptr->first_out = l_ptr->last_out = buf;
727 l_ptr->out_queue_size++;
728 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
729 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
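/*
 * Note on the word 2 update above: the header word is packed as
 * (ack << 16) | seqno, i.e. the upper 16 bits acknowledge the last
 * in-sequence packet received on this link, while the lower 16 bits carry
 * the sequence number assigned to this outgoing packet.
 */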
732 static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
733 struct sk_buff *buf_chain,
734 u32 long_msgno)
736 struct sk_buff *buf;
737 struct tipc_msg *msg;
739 if (!l_ptr->next_out)
740 l_ptr->next_out = buf_chain;
741 while (buf_chain) {
742 buf = buf_chain;
743 buf_chain = buf_chain->next;
745 msg = buf_msg(buf);
746 msg_set_long_msgno(msg, long_msgno);
747 link_add_to_outqueue(l_ptr, buf, msg);
752 * tipc_link_xmit() is the 'full path' for messages, called from
753 * inside TIPC when the 'fast path' in tipc_send_xmit
754 * has failed, and from link_send()
756 int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
758 struct tipc_msg *msg = buf_msg(buf);
759 u32 size = msg_size(msg);
760 u32 dsz = msg_data_sz(msg);
761 u32 queue_size = l_ptr->out_queue_size;
762 u32 imp = tipc_msg_tot_importance(msg);
763 u32 queue_limit = l_ptr->queue_limit[imp];
764 u32 max_packet = l_ptr->max_pkt;
766 /* Match msg importance against queue limits: */
767 if (unlikely(queue_size >= queue_limit)) {
768 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
769 link_schedule_port(l_ptr, msg_origport(msg), size);
770 kfree_skb(buf);
771 return -ELINKCONG;
773 kfree_skb(buf);
774 if (imp > CONN_MANAGER) {
775 pr_warn("%s<%s>, send queue full", link_rst_msg,
776 l_ptr->name);
777 tipc_link_reset(l_ptr);
779 return dsz;
782         /* Fragmentation needed? */
783 if (size > max_packet)
784 return tipc_link_frag_xmit(l_ptr, buf);
786 /* Packet can be queued or sent. */
787 if (likely(!link_congested(l_ptr))) {
788 link_add_to_outqueue(l_ptr, buf, msg);
790 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
791 l_ptr->unacked_window = 0;
792 return dsz;
794         /* Congestion: can message be bundled? */
795 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
796 (msg_user(msg) != MSG_FRAGMENTER)) {
798 /* Try adding message to an existing bundle */
799 if (l_ptr->next_out &&
800 link_bundle_buf(l_ptr, l_ptr->last_out, buf))
801 return dsz;
803 /* Try creating a new bundle */
804 if (size <= max_packet * 2 / 3) {
805 struct sk_buff *bundler = tipc_buf_acquire(max_packet);
806 struct tipc_msg bundler_hdr;
808 if (bundler) {
809 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
810 INT_H_SIZE, l_ptr->addr);
811 skb_copy_to_linear_data(bundler, &bundler_hdr,
812 INT_H_SIZE);
813 skb_trim(bundler, INT_H_SIZE);
814 link_bundle_buf(l_ptr, bundler, buf);
815 buf = bundler;
816 msg = buf_msg(buf);
817 l_ptr->stats.sent_bundles++;
821 if (!l_ptr->next_out)
822 l_ptr->next_out = buf;
823 link_add_to_outqueue(l_ptr, buf, msg);
824 return dsz;
828 * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use
829  * has not been selected yet, and the owner node is not locked
830 * Called by TIPC internal users, e.g. the name distributor
832 int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
834 struct tipc_link *l_ptr;
835 struct tipc_node *n_ptr;
836 int res = -ELINKCONG;
838 n_ptr = tipc_node_find(dest);
839 if (n_ptr) {
840 tipc_node_lock(n_ptr);
841 l_ptr = n_ptr->active_links[selector & 1];
842 if (l_ptr)
843 res = __tipc_link_xmit(l_ptr, buf);
844 else
845 kfree_skb(buf);
846 tipc_node_unlock(n_ptr);
847 } else {
848 kfree_skb(buf);
850 return res;
854 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
856 * Give a newly added peer node the sequence number where it should
857 * start receiving and acking broadcast packets.
859 * Called with node locked
861 static void tipc_link_sync_xmit(struct tipc_link *l)
863 struct sk_buff *buf;
864 struct tipc_msg *msg;
866 buf = tipc_buf_acquire(INT_H_SIZE);
867 if (!buf)
868 return;
870 msg = buf_msg(buf);
871 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
872 msg_set_last_bcast(msg, l->owner->bclink.acked);
873 link_add_chain_to_outqueue(l, buf, 0);
874 tipc_link_push_queue(l);
878 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
879 * Receive the sequence number where we should start receiving and
880 * acking broadcast packets from a newly added peer node, and open
881 * up for reception of such packets.
883 * Called with node locked
885 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
887 struct tipc_msg *msg = buf_msg(buf);
889 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
890 n->bclink.recv_permitted = true;
891 kfree_skb(buf);
895 * tipc_link_names_xmit - send name table entries to new neighbor
897 * Send routine for bulk delivery of name table messages when contact
898 * with a new neighbor occurs. No link congestion checking is performed
899 * because name table messages *must* be delivered. The messages must be
900 * small enough not to require fragmentation.
901 * Called without any locks held.
903 void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
905 struct tipc_node *n_ptr;
906 struct tipc_link *l_ptr;
907 struct sk_buff *buf;
908 struct sk_buff *temp_buf;
910 if (list_empty(message_list))
911 return;
913 n_ptr = tipc_node_find(dest);
914 if (n_ptr) {
915 tipc_node_lock(n_ptr);
916 l_ptr = n_ptr->active_links[0];
917 if (l_ptr) {
918 /* convert circular list to linear list */
919 ((struct sk_buff *)message_list->prev)->next = NULL;
920 link_add_chain_to_outqueue(l_ptr,
921 (struct sk_buff *)message_list->next, 0);
922 tipc_link_push_queue(l_ptr);
923 INIT_LIST_HEAD(message_list);
925 tipc_node_unlock(n_ptr);
928 /* discard the messages if they couldn't be sent */
929 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
930 list_del((struct list_head *)buf);
931 kfree_skb(buf);
936 * tipc_link_xmit_fast: Entry for data messages where the
937 * destination link is known and the header is complete,
938  * including the total message length. Very time critical.
939 * Link is locked. Returns user data length.
941 static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
942 u32 *used_max_pkt)
944 struct tipc_msg *msg = buf_msg(buf);
945 int res = msg_data_sz(msg);
947 if (likely(!link_congested(l_ptr))) {
948 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
949 link_add_to_outqueue(l_ptr, buf, msg);
950 tipc_bearer_send(l_ptr->bearer_id, buf,
951 &l_ptr->media_addr);
952 l_ptr->unacked_window = 0;
953 return res;
955 else
956 *used_max_pkt = l_ptr->max_pkt;
958 return __tipc_link_xmit(l_ptr, buf); /* All other cases */
962 * tipc_link_iovec_xmit_fast: Entry for messages where the
963 * destination processor is known and the header is complete,
964 * except for total message length.
965 * Returns user data length or errno.
967 int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
968 struct iovec const *msg_sect,
969 unsigned int len, u32 destaddr)
971 struct tipc_msg *hdr = &sender->phdr;
972 struct tipc_link *l_ptr;
973 struct sk_buff *buf;
974 struct tipc_node *node;
975 int res;
976 u32 selector = msg_origport(hdr) & 1;
978 again:
980 * Try building message using port's max_pkt hint.
981 * (Must not hold any locks while building message.)
983 res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
984 /* Exit if build request was invalid */
985 if (unlikely(res < 0))
986 return res;
988 node = tipc_node_find(destaddr);
989 if (likely(node)) {
990 tipc_node_lock(node);
991 l_ptr = node->active_links[selector];
992 if (likely(l_ptr)) {
993 if (likely(buf)) {
994 res = tipc_link_xmit_fast(l_ptr, buf,
995 &sender->max_pkt);
996 exit:
997 tipc_node_unlock(node);
998 return res;
1001 /* Exit if link (or bearer) is congested */
1002 if (link_congested(l_ptr)) {
1003 res = link_schedule_port(l_ptr,
1004 sender->ref, res);
1005 goto exit;
1009 * Message size exceeds max_pkt hint; update hint,
1010 * then re-try fast path or fragment the message
1012 sender->max_pkt = l_ptr->max_pkt;
1013 tipc_node_unlock(node);
1016 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1017 goto again;
1019 return tipc_link_iovec_long_xmit(sender, msg_sect,
1020 len, destaddr);
1022 tipc_node_unlock(node);
1025 /* Couldn't find a link to the destination node */
1026 kfree_skb(buf);
1027 tipc_port_iovec_reject(sender, hdr, msg_sect, len, TIPC_ERR_NO_NODE);
1028 return -ENETUNREACH;
1032 * tipc_link_iovec_long_xmit(): Entry for long messages where the
1033 * destination node is known and the header is complete,
1034  * including the total message length.
1035 * Link and bearer congestion status have been checked to be ok,
1036 * and are ignored if they change.
1038 * Note that fragments do not use the full link MTU so that they won't have
1039 * to undergo refragmentation if link changeover causes them to be sent
1040 * over another link with an additional tunnel header added as prefix.
1041 * (Refragmentation will still occur if the other link has a smaller MTU.)
1043 * Returns user data length or errno.
1045 static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
1046 struct iovec const *msg_sect,
1047 unsigned int len, u32 destaddr)
1049 struct tipc_link *l_ptr;
1050 struct tipc_node *node;
1051 struct tipc_msg *hdr = &sender->phdr;
1052 u32 dsz = len;
1053 u32 max_pkt, fragm_sz, rest;
1054 struct tipc_msg fragm_hdr;
1055 struct sk_buff *buf, *buf_chain, *prev;
1056 u32 fragm_crs, fragm_rest, hsz, sect_rest;
1057 const unchar __user *sect_crs;
1058 int curr_sect;
1059 u32 fragm_no;
1060 int res = 0;
1062 again:
1063 fragm_no = 1;
1064 max_pkt = sender->max_pkt - INT_H_SIZE;
1065 /* leave room for tunnel header in case of link changeover */
1066 fragm_sz = max_pkt - INT_H_SIZE;
1067 /* leave room for fragmentation header in each fragment */
1068 rest = dsz;
1069 fragm_crs = 0;
1070 fragm_rest = 0;
1071 sect_rest = 0;
1072 sect_crs = NULL;
1073 curr_sect = -1;
1075 /* Prepare reusable fragment header */
1076 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1077 INT_H_SIZE, msg_destnode(hdr));
1078 msg_set_size(&fragm_hdr, max_pkt);
1079 msg_set_fragm_no(&fragm_hdr, 1);
1081 /* Prepare header of first fragment */
1082 buf_chain = buf = tipc_buf_acquire(max_pkt);
1083 if (!buf)
1084 return -ENOMEM;
1085 buf->next = NULL;
1086 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1087 hsz = msg_hdr_sz(hdr);
1088 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1090 /* Chop up message */
1091 fragm_crs = INT_H_SIZE + hsz;
1092 fragm_rest = fragm_sz - hsz;
1094 do { /* For all sections */
1095 u32 sz;
1097 if (!sect_rest) {
1098 sect_rest = msg_sect[++curr_sect].iov_len;
1099 sect_crs = msg_sect[curr_sect].iov_base;
1102 if (sect_rest < fragm_rest)
1103 sz = sect_rest;
1104 else
1105 sz = fragm_rest;
1107 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1108 res = -EFAULT;
1109 error:
1110 kfree_skb_list(buf_chain);
1111 return res;
1113 sect_crs += sz;
1114 sect_rest -= sz;
1115 fragm_crs += sz;
1116 fragm_rest -= sz;
1117 rest -= sz;
1119 if (!fragm_rest && rest) {
1121 /* Initiate new fragment: */
1122 if (rest <= fragm_sz) {
1123 fragm_sz = rest;
1124 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1125 } else {
1126 msg_set_type(&fragm_hdr, FRAGMENT);
1128 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1129 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1130 prev = buf;
1131 buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
1132 if (!buf) {
1133 res = -ENOMEM;
1134 goto error;
1137 buf->next = NULL;
1138 prev->next = buf;
1139 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1140 fragm_crs = INT_H_SIZE;
1141 fragm_rest = fragm_sz;
1143 } while (rest > 0);
1146 * Now we have a buffer chain. Select a link and check
1147 * that packet size is still OK
1149 node = tipc_node_find(destaddr);
1150 if (likely(node)) {
1151 tipc_node_lock(node);
1152 l_ptr = node->active_links[sender->ref & 1];
1153 if (!l_ptr) {
1154 tipc_node_unlock(node);
1155 goto reject;
1157 if (l_ptr->max_pkt < max_pkt) {
1158 sender->max_pkt = l_ptr->max_pkt;
1159 tipc_node_unlock(node);
1160 kfree_skb_list(buf_chain);
1161 goto again;
1163 } else {
1164 reject:
1165 kfree_skb_list(buf_chain);
1166 tipc_port_iovec_reject(sender, hdr, msg_sect, len,
1167 TIPC_ERR_NO_NODE);
1168 return -ENETUNREACH;
1171 /* Append chain of fragments to send queue & send them */
1172 l_ptr->long_msg_seq_no++;
1173 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1174 l_ptr->stats.sent_fragments += fragm_no;
1175 l_ptr->stats.sent_fragmented++;
1176 tipc_link_push_queue(l_ptr);
1177 tipc_node_unlock(node);
1178 return dsz;
1182 * tipc_link_push_packet: Push one unsent packet to the media
1184 static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1186 struct sk_buff *buf = l_ptr->first_out;
1187 u32 r_q_size = l_ptr->retransm_queue_size;
1188 u32 r_q_head = l_ptr->retransm_queue_head;
1190 /* Step to position where retransmission failed, if any, */
1191 /* consider that buffers may have been released in meantime */
1192 if (r_q_size && buf) {
1193 u32 last = lesser(mod(r_q_head + r_q_size),
1194 link_last_sent(l_ptr));
1195 u32 first = buf_seqno(buf);
1197 while (buf && less(first, r_q_head)) {
1198 first = mod(first + 1);
1199 buf = buf->next;
1201 l_ptr->retransm_queue_head = r_q_head = first;
1202 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1205 /* Continue retransmission now, if there is anything: */
1206 if (r_q_size && buf) {
1207 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1208 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1209 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1210 l_ptr->retransm_queue_head = mod(++r_q_head);
1211 l_ptr->retransm_queue_size = --r_q_size;
1212 l_ptr->stats.retransmitted++;
1213 return 0;
1216 /* Send deferred protocol message, if any: */
1217 buf = l_ptr->proto_msg_queue;
1218 if (buf) {
1219 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1220 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1221 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1222 l_ptr->unacked_window = 0;
1223 kfree_skb(buf);
1224 l_ptr->proto_msg_queue = NULL;
1225 return 0;
1228 /* Send one deferred data message, if send window not full: */
1229 buf = l_ptr->next_out;
1230 if (buf) {
1231 struct tipc_msg *msg = buf_msg(buf);
1232 u32 next = msg_seqno(msg);
1233 u32 first = buf_seqno(l_ptr->first_out);
1235 if (mod(next - first) < l_ptr->queue_limit[0]) {
1236 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1237 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1238 tipc_bearer_send(l_ptr->bearer_id, buf,
1239 &l_ptr->media_addr);
1240 if (msg_user(msg) == MSG_BUNDLER)
1241 msg_set_type(msg, CLOSED_MSG);
1242 l_ptr->next_out = buf->next;
1243 return 0;
1246 return 1;
1250  * tipc_link_push_queue(): push out the unsent messages of a link where
1251  * congestion has abated. Node is locked.
1253 void tipc_link_push_queue(struct tipc_link *l_ptr)
1255 u32 res;
1257 do {
1258 res = tipc_link_push_packet(l_ptr);
1259 } while (!res);
1262 void tipc_link_reset_all(struct tipc_node *node)
1264 char addr_string[16];
1265 u32 i;
1267 tipc_node_lock(node);
1269 pr_warn("Resetting all links to %s\n",
1270 tipc_addr_string_fill(addr_string, node->addr));
1272 for (i = 0; i < MAX_BEARERS; i++) {
1273 if (node->links[i]) {
1274 link_print(node->links[i], "Resetting link\n");
1275 tipc_link_reset(node->links[i]);
1279 tipc_node_unlock(node);
1282 static void link_retransmit_failure(struct tipc_link *l_ptr,
1283 struct sk_buff *buf)
1285 struct tipc_msg *msg = buf_msg(buf);
1287 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
1289 if (l_ptr->addr) {
1290 /* Handle failure on standard link */
1291 link_print(l_ptr, "Resetting link\n");
1292 tipc_link_reset(l_ptr);
1294 } else {
1295 /* Handle failure on broadcast link */
1296 struct tipc_node *n_ptr;
1297 char addr_string[16];
1299 pr_info("Msg seq number: %u, ", msg_seqno(msg));
1300 pr_cont("Outstanding acks: %lu\n",
1301 (unsigned long) TIPC_SKB_CB(buf)->handle);
1303 n_ptr = tipc_bclink_retransmit_to();
1304 tipc_node_lock(n_ptr);
1306 tipc_addr_string_fill(addr_string, n_ptr->addr);
1307 pr_info("Broadcast link info for %s\n", addr_string);
1308 pr_info("Reception permitted: %d, Acked: %u\n",
1309 n_ptr->bclink.recv_permitted,
1310 n_ptr->bclink.acked);
1311 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
1312 n_ptr->bclink.last_in,
1313 n_ptr->bclink.oos_state,
1314 n_ptr->bclink.last_sent);
1316 tipc_node_unlock(n_ptr);
1318 tipc_bclink_set_flags(TIPC_BCLINK_RESET);
1319 l_ptr->stale_count = 0;
1323 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1324 u32 retransmits)
1326 struct tipc_msg *msg;
1328 if (!buf)
1329 return;
1331 msg = buf_msg(buf);
1333 /* Detect repeated retransmit failures */
1334 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1335 if (++l_ptr->stale_count > 100) {
1336 link_retransmit_failure(l_ptr, buf);
1337 return;
1339 } else {
1340 l_ptr->last_retransmitted = msg_seqno(msg);
1341 l_ptr->stale_count = 1;
1344 while (retransmits && (buf != l_ptr->next_out) && buf) {
1345 msg = buf_msg(buf);
1346 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1347 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1348 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1349 buf = buf->next;
1350 retransmits--;
1351 l_ptr->stats.retransmitted++;
1354 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1358 * link_insert_deferred_queue - insert deferred messages back into receive chain
1360 static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1361 struct sk_buff *buf)
1363 u32 seq_no;
1365 if (l_ptr->oldest_deferred_in == NULL)
1366 return buf;
1368 seq_no = buf_seqno(l_ptr->oldest_deferred_in);
1369 if (seq_no == mod(l_ptr->next_in_no)) {
1370 l_ptr->newest_deferred_in->next = buf;
1371 buf = l_ptr->oldest_deferred_in;
1372 l_ptr->oldest_deferred_in = NULL;
1373 l_ptr->deferred_inqueue_sz = 0;
1375 return buf;
1379 * link_recv_buf_validate - validate basic format of received message
1381 * This routine ensures a TIPC message has an acceptable header, and at least
1382 * as much data as the header indicates it should. The routine also ensures
1383 * that the entire message header is stored in the main fragment of the message
1384 * buffer, to simplify future access to message header fields.
1386 * Note: Having extra info present in the message header or data areas is OK.
1387 * TIPC will ignore the excess, under the assumption that it is optional info
1388 * introduced by a later release of the protocol.
1390 static int link_recv_buf_validate(struct sk_buff *buf)
1392 static u32 min_data_hdr_size[8] = {
1393 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1394 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1397 struct tipc_msg *msg;
1398 u32 tipc_hdr[2];
1399 u32 size;
1400 u32 hdr_size;
1401 u32 min_hdr_size;
1403 /* If this packet comes from the defer queue, the skb has already
1404 * been validated
1406 if (unlikely(TIPC_SKB_CB(buf)->deferred))
1407 return 1;
1409 if (unlikely(buf->len < MIN_H_SIZE))
1410 return 0;
1412 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1413 if (msg == NULL)
1414 return 0;
1416 if (unlikely(msg_version(msg) != TIPC_VERSION))
1417 return 0;
1419 size = msg_size(msg);
1420 hdr_size = msg_hdr_sz(msg);
1421 min_hdr_size = msg_isdata(msg) ?
1422 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1424 if (unlikely((hdr_size < min_hdr_size) ||
1425 (size < hdr_size) ||
1426 (buf->len < size) ||
1427 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1428 return 0;
1430 return pskb_may_pull(buf, hdr_size);
1434 * tipc_rcv - process TIPC packets/messages arriving from off-node
1435 * @head: pointer to message buffer chain
1436  * @b_ptr: pointer to the bearer the message arrived on
1438 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1439 * structure (i.e. cannot be NULL), but bearer can be inactive.
1441 void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1443 while (head) {
1444 struct tipc_node *n_ptr;
1445 struct tipc_link *l_ptr;
1446 struct sk_buff *crs;
1447 struct sk_buff *buf = head;
1448 struct tipc_msg *msg;
1449 u32 seq_no;
1450 u32 ackd;
1451 u32 released = 0;
1453 head = head->next;
1454 buf->next = NULL;
1456 /* Ensure message is well-formed */
1457 if (unlikely(!link_recv_buf_validate(buf)))
1458 goto discard;
1460 /* Ensure message data is a single contiguous unit */
1461 if (unlikely(skb_linearize(buf)))
1462 goto discard;
1464 /* Handle arrival of a non-unicast link message */
1465 msg = buf_msg(buf);
1467 if (unlikely(msg_non_seq(msg))) {
1468 if (msg_user(msg) == LINK_CONFIG)
1469 tipc_disc_rcv(buf, b_ptr);
1470 else
1471 tipc_bclink_rcv(buf);
1472 continue;
1475 /* Discard unicast link messages destined for another node */
1476 if (unlikely(!msg_short(msg) &&
1477 (msg_destnode(msg) != tipc_own_addr)))
1478 goto discard;
1480 /* Locate neighboring node that sent message */
1481 n_ptr = tipc_node_find(msg_prevnode(msg));
1482 if (unlikely(!n_ptr))
1483 goto discard;
1484 tipc_node_lock(n_ptr);
1486 /* Locate unicast link endpoint that should handle message */
1487 l_ptr = n_ptr->links[b_ptr->identity];
1488 if (unlikely(!l_ptr))
1489 goto unlock_discard;
1491 /* Verify that communication with node is currently allowed */
1492 if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1493 msg_user(msg) == LINK_PROTOCOL &&
1494 (msg_type(msg) == RESET_MSG ||
1495 msg_type(msg) == ACTIVATE_MSG) &&
1496 !msg_redundant_link(msg))
1497 n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1499 if (tipc_node_blocked(n_ptr))
1500 goto unlock_discard;
1502 /* Validate message sequence number info */
1503 seq_no = msg_seqno(msg);
1504 ackd = msg_ack(msg);
1506 /* Release acked messages */
1507 if (n_ptr->bclink.recv_permitted)
1508 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1510 crs = l_ptr->first_out;
1511 while ((crs != l_ptr->next_out) &&
1512 less_eq(buf_seqno(crs), ackd)) {
1513 struct sk_buff *next = crs->next;
1514 kfree_skb(crs);
1515 crs = next;
1516 released++;
1518 if (released) {
1519 l_ptr->first_out = crs;
1520 l_ptr->out_queue_size -= released;
1523 /* Try sending any messages link endpoint has pending */
1524 if (unlikely(l_ptr->next_out))
1525 tipc_link_push_queue(l_ptr);
1527 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1528 tipc_link_wakeup_ports(l_ptr, 0);
1530 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1531 l_ptr->stats.sent_acks++;
1532 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1535 /* Process the incoming packet */
1536 if (unlikely(!link_working_working(l_ptr))) {
1537 if (msg_user(msg) == LINK_PROTOCOL) {
1538 tipc_link_proto_rcv(l_ptr, buf);
1539 head = link_insert_deferred_queue(l_ptr, head);
1540 tipc_node_unlock(n_ptr);
1541 continue;
1544 /* Traffic message. Conditionally activate link */
1545 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1547 if (link_working_working(l_ptr)) {
1548 /* Re-insert buffer in front of queue */
1549 buf->next = head;
1550 head = buf;
1551 tipc_node_unlock(n_ptr);
1552 continue;
1554 goto unlock_discard;
1557 /* Link is now in state WORKING_WORKING */
1558 if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1559 link_handle_out_of_seq_msg(l_ptr, buf);
1560 head = link_insert_deferred_queue(l_ptr, head);
1561 tipc_node_unlock(n_ptr);
1562 continue;
1564 l_ptr->next_in_no++;
1565 if (unlikely(l_ptr->oldest_deferred_in))
1566 head = link_insert_deferred_queue(l_ptr, head);
1568 /* Deliver packet/message to correct user: */
1569 if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) {
1570 if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
1571 tipc_node_unlock(n_ptr);
1572 continue;
1574 msg = buf_msg(buf);
1575 } else if (msg_user(msg) == MSG_FRAGMENTER) {
1576 l_ptr->stats.recv_fragments++;
1577 if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
1578 l_ptr->stats.recv_fragmented++;
1579 msg = buf_msg(buf);
1580 } else {
1581 if (!l_ptr->reasm_buf)
1582 tipc_link_reset(l_ptr);
1583 tipc_node_unlock(n_ptr);
1584 continue;
1588 switch (msg_user(msg)) {
1589 case TIPC_LOW_IMPORTANCE:
1590 case TIPC_MEDIUM_IMPORTANCE:
1591 case TIPC_HIGH_IMPORTANCE:
1592 case TIPC_CRITICAL_IMPORTANCE:
1593 tipc_node_unlock(n_ptr);
1594 tipc_sk_rcv(buf);
1595 continue;
1596 case MSG_BUNDLER:
1597 l_ptr->stats.recv_bundles++;
1598 l_ptr->stats.recv_bundled += msg_msgcnt(msg);
1599 tipc_node_unlock(n_ptr);
1600 tipc_link_bundle_rcv(buf);
1601 continue;
1602 case NAME_DISTRIBUTOR:
1603 n_ptr->bclink.recv_permitted = true;
1604 tipc_node_unlock(n_ptr);
1605 tipc_named_rcv(buf);
1606 continue;
1607 case CONN_MANAGER:
1608 tipc_node_unlock(n_ptr);
1609 tipc_port_proto_rcv(buf);
1610 continue;
1611 case BCAST_PROTOCOL:
1612 tipc_link_sync_rcv(n_ptr, buf);
1613 break;
1614 default:
1615 kfree_skb(buf);
1616 break;
1618 tipc_node_unlock(n_ptr);
1619 continue;
1620 unlock_discard:
1621 tipc_node_unlock(n_ptr);
1622 discard:
1623 kfree_skb(buf);
1628 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1630 * Returns increase in queue length (i.e. 0 or 1)
1632 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1633 struct sk_buff *buf)
1635 struct sk_buff *queue_buf;
1636 struct sk_buff **prev;
1637 u32 seq_no = buf_seqno(buf);
1639 buf->next = NULL;
1641 /* Empty queue ? */
1642 if (*head == NULL) {
1643 *head = *tail = buf;
1644 return 1;
1647 /* Last ? */
1648 if (less(buf_seqno(*tail), seq_no)) {
1649 (*tail)->next = buf;
1650 *tail = buf;
1651 return 1;
1654 /* Locate insertion point in queue, then insert; discard if duplicate */
1655 prev = head;
1656 queue_buf = *head;
1657 for (;;) {
1658 u32 curr_seqno = buf_seqno(queue_buf);
1660 if (seq_no == curr_seqno) {
1661 kfree_skb(buf);
1662 return 0;
1665 if (less(seq_no, curr_seqno))
1666 break;
1668 prev = &queue_buf->next;
1669 queue_buf = queue_buf->next;
1672 buf->next = queue_buf;
1673 *prev = buf;
1674 return 1;
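/*
 * In short, tipc_link_defer_pkt() keeps the deferred queue sorted by
 * sequence number: a packet beyond the current tail is appended, a
 * duplicate is freed (returning 0), and anything else is spliced in at its
 * ordered position; the return value is the resulting queue growth (0 or 1).
 */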
1678 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1680 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1681 struct sk_buff *buf)
1683 u32 seq_no = buf_seqno(buf);
1685 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1686 tipc_link_proto_rcv(l_ptr, buf);
1687 return;
1690 /* Record OOS packet arrival (force mismatch on next timeout) */
1691 l_ptr->checkpoint--;
1694 * Discard packet if a duplicate; otherwise add it to deferred queue
1695 * and notify peer of gap as per protocol specification
1697 if (less(seq_no, mod(l_ptr->next_in_no))) {
1698 l_ptr->stats.duplicates++;
1699 kfree_skb(buf);
1700 return;
1703 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1704 &l_ptr->newest_deferred_in, buf)) {
1705 l_ptr->deferred_inqueue_sz++;
1706 l_ptr->stats.deferred_recv++;
1707 TIPC_SKB_CB(buf)->deferred = true;
1708 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1709 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1710 } else
1711 l_ptr->stats.duplicates++;
1715 * Send protocol message to the other endpoint.
1717 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1718 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1720 struct sk_buff *buf = NULL;
1721 struct tipc_msg *msg = l_ptr->pmsg;
1722 u32 msg_size = sizeof(l_ptr->proto_msg);
1723 int r_flag;
1725 /* Discard any previous message that was deferred due to congestion */
1726 if (l_ptr->proto_msg_queue) {
1727 kfree_skb(l_ptr->proto_msg_queue);
1728 l_ptr->proto_msg_queue = NULL;
1731 /* Don't send protocol message during link changeover */
1732 if (l_ptr->exp_msg_count)
1733 return;
1735 /* Abort non-RESET send if communication with node is prohibited */
1736 if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1737 return;
1739 /* Create protocol message with "out-of-sequence" sequence number */
1740 msg_set_type(msg, msg_typ);
1741 msg_set_net_plane(msg, l_ptr->net_plane);
1742 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1743 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1745 if (msg_typ == STATE_MSG) {
1746 u32 next_sent = mod(l_ptr->next_out_no);
1748 if (!tipc_link_is_up(l_ptr))
1749 return;
1750 if (l_ptr->next_out)
1751 next_sent = buf_seqno(l_ptr->next_out);
1752 msg_set_next_sent(msg, next_sent);
1753 if (l_ptr->oldest_deferred_in) {
1754 u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
1755 gap = mod(rec - mod(l_ptr->next_in_no));
1757 msg_set_seq_gap(msg, gap);
1758 if (gap)
1759 l_ptr->stats.sent_nacks++;
1760 msg_set_link_tolerance(msg, tolerance);
1761 msg_set_linkprio(msg, priority);
1762 msg_set_max_pkt(msg, ack_mtu);
1763 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1764 msg_set_probe(msg, probe_msg != 0);
1765 if (probe_msg) {
1766 u32 mtu = l_ptr->max_pkt;
1768 if ((mtu < l_ptr->max_pkt_target) &&
1769 link_working_working(l_ptr) &&
1770 l_ptr->fsm_msg_cnt) {
1771 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1772 if (l_ptr->max_pkt_probes == 10) {
1773 l_ptr->max_pkt_target = (msg_size - 4);
1774 l_ptr->max_pkt_probes = 0;
1775 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1777 l_ptr->max_pkt_probes++;
1780 l_ptr->stats.sent_probes++;
1782 l_ptr->stats.sent_states++;
1783 } else { /* RESET_MSG or ACTIVATE_MSG */
1784 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1785 msg_set_seq_gap(msg, 0);
1786 msg_set_next_sent(msg, 1);
1787 msg_set_probe(msg, 0);
1788 msg_set_link_tolerance(msg, l_ptr->tolerance);
1789 msg_set_linkprio(msg, l_ptr->priority);
1790 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1793 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1794 msg_set_redundant_link(msg, r_flag);
1795 msg_set_linkprio(msg, l_ptr->priority);
1796 msg_set_size(msg, msg_size);
1798 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
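/*
 * Worked example, assuming mod() masks to 16 bits: with next_out_no == 10
 * the protocol message is stamped with mod(10 + 0xffff/2) == 32777, far
 * outside the receiver's expected in-sequence window, so it cannot be
 * mistaken for an ordinary data packet.
 */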
1800 buf = tipc_buf_acquire(msg_size);
1801 if (!buf)
1802 return;
1804 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1805 buf->priority = TC_PRIO_CONTROL;
1807 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1808 l_ptr->unacked_window = 0;
1809 kfree_skb(buf);
1813  * Receive protocol message:
1814 * Note that network plane id propagates through the network, and may
1815 * change at any time. The node with lowest address rules
1817 static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1819 u32 rec_gap = 0;
1820 u32 max_pkt_info;
1821 u32 max_pkt_ack;
1822 u32 msg_tol;
1823 struct tipc_msg *msg = buf_msg(buf);
1825 /* Discard protocol message during link changeover */
1826 if (l_ptr->exp_msg_count)
1827 goto exit;
1829 if (l_ptr->net_plane != msg_net_plane(msg))
1830 if (tipc_own_addr > msg_prevnode(msg))
1831 l_ptr->net_plane = msg_net_plane(msg);
1833 switch (msg_type(msg)) {
1835 case RESET_MSG:
1836 if (!link_working_unknown(l_ptr) &&
1837 (l_ptr->peer_session != INVALID_SESSION)) {
1838 if (less_eq(msg_session(msg), l_ptr->peer_session))
1839 break; /* duplicate or old reset: ignore */
1842 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
1843 link_working_unknown(l_ptr))) {
1845 * peer has lost contact -- don't allow peer's links
1846 * to reactivate before we recognize loss & clean up
1848 l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1851 link_state_event(l_ptr, RESET_MSG);
1853 /* fall thru' */
1854 case ACTIVATE_MSG:
1855         /* Update link settings according to the other endpoint's values */
1856 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
1858 msg_tol = msg_link_tolerance(msg);
1859 if (msg_tol > l_ptr->tolerance)
1860 link_set_supervision_props(l_ptr, msg_tol);
1862 if (msg_linkprio(msg) > l_ptr->priority)
1863 l_ptr->priority = msg_linkprio(msg);
1865 max_pkt_info = msg_max_pkt(msg);
1866 if (max_pkt_info) {
1867 if (max_pkt_info < l_ptr->max_pkt_target)
1868 l_ptr->max_pkt_target = max_pkt_info;
1869 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
1870 l_ptr->max_pkt = l_ptr->max_pkt_target;
1871 } else {
1872 l_ptr->max_pkt = l_ptr->max_pkt_target;
1875 /* Synchronize broadcast link info, if not done previously */
1876 if (!tipc_node_is_up(l_ptr->owner)) {
1877 l_ptr->owner->bclink.last_sent =
1878 l_ptr->owner->bclink.last_in =
1879 msg_last_bcast(msg);
1880 l_ptr->owner->bclink.oos_state = 0;
1883 l_ptr->peer_session = msg_session(msg);
1884 l_ptr->peer_bearer_id = msg_bearer_id(msg);
1886 if (msg_type(msg) == ACTIVATE_MSG)
1887 link_state_event(l_ptr, ACTIVATE_MSG);
1888 break;
1889 case STATE_MSG:
1891 msg_tol = msg_link_tolerance(msg);
1892 if (msg_tol)
1893 link_set_supervision_props(l_ptr, msg_tol);
1895 if (msg_linkprio(msg) &&
1896 (msg_linkprio(msg) != l_ptr->priority)) {
1897 pr_warn("%s<%s>, priority change %u->%u\n",
1898 link_rst_msg, l_ptr->name, l_ptr->priority,
1899 msg_linkprio(msg));
1900 l_ptr->priority = msg_linkprio(msg);
1901 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1902 break;
1905 /* Record reception; force mismatch at next timeout: */
1906 l_ptr->checkpoint--;
1908 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1909 l_ptr->stats.recv_states++;
1910 if (link_reset_unknown(l_ptr))
1911 break;
1913 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
1914 rec_gap = mod(msg_next_sent(msg) -
1915 mod(l_ptr->next_in_no));
1918 max_pkt_ack = msg_max_pkt(msg);
1919 if (max_pkt_ack > l_ptr->max_pkt) {
1920 l_ptr->max_pkt = max_pkt_ack;
1921 l_ptr->max_pkt_probes = 0;
1924 max_pkt_ack = 0;
1925 if (msg_probe(msg)) {
1926 l_ptr->stats.recv_probes++;
1927 if (msg_size(msg) > sizeof(l_ptr->proto_msg))
1928 max_pkt_ack = msg_size(msg);
1931 /* Protocol message before retransmits, reduce loss risk */
1932 if (l_ptr->owner->bclink.recv_permitted)
1933 tipc_bclink_update_link_state(l_ptr->owner,
1934 msg_last_bcast(msg));
1936 if (rec_gap || (msg_probe(msg))) {
1937 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
1938 0, max_pkt_ack);
1940 if (msg_seq_gap(msg)) {
1941 l_ptr->stats.recv_nacks++;
1942 tipc_link_retransmit(l_ptr, l_ptr->first_out,
1943 msg_seq_gap(msg));
1945 break;
1947 exit:
1948 kfree_skb(buf);
1952 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1953 * a different bearer. Owner node is locked.
1955 static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1956 struct tipc_msg *tunnel_hdr,
1957 struct tipc_msg *msg,
1958 u32 selector)
1960 struct tipc_link *tunnel;
1961 struct sk_buff *buf;
1962 u32 length = msg_size(msg);
1964 tunnel = l_ptr->owner->active_links[selector & 1];
1965 if (!tipc_link_is_up(tunnel)) {
1966 pr_warn("%stunnel link no longer available\n", link_co_err);
1967 return;
1969 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1970 buf = tipc_buf_acquire(length + INT_H_SIZE);
1971 if (!buf) {
1972 pr_warn("%sunable to send tunnel msg\n", link_co_err);
1973 return;
1975 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
1976 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
1977 __tipc_link_xmit(tunnel, buf);
1981 /* tipc_link_failover_send_queue(): A link has gone down, but a second
1982 * link is still active. We can do failover. Tunnel the failing link's
1983 * whole send queue via the remaining link. This way, we don't lose
1984 * any packets, and sequence order is preserved for subsequent traffic
1985 * sent over the remaining link. Owner node is locked.
1987 void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1989 u32 msgcount = l_ptr->out_queue_size;
1990 struct sk_buff *crs = l_ptr->first_out;
1991 struct tipc_link *tunnel = l_ptr->owner->active_links[0];
1992 struct tipc_msg tunnel_hdr;
1993 int split_bundles;
1995 if (!tunnel)
1996 return;
1998 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
1999 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2000 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2001 msg_set_msgcnt(&tunnel_hdr, msgcount);
	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit(tunnel, buf);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

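	/* If the two active links differ, a bundle queued on this link may
	 * carry messages belonging to either link, so unbundle it and tunnel
	 * each inner message according to its own link selector.
	 */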
	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
		crs = crs->next;
	}
}
/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
			      struct tipc_link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
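	/* Tunnel a copy of every packet in the send queue, each one wrapped
	 * in its own DUPLICATE_MSG tunnel header.
	 */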
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		__tipc_link_xmit(tunnel, outbuf);
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}
/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message. The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}
/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
 * Owner node is locked.
 */
static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
			      struct sk_buff *t_buf)
{
	struct sk_buff *buf;

	if (!tipc_link_is_up(l_ptr))
		return;

	buf = buf_extract(t_buf, INT_H_SIZE);
	if (buf == NULL) {
		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
		return;
	}

	/* Add buffer to deferred queue, if applicable: */
	link_handle_out_of_seq_msg(l_ptr, buf);
}
/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 * Owner node is locked.
 */
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
					      struct sk_buff *t_buf)
{
	struct tipc_msg *t_msg = buf_msg(t_buf);
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;

	if (tipc_link_is_up(l_ptr))
		tipc_link_reset(l_ptr);

	/* First failover packet? */
	if (l_ptr->exp_msg_count == START_CHANGEOVER)
		l_ptr->exp_msg_count = msg_msgcnt(t_msg);

	/* Should there be an inner packet? */
	if (l_ptr->exp_msg_count) {
		l_ptr->exp_msg_count--;
		buf = buf_extract(t_buf, INT_H_SIZE);
		if (buf == NULL) {
			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
		msg = buf_msg(buf);

		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
			kfree_skb(buf);
			buf = NULL;
			goto exit;
		}
		if (msg_user(msg) == MSG_FRAGMENTER) {
			l_ptr->stats.recv_fragments++;
			tipc_buf_append(&l_ptr->reasm_buf, &buf);
		}
	}
exit:
	if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
		tipc_node_detach_link(l_ptr->owner, l_ptr);
		kfree(l_ptr);
	}
	return buf;
}
/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
 * via other link as result of a failover (ORIGINAL_MSG) or
 * a new active link (DUPLICATE_MSG). Failover packets are
 * returned to the active link for delivery upwards.
 * Owner node is locked.
 */
static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *t_buf = *buf;
	struct tipc_link *l_ptr;
	struct tipc_msg *t_msg = buf_msg(t_buf);
	u32 bearer_id = msg_bearer_id(t_msg);

	*buf = NULL;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	l_ptr = n_ptr->links[bearer_id];
	if (!l_ptr)
		goto exit;

	if (msg_type(t_msg) == DUPLICATE_MSG)
		tipc_link_dup_rcv(l_ptr, t_buf);
	else if (msg_type(t_msg) == ORIGINAL_MSG)
		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
	else
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
	kfree_skb(t_buf);
	return *buf != NULL;
}
/*
 * Bundler functionality:
 */
void tipc_link_bundle_rcv(struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;

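	/* Walk the bundle, extracting and routing each embedded message;
	 * 'pos' advances by the aligned size of each inner message.
	 */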
	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			pr_warn("Link unable to unbundle message(s)\n");
			break;
		}
		pos += align(msg_size(buf_msg(obuf)));
		tipc_net_route_msg(obuf);
	}
	kfree_skb(buf);
}
/*
 * Fragmentation/defragmentation:
 */

/*
 * tipc_link_frag_xmit: Entry for buffers needing fragmentation.
 * The buffer is complete, including total message length.
 * Returns user data length.
 */
static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct sk_buff *buf_chain = NULL;
	struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;
	u32 rest = insize;
	u32 pack_sz = l_ptr->max_pkt;
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 0;
	u32 destaddr;

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;
	else
		destaddr = msg_destnode(inmsg);
	/* Prepare reusable fragment header: */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, destaddr);

	/* Chop up message: */
	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			kfree_skb(buf);
			kfree_skb_list(buf_chain);
			return -ENOMEM;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		fragm_no++;
		msg_set_fragm_no(&fragm_hdr, fragm_no);
		skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
					       fragm_sz);
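		/* sk_buff.next is the first field, so &buf_chain doubles as a
		 * dummy list head: the first fragment links directly into
		 * buf_chain without an empty-chain special case.
		 */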
		buf_chain_tail->next = fragm;
		buf_chain_tail = fragm;

		rest -= fragm_sz;
		crs += fragm_sz;
		msg_set_type(&fragm_hdr, FRAGMENT);
	}
	kfree_skb(buf);

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);

	return dsz;
}
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
		return;

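	/* The supervision (continuity) timer runs at a quarter of the link
	 * tolerance, capped at 500 ms; abort_limit bounds how many timer
	 * expirations without peer traffic are accepted before the link
	 * is reset.
	 */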
	l_ptr->tolerance = tolerance;
	l_ptr->continuity_interval =
		((tolerance / 4) > 500) ? 500 : tolerance / 4;
	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
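	/* Outbound queue limits scale with the configured window; the more
	 * important the message, the more of them may be queued before the
	 * link reports congestion back to the sender.
	 */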
	/* Data messages from this node, including FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, including FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
/* tipc_link_find_owner - locate owner node of link by link's name
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
/**
 * link_value_is_valid -- validate proposed link tolerance/priority/window
 *
 * @cmd: value type (TIPC_CMD_SET_LINK_*)
 * @new_value: the new value
 *
 * Returns 1 if value is within range, 0 if not.
 */
static int link_value_is_valid(u16 cmd, u32 new_value)
{
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		return (new_value >= TIPC_MIN_LINK_TOL) &&
			(new_value <= TIPC_MAX_LINK_TOL);
	case TIPC_CMD_SET_LINK_PRI:
		return (new_value <= TIPC_MAX_LINK_PRI);
	case TIPC_CMD_SET_LINK_WINDOW:
		return (new_value >= TIPC_MIN_LINK_WIN) &&
			(new_value <= TIPC_MAX_LINK_WIN);
	}
	return 0;
}
/**
 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
 * @name: ptr to link, bearer, or media name
 * @new_value: new value of link, bearer, or media setting
 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
 *
 * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
 *
 * Returns 0 if value updated and negative value on error.
 */
static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
{
	struct tipc_node *node;
	struct tipc_link *l_ptr;
	struct tipc_bearer *b_ptr;
	struct tipc_media *m_ptr;
	int bearer_id;
	int res = 0;

	node = tipc_link_find_owner(name, &bearer_id);
	if (node) {
		tipc_node_lock(node);
		l_ptr = node->links[bearer_id];

		if (l_ptr) {
			switch (cmd) {
			case TIPC_CMD_SET_LINK_TOL:
				link_set_supervision_props(l_ptr, new_value);
				tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
						     new_value, 0, 0);
				break;
			case TIPC_CMD_SET_LINK_PRI:
				l_ptr->priority = new_value;
				tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
						     0, new_value, 0);
				break;
			case TIPC_CMD_SET_LINK_WINDOW:
				tipc_link_set_queue_limits(l_ptr, new_value);
				break;
			default:
				res = -EINVAL;
				break;
			}
		}
		tipc_node_unlock(node);
		return res;
	}

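	/* The name did not match a link; try a bearer, then a media. */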
	b_ptr = tipc_bearer_find(name);
	if (b_ptr) {
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			b_ptr->tolerance = new_value;
			break;
		case TIPC_CMD_SET_LINK_PRI:
			b_ptr->priority = new_value;
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			b_ptr->window = new_value;
			break;
		default:
			res = -EINVAL;
			break;
		}
		return res;
	}

	m_ptr = tipc_media_find(name);
	if (!m_ptr)
		return -ENODEV;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		m_ptr->tolerance = new_value;
		break;
	case TIPC_CMD_SET_LINK_PRI:
		m_ptr->priority = new_value;
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		m_ptr->window = new_value;
		break;
	default:
		res = -EINVAL;
		break;
	}
	return res;
}
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
				     u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	if (!link_value_is_valid(cmd, new_value))
		return tipc_cfg_reply_error_string(
			"cannot change, value invalid");

	if (!strcmp(args->name, tipc_bclink_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (tipc_bclink_set_queue_limits(new_value) == 0))
			return tipc_cfg_reply_none();
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (cannot change setting on broadcast link)");
	}

	res = link_cmd_set_value(args->name, new_value, cmd);
	if (res)
		return tipc_cfg_reply_error_string("cannot change link setting");

	return tipc_cfg_reply_none();
}
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
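	/* Seed the info counters with the current sequence numbers, so that
	 * per-period packet counts (next_*_no minus *_info) remain correct
	 * after the reset.
	 */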
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
{
	char *link_name;
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	unsigned int bearer_id;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	link_name = (char *)TLV_DATA(req_tlv_area);
	if (!strcmp(link_name, tipc_bclink_name)) {
		if (tipc_bclink_reset_stats())
			return tipc_cfg_reply_error_string("link not found");
		return tipc_cfg_reply_none();
	}
	node = tipc_link_find_owner(link_name, &bearer_id);
	if (!node)
		return tipc_cfg_reply_error_string("link not found");

	tipc_node_lock(node);
	l_ptr = node->links[bearer_id];
	if (!l_ptr) {
		tipc_node_unlock(node);
		return tipc_cfg_reply_error_string("link not found");
	}
	link_reset_statistics(l_ptr);
	tipc_node_unlock(node);
	return tipc_cfg_reply_none();
}
/**
 * percent - convert count to a percentage of total (rounding up or down)
 */
static u32 percent(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}
/**
 * tipc_link_stats - print link statistics
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	unsigned int bearer_id;
	int ret;

	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	node = tipc_link_find_owner(name, &bearer_id);
	if (!node)
		return 0;

	tipc_node_lock(node);

	l = node->links[bearer_id];
	if (!l) {
		tipc_node_unlock(node);
		return 0;
	}

	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";
	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    " %s MTU:%u Priority:%u Tolerance:%u ms"
			    " Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX profile sample:%u packets average:%u octets\n"
			     " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     " Congestion link:%u Send queue"
			     " max:%u avg:%u\n", s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	return ret;
}
struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	int str_len;
	int pb_len;
	char *pb;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;
	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
				  pb, pb_len);
	if (!str_len) {
		kfree_skb(buf);
		return tipc_cfg_reply_error_string("link not found");
	}
	str_len += 1;	/* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
/**
 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
 * @dest: network address of destination node
 * @selector: used to select from set of active links
 *
 * If no active link can be found, uses default maximum packet size.
 */
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	u32 res = MAX_PKT_DEFAULT;

	if (dest == tipc_own_addr)
		return MAX_MSG_SIZE;

	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = l_ptr->max_pkt;
		tipc_node_unlock(n_ptr);
	}
	return res;
}
static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}