/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
        u32 sent_pkts;
        u32 recv_pkts;
        u32 sent_states;
        u32 recv_states;
        u32 sent_probes;
        u32 recv_probes;
        u32 sent_nacks;
        u32 recv_nacks;
        u32 sent_acks;
        u32 sent_bundled;
        u32 sent_bundles;
        u32 recv_bundled;
        u32 recv_bundles;
        u32 retransmitted;
        u32 sent_fragmented;
        u32 sent_fragments;
        u32 recv_fragmented;
        u32 recv_fragments;
        u32 link_congs;            /* # port sends blocked by congestion */
        u32 deferred_recv;
        u32 duplicates;
        u32 max_queue_sz;          /* send queue size high water mark */
        u32 accu_queue_sz;         /* used for send queue size profiling */
        u32 queue_sz_counts;       /* used for send queue size profiling */
        u32 msg_length_counts;     /* used for message length profiling */
        u32 msg_lengths_total;     /* used for message length profiling */
        u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @last_retransmitted: sequence number of most recently retransmitted message
 * @stale_cnt: counter for number of identical retransmit attempts
 * @stale_limit: time when repeated identical retransmits must force link reset
 * @ackers: # of peers that needs to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
        struct net *net;

        /* Management and link supervision data */
        u16 peer_session;
        u16 session;
        u16 snd_nxt_state;
        u16 rcv_nxt_state;
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
        u32 abort_limit;
        u32 state;
        u16 peer_caps;
        bool in_session;
        bool active;
        u32 silent_intv_cnt;
        char if_name[TIPC_MAX_IF_NAME];
        u32 priority;
        char net_plane;
        struct tipc_mon_state mon_state;
        u16 rst_cnt;

        /* Failover/synch */
        u16 drop_point;
        struct sk_buff *failover_reasm_skb;

        /* Max packet negotiation */
        u16 mtu;
        u16 advertised_mtu;

        /* Sending */
        struct sk_buff_head transmq;
        struct sk_buff_head backlogq;
        struct {
                u16 len;
                u16 limit;
        } backlog[5];
        u16 snd_nxt;
        u16 last_retransm;
        u16 window;
        u16 stale_cnt;
        unsigned long stale_limit;

        /* Reception */
        u16 rcv_nxt;
        u32 rcv_unacked;
        struct sk_buff_head deferdq;
        struct sk_buff_head *inputq;
        struct sk_buff_head *namedq;

        /* Congestion handling */
        struct sk_buff_head wakeupq;

        /* Fragmentation/reassembly */
        struct sk_buff *reasm_buf;

        /* Broadcast */
        u16 ackers;
        u16 acked;
        struct tipc_link *bc_rcvlink;
        struct tipc_link *bc_sndlink;
        unsigned long prev_retr;
        u16 prev_from;
        u16 prev_to;
        u8 nack_state;
        bool bc_peer_is_up;

        /* Statistics */
        struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
        BC_NACK_SND_CONDITIONAL,
        BC_NACK_SND_UNCONDITIONAL,
        BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIMIT 10 /* [ms] */

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Link FSM states:
 */
enum {
        LINK_ESTABLISHED  = 0xe,
        LINK_ESTABLISHING = 0xe << 4,
        LINK_RESET        = 0x1 << 8,
        LINK_RESETTING    = 0x2 << 12,
        LINK_PEER_RESET   = 0xd << 16,
        LINK_FAILINGOVER  = 0xf << 20,
        LINK_SYNCHING     = 0xc << 24
};

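/* Illustration (editor's sketch, not from the original file): each state
 * value occupies a distinct bit pattern, so membership in a set of states
 * reduces to a single bitwise AND. E.g. with l->state == LINK_SYNCHING
 * (0xc << 24):
 *
 *      l->state & (LINK_ESTABLISHED | LINK_SYNCHING)
 *              == 0x0c000000 & 0x0c00000e == 0x0c000000 != 0
 *
 * which is exactly how link_is_up() below classifies both LINK_ESTABLISHED
 * and LINK_SYNCHING as "up" in one test.
 */
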
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
        return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      bool probe_reply, u16 rcvgap,
                                      int tolerance, int priority,
                                      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
        return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
        return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
        return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
        return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
        return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
        return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
        return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
        return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
        return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
        l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
        return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
        return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
        return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
        return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
        return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
        return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
        l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
                           struct tipc_link *uc_l,
                           struct sk_buff_head *xmitq)
{
        struct tipc_link *rcv_l = uc_l->bc_rcvlink;

        snd_l->ackers++;
        rcv_l->acked = snd_l->snd_nxt - 1;
        snd_l->state = LINK_ESTABLISHED;
        tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
                              struct tipc_link *rcv_l,
                              struct sk_buff_head *xmitq)
{
        u16 ack = snd_l->snd_nxt - 1;

        snd_l->ackers--;
        rcv_l->bc_peer_is_up = true;
        rcv_l->state = LINK_ESTABLISHED;
        tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
        tipc_link_reset(rcv_l);
        rcv_l->state = LINK_RESET;
        if (!snd_l->ackers) {
                tipc_link_reset(snd_l);
                snd_l->state = LINK_RESET;
                __skb_queue_purge(xmitq);
        }
}

int tipc_link_bc_peers(struct tipc_link *l)
{
        return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
        struct sk_buff *skb = skb_peek(&l->deferdq);
        u16 gap = 0;

        if (more(l->snd_nxt, l->rcv_nxt))
                gap = l->snd_nxt - l->rcv_nxt;
        if (skb)
                gap = buf_seqno(skb) - l->rcv_nxt;
        return gap;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
        l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
        return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
        return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
        return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
        return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
        return l->state;
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
                      int tolerance, char net_plane, u32 mtu, int priority,
                      int window, u32 session, u32 self,
                      u32 peer, u8 *peer_id, u16 peer_caps,
                      struct tipc_link *bc_sndlink,
                      struct tipc_link *bc_rcvlink,
                      struct sk_buff_head *inputq,
                      struct sk_buff_head *namedq,
                      struct tipc_link **link)
{
        char peer_str[NODE_ID_STR_LEN] = {0,};
        char self_str[NODE_ID_STR_LEN] = {0,};
        struct tipc_link *l;

        l = kzalloc(sizeof(*l), GFP_ATOMIC);
        if (!l)
                return false;
        *link = l;
        l->session = session;

        /* Set link name for unicast links only */
        if (peer_id) {
                tipc_nodeid2string(self_str, tipc_own_id(net));
                if (strlen(self_str) > 16)
                        sprintf(self_str, "%x", self);
                tipc_nodeid2string(peer_str, peer_id);
                if (strlen(peer_str) > 16)
                        sprintf(peer_str, "%x", peer);
        }
        /* Peer i/f name will be completed by reset/activate message */
        snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
                 self_str, if_name, peer_str);

        strcpy(l->if_name, if_name);
        l->addr = peer;
        l->peer_caps = peer_caps;
        l->net = net;
        l->in_session = false;
        l->bearer_id = bearer_id;
        l->tolerance = tolerance;
        if (bc_rcvlink)
                bc_rcvlink->tolerance = tolerance;
        l->net_plane = net_plane;
        l->advertised_mtu = mtu;
        l->mtu = mtu;
        l->priority = priority;
        tipc_link_set_queue_limits(l, window);
        l->ackers = 1;
        l->bc_sndlink = bc_sndlink;
        l->bc_rcvlink = bc_rcvlink;
        l->inputq = inputq;
        l->namedq = namedq;
        l->state = LINK_RESETTING;
        __skb_queue_head_init(&l->transmq);
        __skb_queue_head_init(&l->backlogq);
        __skb_queue_head_init(&l->deferdq);
        skb_queue_head_init(&l->wakeupq);
        skb_queue_head_init(l->inputq);
        return true;
}

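/* Usage sketch (illustrative, modelled on this function's callers in the
 * node layer; parameter setup, locking and error paths elided):
 *
 *      struct tipc_link *l;
 *
 *      if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
 *                            b->net_plane, b->mtu, b->priority, b->window,
 *                            session, tipc_own_addr(net), addr, peer_id,
 *                            capabilities, bc_sndlink, bc_rcvlink,
 *                            inputq, namedq, &l))
 *              return false;
 *      tipc_link_reset(l);
 *
 * after which the new link starts from a well-defined LINK_RESET state.
 */
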
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used initially if no peers
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
                         int mtu, int window, u16 peer_caps,
                         struct sk_buff_head *inputq,
                         struct sk_buff_head *namedq,
                         struct tipc_link *bc_sndlink,
                         struct tipc_link **link)
{
        struct tipc_link *l;

        if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
                              0, ownnode, peer, NULL, peer_caps, bc_sndlink,
                              NULL, inputq, namedq, link))
                return false;

        l = *link;
        strcpy(l->name, tipc_bclink_name);
        tipc_link_reset(l);
        l->state = LINK_RESET;
        l->ackers = 0;
        l->bc_rcvlink = l;

        /* Broadcast send link is always up */
        if (link_is_bc_sndlink(l))
                l->state = LINK_ESTABLISHED;

        /* Disable replicast if even a single peer doesn't support it */
        if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
                tipc_bcast_disable_rcast(net);

        return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
        int rc = 0;

        switch (l->state) {
        case LINK_RESETTING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_RESET:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                case LINK_FAILURE_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_PEER_RESET:
                switch (evt) {
                case LINK_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_FAILINGOVER:
                switch (evt) {
                case LINK_FAILOVER_END_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHING:
                switch (evt) {
                case LINK_ESTABLISH_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_PEER_RESET_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHED:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                        l->state = LINK_SYNCHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_SYNCHING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        default:
                pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
        }
        return rc;
illegal_evt:
        pr_err("Illegal FSM event %x in state %x on link %s\n",
               evt, l->state, l->name);
        return rc;
}

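/* Usage sketch (illustrative): the FSM only moves l->state and reports
 * required side effects through its return value; callers are expected
 * to act on the returned flags, e.g.:
 *
 *      rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 *      if (rc & TIPC_LINK_DOWN_EVT)
 *              (the node layer takes the link down and resets it)
 */
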
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
        struct sk_buff *skb;
        struct tipc_msg *msg;
        int length;

        /* Update counters used in statistical profiling of send traffic */
        l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
        l->stats.queue_sz_counts++;

        skb = skb_peek(&l->transmq);
        if (!skb)
                return;
        msg = buf_msg(skb);
        length = msg_size(msg);

        if (msg_user(msg) == MSG_FRAGMENTER) {
                if (msg_type(msg) != FIRST_FRAGMENT)
                        return;
                length = msg_size(msg_get_wrapped(msg));
        }
        l->stats.msg_lengths_total += length;
        l->stats.msg_length_counts++;
        if (length <= 64)
                l->stats.msg_length_profile[0]++;
        else if (length <= 256)
                l->stats.msg_length_profile[1]++;
        else if (length <= 1024)
                l->stats.msg_length_profile[2]++;
        else if (length <= 4096)
                l->stats.msg_length_profile[3]++;
        else if (length <= 16384)
                l->stats.msg_length_profile[4]++;
        else if (length <= 32768)
                l->stats.msg_length_profile[5]++;
        else
                l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int mtyp = 0;
        int rc = 0;
        bool state = false;
        bool probe = false;
        bool setup = false;
        u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
        u16 bc_acked = l->bc_rcvlink->acked;
        struct tipc_mon_state *mstate = &l->mon_state;

        switch (l->state) {
        case LINK_ESTABLISHED:
        case LINK_SYNCHING:
                mtyp = STATE_MSG;
                link_profile_stats(l);
                tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
                if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                state = bc_acked != bc_snt;
                state |= l->bc_rcvlink->rcv_unacked;
                state |= l->rcv_unacked;
                state |= !skb_queue_empty(&l->transmq);
                state |= !skb_queue_empty(&l->deferdq);
                probe = mstate->probing;
                probe |= l->silent_intv_cnt;
                if (probe || mstate->monitoring)
                        l->silent_intv_cnt++;
                break;
        case LINK_RESET:
                setup = l->rst_cnt++ <= 4;
                setup |= !(l->rst_cnt % 16);
                mtyp = RESET_MSG;
                break;
        case LINK_ESTABLISHING:
                setup = true;
                mtyp = ACTIVATE_MSG;
                break;
        case LINK_PEER_RESET:
        case LINK_RESETTING:
        case LINK_FAILINGOVER:
                break;
        default:
                break;
        }

        if (state || probe || setup)
                tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

        return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
        u32 dnode = tipc_own_addr(l->net);
        u32 dport = msg_origport(hdr);
        struct sk_buff *skb;

        /* Create and schedule wakeup pseudo message */
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              dnode, l->addr, dport, 0, 0);
        if (!skb)
                return -ENOBUFS;
        msg_set_dest_droppable(buf_msg(skb), true);
        TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
        skb_queue_tail(&l->wakeupq, skb);
        l->stats.link_congs++;
        return -ELINKCONG;
}

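/* Congestion contract (sketch, assuming the socket-layer semantics in
 * this tree): a sender whose tipc_link_xmit() call ends up returning
 * -ELINKCONG has had a SOCK_WAKEUP pseudo message queued on its behalf;
 * it should block or back off until link_prepare_wakeup() forwards that
 * message to l->inputq, rather than retrying in a tight loop.
 */
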
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
        struct sk_buff *skb, *tmp;
        int imp, i = 0;

        skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
                imp = TIPC_SKB_CB(skb)->chain_imp;
                if (l->backlog[imp].len < l->backlog[imp].limit) {
                        skb_unlink(skb, &l->wakeupq);
                        skb_queue_tail(l->inputq, skb);
                } else if (i++ > 10) {
                        break;
                }
        }
}

void tipc_link_reset(struct tipc_link *l)
{
        struct sk_buff_head list;

        __skb_queue_head_init(&list);

        l->in_session = false;
        l->session++;
        l->mtu = l->advertised_mtu;

        spin_lock_bh(&l->wakeupq.lock);
        skb_queue_splice_init(&l->wakeupq, &list);
        spin_unlock_bh(&l->wakeupq.lock);

        spin_lock_bh(&l->inputq->lock);
        skb_queue_splice_init(&list, l->inputq);
        spin_unlock_bh(&l->inputq->lock);

        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
        __skb_queue_purge(&l->backlogq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
        l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
        l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
        l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
        l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
        kfree_skb(l->reasm_buf);
        kfree_skb(l->failover_reasm_skb);
        l->reasm_buf = NULL;
        l->failover_reasm_skb = NULL;
        l->rcv_unacked = 0;
        l->snd_nxt = 1;
        l->rcv_nxt = 1;
        l->snd_nxt_state = 1;
        l->rcv_nxt_state = 1;
        l->acked = 0;
        l->silent_intv_cnt = 0;
        l->rst_cnt = 0;
        l->stale_cnt = 0;
        l->bc_peer_is_up = false;
        memset(&l->mon_state, 0, sizeof(l->mon_state));
        tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb_peek(list));
        unsigned int maxwin = l->window;
        int imp = msg_importance(hdr);
        unsigned int mtu = l->mtu;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff *skb, *_skb, *bskb;
        int pkt_cnt = skb_queue_len(list);
        int rc = 0;

        if (unlikely(msg_size(hdr) > mtu)) {
                skb_queue_purge(list);
                return -EMSGSIZE;
        }

        /* Allow oversubscription of one data msg per source at congestion */
        if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
                if (imp == TIPC_SYSTEM_IMPORTANCE) {
                        pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
                        return -ENOBUFS;
                }
                rc = link_schedule_user(l, hdr);
        }

        if (pkt_cnt > 1) {
                l->stats.sent_fragmented++;
                l->stats.sent_fragments += pkt_cnt;
        }

        /* Prepare each packet for sending, and add to relevant queue: */
        while (skb_queue_len(list)) {
                skb = skb_peek(list);
                hdr = buf_msg(skb);
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);

                if (likely(skb_queue_len(transmq) < maxwin)) {
                        _skb = skb_clone(skb, GFP_ATOMIC);
                        if (!_skb) {
                                skb_queue_purge(list);
                                return -ENOBUFS;
                        }
                        __skb_dequeue(list);
                        __skb_queue_tail(transmq, skb);
                        __skb_queue_tail(xmitq, _skb);
                        TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
                        l->stats.sent_pkts++;
                        seqno++;
                        continue;
                }
                if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
                        kfree_skb(__skb_dequeue(list));
                        l->stats.sent_bundled++;
                        continue;
                }
                if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
                        kfree_skb(__skb_dequeue(list));
                        __skb_queue_tail(backlogq, bskb);
                        l->backlog[msg_importance(buf_msg(bskb))].len++;
                        l->stats.sent_bundled++;
                        l->stats.sent_bundles++;
                        continue;
                }
                l->backlog[imp].len += skb_queue_len(list);
                skb_queue_splice_tail_init(list, backlogq);
        }
        l->snd_nxt = seqno;
        return rc;
}

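/* Usage sketch (illustrative; tipc_msg_build() and tipc_bearer_xmit()
 * are the helpers this file's callers use): build a buffer chain,
 * enqueue it, then flush whatever was accepted for immediate sending:
 *
 *      struct sk_buff_head list, xmitq;
 *
 *      __skb_queue_head_init(&list);
 *      __skb_queue_head_init(&xmitq);
 *      rc = tipc_msg_build(hdr, m, 0, dlen, tipc_link_mtu(l), &list);
 *      if (!rc)
 *              rc = tipc_link_xmit(l, &list, &xmitq);
 *      tipc_bearer_xmit(net, bearer_id, &xmitq, &maddr);
 */
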
static void tipc_link_advance_backlog(struct tipc_link *l,
                                      struct sk_buff_head *xmitq)
{
        struct sk_buff *skb, *_skb;
        struct tipc_msg *hdr;
        u16 seqno = l->snd_nxt;
        u16 ack = l->rcv_nxt - 1;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

        while (skb_queue_len(&l->transmq) < l->window) {
                skb = skb_peek(&l->backlogq);
                if (!skb)
                        break;
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb)
                        break;
                __skb_dequeue(&l->backlogq);
                hdr = buf_msg(skb);
                l->backlog[msg_importance(hdr)].len--;
                __skb_queue_tail(&l->transmq, skb);
                __skb_queue_tail(xmitq, _skb);
                TIPC_SKB_CB(skb)->ackers = l->ackers;
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);
                l->rcv_unacked = 0;
                l->stats.sent_pkts++;
                seqno++;
        }
        l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
        struct tipc_msg *hdr = buf_msg(skb);

        pr_warn("Retransmission failure on link <%s>\n", l->name);
        link_print(l, "State of link ");
        pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
                msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
        pr_info("sqno %u, prev: %x, src: %x\n",
                msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

/* tipc_link_retrans() - retransmit one or more packets
 * @l: the link to transmit on
 * @r: the receiving link ordering the retransmit. Same as l if unicast
 * @from: retransmit from (inclusive) this sequence number
 * @to: retransmit to (inclusive) this sequence number
 * @xmitq: queue for accumulating the retransmitted packets
 */
static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
                             u16 from, u16 to, struct sk_buff_head *xmitq)
{
        struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        u16 ack = l->rcv_nxt - 1;
        struct tipc_msg *hdr;

        if (!skb)
                return 0;

        /* Detect repeated retransmit failures on same packet */
        if (r->last_retransm != buf_seqno(skb)) {
                r->last_retransm = buf_seqno(skb);
                r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
                r->stale_cnt = 0;
        } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
                link_retransmit_failure(l, skb);
                if (link_is_bc_sndlink(l))
                        return TIPC_LINK_DOWN_EVT;
                return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        }

        skb_queue_walk(&l->transmq, skb) {
                hdr = buf_msg(skb);
                if (less(msg_seqno(hdr), from))
                        continue;
                if (more(msg_seqno(hdr), to))
                        break;
                _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
                if (!_skb)
                        return 0;
                hdr = buf_msg(_skb);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);
                _skb->priority = TC_PRIO_CONTROL;
                __skb_queue_tail(xmitq, _skb);
                l->stats.retransmitted++;
        }
        return 0;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                            struct sk_buff_head *inputq)
{
        struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
        struct tipc_msg *hdr = buf_msg(skb);

        switch (msg_user(hdr)) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
                if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
                        skb_queue_tail(mc_inputq, skb);
                        return true;
                }
                /* else: fall through */
        case CONN_MANAGER:
                skb_queue_tail(inputq, skb);
                return true;
        case GROUP_PROTOCOL:
                skb_queue_tail(mc_inputq, skb);
                return true;
        case NAME_DISTRIBUTOR:
                l->bc_rcvlink->state = LINK_ESTABLISHED;
                skb_queue_tail(l->namedq, skb);
                return true;
        case MSG_BUNDLER:
        case TUNNEL_PROTOCOL:
        case MSG_FRAGMENTER:
        case BCAST_PROTOCOL:
                return false;
        default:
                pr_warn("Dropping received illegal msg type\n");
                kfree_skb(skb);
                return false;
        }
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
                           struct sk_buff_head *inputq)
{
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff **reasm_skb = &l->reasm_buf;
        struct sk_buff *iskb;
        struct sk_buff_head tmpq;
        int usr = msg_user(hdr);
        int rc = 0;
        int pos = 0;
        int ipos = 0;

        if (unlikely(usr == TUNNEL_PROTOCOL)) {
                if (msg_type(hdr) == SYNCH_MSG) {
                        __skb_queue_purge(&l->deferdq);
                        goto drop;
                }
                if (!tipc_msg_extract(skb, &iskb, &ipos))
                        return rc;
                kfree_skb(skb);
                skb = iskb;
                hdr = buf_msg(skb);
                if (less(msg_seqno(hdr), l->drop_point))
                        goto drop;
                if (tipc_data_input(l, skb, inputq))
                        return rc;
                usr = msg_user(hdr);
                reasm_skb = &l->failover_reasm_skb;
        }

        if (usr == MSG_BUNDLER) {
                skb_queue_head_init(&tmpq);
                l->stats.recv_bundles++;
                l->stats.recv_bundled += msg_msgcnt(hdr);
                while (tipc_msg_extract(skb, &iskb, &pos))
                        tipc_data_input(l, iskb, &tmpq);
                tipc_skb_queue_splice_tail(&tmpq, inputq);
                return 0;
        } else if (usr == MSG_FRAGMENTER) {
                l->stats.recv_fragments++;
                if (tipc_buf_append(reasm_skb, &skb)) {
                        l->stats.recv_fragmented++;
                        tipc_data_input(l, skb, inputq);
                } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
                        pr_warn_ratelimited("Unable to build fragment list\n");
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                return 0;
        } else if (usr == BCAST_PROTOCOL) {
                tipc_bcast_lock(l->net);
                tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
                tipc_bcast_unlock(l->net);
        }
drop:
        kfree_skb(skb);
        return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
        bool released = false;
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&l->transmq, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
                __skb_unlink(skb, &l->transmq);
                kfree_skb(skb);
                released = true;
        }
        return released;
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        if (!l)
                return 0;

        /* Broadcast ACK must be sent via a unicast link => defer to caller */
        if (link_is_bc_rcvlink(l)) {
                if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
                        return 0;
                l->rcv_unacked = 0;

                /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
                l->snd_nxt = l->rcv_nxt;
                return TIPC_LINK_SND_STATE;
        }

        /* Unicast ACK */
        l->rcv_unacked = 0;
        l->stats.sent_acks++;
        tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
        return 0;
}

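/* Illustration: the XOR test above staggers broadcast ACKs across nodes.
 * A node whose own address has low nibble 0x3 sends its ACK only when
 * rcv_nxt has low nibble 0xc (0x3 ^ 0xc == 0xf), i.e. at most once per
 * 16 packets, in a slot that differs from nodes with other low nibbles.
 */
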
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int mtyp = RESET_MSG;
        struct sk_buff *skb;

        if (l->state == LINK_ESTABLISHING)
                mtyp = ACTIVATE_MSG;

        tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

        /* Inform peer that this endpoint is going down if applicable */
        skb = skb_peek_tail(xmitq);
        if (skb && (l->state == LINK_RESET))
                msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq)
{
        u32 def_cnt = ++l->stats.deferred_recv;
        int match1, match2;

        if (link_is_bc_rcvlink(l)) {
                match1 = def_cnt & 0xf;
                match2 = tipc_own_addr(l->net) & 0xf;
                if (match1 == match2)
                        return TIPC_LINK_SND_STATE;
                return 0;
        }

        if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
                tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
        return 0;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                  struct sk_buff_head *xmitq)
{
        struct sk_buff_head *defq = &l->deferdq;
        struct tipc_msg *hdr;
        u16 seqno, rcv_nxt, win_lim;
        int rc = 0;

        do {
                hdr = buf_msg(skb);
                seqno = msg_seqno(hdr);
                rcv_nxt = l->rcv_nxt;
                win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

                /* Verify and update link state */
                if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
                        return tipc_link_proto_rcv(l, skb, xmitq);

                if (unlikely(!link_is_up(l))) {
                        if (l->state == LINK_ESTABLISHING)
                                rc = TIPC_LINK_UP_EVT;
                        goto drop;
                }

                /* Don't send probe at next timeout expiration */
                l->silent_intv_cnt = 0;

                /* Drop if outside receive window */
                if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
                        l->stats.duplicates++;
                        goto drop;
                }

                /* Forward queues and wake up waiting users */
                if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
                        l->stale_cnt = 0;
                        tipc_link_advance_backlog(l, xmitq);
                        if (unlikely(!skb_queue_empty(&l->wakeupq)))
                                link_prepare_wakeup(l);
                }

                /* Defer delivery if sequence gap */
                if (unlikely(seqno != rcv_nxt)) {
                        __tipc_skb_queue_sorted(defq, seqno, skb);
                        rc |= tipc_link_build_nack_msg(l, xmitq);
                        break;
                }

                /* Deliver packet */
                l->rcv_nxt++;
                l->stats.recv_pkts++;
                if (!tipc_data_input(l, skb, l->inputq))
                        rc |= tipc_link_input(l, skb, l->inputq);
                if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
                        rc |= tipc_link_build_state_msg(l, xmitq);
                if (unlikely(rc & ~TIPC_LINK_SND_STATE))
                        break;
        } while ((skb = __skb_dequeue(defq)));

        return rc;
drop:
        kfree_skb(skb);
        return rc;
}

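/* Usage sketch (illustrative): reception never transmits directly; the
 * caller owns xmitq and flushes it onto the bearer afterwards:
 *
 *      struct sk_buff_head xmitq;
 *
 *      __skb_queue_head_init(&xmitq);
 *      rc = tipc_link_rcv(l, skb, &xmitq);
 *      (node layer acts on TIPC_LINK_UP_EVT/TIPC_LINK_DOWN_EVT in rc)
 *      tipc_bearer_xmit(net, bearer_id, &xmitq, &maddr);
 */
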
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      bool probe_reply, u16 rcvgap,
                                      int tolerance, int priority,
                                      struct sk_buff_head *xmitq)
{
        struct tipc_link *bcl = l->bc_rcvlink;
        struct sk_buff *skb;
        struct tipc_msg *hdr;
        struct sk_buff_head *dfq = &l->deferdq;
        bool node_up = link_is_up(bcl);
        struct tipc_mon_state *mstate = &l->mon_state;
        int dlen = 0;
        void *data;

        /* Don't send protocol message during reset or link failover */
        if (tipc_link_is_blocked(l))
                return;

        if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
                return;

        if (!skb_queue_empty(dfq))
                rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

        skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
                              tipc_max_domain_size, l->addr,
                              tipc_own_addr(l->net), 0, 0, 0);
        if (!skb)
                return;

        hdr = buf_msg(skb);
        data = msg_data(hdr);
        msg_set_session(hdr, l->session);
        msg_set_bearer_id(hdr, l->bearer_id);
        msg_set_net_plane(hdr, l->net_plane);
        msg_set_next_sent(hdr, l->snd_nxt);
        msg_set_ack(hdr, l->rcv_nxt - 1);
        msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
        msg_set_bc_ack_invalid(hdr, !node_up);
        msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
        msg_set_link_tolerance(hdr, tolerance);
        msg_set_linkprio(hdr, priority);
        msg_set_redundant_link(hdr, node_up);
        msg_set_seq_gap(hdr, 0);
        msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

        if (mtyp == STATE_MSG) {
                if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
                        msg_set_seqno(hdr, l->snd_nxt_state++);
                msg_set_seq_gap(hdr, rcvgap);
                msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
                msg_set_probe(hdr, probe);
                msg_set_is_keepalive(hdr, probe || probe_reply);
                tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
                msg_set_size(hdr, INT_H_SIZE + dlen);
                skb_trim(skb, INT_H_SIZE + dlen);
                l->stats.sent_states++;
                l->rcv_unacked = 0;
        } else {
                /* RESET_MSG or ACTIVATE_MSG */
                msg_set_max_pkt(hdr, l->advertised_mtu);
                strcpy(data, l->if_name);
                msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
                skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
        }
        if (probe)
                l->stats.sent_probes++;
        if (rcvgap)
                l->stats.sent_nacks++;
        skb->priority = TC_PRIO_CONTROL;
        __skb_queue_tail(xmitq, skb);
}

void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq)
{
        u32 onode = tipc_own_addr(l->net);
        struct tipc_msg *hdr, *ihdr;
        struct sk_buff_head tnlq;
        struct sk_buff *skb;
        u32 dnode = l->addr;

        skb_queue_head_init(&tnlq);
        skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
                              INT_H_SIZE, BASIC_H_SIZE,
                              dnode, onode, 0, 0, 0);
        if (!skb) {
                pr_warn("%sunable to create tunnel packet\n", link_co_err);
                return;
        }

        hdr = buf_msg(skb);
        msg_set_msgcnt(hdr, 1);
        msg_set_bearer_id(hdr, l->peer_bearer_id);

        ihdr = (struct tipc_msg *)msg_data(hdr);
        tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
                      BASIC_H_SIZE, dnode);
        msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
        __skb_queue_tail(&tnlq, skb);
        tipc_link_xmit(l, &tnlq, xmitq);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq)
{
        struct sk_buff *skb, *tnlskb;
        struct tipc_msg *hdr, tnlhdr;
        struct sk_buff_head *queue = &l->transmq;
        struct sk_buff_head tmpxq, tnlq;
        u16 pktlen, pktcnt, seqno = l->snd_nxt;

        if (!tnl)
                return;

        skb_queue_head_init(&tnlq);
        skb_queue_head_init(&tmpxq);

        /* At least one packet required for safe algorithm => add dummy */
        skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
                              BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
                              0, 0, TIPC_ERR_NO_PORT);
        if (!skb) {
                pr_warn("%sunable to create tunnel packet\n", link_co_err);
                return;
        }
        skb_queue_tail(&tnlq, skb);
        tipc_link_xmit(l, &tnlq, &tmpxq);
        __skb_queue_purge(&tmpxq);

        /* Initialize reusable tunnel packet header */
        tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
                      mtyp, INT_H_SIZE, l->addr);
        pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
        msg_set_msgcnt(&tnlhdr, pktcnt);
        msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
        /* Wrap each packet into a tunnel packet */
        skb_queue_walk(queue, skb) {
                hdr = buf_msg(skb);
                if (queue == &l->backlogq)
                        msg_set_seqno(hdr, seqno++);
                pktlen = msg_size(hdr);
                msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
                tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
                if (!tnlskb) {
                        pr_warn("%sunable to send packet\n", link_co_err);
                        return;
                }
                skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
                skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
                __skb_queue_tail(&tnlq, tnlskb);
        }
        if (queue != &l->backlogq) {
                queue = &l->backlogq;
                goto tnl;
        }

        tipc_link_xmit(tnl, &tnlq, xmitq);

        if (mtyp == FAILOVER_MSG) {
                tnl->drop_point = l->rcv_nxt;
                tnl->failover_reasm_skb = l->reasm_buf;
                l->reasm_buf = NULL;
        }
}

/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
        u16 curr_session = l->peer_session;
        u16 session = msg_session(hdr);
        int mtyp = msg_type(hdr);

        if (msg_user(hdr) != LINK_PROTOCOL)
                return true;

        switch (mtyp) {
        case RESET_MSG:
                if (!l->in_session)
                        return true;
                /* Accept only RESET with new session number */
                return more(session, curr_session);
        case ACTIVATE_MSG:
                if (!l->in_session)
                        return true;
                /* Accept only ACTIVATE with new or current session number */
                return !less(session, curr_session);
        case STATE_MSG:
                /* Accept only STATE with current session number */
                if (!l->in_session)
                        return false;
                if (session != curr_session)
                        return false;
                /* Extra sanity check */
                if (!link_is_up(l) && msg_ack(hdr))
                        return false;
                if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
                        return true;
                /* Accept only STATE with new sequence number */
                return !less(msg_seqno(hdr), l->rcv_nxt_state);
        default:
                return false;
        }
}

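/* Note on the more()/less() comparisons above (illustration): session and
 * sequence numbers are u16 values compared modulo 2^16, so a RESET
 * carrying session 0x0001 counts as "newer" than current session 0xfffe,
 * since (u16)(0x0001 - 0xfffe) == 3 is less than 0x8000; a plain '>'
 * would misorder the pair at the wrap-around point.
 */
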
/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb);
        u16 rcvgap = 0;
        u16 ack = msg_ack(hdr);
        u16 gap = msg_seq_gap(hdr);
        u16 peers_snd_nxt = msg_next_sent(hdr);
        u16 peers_tol = msg_link_tolerance(hdr);
        u16 peers_prio = msg_linkprio(hdr);
        u16 rcv_nxt = l->rcv_nxt;
        u16 dlen = msg_data_sz(hdr);
        int mtyp = msg_type(hdr);
        bool reply = msg_probe(hdr);
        void *data;
        char *if_name;
        int rc = 0;

        if (tipc_link_is_blocked(l) || !xmitq)
                goto exit;

        if (tipc_own_addr(l->net) > msg_prevnode(hdr))
                l->net_plane = msg_net_plane(hdr);

        skb_linearize(skb);
        hdr = buf_msg(skb);
        data = msg_data(hdr);

        if (!tipc_link_validate_msg(l, hdr))
                goto exit;

        switch (mtyp) {
        case RESET_MSG:
        case ACTIVATE_MSG:
                /* Complete own link name with peer's interface name */
                if_name = strrchr(l->name, ':') + 1;
                if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
                        break;
                if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
                        break;
                strncpy(if_name, data, TIPC_MAX_IF_NAME);

                /* Update own tolerance if peer indicates a non-zero value */
                if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
                        l->tolerance = peers_tol;
                        l->bc_rcvlink->tolerance = peers_tol;
                }
                /* Update own priority if peer's priority is higher */
                if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
                        l->priority = peers_prio;

                /* If peer is going down we want full re-establish cycle */
                if (msg_peer_stopping(hdr)) {
                        rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                        break;
                }
                /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
                if (mtyp == RESET_MSG || !link_is_up(l))
                        rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

                /* ACTIVATE_MSG takes up link if it was already locally reset */
                if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
                        rc = TIPC_LINK_UP_EVT;

                l->peer_session = msg_session(hdr);
                l->in_session = true;
                l->peer_bearer_id = msg_bearer_id(hdr);
                if (l->mtu > msg_max_pkt(hdr))
                        l->mtu = msg_max_pkt(hdr);
                break;

        case STATE_MSG:
                l->rcv_nxt_state = msg_seqno(hdr) + 1;

                /* Update own tolerance if peer indicates a non-zero value */
                if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
                        l->tolerance = peers_tol;
                        l->bc_rcvlink->tolerance = peers_tol;
                }
                /* Update own prio if peer indicates a different value */
                if ((peers_prio != l->priority) &&
                    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
                        l->priority = peers_prio;
                        rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }

                l->silent_intv_cnt = 0;
                l->stats.recv_states++;
                if (msg_probe(hdr))
                        l->stats.recv_probes++;

                if (!link_is_up(l)) {
                        if (l->state == LINK_ESTABLISHING)
                                rc = TIPC_LINK_UP_EVT;
                        break;
                }
                tipc_mon_rcv(l->net, data, dlen, l->addr,
                             &l->mon_state, l->bearer_id);

                /* Send NACK if peer has sent pkts we haven't received yet */
                if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
                        rcvgap = peers_snd_nxt - l->rcv_nxt;
                if (rcvgap || reply)
                        tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
                                                  rcvgap, 0, 0, xmitq);
                tipc_link_release_pkts(l, ack);

                /* If NACK, retransmit will now start at right position */
                if (gap) {
                        rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
                        l->stats.recv_nacks++;
                }

                tipc_link_advance_backlog(l, xmitq);
                if (unlikely(!skb_queue_empty(&l->wakeupq)))
                        link_prepare_wakeup(l);
        }
exit:
        kfree_skb(skb);
        return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
                                         u16 peers_snd_nxt,
                                         struct sk_buff_head *xmitq)
{
        struct sk_buff *skb;
        struct tipc_msg *hdr;
        struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
        u16 ack = l->rcv_nxt - 1;
        u16 gap_to = peers_snd_nxt - 1;

        skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
                              0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
        if (!skb)
                return false;
        hdr = buf_msg(skb);
        msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
        msg_set_bcast_ack(hdr, ack);
        msg_set_bcgap_after(hdr, ack);
        if (dfrd_skb)
                gap_to = buf_seqno(dfrd_skb) - 1;
        msg_set_bcgap_to(hdr, gap_to);
        msg_set_non_seq(hdr, bcast);
        __skb_queue_tail(xmitq, skb);
        return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq)
{
        struct sk_buff_head list;

        __skb_queue_head_init(&list);
        if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
                return;
        msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
        tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
        int mtyp = msg_type(hdr);
        u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

        if (link_is_up(l))
                return;

        if (msg_user(hdr) == BCAST_PROTOCOL) {
                l->rcv_nxt = peers_snd_nxt;
                l->state = LINK_ESTABLISHED;
                return;
        }

        if (l->peer_caps & TIPC_BCAST_SYNCH)
                return;

        if (msg_peer_node_is_up(hdr))
                return;

        /* Compatibility: accept older, less safe initial synch data */
        if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
                l->rcv_nxt = peers_snd_nxt;
}

/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
 * - Adjust permitted range if there is overlap with previous retransmission
 */
static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
{
        unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);

        if (less(*to, *from))
                return false;

        /* New retransmission request */
        if ((elapsed > TIPC_BC_RETR_LIMIT) ||
            less(*to, l->prev_from) || more(*from, l->prev_to)) {
                l->prev_from = *from;
                l->prev_to = *to;
                l->prev_retr = jiffies;
                return true;
        }

        /* Inside range of previous retransmit */
        if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
                return false;

        /* Fully or partially outside previous range => exclude overlap */
        if (less(*from, l->prev_from)) {
                *to = l->prev_from - 1;
                l->prev_from = *from;
        }
        if (more(*to, l->prev_to)) {
                *from = l->prev_to + 1;
                l->prev_to = *to;
        }
        l->prev_retr = jiffies;
        return true;
}

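/* Worked example of the overlap exclusion above: with a previous
 * retransmission of [10..20] still inside TIPC_BC_RETR_LIMIT, a new
 * request for [15..25] is trimmed to [21..25] (and prev_to becomes 25),
 * while a request for [12..18] is suppressed entirely because it lies
 * fully inside the previously serviced range.
 */
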
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
                          struct sk_buff_head *xmitq)
{
        struct tipc_link *snd_l = l->bc_sndlink;
        u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
        u16 from = msg_bcast_ack(hdr) + 1;
        u16 to = from + msg_bc_gap(hdr) - 1;
        int rc = 0;

        if (!link_is_up(l))
                return rc;

        if (!msg_peer_node_is_up(hdr))
                return rc;

        /* Open when peer acknowledges our bcast init msg (pkt #1) */
        if (msg_ack(hdr))
                l->bc_peer_is_up = true;

        if (!l->bc_peer_is_up)
                return rc;

        l->stats.recv_nacks++;

        /* Ignore if peers_snd_nxt goes beyond receive window */
        if (more(peers_snd_nxt, l->rcv_nxt + l->window))
                return rc;

        if (link_bc_retr_eval(snd_l, &from, &to))
                rc = tipc_link_retrans(snd_l, l, from, to, xmitq);

        l->snd_nxt = peers_snd_nxt;
        if (link_bc_rcv_gap(l))
                rc |= TIPC_LINK_SND_STATE;

        /* Return now if sender supports nack via STATE messages */
        if (l->peer_caps & TIPC_BCAST_STATE_NACK)
                return rc;

        /* Otherwise, be backwards compatible */

        if (!more(peers_snd_nxt, l->rcv_nxt)) {
                l->nack_state = BC_NACK_SND_CONDITIONAL;
                return 0;
        }

        /* Don't NACK if one was recently sent or peeked */
        if (l->nack_state == BC_NACK_SND_SUPPRESS) {
                l->nack_state = BC_NACK_SND_UNCONDITIONAL;
                return 0;
        }

        /* Conditionally delay NACK sending until next synch rcv */
        if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
                l->nack_state = BC_NACK_SND_UNCONDITIONAL;
                if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
                        return 0;
        }

        /* Send NACK now but suppress next one */
        tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
        l->nack_state = BC_NACK_SND_SUPPRESS;
        return 0;
}

void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
                          struct sk_buff_head *xmitq)
{
        struct sk_buff *skb, *tmp;
        struct tipc_link *snd_l = l->bc_sndlink;

        if (!link_is_up(l) || !l->bc_peer_is_up)
                return;

        if (!more(acked, l->acked))
                return;

        /* Skip over packets peer has already acked */
        skb_queue_walk(&snd_l->transmq, skb) {
                if (more(buf_seqno(skb), l->acked))
                        break;
        }

        /* Update/release the packets peer is acking now */
        skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
                if (!--TIPC_SKB_CB(skb)->ackers) {
                        __skb_unlink(skb, &snd_l->transmq);
                        kfree_skb(skb);
                }
        }
        l->acked = acked;
        tipc_link_advance_backlog(snd_l, xmitq);
        if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
                link_prepare_wakeup(snd_l);
}

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
                          struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb);
        u32 dnode = msg_destnode(hdr);
        int mtyp = msg_type(hdr);
        u16 acked = msg_bcast_ack(hdr);
        u16 from = acked + 1;
        u16 to = msg_bcgap_to(hdr);
        u16 peers_snd_nxt = to + 1;
        int rc = 0;

        kfree_skb(skb);

        if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
                return 0;

        if (mtyp != STATE_MSG)
                return 0;

        if (dnode == tipc_own_addr(l->net)) {
                tipc_link_bc_ack_rcv(l, acked, xmitq);
                rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
                l->stats.recv_nacks++;
                return rc;
        }

        /* Msg for other node => suppress own NACK at next sync if applicable */
        if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
                l->nack_state = BC_NACK_SND_SUPPRESS;

        return 0;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
        int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

        l->window = win;
        l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
        l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
        l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
        l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
        l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}

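/* Example: with the default window of 50 this yields backlog limits of
 * 50 (LOW), 100 (MEDIUM), 150 (HIGH) and 200 (CRITICAL) packets, since
 * each max_t() arm equals its floor at win == 50; only larger windows
 * raise them further. SYSTEM importance is instead bounded by bulk
 * name-table capacity, TIPC_MAX_PUBL / (mtu / ITEM_SIZE).
 */
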
/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
        memset(&l->stats, 0, sizeof(l->stats));
}

static void link_print(struct tipc_link *l, const char *str)
{
        struct sk_buff *hskb = skb_peek(&l->transmq);
        u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
        u16 tail = l->snd_nxt - 1;

        pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
        pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
                skb_queue_len(&l->transmq), head, tail,
                skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

1947 /* Parse and validate nested (link) properties valid for media, bearer and link
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

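/* __tipc_nl_add_stats(): translate the link's statistics counters into a
 * nested TIPC_NLA_LINK_STATS attribute set; if the message buffer runs
 * out of room the nest is cancelled and -EMSGSIZE returned so the caller
 * can abort the whole message.
 */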
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

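/* __tipc_nl_add_bc_link_stat(): broadcast-link counterpart of
 * __tipc_nl_add_stats(); the RX/TX packet totals are carried in the
 * *_INFO keys, followed by the fragmentation, bundling, nack and queue
 * counters (the profiling counters are unicast-only and omitted here).
 */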
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

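/* tipc_nl_add_bc_link(): append a descriptor for the broadcast link to a
 * netlink dump; the link-level RX/TX attributes are deliberately reported
 * as zero, the real counters live in the nested TIPC_NLA_LINK_STATS set.
 */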
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

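/* tipc_link_set_tolerance(): update the link's (and its broadcast receive
 * link's) tolerance and, if the link is up, queue a STATE_MSG on @xmitq
 * so the peer learns the new value.
 */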
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

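/* tipc_link_set_prio(): update the link's priority and advertise it to
 * the peer in a STATE_MSG queued on @xmitq.
 */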
void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

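/* tipc_link_set_abort_limit(): set how many consecutive probe intervals
 * may pass without any traffic from the peer before the link is deemed
 * failed and reset.
 */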
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}