/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "name_distr.h"
#include <linux/pkt_sched.h>
	u32 link_congs;			/* # port sends blocked by congestion */
	u32 max_queue_sz;		/* send queue size high water mark */
	u32 accu_queue_sz;		/* used for send queue size profiling */
	u32 queue_sz_counts;		/* used for send queue size profiling */
	u32 msg_length_counts;		/* used for message length profiling */
	u32 msg_lengths_total;		/* used for message length profiling */
	u32 msg_length_profile[7];	/* used for msg. length profiling */
/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @ackers: # of peers that needs to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 * @session: session to be used by link
 * @snd_nxt_state: next send seq number
 * @rcv_nxt_state: next rcv seq number
 * @in_session: have received ACTIVATE_MSG from peer
 * @active: link is active
 * @if_name: associated interface name
 * @rst_cnt: link reset counter
 * @drop_point: seq number for failover handling (FIXME)
 * @failover_reasm_skb: saved failover msg ptr (FIXME)
 * @failover_deferdq: deferred message queue for failover processing (FIXME)
 * @transmq: the link's transmit queue
 * @backlog: link's backlog by priority (importance)
 * @snd_nxt: next sequence number to be used
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @deferdq: deferred receive queue
 * @window: sliding window size for congestion handling
 * @min_win: minimal send window to be used by link
 * @ssthresh: slow start threshold for congestion handling
 * @max_win: maximal send window to be used by link
 * @cong_acks: congestion acks for congestion avoidance (FIXME)
 * @checkpoint: seq number for congestion window size handling
 * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
 * @last_gap: last gap ack blocks for bcast (FIXME)
 * @last_ga: ptr to gap ack blocks
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @nack_state: bcast nack state
 * @bc_peer_is_up: peer has acked the bcast init msg
 */
struct tipc_link {
	char name[TIPC_MAX_LINK_NAME];

	/* Management and link supervision data */
	char if_name[TIPC_MAX_IF_NAME];
	struct tipc_mon_state mon_state;

	struct sk_buff *failover_reasm_skb;
	struct sk_buff_head failover_deferdq;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
		struct sk_buff *target_bskb;
	} backlog[5];

	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;
	struct sk_buff *reasm_tnlmsg;

	struct tipc_gap_ack_blks *last_ga;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;

	struct tipc_stats stats;
};
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};
#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
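
/* Note: both limits expand to an expression that reads the current value of
 * jiffies at the point of use, so tipc_link_set_skb_retransmit_time() below
 * effectively rate-limits retransmission of an individual packet to at most
 * once per 10 ms on the broadcast send link and once per 1 ms on unicast
 * links.
 */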
enum {
	LINK_ESTABLISHED	= 0xe,
	LINK_ESTABLISHING	= 0xe  << 4,
	LINK_RESET		= 0x1  << 8,
	LINK_RESETTING		= 0x2  << 12,
	LINK_PEER_RESET		= 0xd  << 16,
	LINK_FAILINGOVER	= 0xf  << 20,
	LINK_SYNCHING		= 0xc  << 24
};
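
/* Each state above occupies its own hex digit of the 32-bit state word, so
 * "is the link in one of these states?" checks can be done with a single
 * bitwise AND instead of a chain of comparisons; see link_is_up(),
 * tipc_link_is_reset() and tipc_link_is_blocked() below.
 */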
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc);
static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted);
/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_min_win(struct tipc_link *l)
{
	return l->min_win;
}

int tipc_link_max_win(struct tipc_link *l)
{
	return l->max_win;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}
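
/* Sequence numbers throughout this file are 16-bit and wrap around, so gap
 * and ordering tests rely on modulo-2^16 arithmetic rather than plain
 * comparison.  A minimal sketch of the helpers assumed here (more()/less()
 * and friends live among TIPC's message helpers; shown only to illustrate
 * the idea, not as the verbatim definitions):
 *
 *	static inline int less_eq(u16 left, u16 right)
 *	{
 *		return (u16)(right - left) < 32768;
 *	}
 *	static inline int more(u16 left, u16 right)
 *	{
 *		return !less_eq(left, right);
 *	}
 *
 * This is why "l->snd_nxt - l->rcv_nxt" above yields the correct gap even
 * when snd_nxt has already wrapped past 0xffff.
 */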
void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
	return l->mtu - INT_H_SIZE;
#endif
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
	return l->state;
}
/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @session: session to be used by link
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 * @self: local unicast link id
 * @peer_id: 128-bit ID of peer
 *
 * Return: true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      u32 min_win, u32 max_win, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	if (bc_rcvlink)
		bc_rcvlink->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, min_win, max_win);
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	__skb_queue_head_init(&l->failover_deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @mtu: mtu to be used initially if no peers
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 *
 * Return: true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
			 int mtu, u32 min_win, u32 max_win, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
			      max_win, 0, ownnode, peer, NULL, peer_caps,
			      bc_sndlink, NULL, inputq, namedq, link))
		return false;

	l = *link;
	if (peer_id) {
		char peer_str[NODE_ID_STR_LEN] = {0,};

		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
		/* Broadcast receiver link name: "broadcast-link:<peer>" */
		snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
			 peer_str);
	} else {
		strcpy(l->name, tipc_bclink_name);
	}
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
	tipc_link_reset(l);
	l->state = LINK_RESET;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_toggle_rcast(net, false);

	return true;
}
/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;
	int old_state = l->state;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
}
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_inner_hdr(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
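
/* The profile above is a simple size histogram: bucket 1 counts messages up
 * to 256 bytes, bucket 2 up to 1024, and so on, with the last bucket catching
 * everything above 32768 bytes.  For fragmented messages only the first
 * fragment is counted, using the length of the original (inner) message, so
 * the histogram reflects user message sizes rather than wire packet sizes.
 */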
/**
 * tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Return: true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */
bool tipc_link_too_silent(struct tipc_link *l)
{
	return (l->silent_intv_cnt + 2 > l->abort_limit);
}
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		probe |= !skb_queue_empty(&l->deferdq);
		if (l->snd_nxt == l->checkpoint) {
			tipc_link_update_cwin(l, 0, 0);
			probe = true;
		}
		l->checkpoint = l->snd_nxt;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}
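
/* In short: on every node-timer tick an ESTABLISHED/SYNCHING link may emit a
 * STATE message (optionally a probe) when there is outstanding traffic or
 * silence to confirm, a RESET link keeps re-sending RESET_MSG with a back-off
 * governed by rst_cnt, and an ESTABLISHING link keeps sending ACTIVATE_MSG
 * until the peer confirms.
 */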
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff_head *wakeupq = &l->wakeupq;
	struct sk_buff_head *inputq = l->inputq;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head tmpq;
	int avail[5] = {0,};
	int imp = 0;

	__skb_queue_head_init(&tmpq);

	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

	skb_queue_walk_safe(wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (avail[imp] <= 0)
			continue;
		avail[imp]--;
		__skb_unlink(skb, wakeupq);
		__skb_queue_tail(&tmpq, skb);
	}

	spin_lock_bh(&inputq->lock);
	skb_queue_splice_tail(&tmpq, inputq);
	spin_unlock_bh(&inputq->lock);
}
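
/* The wakeup scheme is deliberately simple: each sender blocked by backlog
 * congestion gets one SOCK_WAKEUP pseudo message queued on wakeupq, and
 * link_prepare_wakeup() later releases at most as many waiters per importance
 * level as there is room left in that level's backlog, splicing the wakeups
 * onto the input queue for delivery to the sockets.
 */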
/**
 * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
 *                                     the given skb should be next attempted
 * @skb: skb to set a future retransmission time for
 * @l: link the skb will be transmitted on
 */
static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
					      struct tipc_link *l)
{
	if (link_is_bc_sndlink(l))
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
	else
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
}
void tipc_link_reset(struct tipc_link *l)
{
	struct sk_buff_head list;
	u32 imp;

	__skb_queue_head_init(&list);

	l->in_session = false;
	/* Force re-synch of peer session number before establishing */
	l->peer_session--;
	l->session++;
	l->mtu = l->advertised_mtu;

	spin_lock_bh(&l->wakeupq.lock);
	skb_queue_splice_init(&l->wakeupq, &list);
	spin_unlock_bh(&l->wakeupq.lock);

	spin_lock_bh(&l->inputq->lock);
	skb_queue_splice_init(&list, l->inputq);
	spin_unlock_bh(&l->inputq->lock);

	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	__skb_queue_purge(&l->backlogq);
	__skb_queue_purge(&l->failover_deferdq);
	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
		l->backlog[imp].len = 0;
		l->backlog[imp].target_bskb = NULL;
	}
	kfree_skb(l->reasm_buf);
	kfree_skb(l->reasm_tnlmsg);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->reasm_tnlmsg = NULL;
	l->failover_reasm_skb = NULL;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->silent_intv_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	int pkt_cnt = skb_queue_len(list);
	int imp = msg_importance(hdr);
	unsigned int mss = tipc_link_mss(l);
	unsigned int cwin = l->window;
	unsigned int mtu = l->mtu;
	bool new_bundle;
	int rc = 0;

	if (unlikely(msg_size(hdr) > mtu)) {
		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
			skb_queue_len(list), msg_user(hdr),
			msg_type(hdr), msg_size(hdr), mtu);
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while ((skb = __skb_dequeue(list))) {
		if (likely(skb_queue_len(transmq) < cwin)) {
			hdr = buf_msg(skb);
			msg_set_seqno(hdr, seqno);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				kfree_skb(skb);
				__skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_queue_tail(transmq, skb);
			tipc_link_set_skb_retransmit_time(skb, l);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
					mss, l->addr, &new_bundle)) {
			if (skb) {
				/* Keep a ref. to the skb for next try */
				l->backlog[imp].target_bskb = skb;
				l->backlog[imp].len++;
				__skb_queue_tail(backlogq, skb);
			} else {
				if (new_bundle) {
					l->stats.sent_bundles++;
					l->stats.sent_bundled++;
				}
				l->stats.sent_bundled++;
			}
			continue;
		}
		l->backlog[imp].target_bskb = NULL;
		l->backlog[imp].len += (1 + skb_queue_len(list));
		__skb_queue_tail(backlogq, skb);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}
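
/* Everything that fits into the current congestion window goes straight onto
 * the transmit queue (with a clone placed on xmitq for the wire); the rest is
 * parked on the backlog queue, where small messages may first be bundled into
 * an already queued packet before tipc_link_advance_backlog() moves them to
 * the transmit queue as the window opens up again.
 */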
static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted)
{
	int bklog_len = skb_queue_len(&l->backlogq);
	struct sk_buff_head *txq = &l->transmq;
	int txq_len = skb_queue_len(txq);
	u16 cwin = l->window;

	/* Enter fast recovery */
	if (unlikely(retransmitted)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = min_t(u16, l->ssthresh, l->window);
		return;
	}
	/* Enter slow start */
	if (unlikely(!released)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = l->min_win;
		return;
	}
	/* Don't increase window if no pressure on the transmit queue */
	if (txq_len + bklog_len < cwin)
		return;

	/* Don't increase window if there are holes in the transmit queue */
	if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
		return;

	l->cong_acks += released;

	/* Slow start */
	if (cwin <= l->ssthresh) {
		l->window = min_t(u16, cwin + released, l->max_win);
		return;
	}
	/* Congestion avoidance */
	if (l->cong_acks < cwin)
		return;
	l->window = min_t(u16, ++cwin, l->max_win);
	l->cong_acks = 0;
}
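
/* This mirrors classic TCP congestion control in miniature: the window grows
 * by the number of released packets while below ssthresh (slow start), grows
 * by one packet per window's worth of acks above it (congestion avoidance),
 * and is pulled back towards min_win/ssthresh when sending stalls or a
 * retransmission was needed.
 */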
static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *txq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	struct tipc_msg *hdr;
	u16 cwin = l->window;
	u32 imp;

	while (skb_queue_len(txq) < cwin) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		imp = msg_importance(hdr);
		l->backlog[imp].len--;
		if (unlikely(skb == l->backlog[imp].target_bskb))
			l->backlog[imp].target_bskb = NULL;
		__skb_queue_tail(&l->transmq, skb);
		tipc_link_set_skb_retransmit_time(skb, l);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}
/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver (= l in case of unicast)
 * @rc: returned code
 *
 * Return: true if repeated retransmit failures have happened, otherwise
 * false
 */
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
				    int *rc)
{
	struct sk_buff *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return false;

	if (!TIPC_SKB_CB(skb)->retr_cnt)
		return false;

	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
			msecs_to_jiffies(r->tolerance * 10)))
		return false;

	hdr = buf_msg(skb);
	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
		return false;

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, dest: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
	pr_info("retr_stamp %d, retr_cnt %d\n",
		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
		TIPC_SKB_CB(skb)->retr_cnt);

	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

	if (link_is_bc_sndlink(l)) {
		r->state = LINK_RESET;
		*rc |= TIPC_LINK_DOWN_EVT;
	} else {
		*rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}
	return true;
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
			skb_queue_tail(mc_inputq, skb);
			return true;
		}
		fallthrough;
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case GROUP_PROTOCOL:
		skb_queue_tail(mc_inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
#ifdef CONFIG_TIPC_CRYPTO
	case MSG_CRYPTO:
		tipc_crypto_msg_rcv(l->net, skb);
		return true;
#endif
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return true;
	}
}
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq,
			   struct sk_buff **reasm_skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int pos = 0;

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}

	kfree_skb(skb);
	return 0;
}
/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
 *			 inner message along with the ones in the old link's
 *			 deferdq
 * @skb: TUNNEL_PROTOCOL message
 * @inputq: queue to put messages ready for delivery
 */
static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
			     struct sk_buff_head *inputq)
{
	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
	struct sk_buff_head *fdefq = &l->failover_deferdq;
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	int ipos = 0;
	int rc = 0;
	u16 seqno;

	if (msg_type(hdr) == SYNCH_MSG) {
		kfree_skb(skb);
		return 0;
	}

	/* Not a fragment? */
	if (likely(!msg_nof_fragms(hdr))) {
		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
					    skb_queue_len(fdefq));
			return 0;
		}
		kfree_skb(skb);
	} else {
		/* Set fragment type for buf_append */
		if (msg_fragm_no(hdr) == 1)
			msg_set_type(hdr, FIRST_FRAGMENT);
		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
			msg_set_type(hdr, FRAGMENT);
		else
			msg_set_type(hdr, LAST_FRAGMENT);

		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
			/* Successful but non-complete reassembly? */
			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
				return 0;
			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		iskb = skb;
	}

	do {
		seqno = buf_seqno(iskb);
		if (unlikely(less(seqno, l->drop_point))) {
			kfree_skb(iskb);
			continue;
		}
		if (unlikely(seqno != l->drop_point)) {
			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
			continue;
		}

		l->drop_point++;
		if (!tipc_data_input(l, iskb, inputq))
			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
		if (unlikely(rc))
			break;
	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));

	return rc;
}
/**
 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
 * @ga: returned pointer to the Gap ACK blocks if any
 * @hdr: the PROTOCOL/STATE_MSG header
 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
 *
 * Return: the total Gap ACK blocks size
 */
u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
			  struct tipc_msg *hdr, bool uc)
{
	struct tipc_gap_ack_blks *p;
	u16 sz = 0;

	/* Does peer support the Gap ACK blocks feature? */
	if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
		p = (struct tipc_gap_ack_blks *)msg_data(hdr);
		sz = ntohs(p->len);
		/* Sanity check */
		if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
			/* Good, check if the desired type exists */
			if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
				goto ok;
		/* Backward compatible: peer might not support bc, but uc? */
		} else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
			if (p->ugack_cnt)
				goto ok;
		}
	}
	/* Other cases: ignore! */
	p = NULL;
	goto out;
ok:
	*ga = p;
out:
	return sz;
}
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index)
{
	struct tipc_gap_ack *gacks = &ga->gacks[start_index];
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 expect, seqno = 0;
	u8 n = 0;

	if (!skb)
		return 0;

	expect = buf_seqno(skb);
	skb_queue_walk(&l->deferdq, skb) {
		seqno = buf_seqno(skb);
		if (unlikely(more(seqno, expect))) {
			gacks[n].ack = htons(expect - 1);
			gacks[n].gap = htons(seqno - expect);
			if (++n >= MAX_GAP_ACK_BLKS / 2) {
				pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
						    l->name, n,
						    skb_queue_len(&l->deferdq));
				return n;
			}
		} else if (unlikely(less(seqno, expect))) {
			pr_warn("Unexpected skb in deferdq!\n");
			continue;
		}
		expect = seqno + 1;
	}

	/* Last block */
	gacks[n].ack = htons(seqno);
	gacks[n].gap = 0;
	n++;
	return n;
}
/* tipc_build_gap_ack_blks - build Gap ACK blocks
 * @l: tipc unicast link
 * @hdr: the tipc message buffer to store the Gap ACK blocks after built
 *
 * The function builds Gap ACK blocks for both the unicast & broadcast receiver
 * links of a certain peer, the buffer after built has the network data format
 * as found at the struct tipc_gap_ack_blks definition.
 *
 * returns the actual allocated memory size
 */
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct tipc_gap_ack_blks *ga;
	u16 len;

	ga = (struct tipc_gap_ack_blks *)msg_data(hdr);

	/* Start with broadcast link first */
	tipc_bcast_lock(bcl->net);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
	ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
	tipc_bcast_unlock(bcl->net);

	/* Now for unicast link, but an explicit NACK only (???) */
	ga->ugack_cnt = (msg_seq_gap(hdr)) ?
			__tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;

	len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
	ga->len = htons(len);
	return len;
}
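
/* Illustrative example of the encoding: with rcv_nxt at 101 and packets
 * 103-104 and 108 sitting in the deferred queue, __tipc_build_gap_ack_blks()
 * produces the blocks {ack=104, gap=3} and {ack=108, gap=0} -- each block
 * names the last packet received in sequence and the size of the hole that
 * follows it.  The leading hole (101-102) is conveyed separately through the
 * header's ack/seq_gap fields.  (Values are illustrative only; counts and
 * network byte order are handled above.)
 */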
/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
 *			       acked packets, also doing retransmissions if
 *			       gaps found
 * @l: tipc link with transmq queue to be advanced
 * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
 * @acked: seqno of last packet acked by peer without any gaps before
 * @gap: # of gap packets
 * @ga: buffer pointer to Gap ACK blocks from peer
 * @xmitq: queue for accumulating the retransmitted packets if any
 * @retransmitted: returned boolean value if a retransmission is really issued
 * @rc: returned code e.g. TIPC_LINK_DOWN_EVT if a repeated retransmit failures
 *      happens (- unlikely case)
 *
 * Return: the number of packets released from the link transmq
 */
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc)
{
	struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
	struct tipc_gap_ack *gacks = NULL;
	struct sk_buff *skb, *_skb, *tmp;
	struct tipc_msg *hdr;
	u32 qlen = skb_queue_len(&l->transmq);
	u16 nacked = acked, ngap = gap, gack_cnt = 0;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno, n = 0;
	u16 end = r->acked, start = end, offset = r->last_gap;
	u16 si = (last_ga) ? last_ga->start_index : 0;
	bool is_uc = !link_is_bc_sndlink(l);
	bool bc_has_acked = false;

	trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);

	/* Determine Gap ACK blocks if any for the particular link */
	if (ga && is_uc) {
		/* Get the Gap ACKs, uc part */
		gack_cnt = ga->ugack_cnt;
		gacks = &ga->gacks[ga->bgack_cnt];
	} else if (ga) {
		/* Copy the Gap ACKs, bc part, for later renewal if needed */
		this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
				  GFP_ATOMIC);
		if (likely(this_ga)) {
			this_ga->start_index = 0;
			/* Start with the bc Gap ACKs */
			gack_cnt = this_ga->bgack_cnt;
			gacks = &this_ga->gacks[0];
		} else {
			/* Hmm, we can get in trouble..., simply ignore it */
			pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
		}
	}

	/* Advance the link transmq */
	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		seqno = buf_seqno(skb);

next_gap_ack:
		if (less_eq(seqno, nacked)) {
			if (is_uc)
				goto release;
			/* Skip packets peer has already acked */
			if (!more(seqno, r->acked))
				continue;
			/* Get the next of last Gap ACK blocks */
			while (more(seqno, end)) {
				if (!last_ga || si >= last_ga->bgack_cnt)
					break;
				start = end + offset + 1;
				end = ntohs(last_ga->gacks[si].ack);
				offset = ntohs(last_ga->gacks[si].gap);
				si++;
				WARN_ONCE(more(start, end) ||
					  (!offset &&
					   si < last_ga->bgack_cnt) ||
					  si > MAX_GAP_ACK_BLKS,
					  "Corrupted Gap ACK: %d %d %d %d %d\n",
					  start, end, offset, si,
					  last_ga->bgack_cnt);
			}
			/* Check against the last Gap ACK block */
			if (in_range(seqno, start, end))
				continue;
			/* Update/release the packet peer is acking */
			bc_has_acked = true;
			if (--TIPC_SKB_CB(skb)->ackers)
				continue;
release:
			/* release skb */
			__skb_unlink(skb, &l->transmq);
			kfree_skb(skb);
		} else if (less_eq(seqno, nacked + ngap)) {
			/* First gap: check if repeated retrans failures? */
			if (unlikely(seqno == acked + 1 &&
				     link_retransmit_failure(l, r, rc))) {
				/* Ignore this bc Gap ACKs if any */
				kfree(this_ga);
				this_ga = NULL;
				break;
			}
			/* retransmit skb if unrestricted */
			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
				continue;
			tipc_link_set_skb_retransmit_time(skb, l);
			_skb = pskb_copy(skb, GFP_ATOMIC);
			if (!_skb)
				continue;
			hdr = buf_msg(_skb);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb->priority = TC_PRIO_CONTROL;
			__skb_queue_tail(xmitq, _skb);
			l->stats.retransmitted++;
			if (!is_uc)
				r->stats.retransmitted++;
			*retransmitted = true;
			/* Increase actual retrans counter & mark first time */
			if (!TIPC_SKB_CB(skb)->retr_cnt++)
				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
		} else {
			/* retry with Gap ACK blocks if any */
			if (n >= gack_cnt)
				break;
			nacked = ntohs(gacks[n].ack);
			ngap = ntohs(gacks[n].gap);
			n++;
			goto next_gap_ack;
		}
	}

	/* Renew last Gap ACK blocks for bc if needed */
	if (bc_has_acked) {
		if (this_ga) {
			kfree(last_ga);
			r->last_ga = this_ga;
			r->last_gap = this_ga->gacks[0].gap;
		} else if (last_ga) {
			if (less(acked, start)) {
				si--;
				offset = start - acked - 1;
			} else if (less(acked, end)) {
				acked = end;
			}
			if (si < last_ga->bgack_cnt) {
				last_ga->start_index = si;
				r->last_gap = offset;
			} else {
				kfree(last_ga);
				r->last_ga = NULL;
				r->last_gap = 0;
			}
		} else {
			r->last_gap = 0;
		}
		r->acked = acked;
	} else {
		kfree(this_ga);
	}

	return qlen - skb_queue_len(&l->transmq);
}
/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;

		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
		l->snd_nxt = l->rcv_nxt;
		return TIPC_LINK_SND_STATE;
	}
	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}
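
/* The "& 0xf" test above is the coordination mentioned in the comment: a
 * broadcast receiver only acks when the low nibble of (rcv_nxt ^ own address)
 * happens to be 0xf, so for any given point in the sequence space roughly one
 * node in sixteen responds instead of every receiver acking at once.
 */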
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}
/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;
	struct sk_buff_head *dfq = &l->deferdq;
	u32 defq_len = skb_queue_len(dfq);
	int match1, match2;

	if (link_is_bc_rcvlink(l)) {
		match1 = def_cnt & 0xf;
		match2 = tipc_own_addr(l->net) & 0xf;
		if (match1 == match2)
			return TIPC_LINK_SND_STATE;
		return 0;
	}

	if (defq_len >= 3 && !((defq_len - 3) % 16)) {
		u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
					  rcvgap, 0, 0, xmitq);
	}
	return 0;
}
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr = buf_msg(skb);
	u16 seqno, rcv_nxt, win_lim;
	int released = 0;
	int rc = 0;

	/* Verify and update link state */
	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
		return tipc_link_proto_rcv(l, skb, xmitq);

	/* Don't send probe at next timeout expiration */
	l->silent_intv_cnt = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			kfree_skb(skb);
			continue;
		}

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			continue;
		}
		released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
						      NULL, NULL, NULL, NULL);

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			if (!__tipc_skb_queue_sorted(defq, seqno, skb))
				l->stats.duplicates++;
			rc |= tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_pkts++;

		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
		else if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));

	/* Forward queues and wake up waiting users */
	if (released) {
		tipc_link_update_cwin(l, released, 0);
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
	return rc;
}
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct tipc_mon_state *mstate = &l->mon_state;
	struct sk_buff_head *dfq = &l->deferdq;
	struct tipc_link *bcl = l->bc_rcvlink;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool node_up = link_is_up(bcl);
	u16 glen = 0, bc_rcvgap = 0;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if ((probe || probe_reply) && !skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_ack_invalid(hdr, !node_up);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
			msg_set_seqno(hdr, l->snd_nxt_state++);
		msg_set_seq_gap(hdr, rcvgap);
		bc_rcvgap = link_bc_rcv_gap(bcl);
		msg_set_bc_gap(hdr, bc_rcvgap);
		msg_set_probe(hdr, probe);
		msg_set_is_keepalive(hdr, probe || probe_reply);
		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
			glen = tipc_build_gap_ack_blks(l, hdr);
		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
		skb_trim(skb, INT_H_SIZE + glen + dlen);
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		if (mtyp == ACTIVATE_MSG) {
			msg_set_dest_session_valid(hdr, 1);
			msg_set_dest_session(hdr, l->peer_session);
		}
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	if (bc_rcvgap)
		bcl->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
	trace_tipc_proto_build(skb, false, l->name);
}
void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 onode = tipc_own_addr(l->net);
	struct tipc_msg *hdr, *ihdr;
	struct sk_buff_head tnlq;
	struct sk_buff *skb;
	u32 dnode = l->addr;

	__skb_queue_head_init(&tnlq);
	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
			      INT_H_SIZE, BASIC_H_SIZE,
			      dnode, onode, 0, 0, 0);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}

	hdr = buf_msg(skb);
	msg_set_msgcnt(hdr, 1);
	msg_set_bearer_id(hdr, l->peer_bearer_id);

	ihdr = (struct tipc_msg *)msg_data(hdr);
	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
		      BASIC_H_SIZE, dnode);
	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
	__skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, xmitq);
}
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq, frags;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;
	bool pktcnt_need_update = false;
	u16 syncpt;
	int rc = 0;

	__skb_queue_head_init(&tnlq);
	/* Link Synching:
	 * From now on, send only one single ("dummy") SYNCH message
	 * to peer. The SYNCH message does not contain any data, just
	 * a header conveying the synch point to the peer.
	 */
	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
					 INT_H_SIZE, 0, l->addr,
					 tipc_own_addr(l->net),
					 0, 0, 0);
		if (!tnlskb) {
			pr_warn("%sunable to create dummy SYNCH_MSG\n",
				link_co_err);
			return;
		}

		hdr = buf_msg(tnlskb);
		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
		msg_set_syncpt(hdr, syncpt);
		msg_set_bearer_id(hdr, l->peer_bearer_id);
		__skb_queue_tail(&tnlq, tnlskb);
		tipc_link_xmit(tnl, &tnlq, xmitq);
		return;
	}

	__skb_queue_head_init(&tmpxq);
	__skb_queue_head_init(&frags);
	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	__skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	if (mtyp == SYNCH_MSG)
		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
	else
		pktcnt = skb_queue_len(&l->transmq);
	pktcnt += skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);

		/* Tunnel link MTU is not large enough? This could be
		 * due to:
		 * 1) Link MTU has just changed or set differently;
		 * 2) Or FAILOVER on the top of a SYNCH message
		 *
		 * The 2nd case should not happen if peer supports
		 * TIPC_TUNNEL_ENHANCED
		 */
		if (pktlen > tnl->mtu - INT_H_SIZE) {
			if (mtyp == FAILOVER_MSG &&
			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
						       &frags);
				if (rc) {
					pr_warn("%sunable to frag msg: rc %d\n",
						link_co_err, rc);
					return;
				}
				pktcnt += skb_queue_len(&frags) - 1;
				pktcnt_need_update = true;
				skb_queue_splice_tail_init(&frags, &tnlq);
				continue;
			}
			/* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED
			 * => Just warn it and return!
			 */
			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
					    link_co_err, msg_user(hdr),
					    msg_type(hdr), msg_size(hdr));
			return;
		}

		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	if (pktcnt_need_update)
		skb_queue_walk(&tnlq, skb) {
			hdr = buf_msg(skb);
			msg_set_msgcnt(hdr, pktcnt);
		}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;

		/* Failover the link's deferdq */
		if (unlikely(!skb_queue_empty(fdefq))) {
			pr_warn("Link failover deferdq not empty: %d!\n",
				skb_queue_len(fdefq));
			__skb_queue_purge(fdefq);
		}
		skb_queue_splice_init(&l->deferdq, fdefq);
	}
}
/**
 * tipc_link_failover_prepare() - prepare tnl for link failover
 *
 * This is a special version of the precursor - tipc_link_tnl_prepare(),
 * see the tipc_node_link_failover() for details
 *
 * @xmitq: queue for messages to be xmited
 */
void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
				struct sk_buff_head *xmitq)
{
	struct sk_buff_head *fdefq = &tnl->failover_deferdq;

	tipc_link_create_dummy_tnl_msg(tnl, xmitq);

	/* This failover link endpoint was never established before,
	 * so it has not received anything from peer.
	 * Otherwise, it must be a normal failover situation or the
	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
	 * would have to start over from scratch instead.
	 */
	tnl->drop_point = 1;
	tnl->failover_reasm_skb = NULL;

	/* Initiate the link's failover deferdq */
	if (unlikely(!skb_queue_empty(fdefq))) {
		pr_warn("Link failover deferdq not empty: %d!\n",
			skb_queue_len(fdefq));
		__skb_queue_purge(fdefq);
	}
}
/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
	u16 curr_session = l->peer_session;
	u16 session = msg_session(hdr);
	int mtyp = msg_type(hdr);

	if (msg_user(hdr) != LINK_PROTOCOL)
		return true;

	switch (mtyp) {
	case RESET_MSG:
		if (!l->in_session)
			return true;
		/* Accept only RESET with new session number */
		return more(session, curr_session);
	case ACTIVATE_MSG:
		if (!l->in_session)
			return true;
		/* Accept only ACTIVATE with new or current session number */
		return !less(session, curr_session);
	case STATE_MSG:
		/* Accept only STATE with current session number */
		if (!l->in_session)
			return false;
		if (session != curr_session)
			return false;
		/* Extra sanity check */
		if (!link_is_up(l) && msg_ack(hdr))
			return false;
		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
			return true;
		/* Accept only STATE with new sequence number */
		return !less(msg_seqno(hdr), l->rcv_nxt_state);
	default:
		return false;
	}
}
/* tipc_link_proto_rcv(): receive link level protocol message
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_gap_ack_blks *ga = NULL;
	bool reply = msg_probe(hdr), retransmitted = false;
	u16 dlen = msg_data_sz(hdr), glen = 0;
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 ack = msg_ack(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcvgap = 0;
	int mtyp = msg_type(hdr);
	int rc = 0, released;
	char *if_name;
	void *data;

	trace_tipc_proto_rcv(skb, false, l->name);
	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
		goto exit;
	}

	switch (mtyp) {
	case RESET_MSG:
	case ACTIVATE_MSG:
		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* If peer is going down we want full re-establish cycle */
		if (msg_peer_stopping(hdr)) {
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
			break;
		}

		/* If this endpoint was re-created while peer was ESTABLISHING
		 * it doesn't know current session number. Force re-synch.
		 */
		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
		    l->session != msg_dest_session(hdr)) {
			if (less(l->session, msg_dest_session(hdr)))
				l->session = msg_dest_session(hdr) + 1;
			break;
		}

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (mtyp == RESET_MSG || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->in_session = true;
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:
		l->rcv_nxt_state = msg_seqno(hdr) + 1;

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
			l->tolerance = peers_tol;
			l->bc_rcvlink->tolerance = peers_tol;
		}
		/* Update own prio if peer indicates a different value */
		if ((peers_prio != l->priority) &&
		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Receive Gap ACK blocks from peer if any */
		glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);

		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if ((reply || msg_is_keepalive(hdr)) &&
		    more(peers_snd_nxt, rcv_nxt) &&
		    !tipc_link_is_synching(l) &&
		    skb_queue_empty(&l->deferdq))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || reply)
			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
						  rcvgap, 0, 0, xmitq);

		released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
						     &retransmitted, &rc);
		if (gap)
			l->stats.recv_nacks++;
		if (released || retransmitted)
			tipc_link_update_cwin(l, released, retransmitted);
		if (released)
			tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
	tipc_link_xmit(l, &list, xmitq);
}
/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			  struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
	int rc = 0;

	if (!link_is_up(l))
		return rc;

	if (!msg_peer_node_is_up(hdr))
		return rc;

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
	if (msg_ack(hdr))
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)
		return rc;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return rc;

	l->snd_nxt = peers_snd_nxt;
	if (link_bc_rcv_gap(l))
		rc |= TIPC_LINK_SND_STATE;

	/* Return now if sender supports nack via STATE messages */
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

	/* Otherwise, be backwards compatible */

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return rc;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return rc;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return rc;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}
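
/* tipc_link_bc_ack_rcv(): process a broadcast ack, and optionally a Gap ACK
 * block, received on the reverse link 'r', and advance the transmit queue of
 * the corresponding broadcast send link.
 */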
int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
			 struct tipc_gap_ack_blks *ga,
			 struct sk_buff_head *xmitq,
			 struct sk_buff_head *retrq)
{
	struct tipc_link *l = r->bc_sndlink;
	bool unused = false;
	int rc = 0;

	if (!link_is_up(r) || !r->bc_peer_is_up)
		return 0;

	if (gap) {
		l->stats.recv_nacks++;
		r->stats.recv_nacks++;
	}

	if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
		return 0;

	trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
	tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);

	tipc_link_advance_backlog(l, xmitq);
	if (unlikely(!skb_queue_empty(&l->wakeupq)))
		link_prepare_wakeup(l);

	return rc;
}
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
					  xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}
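
/* tipc_link_set_queue_limits(): set the send window bounds and derive the
 * per-importance backlog limits from min_win (x2/x4/x6/x8 for LOW through
 * CRITICAL importance). SYSTEM importance is instead bounded by the number
 * of publication items that fit into one bulk distribution at the current
 * link MTU.
 */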
void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->min_win = min_win;
	l->ssthresh = max_win;
	l->max_win = max_win;
	l->window = min_win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = min_win * 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = min_win * 4;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = min_win * 6;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}
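
/* link_print(): log a one-line summary of link state plus transmit and
 * backlog queue positions, prefixed by the caller-supplied string.
 */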
static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
					  tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 max_win;

		max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
			return -EINVAL;
	}

	return 0;
}
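
/* __tipc_nl_add_stats(): nest the unicast link counters as a
 * TIPC_NLA_LINK_STATS attribute block in the netlink message being built.
 */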
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
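
/* __tipc_nl_add_bc_link_stat(): same as __tipc_nl_add_stats(), but for the
 * broadcast link counter set.
 */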
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
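
/* tipc_nl_add_bc_link(): append a TIPC_NLA_LINK attribute block describing
 * the broadcast link to a netlink dump message; tipc_bcast_lock() is held
 * while the link state is read.
 */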
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
			struct tipc_link *bcl)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	u32 bc_mode = tipc_bcast_get_mode(net);
	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
		goto prop_msg_full;
	if (bc_mode & BCLINK_MODE_SEL)
		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
				bc_ratio))
			goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
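
/* Runtime property setters: each updates the local link value and, where the
 * peer must be informed, queues a STATE_MSG carrying the new tolerance or
 * priority on 'xmitq'.
 */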
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}
/**
 * tipc_link_dump - dump TIPC link data
 * @l: tipc link to be dumped
 * @dqueues: bitmask to decide if any link queue to be dumped?
 *           - TIPC_DUMP_NONE: don't dump link queues
 *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
 *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
 *           - TIPC_DUMP_DEFERDQ: dump link deferd queue
 *           - TIPC_DUMP_INPUTQ: dump link input queue
 *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
 *           - TIPC_DUMP_ALL: dump all the link queues above
 * @buf: returned buffer of dump data in format
 */
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
	struct sk_buff_head *list;
	struct sk_buff *hskb, *tskb;
	u32 len;

	if (!l) {
		i += scnprintf(buf, sz, "link data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "link data: %x", l->addr);
	i += scnprintf(buf + i, sz - i, " %x", l->state);
	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
	i += scnprintf(buf + i, sz - i, " %u", l->session);
	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", l->acked);

	list = &l->transmq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->deferdq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->backlogq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = l->inputq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	if (dqueues & TIPC_DUMP_TRANSMQ) {
		i += scnprintf(buf + i, sz - i, "transmq: ");
		i += tipc_list_dump(&l->transmq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_BACKLOGQ) {
		i += scnprintf(buf + i, sz - i,
			       "backlogq: <%u %u %u %u %u>, ",
			       l->backlog[TIPC_LOW_IMPORTANCE].len,
			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
		i += tipc_list_dump(&l->backlogq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_DEFERDQ) {
		i += scnprintf(buf + i, sz - i, "deferdq: ");
		i += tipc_list_dump(&l->deferdq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_INPUTQ) {
		i += scnprintf(buf + i, sz - i, "inputq: ");
		i += tipc_list_dump(l->inputq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_WAKEUP) {
		i += scnprintf(buf + i, sz - i, "wakeup: ");
		i += tipc_list_dump(&l->wakeupq, false, buf + i);
	}

	return i;
}