 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
#include "name_distr.h"
#include <linux/pkt_sched.h>
	u32 sent_info;			/* used in counting # sent packets */
	u32 recv_info;			/* used in counting # recv'd packets */
	u32 link_congs;			/* # port sends blocked by congestion */
	u32 max_queue_sz;		/* send queue size high water mark */
	u32 accu_queue_sz;		/* used for send queue size profiling */
	u32 queue_sz_counts;		/* used for send queue size profiling */
	u32 msg_length_counts;		/* used for message length profiling */
	u32 msg_lengths_total;		/* used for message length profiling */
	u32 msg_length_profile[7];	/* used for msg. length profiling */
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @last_retransmitted: sequence number of most recently retransmitted message
 * @stale_count: # of identical retransmit requests made by peer
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: seq # of last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue of saved OOS b'cast messages received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
	char name[TIPC_MAX_LINK_NAME];

	/* Management and link supervision data */
	char if_name[TIPC_MAX_IF_NAME];
	struct tipc_mon_state mon_state;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */

	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;

	struct tipc_stats stats;
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000
enum {
	LINK_ESTABLISHED	= 0xe,
	LINK_ESTABLISHING	= 0xe  << 4,
	LINK_RESET		= 0x1  << 8,
	LINK_RESETTING		= 0x2  << 12,
	LINK_PEER_RESET		= 0xd  << 16,
	LINK_FAILINGOVER	= 0xf  << 20,
	LINK_SYNCHING		= 0xc  << 24
};
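/* Each state above occupies its own bit group, so "is the link in any of
 * these states?" is a single bitwise AND, as in link_is_up() below.
 * A minimal sketch (the helper name is illustrative, not part of this file):
 */
static inline bool link_in_state(struct tipc_link *l, u32 state_set)
{
	return !!(l->state & state_set);	/* e.g. LINK_RESET | LINK_FAILINGOVER */
}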
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
	return link_is_up(l);

bool tipc_link_peer_is_down(struct tipc_link *l)
	return l->state == LINK_PEER_RESET;

bool tipc_link_is_reset(struct tipc_link *l)
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);

bool tipc_link_is_establishing(struct tipc_link *l)
	return l->state == LINK_ESTABLISHING;

bool tipc_link_is_synching(struct tipc_link *l)
	return l->state == LINK_SYNCHING;

bool tipc_link_is_failingover(struct tipc_link *l)
	return l->state == LINK_FAILINGOVER;

bool tipc_link_is_blocked(struct tipc_link *l)
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
static bool link_is_bc_sndlink(struct tipc_link *l)
	return !l->bc_sndlink;

static bool link_is_bc_rcvlink(struct tipc_link *l)
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
int tipc_link_is_active(struct tipc_link *l)

void tipc_link_set_active(struct tipc_link *l, bool active)

u32 tipc_link_id(struct tipc_link *l)
	return l->peer_bearer_id << 16 | l->bearer_id;

int tipc_link_window(struct tipc_link *l)

int tipc_link_prio(struct tipc_link *l)

unsigned long tipc_link_tolerance(struct tipc_link *l)

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)

char tipc_link_plane(struct tipc_link *l)
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
	u16 ack = snd_l->snd_nxt - 1;

	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
int tipc_link_bc_peers(struct tipc_link *l)

void tipc_link_set_mtu(struct tipc_link *l, int mtu)

int tipc_link_mtu(struct tipc_link *l)

u16 tipc_link_rcv_nxt(struct tipc_link *l)

u16 tipc_link_acked(struct tipc_link *l)

char *tipc_link_name(struct tipc_link *l)
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A, B, C, ...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	l->session = session;

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy(l->if_name, if_name);
	l->peer_caps = peer_caps;
	l->peer_session = ANY_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
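/* A minimal caller sketch for tipc_link_create(); the surrounding variables
 * are illustrative assumptions about the caller, not taken from this file.
 */
#if 0	/* illustrative sketch only */
	struct tipc_link *l;

	if (!tipc_link_create(net, if_name, bearer_id, tolerance, net_plane,
			      mtu, priority, window, session, ownnode, peer,
			      peer_caps, bc_sndlink, bc_rcvlink,
			      inputq, namedq, &l))
		return false;
	/* link starts in LINK_RESETTING; the FSM and timer bring it up */
#endif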
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))

	strcpy(l->name, tipc_bclink_name);
	l->state = LINK_RESET;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
	case LINK_PEER_RESET_EVT:
		l->state = LINK_PEER_RESET;
		l->state = LINK_RESET;
	case LINK_FAILURE_EVT:
	case LINK_FAILOVER_BEGIN_EVT:
	case LINK_ESTABLISH_EVT:
	case LINK_FAILOVER_END_EVT:
	case LINK_SYNCH_BEGIN_EVT:
	case LINK_SYNCH_END_EVT:
	case LINK_PEER_RESET_EVT:
		l->state = LINK_ESTABLISHING;
	case LINK_FAILOVER_BEGIN_EVT:
		l->state = LINK_FAILINGOVER;
	case LINK_FAILURE_EVT:
	case LINK_ESTABLISH_EVT:
	case LINK_FAILOVER_END_EVT:
	case LINK_SYNCH_BEGIN_EVT:
	case LINK_SYNCH_END_EVT:
	case LINK_PEER_RESET:
		l->state = LINK_ESTABLISHING;
	case LINK_PEER_RESET_EVT:
	case LINK_ESTABLISH_EVT:
	case LINK_FAILURE_EVT:
	case LINK_SYNCH_BEGIN_EVT:
	case LINK_SYNCH_END_EVT:
	case LINK_FAILOVER_BEGIN_EVT:
	case LINK_FAILOVER_END_EVT:
	case LINK_FAILINGOVER:
	case LINK_FAILOVER_END_EVT:
		l->state = LINK_RESET;
	case LINK_PEER_RESET_EVT:
	case LINK_ESTABLISH_EVT:
	case LINK_FAILURE_EVT:
	case LINK_FAILOVER_BEGIN_EVT:
	case LINK_SYNCH_BEGIN_EVT:
	case LINK_SYNCH_END_EVT:
	case LINK_ESTABLISHING:
	case LINK_ESTABLISH_EVT:
		l->state = LINK_ESTABLISHED;
	case LINK_FAILOVER_BEGIN_EVT:
		l->state = LINK_FAILINGOVER;
		l->state = LINK_RESET;
	case LINK_FAILURE_EVT:
	case LINK_PEER_RESET_EVT:
	case LINK_SYNCH_BEGIN_EVT:
	case LINK_FAILOVER_END_EVT:
	case LINK_SYNCH_END_EVT:
	case LINK_ESTABLISHED:
	case LINK_PEER_RESET_EVT:
		l->state = LINK_PEER_RESET;
		rc |= TIPC_LINK_DOWN_EVT;
	case LINK_FAILURE_EVT:
		l->state = LINK_RESETTING;
		rc |= TIPC_LINK_DOWN_EVT;
		l->state = LINK_RESET;
	case LINK_ESTABLISH_EVT:
	case LINK_SYNCH_END_EVT:
	case LINK_SYNCH_BEGIN_EVT:
		l->state = LINK_SYNCHING;
	case LINK_FAILOVER_BEGIN_EVT:
	case LINK_FAILOVER_END_EVT:
	case LINK_PEER_RESET_EVT:
		l->state = LINK_PEER_RESET;
		rc |= TIPC_LINK_DOWN_EVT;
	case LINK_FAILURE_EVT:
		l->state = LINK_RESETTING;
		rc |= TIPC_LINK_DOWN_EVT;
		l->state = LINK_RESET;
	case LINK_ESTABLISH_EVT:
	case LINK_SYNCH_BEGIN_EVT:
	case LINK_SYNCH_END_EVT:
		l->state = LINK_ESTABLISHED;
	case LINK_FAILOVER_BEGIN_EVT:
	case LINK_FAILOVER_END_EVT:
	pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
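/* A minimal sketch of how the FSM is driven (the caller-side handling is an
 * illustrative assumption): feed an event in, then act on the returned
 * TIPC_LINK_UP_EVT/TIPC_LINK_DOWN_EVT bits.
 */
#if 0	/* illustrative sketch only */
	int rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);

	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_link_reset(l);	/* caller performs the actual teardown */
#endif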
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
	struct tipc_msg *msg;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
		length = msg_size(msg_get_wrapped(msg));

	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
		l->stats.msg_length_profile[6]++;
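/* The branches above bin each sent message by size: profile[1] covers
 * lengths up to 256 bytes, [2] up to 1024, [3] up to 4096, [4] up to 16384,
 * [5] up to 32768 and [6] everything larger, with [0] catching the smallest
 * messages. E.g. a 1200-byte message lands in msg_length_profile[3].
 */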
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	case LINK_ESTABLISHED:
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
	case LINK_ESTABLISHING:
	case LINK_PEER_RESET:
	case LINK_FAILINGOVER:

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message whose transmission was attempted
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = tipc_own_addr(link->net);

	/* This really cannot happen... */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);

	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
void link_prepare_wakeup(struct tipc_link *l)
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
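/* Worked example: with l->window == 50 and backlog[imp].limit == 50, lim is
 * 100, so waiting users of that importance are woken only while the sum of
 * already backlogged messages and newly promised ones (pnd[imp]) stays
 * below 100.
 */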
void tipc_link_reset(struct tipc_link *l)
	l->peer_session = ANY_SESSION;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->failover_reasm_skb = NULL;
	l->silent_intv_cnt = 0;
	l->stats.recv_info = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
				skb_queue_purge(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
	l->backlog[imp].len += skb_queue_len(list);
	skb_queue_splice_tail_init(list, backlogq);
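/* A minimal sender-side sketch (the final bearer-send step is an assumption
 * about the caller, not shown in this file): build the chain, let the link
 * queue it, then push whatever ended up in xmitq onto the wire.
 */
#if 0	/* illustrative sketch only */
	struct sk_buff_head list, xmitq;
	int rc;

	__skb_queue_head_init(&list);
	__skb_queue_head_init(&xmitq);
	/* fill "list" with one fragmented/bundled message chain */
	rc = tipc_link_xmit(l, &list, &xmitq);
	if (rc == -ELINKCONG)
		;	/* chain not consumed; sender may retry or wait for wakeup */
	/* caller transmits the clones collected in xmitq via the bearer */
#endif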
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		_skb = skb_clone(skb, GFP_ATOMIC);
		__skb_dequeue(&l->backlogq);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "Resetting link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		skb_queue_tail(inputq, skb);
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		pr_warn("Dropping received illegal msg type\n");
/* tipc_link_input - process packet that has passed link protocol check
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
		if (!tipc_msg_extract(skb, &iskb, &ipos))
		if (less(msg_seqno(hdr), l->drop_point))
		if (tipc_data_input(l, skb, inputq))
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
		__skb_unlink(skb, &l->transmq);
/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
		return TIPC_LINK_SND_BC_ACK;

	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
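/* The test above staggers broadcast acks: a broadcast receiver link only
 * asks for a BC ACK when the low four bits of (rcv_nxt XOR own address) are
 * all ones, i.e. roughly once per 16 received packets and at an offset that
 * differs per node, so acks from many nodes do not hit the sender in one
 * burst.
 */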
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq)
	u32 def_cnt = ++l->stats.deferred_recv;

	if (link_is_bc_rcvlink(l))

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
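/* NACK pacing: a state message carrying the gap goes out when the first
 * out-of-order packet is deferred (queue length == 1) and thereafter only on
 * every TIPC_NACK_INTV-th deferral, so a long sequence gap does not trigger
 * one NACK per arriving packet.
 */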
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;

		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);

		/* Deliver packet */
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
	} while ((skb = __skb_dequeue(defq)));
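/* A minimal receive-path sketch (delivery of l->inputq to sockets is an
 * assumption about the caller, not shown here): hand each arriving buffer to
 * the link, then send any protocol/ack traffic it produced.
 */
#if 0	/* illustrative sketch only */
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);
	rc = tipc_link_rcv(l, skb, &xmitq);
	if (rc & TIPC_LINK_UP_EVT)
		;	/* caller moves the link/node state to "up" */
	/* caller transmits xmitq via the bearer and delivers l->inputq */
#endif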
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(l->bc_rcvlink);
	struct tipc_mon_state *mstate = &l->mon_state;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);

	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_probe(hdr, probe);
		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + dlen);
		skb_trim(skb, INT_H_SIZE + dlen);
		l->stats.sent_states++;
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
		l->stats.sent_probes++;
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);

	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
			pr_warn("%sunable to send packet\n", link_co_err);
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);

	if (queue != &l->backlogq) {
		queue = &l->backlogq;

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
/* tipc_link_proto_rcv(): receive link level protocol message:
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
	struct tipc_msg *hdr = buf_msg(skb);
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 dlen = msg_data_sz(hdr);
	int mtyp = msg_type(hdr);

	if (tipc_link_is_blocked(l) || !xmitq)

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	data = msg_data(hdr);

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != ANY_SESSION))

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (msg_peer_stopping(hdr))
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		else if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
					   TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;

		tipc_mon_rcv(l->net, data, dlen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 struct sk_buff_head *xmitq)
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
	tipc_link_xmit(l, &list, xmitq);
/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;

	if (l->peer_caps & TIPC_BCAST_SYNCH)

	if (msg_peer_node_is_up(hdr))

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			   struct sk_buff_head *xmitq)
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (!msg_peer_node_is_up(hdr))

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)

	if (!more(acked, l->acked))

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);

	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)

	if (mtyp != STATE_MSG)

	if (dnode == tipc_own_addr(l->net)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
		l->stats.recv_nacks++;

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
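/* Worked example: with win == 50 the backlog limits become 25 (LOW),
 * 50 (MEDIUM), 75 (HIGH) and 100 (CRITICAL) packets, while SYSTEM importance
 * gets max_bulk, sized so one full bulk of TIPC_MAX_PUBLICATIONS name-table
 * items always fits.
 */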
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
	memset(&l->stats, 0, sizeof(l->stats));
	if (!link_is_bc_sndlink(l)) {
		l->stats.sent_info = l->snd_nxt;
		l->stats.recv_info = l->rcv_nxt;
static void link_print(struct tipc_link *l, const char *str)
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);

	if (props[TIPC_NLA_PROP_PRIO]) {
		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)

	if (props[TIPC_NLA_PROP_TOL]) {
		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))

	if (props[TIPC_NLA_PROP_WIN]) {
		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
	struct nlattr *stats;

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))

	nla_nest_end(skb, stats);

	nla_nest_cancel(skb, stats);
/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	nla_nest_cancel(msg->skb, prop);
	nla_nest_cancel(msg->skb, attrs);
	genlmsg_cancel(msg->skb, hdr);
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
	struct nlattr *nest;

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))

	nla_nest_end(skb, nest);

	nla_nest_cancel(skb, nest);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
		tipc_bcast_unlock(net);

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	nla_nest_cancel(msg->skb, prop);
	nla_nest_cancel(msg->skb, attrs);
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
	l->abort_limit = limit;