/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "core.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

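/* tipc_bclink_lock/tipc_bclink_unlock - serialize access to the broadcast
 * link instance of the given network namespace
 */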
static void tipc_bclink_lock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
}

static void tipc_bclink_unlock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_unlock_bh(&tn->bclink->lock);
}

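/* tipc_bclink_input - deliver queued broadcast messages from the arrival
 * queue to the local multicast receivers (sockets)
 */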
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}

uint tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

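/* The number of destination nodes that still have to acknowledge a broadcast
 * buffer is kept in the 'handle' field of the TIPC skb control block.
 */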
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

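/* tipc_bclink_add_node - add a destination node to the broadcast link's
 * node map
 */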
void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);

	/* Last node? => reset backlog queue */
	if (!tn->bclink->bcast_nodes.count)
		tipc_link_purge_backlog(&tn->bclink->link);

	tipc_bclink_unlock(net);
}

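/* bclink_set_last_sent - record the sequence number of the last packet sent
 * on the broadcast link; the link's otherwise unused 'silent_intv_cnt' field
 * is reused to hold this value.
 */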
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}

u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

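/* bclink_update_last_sent - advance a peer node's 'last_sent' broadcast
 * sequence number, never moving it backwards
 */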
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
		seqno : node->bclink.last_sent;
}

/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

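/* tipc_bclink_sync_state - synchronize broadcast link state with a peer,
 * based on broadcast information piggy-backed on an incoming link protocol
 * message
 */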
void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	u16 last = msg_last_bcast(hdr);
	int mtyp = msg_type(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;
	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}
	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
		return;
	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}

/* bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}

/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb))
		return -EHOSTUNREACH;

	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}

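/* tipc_bclink_acks_missing - determine if a node still owes acknowledgements
 * for broadcast packets sent to it
 */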
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}

/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

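/* __tipc_nl_add_bc_link_stat - nest the broadcast link statistics counters
 * into a netlink message
 */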
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

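/* tipc_nl_add_bc_link - append a netlink attribute block describing the
 * broadcast link (state, name, window and statistics) to a netlink message
 */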
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bclink_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}

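/* tipc_bclink_set_queue_limits - validate and apply a new broadcast link
 * window size
 */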
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

	return tipc_bclink_set_queue_limits(net, win);
}

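/* tipc_bclink_init - create and initialize the broadcast pseudo-bearer and
 * broadcast link for a network namespace
 */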
int tipc_bclink_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&bclink->lock);
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
	skb_queue_head_init(&bcl->wakeupq);
	bcl->snd_nxt = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
	return 0;
}

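/* tipc_bclink_stop - tear down the broadcast link and its pseudo-bearer */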
void tipc_bclink_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bclink);
}

/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}