/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "link.h"
#include "name_table.h"

#define BCLINK_WIN_DEFAULT  50	/* bcast link window size (default) */
#define BCLINK_WIN_MIN      32	/* bcast minimum link window size */

const char tipc_bclink_name[] = "broadcast-link";

/**
 * struct tipc_bc_base - base structure for keeping broadcast send state
 * @link: broadcast send link structure
 * @inputq: data input queue; will only carry SOCK_WAKEUP messages
 * @dests: array keeping number of reachable destinations per bearer
 * @primary_bearer: a bearer having links to all broadcast destinations, if any
 * @bcast_support: indicates if primary bearer, if any, supports broadcast
 * @force_bcast: forces broadcast for multicast traffic
 * @rcast_support: indicates if all peer nodes support replicast
 * @force_rcast: forces replicast for multicast traffic
 * @rc_ratio: dest count as percentage of cluster size where send method changes
 * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct sk_buff_head inputq;
	int dests[MAX_BEARERS];
	int primary_bearer;
	bool bcast_support;
	bool force_bcast;
	bool rcast_support;
	bool force_rcast;
	int rc_ratio;
	int bc_threshold;
};

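/* tipc_bc_base - fetch the broadcast base structure of this namespace */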
static struct tipc_bc_base *tipc_bc_base(struct net *net)
{
	return tipc_net(net)->bcbase;
}

/* tipc_bcast_get_mtu(): get the MTU currently used by the broadcast link
 * Note: the MTU is decremented to give room for a tunnel header, in
 * case the message needs to be sent as replicast
 */
int tipc_bcast_get_mtu(struct net *net)
{
	return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
}

void tipc_bcast_disable_rcast(struct net *net)
{
	tipc_bc_base(net)->rcast_support = false;
}

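/* tipc_bcbase_calc_bc_threshold - recompute the destination count above which
 * broadcast is preferred over replicast. For example, with the default
 * rc_ratio of 10 (set in tipc_bcast_init()) and 40 reachable peers, the
 * threshold becomes 1 + 40 * 10 / 100 = 5: multicasts to at most five
 * destinations are replicated, larger groups are broadcast.
 */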
static void tipc_bcbase_calc_bc_threshold(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));

	bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
}

/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
 * if any, and make it primary bearer
 */
static void tipc_bcbase_select_primary(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	int all_dests = tipc_link_bc_peers(bb->link);
	int i, mtu, prim;

	bb->primary_bearer = INVALID_BEARER_ID;
	bb->bcast_support = true;

	if (!all_dests)
		return;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (!bb->dests[i])
			continue;

		mtu = tipc_bearer_mtu(net, i);
		if (mtu < tipc_link_mtu(bb->link))
			tipc_link_set_mtu(bb->link, mtu);
		bb->bcast_support &= tipc_bearer_bcast_support(net, i);
		if (bb->dests[i] < all_dests)
			continue;

		bb->primary_bearer = i;

		/* Reduce risk that all nodes select same primary */
		if ((i ^ tipc_own_addr(net)) & 1)
			break;
	}
	prim = bb->primary_bearer;
	if (prim != INVALID_BEARER_ID)
		bb->bcast_support = tipc_bearer_bcast_support(net, prim);
}

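/* tipc_bcast_inc_bearer_dst_cnt - a broadcast destination on the given bearer
 * came up; account for it and re-evaluate the primary bearer
 */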
void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	tipc_bcast_lock(net);
	bb->dests[bearer_id]++;
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);
}

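/* tipc_bcast_dec_bearer_dst_cnt - a broadcast destination on the given bearer
 * went down; drop it from the count and re-evaluate the primary bearer
 */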
void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	tipc_bcast_lock(net);
	bb->dests[bearer_id]--;
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);
}

/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
 *
 * Note that number of reachable destinations, as indicated in the dests[]
 * array, may transitionally differ from the number of destinations indicated
 * in each sent buffer. We can sustain this. Excess destination nodes will
 * drop and never acknowledge the unexpected packets, and missing destinations
 * will either require retransmission (if they are just about to be added to
 * the bearer), or be removed from the buffer's 'ackers' counter (if they
 * just went down)
 */
static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	int bearer_id;
	struct tipc_bc_base *bb = tipc_bc_base(net);
	struct sk_buff *skb, *_skb;
	struct sk_buff_head _xmitq;

	if (skb_queue_empty(xmitq))
		return;

	/* The typical case: at least one bearer has links to all nodes */
	bearer_id = bb->primary_bearer;
	if (bearer_id >= 0) {
		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
		return;
	}

	/* We have to transmit across all bearers */
	skb_queue_head_init(&_xmitq);
	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		if (!bb->dests[bearer_id])
			continue;

		skb_queue_walk(xmitq, skb) {
			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!_skb)
				break;
			__skb_queue_tail(&_xmitq, _skb);
		}
		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
	}
	__skb_queue_purge(xmitq);
	__skb_queue_purge(&_xmitq);
}

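/* tipc_bcast_select_xmit_method - choose between replicast (one unicast copy
 * per destination) and true broadcast for the next multicast message. Forced
 * configuration and bearer/peer capabilities take precedence; in autoselect
 * mode the destination count is compared against the calculated bc_threshold.
 */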
static void tipc_bcast_select_xmit_method(struct net *net, int dests,
					  struct tipc_mc_method *method)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	unsigned long exp = method->expires;

	/* Broadcast supported by used bearer/bearers? */
	if (!bb->bcast_support) {
		method->rcast = true;
		return;
	}
	/* Any destinations which don't support replicast ? */
	if (!bb->rcast_support) {
		method->rcast = false;
		return;
	}
	/* Can current method be changed ? */
	method->expires = jiffies + TIPC_METHOD_EXPIRE;
	if (method->mandatory)
		return;

	if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) &&
	    time_before(jiffies, exp))
		return;

	/* Configuration as force 'broadcast' method */
	if (bb->force_bcast) {
		method->rcast = false;
		return;
	}
	/* Configuration as force 'replicast' method */
	if (bb->force_rcast) {
		method->rcast = true;
		return;
	}
	/* Configuration as 'autoselect' or default: determine method now */
	method->rcast = dests <= bb->bc_threshold;
}

/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise errno: -EHOSTUNREACH, -EMSGSIZE
 */
static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
			   u16 *cong_link_cnt)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq;
	int rc = 0;

	skb_queue_head_init(&xmitq);
	tipc_bcast_lock(net);
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, pkts, &xmitq);
	tipc_bcast_unlock(net);
	tipc_bcbase_xmit(net, &xmitq);
	__skb_queue_purge(pkts);
	if (rc == -ELINKCONG) {
		*cong_link_cnt = 1;
		rc = 0;
	}
	return rc;
}

/* tipc_rcast_xmit - replicate and send a message to given destination nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @dests: list of destination nodes
 * @cong_link_cnt: returns number of congested links
 * Returns 0 if success, otherwise errno
 */
static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
			   struct tipc_nlist *dests, u16 *cong_link_cnt)
{
	struct tipc_dest *dst, *tmp;
	struct sk_buff_head _pkts;
	u32 dnode, selector;

	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
	skb_queue_head_init(&_pkts);

	list_for_each_entry_safe(dst, tmp, &dests->list, list) {
		dnode = dst->node;
		if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))
			return -ENOMEM;

		/* Any other return value than -ELINKCONG is ignored */
		if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG)
			(*cong_link_cnt)++;
	}
	return 0;
}

/* tipc_mcast_send_sync - deliver a dummy message with SYN bit
 * @net: the applicable net namespace
 * @skb: socket buffer to copy
 * @method: send method to be used
 * @dests: destination nodes for message.
 * @cong_link_cnt: returns number of encountered congested destination links
 * Returns 0 if success, otherwise errno
 */
static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
				struct tipc_mc_method *method,
				struct tipc_nlist *dests,
				u16 *cong_link_cnt)
{
	struct tipc_msg *hdr, *_hdr;
	struct sk_buff_head tmpq;
	struct sk_buff *_skb;

	/* Does the cluster support the rcast/bcast synchronization protocol? */
	if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL))
		return 0;

	hdr = buf_msg(skb);
	if (msg_user(hdr) == MSG_FRAGMENTER)
		hdr = msg_get_wrapped(hdr);
	if (msg_type(hdr) != TIPC_MCAST_MSG)
		return 0;

	/* Allocate dummy message */
	_skb = tipc_buf_acquire(MCAST_H_SIZE, GFP_KERNEL);
	if (!_skb)
		return -ENOMEM;

	/* Prepare the 'synching' header */
	msg_set_syn(hdr, 1);

	/* Copy skb's header into a dummy header */
	skb_copy_to_linear_data(_skb, hdr, MCAST_H_SIZE);
	skb_orphan(_skb);

	/* Reverse method for dummy message */
	_hdr = buf_msg(_skb);
	msg_set_size(_hdr, MCAST_H_SIZE);
	msg_set_is_rcast(_hdr, !msg_is_rcast(hdr));

	skb_queue_head_init(&tmpq);
	__skb_queue_tail(&tmpq, _skb);
	if (method->rcast)
		tipc_bcast_xmit(net, &tmpq, cong_link_cnt);
	else
		tipc_rcast_xmit(net, &tmpq, dests, cong_link_cnt);

	/* This queue should normally be empty by now */
	__skb_queue_purge(&tmpq);

	return 0;
}

/* tipc_mcast_xmit - deliver message to indicated destination nodes
 *                   and to identified node local sockets
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @method: send method to be used
 * @dests: destination nodes for message.
 * @cong_link_cnt: returns number of encountered congested destination links
 * Consumes buffer chain.
 * Returns 0 if success, otherwise errno
 */
int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
		    struct tipc_mc_method *method, struct tipc_nlist *dests,
		    u16 *cong_link_cnt)
{
	struct sk_buff_head inputq, localq;
	bool rcast = method->rcast;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	int rc = 0;

	skb_queue_head_init(&inputq);
	skb_queue_head_init(&localq);

	/* Clone packets before they are consumed by next call */
	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
		rc = -ENOMEM;
		goto exit;
	}
	/* Send according to determined transmit method */
	if (dests->remote) {
		tipc_bcast_select_xmit_method(net, dests->remote, method);

		skb = skb_peek(pkts);
		hdr = buf_msg(skb);
		if (msg_user(hdr) == MSG_FRAGMENTER)
			hdr = msg_get_wrapped(hdr);
		msg_set_is_rcast(hdr, method->rcast);

		/* Switch method ? */
		if (rcast != method->rcast)
			tipc_mcast_send_sync(net, skb, method,
					     dests, cong_link_cnt);

		if (method->rcast)
			rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
		else
			rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
	}

	if (dests->local)
		tipc_sk_mcast_rcv(net, &localq, &inputq);
exit:
	/* This queue should normally be empty by now */
	__skb_queue_purge(pkts);
	return rc;
}

/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
		kfree_skb(skb);
		return 0;
	}

	tipc_bcast_lock(net);
	if (msg_user(hdr) == BCAST_PROTOCOL)
		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
	else
		rc = tipc_link_rcv(l, skb, NULL);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);

	return rc;
}

/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	u16 acked = msg_bcast_ack(hdr);
	struct sk_buff_head xmitq;

	/* Ignore bc acks sent by peer before bcast synch point was received */
	if (msg_bc_ack_invalid(hdr))
		return;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_bc_ack_rcv(l, acked, &xmitq);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	if (msg_type(hdr) != STATE_MSG) {
		tipc_link_bc_init_rcv(l, hdr);
	} else if (!msg_bc_ack_invalid(hdr)) {
		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
		rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
	}
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
	return rc;
}

/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
			 struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);

	tipc_bcast_lock(net);
	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
	tipc_bcbase_select_primary(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);
}

/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
	tipc_bcbase_select_primary(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

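/* tipc_bclink_reset_stats - clear the statistics counters of the broadcast
 * send link
 */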
int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_link *l = tipc_bc_sndlink(net);

	if (!l)
		return -ENOPROTOOPT;

	tipc_bcast_lock(net);
	tipc_link_reset_stats(l);
	tipc_bcast_unlock(net);
	return 0;
}

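/* tipc_bc_link_set_queue_limits - set the send window of the broadcast link;
 * values below BCLINK_WIN_MIN are silently raised to the minimum, values
 * above TIPC_MAX_LINK_WIN are rejected
 */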
static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_link *l = tipc_bc_sndlink(net);

	if (!l)
		return -ENOPROTOOPT;
	if (limit < BCLINK_WIN_MIN)
		limit = BCLINK_WIN_MIN;
	if (limit > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	tipc_bcast_lock(net);
	tipc_link_set_queue_limits(l, limit);
	tipc_bcast_unlock(net);
	return 0;
}

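/* tipc_bc_link_set_broadcast_mode - force 'broadcast' or 'replicast', or
 * re-enable autoselection; a mode the cluster cannot support is rejected
 * with -ENOPROTOOPT
 */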
static int tipc_bc_link_set_broadcast_mode(struct net *net, u32 bc_mode)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	switch (bc_mode) {
	case BCLINK_MODE_BCAST:
		if (!bb->bcast_support)
			return -ENOPROTOOPT;

		bb->force_bcast = true;
		bb->force_rcast = false;
		break;
	case BCLINK_MODE_RCAST:
		if (!bb->rcast_support)
			return -ENOPROTOOPT;

		bb->force_bcast = false;
		bb->force_rcast = true;
		break;
	case BCLINK_MODE_SEL:
		if (!bb->bcast_support || !bb->rcast_support)
			return -ENOPROTOOPT;

		bb->force_bcast = false;
		bb->force_rcast = false;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

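/* tipc_bc_link_set_broadcast_ratio - set rc_ratio (1-100) and recompute the
 * broadcast/replicast selection threshold
 */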
static int tipc_bc_link_set_broadcast_ratio(struct net *net, u32 bc_ratio)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	if (!bb->bcast_support || !bb->rcast_support)
		return -ENOPROTOOPT;

	if (bc_ratio > 100 || bc_ratio <= 0)
		return -EINVAL;

	bb->rc_ratio = bc_ratio;
	tipc_bcast_lock(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);

	return 0;
}

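/* tipc_nl_bc_link_set - apply netlink link properties (broadcast mode,
 * broadcast ratio and/or window) to the broadcast link
 */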
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	u32 bc_mode;
	u32 bc_ratio;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN] &&
	    !props[TIPC_NLA_PROP_BROADCAST] &&
	    !props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
		return -EOPNOTSUPP;
	}

	if (props[TIPC_NLA_PROP_BROADCAST]) {
		bc_mode = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST]);
		err = tipc_bc_link_set_broadcast_mode(net, bc_mode);
	}

	if (!err && props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
		bc_ratio = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST_RATIO]);
		err = tipc_bc_link_set_broadcast_ratio(net, bc_ratio);
	}

	if (!err && props[TIPC_NLA_PROP_WIN]) {
		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		err = tipc_bc_link_set_queue_limits(net, win);
	}

	return err;
}

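/* tipc_bcast_init - allocate the broadcast base structure and create the
 * broadcast send link for this network namespace
 */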
int tipc_bcast_init(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bc_base *bb = NULL;
	struct tipc_link *l = NULL;

	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
	if (!bb)
		goto enomem;
	tn->bcbase = bb;
	spin_lock_init(&tipc_net(net)->bclock);

	if (!tipc_link_bc_create(net, 0, 0,
				 FB_MTU,
				 BCLINK_WIN_DEFAULT,
				 0,
				 &bb->inputq,
				 NULL,
				 NULL,
				 &l))
		goto enomem;
	bb->link = l;
	tn->bcl = l;
	bb->rc_ratio = 10;
	bb->rcast_support = true;
	return 0;
enomem:
	kfree(bb);
	kfree(l);
	return -ENOMEM;
}

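/* tipc_bcast_stop - release broadcast state on namespace exit; the
 * synchronize_net() grace period ensures no RCU readers still hold
 * references to the freed structures
 */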
void tipc_bcast_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	synchronize_net();
	kfree(tn->bcbase);
	kfree(tn->bcl);
}

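/* tipc_nlist_init - reset a destination node list; @self is the own node
 * address, used by the helpers below to distinguish the local node from
 * remote destinations
 */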
void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
{
	memset(nl, 0, sizeof(*nl));
	INIT_LIST_HEAD(&nl->list);
	nl->self = self;
}

void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
{
	if (node == nl->self)
		nl->local = true;
	else if (tipc_dest_push(&nl->list, node, 0))
		nl->remote++;
}

void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
{
	if (node == nl->self)
		nl->local = false;
	else if (tipc_dest_del(&nl->list, node, 0))
		nl->remote--;
}

void tipc_nlist_purge(struct tipc_nlist *nl)
{
	tipc_dest_list_purge(&nl->list);
	nl->remote = 0;
	nl->local = false;
}

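/* tipc_bcast_get_broadcast_mode - report the configured method: forced
 * broadcast, forced replicast, autoselect, or 0 if neither is supported
 */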
u32 tipc_bcast_get_broadcast_mode(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	if (bb->force_bcast)
		return BCLINK_MODE_BCAST;

	if (bb->force_rcast)
		return BCLINK_MODE_RCAST;

	if (bb->bcast_support && bb->rcast_support)
		return BCLINK_MODE_SEL;

	return 0;
}

u32 tipc_bcast_get_broadcast_ratio(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	return bb->rc_ratio;
}

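/* tipc_mcast_filter_msg - enforce ordered delivery when a peer socket
 * switches between replicast and broadcast. The dummy SYN emitted by
 * tipc_mcast_send_sync() and the first message sent via the new method form
 * a pair of SYN 'twins' with opposite rcast flags. A SYN with no twin yet,
 * and any message arriving on the same link as a pending SYN, are held back
 * on @defq; once the twins meet, the one carrying data is delivered and the
 * held-back messages from that peer are released in order.
 */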
void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
			   struct sk_buff_head *inputq)
{
	struct sk_buff *skb, *_skb, *tmp;
	struct tipc_msg *hdr, *_hdr;
	bool match = false;
	u32 node, port;

	skb = skb_peek(inputq);
	if (!skb)
		return;

	hdr = buf_msg(skb);

	if (likely(!msg_is_syn(hdr) && skb_queue_empty(defq)))
		return;

	node = msg_orignode(hdr);
	if (node == tipc_own_addr(net))
		return;

	port = msg_origport(hdr);

	/* Has the twin SYN message already arrived ? */
	skb_queue_walk(defq, _skb) {
		_hdr = buf_msg(_skb);
		if (msg_orignode(_hdr) != node)
			continue;
		if (msg_origport(_hdr) != port)
			continue;
		match = true;
		break;
	}

	if (!match) {
		if (!msg_is_syn(hdr))
			return;
		__skb_dequeue(inputq);
		__skb_queue_tail(defq, skb);
		return;
	}

	/* Deliver non-SYN message from other link, otherwise queue it */
	if (!msg_is_syn(hdr)) {
		if (msg_is_rcast(hdr) != msg_is_rcast(_hdr))
			return;
		__skb_dequeue(inputq);
		__skb_queue_tail(defq, skb);
		return;
	}

	/* Queue non-SYN/SYN message from same link */
	if (msg_is_rcast(hdr) == msg_is_rcast(_hdr)) {
		__skb_dequeue(inputq);
		__skb_queue_tail(defq, skb);
		return;
	}

	/* Matching SYN messages => return the one with data, if any */
	__skb_unlink(_skb, defq);
	if (msg_data_sz(hdr)) {
		kfree_skb(_skb);
	} else {
		__skb_dequeue(inputq);
		kfree_skb(skb);
		__skb_queue_tail(inputq, _skb);
	}

	/* Deliver subsequent non-SYN messages from same peer */
	skb_queue_walk_safe(defq, _skb, tmp) {
		_hdr = buf_msg(_skb);
		if (msg_orignode(_hdr) != node)
			continue;
		if (msg_origport(_hdr) != port)
			continue;
		if (msg_is_syn(_hdr))
			break;
		__skb_unlink(_skb, defq);
		__skb_queue_tail(inputq, _skb);
	}
}