/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"
static void send_outstanding_bcast_packet(struct work_struct *work);
/* apply hop penalty for a normal link */
static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);

	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}
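/*
 * A quick sanity check of the formula above (illustrative values only):
 * with TQ_MAX_VALUE at 255 and a configured hop penalty of, say, 10, a
 * perfect incoming tq of 255 becomes 255 * (255 - 10) / 255 = 245, i.e.
 * each hop scales the advertised link quality down proportionally.
 */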
/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(const struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % (2 * JITTER)));
}
/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
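/*
 * Note the asymmetry: own OGMs are spread over [interval - JITTER,
 * interval + JITTER] to desynchronize neighbors, while forwarded OGMs
 * wait at most JITTER/2 ms - presumably just long enough for the
 * aggregation logic to batch them with other pending packets.
 */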
/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
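/*
 * Ownership note: send_skb_packet() consumes the skb on every path -
 * dev_queue_xmit() takes it over, and the error path frees it - so
 * callers that still need the buffer (see below) pass in a clone or copy.
 */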
/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->tt_num_changes)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s, ttvn %d) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_packet->ttvn, hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += sizeof(*batman_packet) +
			    tt_len(batman_packet->tt_num_changes);
		packet_num++;
		batman_packet = (struct batman_packet *)
				(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, hard_iface, broadcast_addr);
}
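/*
 * The loop above walks an aggregate: several OGMs sit back to back in one
 * skb, each trailed by its TT change records, and direct_link_flags holds
 * one DIRECTLINK bit per packed OGM - hence the "1 << packet_num" test.
 */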
/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->skb->data);
	int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not "
		       "specified\n");
		goto out;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* skb is only used once and then forw_packet is free'd */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		goto out;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		send_packet_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}
static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       sizeof(*batman_packet));

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}
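/*
 * Only the OGM header (sizeof(*batman_packet) bytes) survives the
 * reallocation; the TT change records behind it are rebuilt from scratch
 * by the callers below, so copying them here would be wasted effort.
 */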
/* when calling this function (hard_iface == primary_if) has to be true */
static void prepare_packet_buffer(struct bat_priv *bat_priv,
				  struct hard_iface *hard_iface)
{
	int new_len;
	struct batman_packet *batman_packet;

	new_len = BAT_PACKET_LEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BAT_PACKET_LEN;

	realloc_packet_buffer(hard_iface, new_len);
	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	batman_packet->tt_num_changes =
		tt_changes_fill_buffer(bat_priv,
				       hard_iface->packet_buff + BAT_PACKET_LEN,
				       hard_iface->packet_len - BAT_PACKET_LEN);
}
static void reset_packet_buffer(struct bat_priv *bat_priv,
				struct hard_iface *hard_iface)
{
	struct batman_packet *batman_packet;

	realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);

	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
	batman_packet->tt_num_changes = 0;
}
void schedule_own_packet(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			tt_commit_changes(bat_priv);
			prepare_packet_buffer(bat_priv, hard_iface);
		}

		/* if the changes have been sent often enough */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			reset_packet_buffer(bat_priv, hard_iface);
	}

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * prepare_packet_buffer() or in reset_packet_buffer()
	 */
	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
	batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags |= VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_packet->gw_flags =
			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_packet->gw_flags = NO_FLAGS;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       hard_iface->packet_buff,
			       hard_iface->packet_len,
			       hard_iface, 1, send_time);

	if (primary_if)
		hardif_free_ref(primary_if);
}
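/*
 * Net effect of the primary_if branch above: a fresh TT change set rides
 * along on up to TT_OGM_APPEND_MAX consecutive OGMs for redundancy; once
 * tt_ogm_append_cnt hits zero, reset_packet_buffer() shrinks the OGM back
 * to the bare header until the next local change.
 */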
void schedule_forward_packet(struct orig_node *orig_node,
			     const struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     int directlink,
			     struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *router;
	uint8_t in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;
	uint8_t tt_num_changes;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	router = orig_node_get_router(orig_node);

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;
	tt_num_changes = batman_packet->tt_num_changes;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the
	 * rebroadcast of our best tq value */
	if (router && router->tq_avg != 0) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(router->addr, ethhdr->h_source)) {
			batman_packet->tq = router->tq_avg;

			if (router->last_ttl)
				batman_packet->ttl = router->last_ttl - 1;
		}

		tq_avg = router->tq_avg;
	}

	if (router)
		neigh_node_free_ref(router);

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htonl(batman_packet->seqno);
	batman_packet->tt_crc = htons(batman_packet->tt_crc);

	/* switch off primaries first hop flag when forwarding */
	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(*batman_packet) + tt_len(tt_num_changes),
			       if_incoming, 0, send_time);
}
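/*
 * Design note: the forwarded tq is replaced by the best ranking neighbor's
 * tq_avg unless that neighbor sent the OGM itself, and hop_penalty() is
 * applied afterwards in either case - so the advertised metric can only
 * decay on its way through the mesh.
 */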
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
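/*
 * skb_copy() rather than skb_clone() is deliberate above: the TTL is
 * decremented in the packet data, and a clone would share - and thus
 * corrupt - the caller's buffer, which this function promises not to
 * consume.
 */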
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
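/*
 * Broadcasts are therefore transmitted three times per interface, a few
 * milliseconds apart, trading a little airtime for a much better chance
 * of surviving lossy links.
 */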
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}
void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}