/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"
#include "fragmentation.h"
static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
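/* Usage sketch (illustrative only, mirroring the pattern used by
 * batadv_send_skb_unicast() later in this file): because the error paths
 * above also return NET_XMIT_DROP, callers treat any other result as
 * success:
 *
 *	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
 *		ret = NET_XMIT_SUCCESS;
 */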
/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}
/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}
/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
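/* Usage sketch (illustrative only; the caller, chosen subtype and error
 * handling are assumptions, not part of this file): a 4addr unicast is
 * first encapsulated and then handed to batadv_send_skb_to_orig(), the
 * same flow batadv_send_skb_unicast() below uses:
 *
 *	if (batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node,
 *						  BATADV_P_DATA))
 *		batadv_send_skb_to_orig(skb, orig_node, NULL);
 */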
/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype,
				   struct batadv_orig_node *orig_node,
				   unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_unicast_packet *unicast_packet;
	int ret = NET_XMIT_DROP, hdr_size;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;

		hdr_size = sizeof(*unicast_packet);
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;

		hdr_size = sizeof(struct batadv_unicast_4addr_packet);
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	ethhdr = (struct ethhdr *)(skb->data + hdr_size);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet
	 * and will try to reroute it because the ttvn contained in the
	 * header is less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}
/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, uint8_t *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	uint8_t *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}
/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}
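/**
 * batadv_schedule_bat_ogm - schedule an OGM for transmission
 * @hard_iface: the interface to send the OGM on
 *
 * Ignores interfaces which are not in use or about to be removed, marks
 * interfaces pending activation as active and hands the actual scheduling
 * over to the routing algorithm's bat_ogm_schedule() callback.
 */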
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
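/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the skb (if any), drops the references held on the incoming and
 * outgoing hard interfaces and releases the forw_packet structure itself.
 */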
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_free_ref(forw_packet->if_outgoing);
	kfree(forw_packet);
}
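/**
 * _batadv_add_bcast_packet_to_list - queue a broadcast packet for sending
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the pending broadcast packet to queue
 * @send_time: delay in jiffies after which the packet is sent
 *
 * Inserts the packet into the broadcast queue and arms its delayed work so
 * it is transmitted once send_time has elapsed.
 */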
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
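/**
 * batadv_send_outstanding_bcast_packet - delayed work sending a queued
 *  broadcast packet
 * @work: work queue item
 *
 * Takes the pending broadcast off the queue, rebroadcasts a clone of it on
 * every hard interface attached to the soft interface (unless the per
 * interface rebroadcast limit has been reached) and re-queues itself until
 * BATADV_NUM_BCASTS_MAX transmissions have been made.
 */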
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
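/**
 * batadv_send_outstanding_bat_ogm_packet - delayed work sending a queued OGM
 * @work: work queue item
 *
 * Takes the pending OGM off the queue, emits it via the routing algorithm's
 * bat_ogm_emit() callback and, for the "original" copy only, schedules the
 * next OGM transmission round.
 */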
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}
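/**
 * batadv_purge_outstanding_packets - stop and free pending forward packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the interface to purge, or NULL to purge all interfaces
 *
 * Cancels the delayed work of every queued broadcast and OGM packet (only
 * those belonging to hard_iface if one is given) and frees the packets
 * whose transmission was still pending.
 */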
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}