/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address using
 * the specified interface. The address may be a neighbor's unicast address or
 * the broadcast address (see batadv_send_unicast_skb() and
 * batadv_send_broadcast_skb() for the respective wrappers).
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit(skb)
 * otherwise.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

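/* Usage sketch (illustrative only, not a caller that exists in this file):
 * as with dev_queue_xmit(), only NET_XMIT_DROP and negative values indicate
 * failure; NET_XMIT_CN merely signals congestion. A hypothetical caller
 * would therefore check for NET_XMIT_DROP rather than for non-zero:
 *
 *	ret = batadv_send_skb_packet(skb, hard_iface, dst_addr);
 *	if (ret == NET_XMIT_DROP)
 *		pr_debug("batman-adv frame could not be sent\n");
 */
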
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_unicast_skb(skb, neigh_node);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}

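/* Illustrative caller sketch (hypothetical, for exposition): forwarding
 * paths typically treat everything except NET_XMIT_DROP as success, since
 * NET_XMIT_POLICED only means the skb was handed to network coding for a
 * deferred transmit:
 *
 *	if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
 *		ret = NET_RX_SUCCESS;
 */
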
/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

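/* For orientation, the header this helper initializes (field order
 * summarized from the batadv_unicast_packet definition in packet.h; see
 * that file for the authoritative layout):
 *
 *	u8 packet_type;    BATADV_UNICAST
 *	u8 version;        BATADV_COMPAT_VERSION
 *	u8 ttl;            BATADV_TTL
 *	u8 ttvn;           destination's translation table version
 *	u8 dest[ETH_ALEN]; originator address of the destination
 */
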
/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

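/* Usage sketch (hypothetical caller): sending a payload as a 4addr unicast
 * with a data subtype; note that the orig_node reference is consumed by
 * this call and the skb is freed on failure:
 *
 *	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
 *				      BATADV_P_DATA, orig_node, vid);
 */
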
/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint! let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}

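/* For context, send.h layers thin static inline wrappers on top of this
 * function (sketched from memory here; see send.h for the authoritative
 * definitions), e.g.:
 *
 *	static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
 *						 struct sk_buff *skb,
 *						 u8 *dst_hint,
 *						 unsigned short vid)
 *	{
 *		return batadv_send_skb_via_tt_generic(bat_priv, skb,
 *						      BATADV_UNICAST, 0,
 *						      dst_hint, vid);
 *	}
 */
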
/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	kfree(forw_packet);
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return NETDEV_TX_BUSY;
}

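/* Usage sketch (illustrative): a caller on the soft interface path queues
 * the broadcast and, because the skb is copied rather than consumed,
 * remains responsible for its own reference:
 *
 *	if (batadv_add_bcast_packet_to_list(bat_priv, skb, delay) ==
 *	    NETDEV_TX_OK)
 *		consume_skb(skb);
 */
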
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queues wake up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->bcast_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->batman_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}