/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
#include "bat_ogm.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *) skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
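
/* Example (illustrative sketch only): given the dev_queue_xmit() convention
 * documented above, a hypothetical caller of send_skb_packet() could treat
 * negative return values as hard errors while ignoring congestion drops such
 * as NET_XMIT_DROP (> 0). The function name below is made up for this sketch
 * and is not part of the batman-adv API; it is kept under #if 0 so it does
 * not affect the build. */
#if 0
static bool example_try_send(struct sk_buff *skb,
			     struct hard_iface *hard_iface,
			     const uint8_t *dst_addr)
{
	int ret;

	ret = send_skb_packet(skb, hard_iface, dst_addr);

	/* negative values indicate a real transmit error */
	if (ret < 0)
		return false;

	/* NET_XMIT_DROP and other positive codes are congestion drops,
	 * not errors */
	return true;
}
#endif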

static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       BATMAN_OGM_LEN);

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}

/* when calling this function (hard_iface == primary_if) has to be true */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
				 struct hard_iface *hard_iface)
{
	int new_len;

	new_len = BATMAN_OGM_LEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BATMAN_OGM_LEN;

	realloc_packet_buffer(hard_iface, new_len);

	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	return tt_changes_fill_buffer(bat_priv,
				      hard_iface->packet_buff + BATMAN_OGM_LEN,
				      hard_iface->packet_len - BATMAN_OGM_LEN);
}
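
/* Example (illustrative sketch only): the MTU guard above boils down to the
 * question "do this many TT changes still fit into a single OGM on the given
 * interface?". The helper below restates that check; its name is made up for
 * this sketch and it is kept under #if 0 so it does not affect the build. */
#if 0
static bool example_tt_changes_fit(struct hard_iface *hard_iface,
				   uint8_t tt_num_changes)
{
	/* an OGM carries the fixed header plus the appended TT changes */
	return BATMAN_OGM_LEN + tt_len(tt_num_changes) <=
	       hard_iface->soft_iface->mtu;
}
#endif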

static int reset_packet_buffer(struct bat_priv *bat_priv,
			       struct hard_iface *hard_iface)
{
	realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
	return 0;
}

void schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	int tt_num_changes = -1;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	primary_if = primary_if_get_selected(bat_priv);

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			tt_commit_changes(bat_priv);
			tt_num_changes = prepare_packet_buffer(bat_priv,
							       hard_iface);
		}

		/* if the changes have been sent often enough */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			tt_num_changes = reset_packet_buffer(bat_priv,
							     hard_iface);
	}

	if (primary_if)
		hardif_free_ref(primary_if);

	bat_ogm_schedule(hard_iface, tt_num_changes);
}

static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
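
/* Example (illustrative sketch only): queue_delayed_work() interprets its
 * last argument as a delay in jiffies, which is what send_time carries here.
 * A hypothetical caller thinking in milliseconds could convert first, e.g.
 * with msecs_to_jiffies(). The helper name below is made up for this sketch
 * and it is kept under #if 0 so it does not affect the build. */
#if 0
static void example_queue_bcast_in_msecs(struct bat_priv *bat_priv,
					 struct forw_packet *forw_packet,
					 unsigned int msecs)
{
	/* msecs_to_jiffies() rounds up to the next timer tick (HZ) */
	_add_bcast_packet_to_list(bat_priv, forw_packet,
				  msecs_to_jiffies(msecs));
}
#endif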

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
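
/* Example (illustrative sketch only): as the comment above notes, the skb is
 * not consumed - the queue works on its own skb_copy() - so a hypothetical
 * caller keeps ownership and frees its original buffer regardless of the
 * return value. The function name and the delay of one jiffy are made up for
 * this sketch; it is kept under #if 0 so it does not affect the build. */
#if 0
static netdev_tx_t example_send_bcast_and_free(struct bat_priv *bat_priv,
					       struct sk_buff *skb)
{
	int ret;

	/* queue the broadcast with a short delay (here: one jiffy) */
	ret = add_bcast_packet_to_list(bat_priv, skb, 1);

	/* the queue holds its own copy, so the original can go away */
	kfree_skb(skb);

	return (ret == NETDEV_TX_OK) ? NETDEV_TX_OK : NETDEV_TX_BUSY;
}
#endif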

static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	bat_ogm_emit(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake-up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
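
/* Example (illustrative sketch only): the purge loops above rely on
 * cancel_delayed_work_sync() returning true only if the delayed work was
 * still pending, i.e. the send_outstanding_*_packet() callback never ran and
 * the entry is still ours to unlink and free. The helper below isolates that
 * pattern for one bcast entry; its name is made up for this sketch and it is
 * kept under #if 0 so it does not affect the build. */
#if 0
static void example_cancel_one_bcast(struct bat_priv *bat_priv,
				     struct forw_packet *forw_packet)
{
	bool pending;

	/* drop the list lock so the work callback cannot deadlock on it */
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
	pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);

	if (pending) {
		/* the work never ran: unlink and free the entry ourselves */
		hlist_del(&forw_packet->list);
		forw_packet_free(forw_packet);
	}
}
#endif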