// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2013-2020  B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 */

#include "fragmentation.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <uapi/linux/batadv_packet.h>

#include "hard-interface.h"
#include "originator.h"
#include "routing.h"
#include "send.h"

/**
 * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 * @dropped: whether the chain is cleared because all fragments are dropped
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);

		if (dropped)
			kfree_skb(entry->skb);
		else
			consume_skb(entry->skb);

		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig() - free fragments associated to an orig
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	u8 i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&chain->lock);

		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->fragment_list, true);
			chain->size = 0;
		}

		spin_unlock_bh(&chain->lock);
	}
}

/**
 * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
 *
 * Return: the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;

	return limit;
}
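
/* Worked example (values are illustrative; the real constants live in
 * main.h and may differ between versions): with a maximum fragment size
 * of 1400 bytes and a 20 byte struct batadv_frag_packet, each fragment
 * carries 1400 - 20 = 1380 bytes of payload, so 16 fragments allow a
 * merged packet of up to 16 * 1380 = 22080 bytes.
 */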

/**
 * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete existing
 * entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Return: true if chain is empty and the caller can just insert the new
 * fragment without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   u16 seqno)
{
	lockdep_assert_held(&chain->lock);

	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->fragment_list))
		batadv_frag_clear_chain(&chain->fragment_list, true);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}

/**
 * batadv_frag_insert_packet() - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
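
	/* Illustration (assuming the default BATADV_FRAG_BUFFER_COUNT of 8
	 * buckets): seqno 4660 (0x1234) selects bucket 4660 % 8 == 4; a
	 * later fragment stream whose seqno hits the same bucket evicts the
	 * old chain via batadv_frag_init_chain() below.
	 */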

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true,
	 * if the list is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}
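
	/* Illustration: if fragments arrive in the order no = 2, 0, 1, the
	 * chain becomes [2], then [2, 0], then [2, 1, 0] - fragment numbers
	 * stay strictly descending, so the merge can walk the list once.
	 */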

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->fragment_list, true);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->fragment_list, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret)
		kfree(frag_entry_new);

	return ret;
}

/**
 * batadv_frag_merge_packets() - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skb's into the expanded one. After doing so, clear the chain.
 *
 * Return: the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out;
	int size, hdr_size = sizeof(struct batadv_frag_packet);
	bool dropped = false;

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	packet = (struct batadv_frag_packet *)skb_out->data;
	size = ntohs(packet->total_size) + hdr_size;

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		dropped = true;
		goto free;
	}

	/* Move the existing MAC header to just before the payload. (Override
	 * the fragment header.)
	 */
	skb_pull(skb_out, hdr_size);
	skb_out->ip_summed = CHECKSUM_NONE;
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);
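
	/* Buffer layout (illustrative), before the rework above:
	 *
	 *   [eth header][fragment header][first fragment's payload]
	 *
	 * and after it, with skb->data pointing at the payload:
	 *
	 *   [gap][eth header][first fragment's payload]
	 *
	 * The Ethernet header was copied forward over the tail of the now
	 * unused fragment header; the remaining fragments follow below.
	 */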

	/* Copy the payload of each fragment into the last skb */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		skb_put_data(skb_out, entry->skb->data + hdr_size, size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain, dropped);

	return skb_out;
}

/**
 * batadv_frag_skb_buffer() - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and free skb.
 *
 * Return: true when the packet is merged or buffered, false when skb is not
 * used.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	ret = true;
out_err:
	*skb = skb_out;

	return ret;
}
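
/* Caller-side sketch (hypothetical; the real receive path lives in
 * routing.c):
 *
 *	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
 *		return NET_RX_DROP;     // error, skb must not be reused
 *	if (!skb)
 *		return NET_RX_SUCCESS;  // buffered, more fragments needed
 *	// skb now holds the fully merged packet
 */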

/**
 * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragments payload and check if the merged packet
 * will exceed the MTU towards the next-hop. If so, the fragment is forwarded
 * without merging it.
 *
 * Return: true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	u16 total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->ttl--;
		batadv_send_unicast_skb(skb, neigh_node);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_put(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}

/**
 * batadv_frag_create() - create a fragment from skb
 * @net_dev: outgoing device for fragment
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @fragment_size: size of new fragment
 *
 * Split the passed skb into two fragments: A new one with size matching the
 * passed mtu and the old one with the rest. The new skb contains data from the
 * tail of the old skb.
 *
 * Return: the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct net_device *net_dev,
					  struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int fragment_size)
{
	unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev);
	unsigned int tailroom = net_dev->needed_tailroom;
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int mtu = fragment_size + header_size;

	skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom);
	if (!skb_fragment)
		goto err;

	skb_fragment->priority = skb->priority;

	/* Eat the last mtu-bytes of the skb */
	skb_reserve(skb_fragment, ll_reserved + header_size);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}

/**
 * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or a negative errno code on a failure
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node *neigh_node)
{
	struct net_device *net_dev = neigh_node->if_incoming->net_dev;
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, num_fragments;
	int ret;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;

	if (skb->len == 0 || max_fragment_size == 0)
		return -EINVAL;

	num_fragments = (skb->len - 1) / max_fragment_size + 1;
	max_fragment_size = (skb->len - 1) / num_fragments + 1;
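
	/* Example (illustrative numbers): for skb->len == 3000 and an
	 * effective max_fragment_size of 1380, num_fragments becomes
	 * (3000 - 1) / 1380 + 1 == 3, and the size is then rebalanced to
	 * (3000 - 1) / 3 + 1 == 1000 bytes, so the fragments end up roughly
	 * equal instead of two full ones plus a tiny remainder.
	 */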

	/* Don't even try to fragment, if we need more than 16 fragments */
	if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
		ret = -EAGAIN;
		goto free_skb;
	}

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority. This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		frag_header.priority = skb->priority - 256;
	else
		frag_header.priority = 0;
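
	/* For instance, a frame whose VLAN tag carried 802.1d priority 5
	 * arrives here with skb->priority == 261 and is sent with fragment
	 * header priority 5; any ordinary priority value falls back to 0.
	 */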

	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		/* The initial check in this function should cover this case */
		if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
			ret = -EINVAL;
			goto put_primary_if;
		}

		skb_fragment = batadv_frag_create(net_dev, skb, &frag_header,
						  max_fragment_size);
		if (!skb_fragment) {
			ret = -ENOMEM;
			goto put_primary_if;
		}

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
			ret = NET_XMIT_DROP;
			goto put_primary_if;
		}

		frag_header.no++;
	}

	/* make sure that there is at least enough head for the fragmentation
	 * and ethernet headers
	 */
	ret = skb_cow_head(skb, ETH_HLEN + header_size);
	if (ret < 0)
		goto put_primary_if;

	skb_push(skb, header_size);
	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);
	/* skb was consumed */
	skb = NULL;

put_primary_if:
	batadv_hardif_put(primary_if);
free_skb:
	kfree_skb(skb);

	return ret;
}