/*	6LoWPAN fragment reassembly
 *
 *	Authors:
 *	Alexander Aring		<aar@pengutronix.de>
 *
 *	Based on: net/ipv6/reassembly.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "reassembly.h"
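/* Fragment header layout, per RFC 4944 (sec. 5.3):
 *
 *   FRAG1: | 1 1 0 0 0 | datagram_size (11 bits) | datagram_tag (16 bits) |
 *   FRAGN: | 1 1 1 0 0 | datagram_size (11 bits) | datagram_tag (16 bits) |
 *          | datagram_offset (8 bits, in units of 8 octets) |
 *
 * FRAG1 carries the (compressed) IPv6 header and has no offset field;
 * FRAGN carries subsequent pieces at the given 8-octet-aligned offset.
 */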
struct lowpan_frag_info {
	__be16 d_tag;
	u16 d_size;
	u8 d_offset;
};

static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
{
	return (struct lowpan_frag_info *)skb->cb;
}
static struct inet_frags lowpan_frags;
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *dev);
static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
				     const struct ieee802154_addr *saddr,
				     const struct ieee802154_addr *daddr)
{
	u32 c;

	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
	c = jhash_3words(ieee802154_addr_hash(saddr),
			 ieee802154_addr_hash(daddr),
			 (__force u32)(tag + (d_size << 16)),
			 lowpan_frags.rnd);

	return c & (INETFRAGS_HASHSZ - 1);
}
static unsigned int lowpan_hashfn(struct inet_frag_queue *q)
{
	struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}
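/* Per RFC 4944, fragments belong to the same datagram iff the tuple
 * (source link-layer address, destination link-layer address,
 * datagram_size, datagram_tag) matches; the match and init callbacks
 * below key the reassembly queue on exactly that tuple.
 */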
static bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
{
	struct lowpan_frag_queue *fq;
	struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return	fq->tag == arg->tag && fq->d_size == arg->d_size &&
		ieee802154_addr_equal(&fq->saddr, arg->src) &&
		ieee802154_addr_equal(&fq->daddr, arg->dst);
}
static void lowpan_frag_init(struct inet_frag_queue *q, void *a)
{
	struct lowpan_frag_queue *fq;
	struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);

	fq->tag = arg->tag;
	fq->d_size = arg->d_size;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}
static void lowpan_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, &lowpan_frags);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, &lowpan_frags);
}
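/* Look up the reassembly queue for this (src, dst, size, tag) tuple,
 * creating one if none exists yet. The hash runs under the inet_frags
 * read lock; NULL is returned on allocation failure or overflow.
 */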
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	arg.tag = frag_info->d_tag;
	arg.d_size = frag_info->d_size;
	arg.src = src;
	arg.dst = dst;

	read_lock(&lowpan_frags.lock);
	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

	q = inet_frag_find(&ieee802154_lowpan->frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}
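/* Queue one fragment. d_offset is in units of 8 octets (hence the
 * "<< 3"); d_size is the length of the full, uncompressed IPv6
 * datagram. For FRAG1 the skb still carries a compressed 6LoWPAN
 * header, so the uncompressed size is accounted instead of skb->len
 * to keep "meat" comparable against the datagram size.
 */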
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int end, offset;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_cb(skb)->d_offset << 3;
	end = lowpan_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far. We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(&fq->q, skb->truesize);

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	inet_frag_lru_move(&fq->q);
	return -1;
err:
	kfree_skb(skb);
	return -1;
}
/*	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	Returns 1 when the reassembled datagram is ready in the head skb,
 *	or a negative value on failure.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}
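/* Parse the fragmentation header in front of the payload: one byte of
 * dispatch pattern plus the high 3 bits of datagram_size, one byte with
 * the low 8 bits of the size, two bytes of tag, and (for FRAGN only)
 * one byte of offset. lowpan_fetch_skb() pulls the bytes and reports
 * failure if the skb is too short.
 */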
static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
				struct lowpan_frag_info *frag_info)
{
	bool fail;
	u8 pattern = 0, low = 0;

	fail = lowpan_fetch_skb(skb, &pattern, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	frag_info->d_size = (pattern & 7) << 8 | low;
	fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		frag_info->d_offset = 0;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}
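/* Entry point from the 802.15.4 receive path for FRAG1/FRAGN frames.
 * Returns 1 once the last missing piece arrives and the datagram has
 * been reassembled into skb, and a negative value while fragments are
 * still outstanding or on error.
 */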
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_frag_info *frag_info = lowpan_cb(skb);
	struct ieee802154_addr source, dest;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	int err;

	source = mac_cb(skb)->source;
	dest = mac_cb(skb)->dest;

	err = lowpan_get_frag_info(skb, frag_type, frag_info);
	if (err < 0)
		goto err;

	if (frag_info->d_size > ieee802154_lowpan->max_dsize)
		goto err;

	inet_frag_evictor(&ieee802154_lowpan->frags, &lowpan_frags, false);

	fq = fq_find(net, frag_info, &source, &dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL(lowpan_frag_rcv);
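/* Reassembly tunables, exposed under net/ieee802154/6lowpan/: high and
 * low memory watermarks for the fragment cache, the reassembly timeout,
 * the maximum accepted datagram size, and (init_net only) the hash
 * secret rekeying interval.
 */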
#ifdef CONFIG_SYSCTL
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "6lowpanfrag_max_datagram_size",
		.data		= &init_net.ieee802154_lowpan.max_dsize,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ }
};

static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
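/* Per-netns registration: namespaces other than init_net get a
 * kmemdup'd copy of the table with .data repointed at their own
 * counters, so each namespace tunes its own limits.
 */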
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &ieee802154_lowpan->frags.high_thresh;
		table[1].data = &ieee802154_lowpan->frags.low_thresh;
		table[2].data = &ieee802154_lowpan->frags.timeout;
		table[3].data = &ieee802154_lowpan->max_dsize;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}
static struct ctl_table_header *lowpan_ctl_header;

static int lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif
static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
	ieee802154_lowpan->max_dsize = 0xFFFF;

	inet_frags_init_net(&ieee802154_lowpan->frags);

	return lowpan_frags_ns_sysctl_register(net);
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
}
static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};
int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.skb_free = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&lowpan_frags);

	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}
void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}