/* 6LoWPAN fragment reassembly
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "reassembly.h"

static const char lowpan_frags_cache_name[] = "lowpan-frags";

struct lowpan_frag_info {
	u16 d_tag;
	u16 d_size;
	u8 d_offset;
};

static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
{
	return (struct lowpan_frag_info *)skb->cb;
}

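/* lowpan_cb() reuses the skb control buffer (skb->cb) as per-fragment
 * scratch space, the same trick IPv4/IPv6 reassembly uses. A build-time
 * guard along these lines (a hypothetical addition, not part of this file)
 * would catch the struct ever outgrowing the control buffer:
 *
 *	BUILD_BUG_ON(sizeof(struct lowpan_frag_info) >
 *		     FIELD_SIZEOF(struct sk_buff, cb));
 */
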
static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *dev);

static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
				     const struct ieee802154_addr *saddr,
				     const struct ieee802154_addr *daddr)
{
	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
	return jhash_3words(ieee802154_addr_hash(saddr),
			    ieee802154_addr_hash(daddr),
			    (__force u32)(tag + (d_size << 16)),
			    lowpan_frags.rnd);
}

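/* The hash is seeded lazily: net_get_random_once() fills lowpan_frags.rnd
 * from the entropy pool on first use, so a remote sender cannot predict
 * which bucket a given (tag, size, saddr, daddr) tuple lands in and build
 * worst-case collision chains in the fragment hash table.
 */
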
static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
{
	const struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}

static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_frag_queue *fq;
	const struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return fq->tag == arg->tag && fq->d_size == arg->d_size &&
	       ieee802154_addr_equal(&fq->saddr, arg->src) &&
	       ieee802154_addr_equal(&fq->daddr, arg->dst);
}

static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_create_arg *arg = a;
	struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);

	fq->tag = arg->tag;
	fq->d_size = arg->d_size;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}

static void lowpan_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, &lowpan_frags);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, &lowpan_frags);
}

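/* lowpan_frag_expire() runs from the per-queue inet_frag timer, armed with
 * the namespace timeout (IPV6_FRAG_TIMEOUT, see lowpan_frags_init_net()
 * below) and wired up as lowpan_frags.frag_expire in lowpan_net_frag_init().
 * An incomplete queue that times out is simply killed and its fragments
 * dropped; unlike IPv6 reassembly, no "time exceeded" message is emitted,
 * as 802.15.4 offers no equivalent signalling.
 */
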
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	arg.tag = frag_info->d_tag;
	arg.d_size = frag_info->d_size;
	arg.src = src;
	arg.dst = dst;

	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

	q = inet_frag_find(&ieee802154_lowpan->frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}

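/* fq_find() is a thin wrapper around the generic inet_frag_find(): the
 * precomputed hash selects a bucket, lowpan_frag_match() (the .match
 * callback) compares (tag, d_size, src, dst) against the queues in that
 * bucket, and on a miss a fresh queue is allocated and filled in through
 * the .constructor callback, lowpan_frag_init(). Both callbacks are
 * registered on the lowpan_frags descriptor in lowpan_net_frag_init().
 */
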
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int end, offset;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_cb(skb)->d_offset << 3;
	end = lowpan_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.flags |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(&fq->q, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}

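/* Worked example (illustrative): d_offset is carried in units of 8 octets,
 * hence the "<< 3" above, matching the RFC 4944 fragment header. For a
 * 1280-byte datagram, a FRAGN with d_offset == 12 holds payload starting
 * at byte 96 of the uncompressed datagram, and the final fragment is the
 * one satisfying offset + skb->len == d_size, which is what sets
 * INET_FRAG_LAST_IN above.
 */
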
/* Check if this packet is complete.
 *
 * It is called with locked fq, and caller must check that
 * queue is eligible for reassembly i.e. it is not COMPLETE,
 * the last and the first frames arrived and all the bits are here.
 *
 * Returns 1 when the complete datagram has been merged into the head
 * skb, or -1 on failure (e.g. out of memory).
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}

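/* Memory accounting note: every queued fragment was charged against the
 * per-netns reassembly budget via add_frag_mem_limit() in
 * lowpan_frag_queue() (plus the clone above, if any). The coalescing loop
 * totals the truesize of the head and all chained fragments in
 * sum_truesize, so the whole charge can be returned with a single
 * sub_frag_mem_limit() call once reassembly succeeds.
 */
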
static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
				struct lowpan_frag_info *frag_info)
{
	bool fail;
	u8 pattern = 0, low = 0;
	__be16 d_tag = 0;

	fail = lowpan_fetch_skb(skb, &pattern, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	frag_info->d_size = (pattern & 7) << 8 | low;
	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
	frag_info->d_tag = ntohs(d_tag);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		frag_info->d_offset = 0;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}

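/* The parsing above follows the RFC 4944 fragmentation header:
 *
 *	FRAG1: | 1 1 0 0 0 | datagram_size (11 bits) | datagram_tag (16 bits) |
 *	FRAGN: | 1 1 1 0 0 | datagram_size (11 bits) | datagram_tag (16 bits) |
 *	       | datagram_offset (8 bits, units of 8 octets) |
 *
 * "(pattern & 7) << 8 | low" thus recovers the 11-bit datagram size from
 * the low three bits of the first byte plus the second byte. Only FRAGN
 * carries an explicit offset; FRAG1 is implicitly at offset 0.
 */
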
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_frag_info *frag_info = lowpan_cb(skb);
	struct ieee802154_addr source, dest;
	int err;

	source = mac_cb(skb)->source;
	dest = mac_cb(skb)->dest;

	err = lowpan_get_frag_info(skb, frag_type, frag_info);
	if (err < 0)
		goto err;

	if (frag_info->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, frag_info, &source, &dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL(lowpan_frag_rcv);

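/* A minimal sketch of the expected receive-path usage, assuming a caller
 * along the lines of the 6LoWPAN rx handler (the surrounding handler is
 * illustrative, not part of this file):
 *
 *	ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
 *	if (ret == 1) {
 *		// skb now holds the complete datagram; continue with
 *		// decompression and delivery
 *	} else {
 *		// fragment was queued or dropped; skb has been consumed
 *	}
 *
 * i.e. a return of 1 hands the fully reassembled datagram back in skb,
 * while -1 means the skb was taken over by the queue or freed.
 */
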
#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ieee802154_lowpan.frags.low_thresh
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ieee802154_lowpan.frags.high_thresh
	},
	{
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

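/* With the table registered under "net/ieee802154/6lowpan" (see
 * lowpan_frags_ns_sysctl_register() below), the knobs surface as e.g.:
 *
 *	/proc/sys/net/ieee802154/6lowpan/6lowpanfrag_high_thresh
 *	/proc/sys/net/ieee802154/6lowpan/6lowpanfrag_low_thresh
 *	/proc/sys/net/ieee802154/6lowpan/6lowpanfrag_time
 *
 * The high/low thresholds bound reassembly memory in bytes and are
 * cross-linked via extra1/extra2 so the low watermark can never be raised
 * above the high one; the time knob is written in seconds and converted
 * to jiffies by proc_dointvec_jiffies.
 */
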
/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &ieee802154_lowpan->frags.high_thresh;
		table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
		table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
		table[1].data = &ieee802154_lowpan->frags.low_thresh;
		table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
		table[2].data = &ieee802154_lowpan->frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int __init lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&ieee802154_lowpan->frags);

	return lowpan_frags_ns_sysctl_register(net);
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
}

static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};

int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.skb_free = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		goto err_pernet;

	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}

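/* A minimal sketch of how this init is expected to be wired up, assuming
 * a 6LoWPAN module init roughly like the one in the 6LoWPAN core
 * (illustrative, not part of this file):
 *
 *	static int __init lowpan_init_module(void)
 *	{
 *		int err;
 *
 *		err = lowpan_net_frag_init();
 *		if (err)
 *			return err;
 *		// register netdev notifiers, rtnl link ops, ...
 *		return 0;
 *	}
 *
 * lowpan_net_frag_exit() below then undoes all three registrations made
 * here: the inet_frags descriptor, the sysctl tables, and the pernet ops.
 */
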
void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}