// SPDX-License-Identifier: GPL-2.0-or-later
/* 6LoWPAN fragment reassembly
 *
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/ipv6/reassembly.c
 */
#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6_frag.h>
#include <net/inet_frag.h>

#include "6lowpan_i.h"
static const char lowpan_frags_cache_name[] = "lowpan-frags";

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev, struct net_device *ldev);
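
/* inet_frags constructor: copy the 6LoWPAN compare key into the new queue. */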
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
	const struct frag_lowpan_compare_key *key = a;

	BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
	memcpy(&q->key, key, sizeof(*key));
}
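
/* Timer callback: the queue timed out before all fragments arrived, so
 * unhash it and drop our reference.
 */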
static void lowpan_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;

	fq = container_of(frag, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q);
}
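
/* Look up (or create) the reassembly queue matching this fragment's
 * tag, datagram size and link-layer addresses; returns NULL on failure.
 */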
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	struct frag_lowpan_compare_key key = {};
	struct inet_frag_queue *q;

	key.tag = cb->d_tag;
	key.d_size = cb->d_size;
	key.src = *src;
	key.dst = *dst;

	q = inet_frag_find(ieee802154_lowpan->fqdir, &key);
	if (!q)
		return NULL;

	return container_of(q, struct lowpan_frag_queue, q);
}
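
/* Insert one fragment into the queue. Returns the lowpan_frag_reasm()
 * result once the first and last fragments are in and fq->q.meat matches
 * the datagram size, -1 otherwise.
 */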
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, u8 frag_type)
{
	struct sk_buff *prev_tail;
	struct net_device *ldev;
	int end, offset, err;

	/* inet_frag_queue_* functions use skb->cb; see struct ipfrag_skb_cb */
	BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet_skb_parm));
	BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(struct inet6_skb_parm));

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_802154_cb(skb)->d_offset << 3;
	end = lowpan_802154_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Note : skb->rbnode and skb->dev share the same location. */
	ldev = skb->dev;
	/* Makes sure compiler wont do silly aliasing games */
	barrier();

	prev_tail = fq->q.fragments_tail;
	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
	if (err)
		goto err;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1)
		fq->q.flags |= INET_FRAG_FIRST_IN;

	fq->q.meat += skb->len;
	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, skb, prev_tail, ldev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}
/* Check if this packet is complete.
 *
 * It is called with locked fq, and caller must check that
 * queue is eligible for reassembly i.e. it is not COMPLETE,
 * the last and the first frames arrived and all the bits are here.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
			     struct sk_buff *prev_tail, struct net_device *ldev)
{
	void *reasm_data;

	inet_frag_kill(&fq->q);

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		goto out_oom;
	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);

	skb->dev = ldev;
	skb->tstamp = fq->q.stamp;
	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}
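
/* Map the result of the fragment RX handlers onto NET_RX_SUCCESS or
 * NET_RX_DROP for the caller.
 */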
static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
					  lowpan_rx_result res)
{
	switch (res) {
	case RX_QUEUED:
		return NET_RX_SUCCESS;
	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);

		fallthrough;
	default:
		/* all others failure */
		return NET_RX_DROP;
	}
}
static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
{
	int ret;

	if (!lowpan_is_iphc(*skb_network_header(skb)))
		return RX_CONTINUE;

	ret = lowpan_iphc_decompress(skb);
	if (ret < 0)
		return RX_DROP;

	return RX_QUEUED;
}
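
/* Run the dispatch handlers (IPHC decompression, plain IPv6) on the
 * payload of a FRAG1 fragment.
 */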
static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
{
	lowpan_rx_result res;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(skb);		\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* likely at first */
	CALL_RXH(lowpan_frag_rx_h_iphc);
	CALL_RXH(lowpan_rx_h_ipv6);

rxh_next:
	return lowpan_frag_rx_handlers_result(skb, res);
#undef CALL_RXH
}
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK	0x07
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT	8
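
/* RFC 4944 fragmentation header: an 11-bit datagram_size (3 bits in the
 * first byte, 8 bits in the second), a 16-bit datagram_tag and, for
 * FRAGN only, an 8-bit datagram_offset in units of 8 octets.
 */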
static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
			 struct lowpan_802154_cb *cb)
{
	bool fail;
	u8 high = 0, low = 0;
	__be16 d_tag = 0;

	fail = lowpan_fetch_skb(skb, &high, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	/* remove the dispatch value and use first three bits as high value
	 * for the datagram size
	 */
	cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
		LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
	cb->d_tag = ntohs(d_tag);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		cb->d_offset = 0;
		/* check if datagram_size has ipv6hdr on FRAG1 */
		fail |= cb->d_size < sizeof(struct ipv6hdr);
		/* check if we can dereference the dispatch value */
		fail |= !skb->len;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}
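
/* Entry point from the 6LoWPAN receive path for frames carrying a
 * LOWPAN_DISPATCH_FRAG1/FRAGN dispatch: parse the fragmentation header,
 * decompress FRAG1 payloads and feed the fragment into its queue.
 */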
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
	struct ieee802154_hdr hdr = {};
	int err;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		goto err;

	err = lowpan_get_cb(skb, frag_type, cb);
	if (err < 0)
		goto err;

	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		err = lowpan_invoke_frag_rx_handlers(skb);
		if (err == NET_RX_DROP)
			goto err;
	}

	if (cb->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}

#ifdef CONFIG_SYSCTL
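/* Per-netns sysctls under net.ieee802154.6lowpan; the data pointers are
 * wired up to the fqdir thresholds and timeout at registration time.
 */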
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "6lowpanfrag_time",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	table[0].data	= &ieee802154_lowpan->fqdir->high_thresh;
	table[0].extra1	= &ieee802154_lowpan->fqdir->low_thresh;
	table[1].data	= &ieee802154_lowpan->fqdir->low_thresh;
	table[1].extra2	= &ieee802154_lowpan->fqdir->high_thresh;
	table[2].data	= &ieee802154_lowpan->fqdir->timeout;

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}
static struct ctl_table_header *lowpan_ctl_header;
static int __init lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}
static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif
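
/* Per-netns init: allocate the fqdir, seed it with the IPv6 reassembly
 * defaults and register the namespace sysctls.
 */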
static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	int res;

	res = fqdir_init(&ieee802154_lowpan->fqdir, &lowpan_frags, net);
	if (res < 0)
		return res;

	ieee802154_lowpan->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->fqdir->timeout = IPV6_FRAG_TIMEOUT;

	res = lowpan_frags_ns_sysctl_register(net);
	if (res < 0)
		fqdir_exit(ieee802154_lowpan->fqdir);
	return res;
}
static void __net_exit lowpan_frags_pre_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	fqdir_pre_exit(ieee802154_lowpan->fqdir);
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	fqdir_exit(ieee802154_lowpan->fqdir);
}
static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.pre_exit = lowpan_frags_pre_exit_net,
	.exit = lowpan_frags_exit_net,
};
static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}
static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key,
		      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}
static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_lowpan_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}
static const struct rhashtable_params lowpan_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= lowpan_key_hashfn,
	.obj_hashfn		= lowpan_obj_hashfn,
	.obj_cmpfn		= lowpan_obj_cmpfn,
	.automatic_shrinking	= true,
};
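
/* Module init: hook up the inet_frags callbacks, then register the
 * sysctls and the pernet operations; undone in lowpan_net_frag_exit().
 */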
int __init lowpan_net_frag_init(void)
{
	int ret;

	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	lowpan_frags.rhash_params = lowpan_rhash_params;
	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		goto out;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;
out:
	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
err_sysctl:
	inet_frags_fini(&lowpan_frags);
	return ret;
}
void lowpan_net_frag_exit(void)
{
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
	inet_frags_fini(&lowpan_frags);
}