/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
struct ip6frag_skb_cb {
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))
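/*
 * Each fragment's ECN codepoint is remembered as a single bit so that the
 * marks seen on all fragments can simply be OR-ed together in fq->ecn and
 * folded back into one codepoint (or rejected) at reassembly time:
 * bit 0 = Not-ECT, bit 1 = ECT(1), bit 2 = ECT(0), bit 3 = CE.
 */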
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}
static struct inet_frags ip6_frags;
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);
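/*
 * Fragment queues are looked up by the tuple (fragment id, source address,
 * destination address, user).  inet6_hash_frag() folds the two 128-bit
 * addresses with ipv6_addr_hash(), mixes them with the id and a periodically
 * regenerated random seed via jhash_3words(), and masks the result with
 * (INETFRAGS_HASHSZ - 1), which is valid because the hash table size is a
 * power of two.
 */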
/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 c;

	c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
			 (__force u32)id, rnd);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);
static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}
bool ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return	fq->id == arg->id &&
		fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_match);
void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
	fq->ecn = arg->ecn;
}
EXPORT_SYMBOL(ip6_frag_init);
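/*
 * Reassembly timeout handling: if a queue expires before all fragments have
 * arrived, the partial datagram is dropped and, as required by RFC 2460, an
 * ICMPv6 "fragment reassembly time exceeded" error is sent back to the
 * source -- but only if the first fragment (offset zero) was received.
 */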
void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
			   struct inet_frags *frags)
{
	struct net_device *dev = NULL;

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, frags);

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/*
	 * But use as source the device on which the LAST ARRIVED segment was
	 * received.  And do not use the fq->dev pointer directly, the device
	 * might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, frags);
}
EXPORT_SYMBOL(ip6_expire_frag_queue);
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ipv6.frags);

	ip6_expire_frag_queue(net, fq, &ip6_frags);
}
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src,
	const struct in6_addr *dst, u8 ecn)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;
	arg.ecn = ecn;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct frag_queue, q);
}
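/*
 * ip6_frag_queue() charges one fragment into its queue.  The byte offset and
 * end of the fragment within the original payload are derived from the
 * fragment header: the Fragment Offset field occupies the upper 13 bits of
 * frag_off and counts 8-octet units, so ntohs(fhdr->frag_off) & ~0x7 already
 * yields the byte offset, while htons(IP6_MF) tests the "more fragments"
 * bit.  For example, a frag_off of 0x0059 (host order) means the fragment
 * starts at byte 88 (0x58) and further fragments follow.
 */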
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);
	u8 ecn;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have a different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case.
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more of its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 */

	/* Check for overlap with the preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with the succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	add_frag_mem_limit(&fq->q, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = ip6_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	skb_dst_drop(skb);
	inet_frag_lru_move(&fq->q);
	return -1;

discard_fq:
	inet_frag_kill(&fq->q, &ip6_frags);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}
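/*
 * fq->q.meat accumulates the number of payload bytes queued so far, while
 * fq->q.len holds the total length learnt from the last fragment; reassembly
 * is attempted only when the two match and both the first and the last
 * fragment have been seen, i.e. there are no holes left in the datagram.
 */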
/*
 *	Check if this packet is complete.
 *
 *	Returns 1 when the reassembled datagram has been built in the head
 *	skb, and -1 on failure.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;
	int sum_truesize;
	u8 ecn;

	inet_frag_kill(&fq->q, &ip6_frags);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto out_fail;

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	/* We have to remove the fragment header from the datagram and to
	 * relocate the header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
	IP6CB(head)->nhoff = nhoff;
	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
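/*
 * Receive path entry point.  A fragment header whose offset bits and "more
 * fragments" bit are all zero (frag_off & htons(0xFFF9) == 0) describes an
 * "atomic" fragment: the packet is complete as it stands, so it is accepted
 * without going through reassembly; everything else is queued.
 */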
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);
	int evicted;

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
	if (evicted)
		IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_REASMFAILS, evicted);

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
		     ip6_frag_ecn(hdr));
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q, &ip6_frags);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}
static const struct inet6_protocol frag_protocol = {
	.handler	= ipv6_frag_rcv,
	.flags		= INET6_PROTO_NOPOLICY,
};
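/*
 * Tuning knobs exported under /proc/sys/net/ipv6/: ip6frag_high_thresh and
 * ip6frag_low_thresh bound the memory spent on queued fragments (per network
 * namespace), ip6frag_time is the reassembly timeout in seconds, and
 * ip6frag_secret_interval controls how often the hash secret is regenerated.
 */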
#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ipv6", table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
					     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}