2 * Internet Control Message Protocol (ICMPv6)
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on net/ipv4/icmp.c
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
21 * Andi Kleen : exception handling
22 * Andi Kleen add rate limits. never reply to a icmp.
23 * add more length checks and other fixes.
24 * yoshfuji : ensure to sent parameter problem for
26 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
28 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
29 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
32 #include <linux/module.h>
33 #include <linux/errno.h>
34 #include <linux/types.h>
35 #include <linux/socket.h>
37 #include <linux/kernel.h>
38 #include <linux/sockios.h>
39 #include <linux/net.h>
40 #include <linux/skbuff.h>
41 #include <linux/init.h>
42 #include <linux/netfilter.h>
43 #include <linux/slab.h>
46 #include <linux/sysctl.h>
49 #include <linux/inet.h>
50 #include <linux/netdevice.h>
51 #include <linux/icmpv6.h>
57 #include <net/ip6_checksum.h>
58 #include <net/protocol.h>
60 #include <net/rawv6.h>
61 #include <net/transp_v6.h>
62 #include <net/ip6_route.h>
63 #include <net/addrconf.h>
66 #include <net/inet_common.h>
68 #include <asm/uaccess.h>
69 #include <asm/system.h>
72 * The ICMP socket(s). This is the most convenient way to flow control
73 * our ICMP output as well as maintain a clean interface throughout
74 * all layers. All Socketless IP sends will soon be gone.
76 * On SMP we have one ICMP socket per-cpu.
/*
 * icmpv6_sk - return this netns's ICMPv6 control socket for the
 * current CPU (indexed via smp_processor_id()).
 * NOTE(review): extraction artifact — the function's braces (original
 * lines 79/81) are missing from this chunk; tokens kept verbatim.
 */
78 static inline struct sock
*icmpv6_sk(struct net
*net
)
80 return net
->ipv6
.icmp_sk
[smp_processor_id()];
83 static int icmpv6_rcv(struct sk_buff
*skb
);
/*
 * Registration record for IPPROTO_ICMPV6: incoming packets are handed
 * to icmpv6_rcv().  Flags INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL are
 * set (policy checking is done inside icmpv6_rcv() itself — see the
 * xfrm6_policy_check() call there).
 * NOTE(review): closing "};" elided in this extraction.
 */
85 static const struct inet6_protocol icmpv6_protocol
= {
86 .handler
= icmpv6_rcv
,
87 .flags
= INET6_PROTO_NOPOLICY
|INET6_PROTO_FINAL
,
/*
 * icmpv6_xmit_lock - acquire the per-net ICMPv6 transmit socket.
 * Uses spin_trylock() on sk_lock.slock instead of a blocking lock:
 * per the in-body comment, the output path (e.g. a SIT/ip6ip6 tunnel
 * signalling dst_link_failure()) can re-enter ICMPv6 while the lock
 * is already held, so blocking here could self-deadlock.
 * NOTE(review): interior lines (sk lookup, bail-out, return) are
 * elided in this extraction; tokens kept verbatim.
 */
90 static __inline__
struct sock
*icmpv6_xmit_lock(struct net
*net
)
97 if (unlikely(!spin_trylock(&sk
->sk_lock
.slock
))) {
98 /* This can happen if the output path (f.e. SIT or
99 * ip6ip6 tunnel) signals dst_link_failure() for an
100 * outgoing ICMP6 packet.
/*
 * icmpv6_xmit_unlock - release the socket lock taken (with BH
 * disabled) by icmpv6_xmit_lock().
 */
108 static __inline__
void icmpv6_xmit_unlock(struct sock
*sk
)
110 spin_unlock_bh(&sk
->sk_lock
.slock
);
114 * Slightly more convenient version of icmpv6_send.
/*
 * icmpv6_param_prob - convenience wrapper around icmpv6_send():
 * emit an ICMPv6 Parameter Problem error with @code and the problem
 * pointer @pos (offset of the offending byte in the packet).
 */
116 void icmpv6_param_prob(struct sk_buff
*skb
, u8 code
, int pos
)
118 icmpv6_send(skb
, ICMPV6_PARAMPROB
, code
, pos
);
123 * Figure out, may we reply to this packet with icmp error.
125 * We do not reply, if:
126 * - it was icmp error message.
127 * - it is truncated, so that it is known, that protocol is ICMPV6
128 * (i.e. in the middle of some exthdr)
/*
 * is_ineligible - decide whether @skb must NOT be answered with an
 * ICMPv6 error (see the rule comment above: the packet was itself an
 * ICMPv6 error, or is truncated inside an extension header).
 * Skips the extension-header chain with ipv6_skip_exthdr(); when the
 * payload is ICMPv6, reads its type via skb_header_pointer() and
 * checks the ICMPV6_INFOMSG_MASK bit (info messages stay eligible).
 * NOTE(review): several interior lines (declarations, the truncation
 * check, returns) are elided in this extraction; tokens kept verbatim.
 */
133 static int is_ineligible(struct sk_buff
*skb
)
135 int ptr
= (u8
*)(ipv6_hdr(skb
) + 1) - skb
->data
;
136 int len
= skb
->len
- ptr
;
137 __u8 nexthdr
= ipv6_hdr(skb
)->nexthdr
;
143 ptr
= ipv6_skip_exthdr(skb
, ptr
, &nexthdr
, &frag_off
);
146 if (nexthdr
== IPPROTO_ICMPV6
) {
148 tp
= skb_header_pointer(skb
,
149 ptr
+offsetof(struct icmp6hdr
, icmp6_type
),
150 sizeof(_type
), &_type
);
152 !(*tp
& ICMPV6_INFOMSG_MASK
))
/*
 * icmpv6_xrlim_allow - rate-limit decision for an outgoing ICMPv6
 * message.  Informational messages and PKT_TOOBIG are never limited;
 * otherwise a route lookup is performed and inet_peer_xrlim_allow()
 * is consulted with the sysctl timeout scaled by destination prefix
 * length (wider prefixes are granted more bandwidth).
 * NOTE(review): interior lines (early returns, dst error check,
 * dst_release, final return) are elided in this extraction.
 */
159 * Check the ICMP output rate limit
161 static inline bool icmpv6_xrlim_allow(struct sock
*sk
, u8 type
,
164 struct dst_entry
*dst
;
165 struct net
*net
= sock_net(sk
);
168 /* Informational messages are not limited. */
169 if (type
& ICMPV6_INFOMSG_MASK
)
172 /* Do not limit pmtu discovery, it would break it. */
173 if (type
== ICMPV6_PKT_TOOBIG
)
177 * Look up the output route.
178 * XXX: perhaps the expire for routing entries cloned by
179 * this lookup should be more aggressive (not longer than timeout).
181 dst
= ip6_route_output(net
, sk
, fl6
);
183 IP6_INC_STATS(net
, ip6_dst_idev(dst
),
184 IPSTATS_MIB_OUTNOROUTES
);
185 } else if (dst
->dev
&& (dst
->dev
->flags
&IFF_LOOPBACK
)) {
188 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
189 int tmo
= net
->ipv6
.sysctl
.icmpv6_time
;
191 /* Give more bandwidth to wider prefixes. */
192 if (rt
->rt6i_dst
.plen
< 128)
193 tmo
>>= ((128 - rt
->rt6i_dst
.plen
)>>5);
196 rt6_bind_peer(rt
, 1);
197 res
= inet_peer_xrlim_allow(rt
->rt6i_peer
, tmo
);
/*
 * opt_unrec - return nonzero when the Parameter Problem is caused by
 * an unrecognized IPv6 option whose Option Type has its two
 * highest-order bits set to 10 (binary), i.e. (optval & 0xC0)==0x80 —
 * the class of options for which RFC 2460 mandates an ICMP error even
 * to multicast destinations.
 * NOTE(review): the _optval declaration and NULL-check lines are
 * elided in this extraction.
 */
204 * an inline helper for the "simple" if statement below
205 * checks if parameter problem report is caused by an
206 * unrecognized IPv6 option that has the Option Type
207 * highest-order two bits set to 10
210 static __inline__
int opt_unrec(struct sk_buff
*skb
, __u32 offset
)
214 offset
+= skb_network_offset(skb
);
215 op
= skb_header_pointer(skb
, offset
, sizeof(_optval
), &_optval
);
218 return (*op
& 0xC0) == 0x80;
/*
 * icmpv6_push_pending_frames - finalize and transmit queued ICMPv6
 * data: copy the prepared header @thdr into the head skb of the
 * socket's write queue, compute the ICMPv6 checksum (fast path when
 * the queue holds a single skb; otherwise fold every skb->csum with
 * csum_add() while walking the queue), then push the frames out via
 * ip6_push_pending_frames().
 * NOTE(review): declarations (skb, tmp_csum), the else branch header
 * and trailing csum_ipv6_magic() arguments are elided in this
 * extraction; tokens kept verbatim.
 */
221 static int icmpv6_push_pending_frames(struct sock
*sk
, struct flowi6
*fl6
, struct icmp6hdr
*thdr
, int len
)
224 struct icmp6hdr
*icmp6h
;
227 if ((skb
= skb_peek(&sk
->sk_write_queue
)) == NULL
)
230 icmp6h
= icmp6_hdr(skb
);
231 memcpy(icmp6h
, thdr
, sizeof(struct icmp6hdr
));
232 icmp6h
->icmp6_cksum
= 0;
234 if (skb_queue_len(&sk
->sk_write_queue
) == 1) {
235 skb
->csum
= csum_partial(icmp6h
,
236 sizeof(struct icmp6hdr
), skb
->csum
);
237 icmp6h
->icmp6_cksum
= csum_ipv6_magic(&fl6
->saddr
,
239 len
, fl6
->flowi6_proto
,
244 skb_queue_walk(&sk
->sk_write_queue
, skb
) {
245 tmp_csum
= csum_add(tmp_csum
, skb
->csum
);
248 tmp_csum
= csum_partial(icmp6h
,
249 sizeof(struct icmp6hdr
), tmp_csum
);
250 icmp6h
->icmp6_cksum
= csum_ipv6_magic(&fl6
->saddr
,
252 len
, fl6
->flowi6_proto
,
255 ip6_push_pending_frames(sk
);
/*
 * icmpv6_getfrag - ip6_append_data() fragment callback.  Copies @len
 * bytes (with checksum) from the original packet held in the
 * icmpv6_msg cookie into @to, folds the partial checksum into
 * skb->csum, and for error messages (type without
 * ICMPV6_INFOMSG_MASK) attaches the original skb's conntrack entry
 * via nf_ct_attach().
 * NOTE(review): the csum declaration and return line are elided in
 * this extraction.
 */
266 static int icmpv6_getfrag(void *from
, char *to
, int offset
, int len
, int odd
, struct sk_buff
*skb
)
268 struct icmpv6_msg
*msg
= (struct icmpv6_msg
*) from
;
269 struct sk_buff
*org_skb
= msg
->skb
;
272 csum
= skb_copy_and_csum_bits(org_skb
, msg
->offset
+ offset
,
274 skb
->csum
= csum_block_add(skb
->csum
, csum
, odd
);
275 if (!(msg
->type
& ICMPV6_INFOMSG_MASK
))
276 nf_ct_attach(skb
, org_skb
);
/*
 * mip6_addr_swap - Mobile IPv6 support: when the packet carries a
 * Home Address destination option (IPV6_TLV_HAO), substitute the home
 * address from the option into the IPv6 source address so the ICMP
 * error is generated against the mobile node's home address.  Compiled
 * to an empty stub when MIP6 is not configured (#else arm below).
 * NOTE(review): the dsthao guard, ipv6_addr copy/restore lines and
 * the #else/#endif markers are elided in this extraction.
 */
280 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
281 static void mip6_addr_swap(struct sk_buff
*skb
)
283 struct ipv6hdr
*iph
= ipv6_hdr(skb
);
284 struct inet6_skb_parm
*opt
= IP6CB(skb
);
285 struct ipv6_destopt_hao
*hao
;
290 off
= ipv6_find_tlv(skb
, opt
->dsthao
, IPV6_TLV_HAO
);
291 if (likely(off
>= 0)) {
292 hao
= (struct ipv6_destopt_hao
*)
293 (skb_network_header(skb
) + off
);
295 iph
->saddr
= hao
->addr
;
301 static inline void mip6_addr_swap(struct sk_buff
*skb
) {}
/*
 * icmpv6_route_lookup - find the output route for an ICMPv6 error.
 * Refuses anycast sources (RTF_ANYCAST -> -EINVAL), then runs the dst
 * through xfrm_lookup(); on -EPERM (IPsec policy blocked the plain
 * lookup) it re-derives a reverse flow from the offending packet with
 * xfrm_decode_session_reverse() and retries with XFRM_LOOKUP_ICMP.
 * Returns a dst_entry or an ERR_PTR.
 * NOTE(review): the fl2 declaration, error checks after each lookup
 * and the relookup_failed label body are elided in this extraction.
 */
304 static struct dst_entry
*icmpv6_route_lookup(struct net
*net
, struct sk_buff
*skb
,
305 struct sock
*sk
, struct flowi6
*fl6
)
307 struct dst_entry
*dst
, *dst2
;
311 err
= ip6_dst_lookup(sk
, &dst
, fl6
);
316 * We won't send icmp if the destination is known
319 if (((struct rt6_info
*)dst
)->rt6i_flags
& RTF_ANYCAST
) {
320 LIMIT_NETDEBUG(KERN_DEBUG
"icmpv6_send: acast source\n");
322 return ERR_PTR(-EINVAL
);
325 /* No need to clone since we're just using its address. */
328 dst
= xfrm_lookup(net
, dst
, flowi6_to_flowi(fl6
), sk
, 0);
333 if (PTR_ERR(dst
) == -EPERM
)
339 err
= xfrm_decode_session_reverse(skb
, flowi6_to_flowi(&fl2
), AF_INET6
);
341 goto relookup_failed
;
343 err
= ip6_dst_lookup(sk
, &dst2
, &fl2
);
345 goto relookup_failed
;
347 dst2
= xfrm_lookup(net
, dst2
, flowi6_to_flowi(&fl2
), sk
, XFRM_LOOKUP_ICMP
);
357 goto relookup_failed
;
/*
 * icmpv6_send - generate an ICMPv6 error (@type/@code/@info) in
 * response to the packet in error @skb.  Enforces the RFC 2463 §2.4
 * origination rules visible below: header sanity, no errors to
 * multicast/non-PACKET_HOST destinations (except PKT_TOOBIG and the
 * unrecognized-option Parameter Problem case via opt_unrec()), no
 * errors for unspecified/multicast sources, never reply to an ICMP
 * error (is_ineligible()), and rate limiting via icmpv6_xrlim_allow().
 * Builds the flowi6, routes via icmpv6_route_lookup(), appends at most
 * IPV6_MIN_MTU - headers bytes of the offending packet with
 * ip6_append_data()/icmpv6_getfrag, and finalizes with
 * icmpv6_push_pending_frames().  On append failure the OUTERRORS MIB
 * counter is bumped and pending frames are flushed.
 * NOTE(review): many interior lines (local declarations, early-return
 * gotos, saddr selection, labels, closing braces) are elided in this
 * extraction; tokens kept verbatim.
 */
367 * Send an ICMP message in response to a packet in error
369 void icmpv6_send(struct sk_buff
*skb
, u8 type
, u8 code
, __u32 info
)
371 struct net
*net
= dev_net(skb
->dev
);
372 struct inet6_dev
*idev
= NULL
;
373 struct ipv6hdr
*hdr
= ipv6_hdr(skb
);
375 struct ipv6_pinfo
*np
;
376 const struct in6_addr
*saddr
= NULL
;
377 struct dst_entry
*dst
;
378 struct icmp6hdr tmp_hdr
;
380 struct icmpv6_msg msg
;
387 if ((u8
*)hdr
< skb
->head
||
388 (skb
->network_header
+ sizeof(*hdr
)) > skb
->tail
)
392 * Make sure we respect the rules
393 * i.e. RFC 1885 2.4(e)
394 * Rule (e.1) is enforced by not using icmpv6_send
395 * in any code that processes icmp errors.
397 addr_type
= ipv6_addr_type(&hdr
->daddr
);
399 if (ipv6_chk_addr(net
, &hdr
->daddr
, skb
->dev
, 0))
406 if ((addr_type
& IPV6_ADDR_MULTICAST
|| skb
->pkt_type
!= PACKET_HOST
)) {
407 if (type
!= ICMPV6_PKT_TOOBIG
&&
408 !(type
== ICMPV6_PARAMPROB
&&
409 code
== ICMPV6_UNK_OPTION
&&
410 (opt_unrec(skb
, info
))))
416 addr_type
= ipv6_addr_type(&hdr
->saddr
);
422 if (addr_type
& IPV6_ADDR_LINKLOCAL
)
423 iif
= skb
->dev
->ifindex
;
426 * Must not send error if the source does not uniquely
427 * identify a single node (RFC2463 Section 2.4).
428 * We check unspecified / multicast addresses here,
429 * and anycast addresses will be checked later.
431 if ((addr_type
== IPV6_ADDR_ANY
) || (addr_type
& IPV6_ADDR_MULTICAST
)) {
432 LIMIT_NETDEBUG(KERN_DEBUG
"icmpv6_send: addr_any/mcast source\n");
437 * Never answer to a ICMP packet.
439 if (is_ineligible(skb
)) {
440 LIMIT_NETDEBUG(KERN_DEBUG
"icmpv6_send: no reply to icmp error\n");
446 memset(&fl6
, 0, sizeof(fl6
));
447 fl6
.flowi6_proto
= IPPROTO_ICMPV6
;
448 fl6
.daddr
= hdr
->saddr
;
451 fl6
.flowi6_oif
= iif
;
452 fl6
.fl6_icmp_type
= type
;
453 fl6
.fl6_icmp_code
= code
;
454 security_skb_classify_flow(skb
, flowi6_to_flowi(&fl6
));
456 sk
= icmpv6_xmit_lock(net
);
461 if (!icmpv6_xrlim_allow(sk
, type
, &fl6
))
464 tmp_hdr
.icmp6_type
= type
;
465 tmp_hdr
.icmp6_code
= code
;
466 tmp_hdr
.icmp6_cksum
= 0;
467 tmp_hdr
.icmp6_pointer
= htonl(info
);
469 if (!fl6
.flowi6_oif
&& ipv6_addr_is_multicast(&fl6
.daddr
))
470 fl6
.flowi6_oif
= np
->mcast_oif
;
471 else if (!fl6
.flowi6_oif
)
472 fl6
.flowi6_oif
= np
->ucast_oif
;
474 dst
= icmpv6_route_lookup(net
, skb
, sk
, &fl6
);
478 if (ipv6_addr_is_multicast(&fl6
.daddr
))
479 hlimit
= np
->mcast_hops
;
481 hlimit
= np
->hop_limit
;
483 hlimit
= ip6_dst_hoplimit(dst
);
486 msg
.offset
= skb_network_offset(skb
);
489 len
= skb
->len
- msg
.offset
;
490 len
= min_t(unsigned int, len
, IPV6_MIN_MTU
- sizeof(struct ipv6hdr
) -sizeof(struct icmp6hdr
));
492 LIMIT_NETDEBUG(KERN_DEBUG
"icmp: len problem\n");
493 goto out_dst_release
;
497 idev
= __in6_dev_get(skb
->dev
);
499 err
= ip6_append_data(sk
, icmpv6_getfrag
, &msg
,
500 len
+ sizeof(struct icmp6hdr
),
501 sizeof(struct icmp6hdr
), hlimit
,
502 np
->tclass
, NULL
, &fl6
, (struct rt6_info
*)dst
,
503 MSG_DONTWAIT
, np
->dontfrag
);
505 ICMP6_INC_STATS_BH(net
, idev
, ICMP6_MIB_OUTERRORS
);
506 ip6_flush_pending_frames(sk
);
508 err
= icmpv6_push_pending_frames(sk
, &fl6
, &tmp_hdr
,
509 len
+ sizeof(struct icmp6hdr
));
515 icmpv6_xmit_unlock(sk
);
517 EXPORT_SYMBOL(icmpv6_send
);
/*
 * icmpv6_echo_reply - answer an Echo Request: only for unicast
 * destinations (ipv6_unicast_destination()); copies the request's
 * ICMPv6 header, rewrites the type to ICMPV6_ECHO_REPLY, builds a
 * flowi6 back to the sender, routes via ip6_dst_lookup()+xfrm_lookup(),
 * selects the hop limit (mcast_hops / hop_limit / dst hoplimit),
 * appends the echoed payload with ip6_append_data()/icmpv6_getfrag and
 * finalizes with icmpv6_push_pending_frames().  On append failure the
 * OUTERRORS MIB counter is bumped and pending frames are flushed.
 * NOTE(review): interior lines (declarations, error gotos, the
 * np->dontfrag argument, labels, closing brace) are elided in this
 * extraction; tokens kept verbatim.
 */
519 static void icmpv6_echo_reply(struct sk_buff
*skb
)
521 struct net
*net
= dev_net(skb
->dev
);
523 struct inet6_dev
*idev
;
524 struct ipv6_pinfo
*np
;
525 const struct in6_addr
*saddr
= NULL
;
526 struct icmp6hdr
*icmph
= icmp6_hdr(skb
);
527 struct icmp6hdr tmp_hdr
;
529 struct icmpv6_msg msg
;
530 struct dst_entry
*dst
;
534 saddr
= &ipv6_hdr(skb
)->daddr
;
536 if (!ipv6_unicast_destination(skb
))
539 memcpy(&tmp_hdr
, icmph
, sizeof(tmp_hdr
));
540 tmp_hdr
.icmp6_type
= ICMPV6_ECHO_REPLY
;
542 memset(&fl6
, 0, sizeof(fl6
));
543 fl6
.flowi6_proto
= IPPROTO_ICMPV6
;
544 fl6
.daddr
= ipv6_hdr(skb
)->saddr
;
547 fl6
.flowi6_oif
= skb
->dev
->ifindex
;
548 fl6
.fl6_icmp_type
= ICMPV6_ECHO_REPLY
;
549 security_skb_classify_flow(skb
, flowi6_to_flowi(&fl6
));
551 sk
= icmpv6_xmit_lock(net
);
556 if (!fl6
.flowi6_oif
&& ipv6_addr_is_multicast(&fl6
.daddr
))
557 fl6
.flowi6_oif
= np
->mcast_oif
;
558 else if (!fl6
.flowi6_oif
)
559 fl6
.flowi6_oif
= np
->ucast_oif
;
561 err
= ip6_dst_lookup(sk
, &dst
, &fl6
);
564 dst
= xfrm_lookup(net
, dst
, flowi6_to_flowi(&fl6
), sk
, 0);
568 if (ipv6_addr_is_multicast(&fl6
.daddr
))
569 hlimit
= np
->mcast_hops
;
571 hlimit
= np
->hop_limit
;
573 hlimit
= ip6_dst_hoplimit(dst
);
575 idev
= __in6_dev_get(skb
->dev
);
579 msg
.type
= ICMPV6_ECHO_REPLY
;
581 err
= ip6_append_data(sk
, icmpv6_getfrag
, &msg
, skb
->len
+ sizeof(struct icmp6hdr
),
582 sizeof(struct icmp6hdr
), hlimit
, np
->tclass
, NULL
, &fl6
,
583 (struct rt6_info
*)dst
, MSG_DONTWAIT
,
587 ICMP6_INC_STATS_BH(net
, idev
, ICMP6_MIB_OUTERRORS
);
588 ip6_flush_pending_frames(sk
);
590 err
= icmpv6_push_pending_frames(sk
, &fl6
, &tmp_hdr
,
591 skb
->len
+ sizeof(struct icmp6hdr
));
595 icmpv6_xmit_unlock(sk
);
/*
 * icmpv6_notify - deliver a received ICMPv6 error to the embedded
 * packet's upper-layer protocol.  Pulls the inner IPv6 header, skips
 * its extension headers to find the inner protocol and its offset,
 * verifies 8 bytes of the inner transport header are present, then
 * invokes the registered inet6_protocol err_handler (hashed lookup in
 * inet6_protos[] under RCU) and notifies raw sockets via
 * raw6_icmp_error().
 * NOTE(review): local declarations (nexthdr, inner_offset, hash,
 * frag_off), early-return error paths and the else branch line are
 * elided in this extraction; tokens kept verbatim.
 */
598 static void icmpv6_notify(struct sk_buff
*skb
, u8 type
, u8 code
, __be32 info
)
600 const struct inet6_protocol
*ipprot
;
606 if (!pskb_may_pull(skb
, sizeof(struct ipv6hdr
)))
609 nexthdr
= ((struct ipv6hdr
*)skb
->data
)->nexthdr
;
610 if (ipv6_ext_hdr(nexthdr
)) {
611 /* now skip over extension headers */
612 inner_offset
= ipv6_skip_exthdr(skb
, sizeof(struct ipv6hdr
),
613 &nexthdr
, &frag_off
);
617 inner_offset
= sizeof(struct ipv6hdr
);
620 /* Checkin header including 8 bytes of inner protocol header. */
621 if (!pskb_may_pull(skb
, inner_offset
+8))
624 /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
625 Without this we will not able f.e. to make source routed
627 Corresponding argument (opt) to notifiers is already added.
631 hash
= nexthdr
& (MAX_INET_PROTOS
- 1);
634 ipprot
= rcu_dereference(inet6_protos
[hash
]);
635 if (ipprot
&& ipprot
->err_handler
)
636 ipprot
->err_handler(skb
, NULL
, type
, code
, inner_offset
, info
);
639 raw6_icmp_error(skb
, nexthdr
, type
, code
, inner_offset
, info
);
/*
 * icmpv6_rcv - input handler for IPPROTO_ICMPV6 (registered via
 * icmpv6_protocol above).  Performs xfrm policy checks (forward and,
 * with the network header temporarily moved past the ICMP header,
 * reverse), validates the checksum (CHECKSUM_COMPLETE fast path or
 * __skb_checksum_complete()), bumps the INMSGS / per-type MIB
 * counters, then dispatches on icmp6_type: echo request -> reply,
 * PKT_TOOBIG -> rt6_pmtu_discovery() then falls through with
 * DEST_UNREACH/TIME_EXCEED/PARAMPROB to icmpv6_notify(); NDISC and
 * MLD types are handled elsewhere (cases listed here); unknown info
 * types are dropped, unknown error types still reach icmpv6_notify().
 * NOTE(review): many interior lines (gotos to discard paths, ndisc_rcv
 * call, kfree_skb, return statements, the csum failure label and the
 * INERRORS tail) are elided in this extraction; tokens kept verbatim.
 */
643 * Handle icmp messages
646 static int icmpv6_rcv(struct sk_buff
*skb
)
648 struct net_device
*dev
= skb
->dev
;
649 struct inet6_dev
*idev
= __in6_dev_get(dev
);
650 const struct in6_addr
*saddr
, *daddr
;
651 const struct ipv6hdr
*orig_hdr
;
652 struct icmp6hdr
*hdr
;
655 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
656 struct sec_path
*sp
= skb_sec_path(skb
);
659 if (!(sp
&& sp
->xvec
[sp
->len
- 1]->props
.flags
&
663 if (!pskb_may_pull(skb
, sizeof(*hdr
) + sizeof(*orig_hdr
)))
666 nh
= skb_network_offset(skb
);
667 skb_set_network_header(skb
, sizeof(*hdr
));
669 if (!xfrm6_policy_check_reverse(NULL
, XFRM_POLICY_IN
, skb
))
672 skb_set_network_header(skb
, nh
);
675 ICMP6_INC_STATS_BH(dev_net(dev
), idev
, ICMP6_MIB_INMSGS
);
677 saddr
= &ipv6_hdr(skb
)->saddr
;
678 daddr
= &ipv6_hdr(skb
)->daddr
;
680 /* Perform checksum. */
681 switch (skb
->ip_summed
) {
682 case CHECKSUM_COMPLETE
:
683 if (!csum_ipv6_magic(saddr
, daddr
, skb
->len
, IPPROTO_ICMPV6
,
688 skb
->csum
= ~csum_unfold(csum_ipv6_magic(saddr
, daddr
, skb
->len
,
690 if (__skb_checksum_complete(skb
)) {
691 LIMIT_NETDEBUG(KERN_DEBUG
"ICMPv6 checksum failed [%pI6 > %pI6]\n",
697 if (!pskb_pull(skb
, sizeof(*hdr
)))
700 hdr
= icmp6_hdr(skb
);
702 type
= hdr
->icmp6_type
;
704 ICMP6MSGIN_INC_STATS_BH(dev_net(dev
), idev
, type
);
707 case ICMPV6_ECHO_REQUEST
:
708 icmpv6_echo_reply(skb
);
711 case ICMPV6_ECHO_REPLY
:
712 /* we couldn't care less */
715 case ICMPV6_PKT_TOOBIG
:
716 /* BUGGG_FUTURE: if packet contains rthdr, we cannot update
717 standard destination cache. Seems, only "advanced"
718 destination cache will allow to solve this problem
721 if (!pskb_may_pull(skb
, sizeof(struct ipv6hdr
)))
723 hdr
= icmp6_hdr(skb
);
724 orig_hdr
= (struct ipv6hdr
*) (hdr
+ 1);
725 rt6_pmtu_discovery(&orig_hdr
->daddr
, &orig_hdr
->saddr
, dev
,
726 ntohl(hdr
->icmp6_mtu
));
729 * Drop through to notify
732 case ICMPV6_DEST_UNREACH
:
733 case ICMPV6_TIME_EXCEED
:
734 case ICMPV6_PARAMPROB
:
735 icmpv6_notify(skb
, type
, hdr
->icmp6_code
, hdr
->icmp6_mtu
);
738 case NDISC_ROUTER_SOLICITATION
:
739 case NDISC_ROUTER_ADVERTISEMENT
:
740 case NDISC_NEIGHBOUR_SOLICITATION
:
741 case NDISC_NEIGHBOUR_ADVERTISEMENT
:
746 case ICMPV6_MGM_QUERY
:
747 igmp6_event_query(skb
);
750 case ICMPV6_MGM_REPORT
:
751 igmp6_event_report(skb
);
754 case ICMPV6_MGM_REDUCTION
:
755 case ICMPV6_NI_QUERY
:
756 case ICMPV6_NI_REPLY
:
757 case ICMPV6_MLD2_REPORT
:
758 case ICMPV6_DHAAD_REQUEST
:
759 case ICMPV6_DHAAD_REPLY
:
760 case ICMPV6_MOBILE_PREFIX_SOL
:
761 case ICMPV6_MOBILE_PREFIX_ADV
:
765 LIMIT_NETDEBUG(KERN_DEBUG
"icmpv6: msg of unknown type\n");
768 if (type
& ICMPV6_INFOMSG_MASK
)
772 * error of unknown type.
773 * must pass to upper level
776 icmpv6_notify(skb
, type
, hdr
->icmp6_code
, hdr
->icmp6_mtu
);
783 ICMP6_INC_STATS_BH(dev_net(dev
), idev
, ICMP6_MIB_INERRORS
);
/*
 * icmpv6_flow_init - initialize a flowi6 for an ICMPv6 message of
 * @type on interface @oif: zero the structure, set proto/type/oif
 * (icmp_code is always 0 here) and classify the flow for LSM.
 * NOTE(review): the saddr/daddr assignment lines and the parameter
 * list tail (type, oif) are elided in this extraction.
 */
789 void icmpv6_flow_init(struct sock
*sk
, struct flowi6
*fl6
,
791 const struct in6_addr
*saddr
,
792 const struct in6_addr
*daddr
,
795 memset(fl6
, 0, sizeof(*fl6
));
798 fl6
->flowi6_proto
= IPPROTO_ICMPV6
;
799 fl6
->fl6_icmp_type
= type
;
800 fl6
->fl6_icmp_code
= 0;
801 fl6
->flowi6_oif
= oif
;
802 security_sk_classify_flow(sk
, flowi6_to_flowi(fl6
));
806 * Special lock-class for __icmpv6_sk:
808 static struct lock_class_key icmpv6_socket_sk_dst_lock_key
;
/*
 * icmpv6_sk_init - per-netns setup: allocate the icmp_sk array (one
 * slot per possible CPU), create a raw ICMPv6 control socket for each
 * CPU via inet_ctl_sock_create(), give each its own sk_dst_lock
 * lockdep class (see comment below on softirq use) and a send buffer
 * sized for two 64K ICMP packets.  The error path tears down the
 * sockets created so far and frees the array.
 * NOTE(review): loop-variable declarations, error-branch heads and
 * return statements are elided in this extraction; tokens kept
 * verbatim.
 */
810 static int __net_init
icmpv6_sk_init(struct net
*net
)
816 kzalloc(nr_cpu_ids
* sizeof(struct sock
*), GFP_KERNEL
);
817 if (net
->ipv6
.icmp_sk
== NULL
)
820 for_each_possible_cpu(i
) {
821 err
= inet_ctl_sock_create(&sk
, PF_INET6
,
822 SOCK_RAW
, IPPROTO_ICMPV6
, net
);
825 "Failed to initialize the ICMP6 control socket "
831 net
->ipv6
.icmp_sk
[i
] = sk
;
834 * Split off their lock-class, because sk->sk_dst_lock
835 * gets used from softirqs, which is safe for
836 * __icmpv6_sk (because those never get directly used
837 * via userspace syscalls), but unsafe for normal sockets.
839 lockdep_set_class(&sk
->sk_dst_lock
,
840 &icmpv6_socket_sk_dst_lock_key
);
842 /* Enough space for 2 64K ICMP packets, including
843 * sk_buff struct overhead.
845 sk
->sk_sndbuf
= 2 * SKB_TRUESIZE(64 * 1024);
850 for (j
= 0; j
< i
; j
++)
851 inet_ctl_sock_destroy(net
->ipv6
.icmp_sk
[j
]);
852 kfree(net
->ipv6
.icmp_sk
);
/*
 * icmpv6_sk_exit - per-netns teardown: destroy every per-CPU control
 * socket created by icmpv6_sk_init() and free the icmp_sk array.
 */
856 static void __net_exit
icmpv6_sk_exit(struct net
*net
)
860 for_each_possible_cpu(i
) {
861 inet_ctl_sock_destroy(net
->ipv6
.icmp_sk
[i
]);
863 kfree(net
->ipv6
.icmp_sk
);
/*
 * pernet_operations tying icmpv6_sk_init()/icmpv6_sk_exit() to
 * network-namespace creation and destruction.
 * NOTE(review): closing "};" elided in this extraction.
 */
866 static struct pernet_operations icmpv6_sk_ops
= {
867 .init
= icmpv6_sk_init
,
868 .exit
= icmpv6_sk_exit
,
/*
 * icmpv6_init - module init: register the per-netns subsystem, then
 * register the ICMPv6 protocol handler; on handler-registration
 * failure (the printk/unregister path below) the pernet subsystem is
 * rolled back.
 * NOTE(review): the err check, return statements and fail label are
 * elided in this extraction.
 */
871 int __init
icmpv6_init(void)
875 err
= register_pernet_subsys(&icmpv6_sk_ops
);
880 if (inet6_add_protocol(&icmpv6_protocol
, IPPROTO_ICMPV6
) < 0)
885 printk(KERN_ERR
"Failed to register ICMP6 protocol\n");
886 unregister_pernet_subsys(&icmpv6_sk_ops
);
/*
 * icmpv6_cleanup - undo icmpv6_init(): unregister the pernet
 * subsystem and remove the ICMPv6 protocol handler.
 */
890 void icmpv6_cleanup(void)
892 unregister_pernet_subsys(&icmpv6_sk_ops
);
893 inet6_del_protocol(&icmpv6_protocol
, IPPROTO_ICMPV6
);
/*
 * Table mapping ICMPV6_DEST_UNREACH codes to errno values and a
 * "fatal" flag, indexed by code in icmpv6_err_convert() below.
 * NOTE(review): most entries (and the member declarations of struct
 * icmp6_err) are elided in this extraction; only two entry comments
 * survive.
 */
897 static const struct icmp6_err
{
905 { /* ADM_PROHIBITED */
909 { /* Was NOT_NEIGHBOUR, now reserved */
/*
 * icmpv6_err_convert - translate an ICMPv6 @type/@code into an errno
 * (stored through @err) and return whether the error is fatal for the
 * connection.  DEST_UNREACH codes up to PORT_UNREACH index the
 * tab_unreach[] table above; the PKT_TOOBIG / PARAMPROB / TIME_EXCEED
 * arms' bodies and the default case are elided in this extraction.
 */
923 int icmpv6_err_convert(u8 type
, u8 code
, int *err
)
930 case ICMPV6_DEST_UNREACH
:
932 if (code
<= ICMPV6_PORT_UNREACH
) {
933 *err
= tab_unreach
[code
].err
;
934 fatal
= tab_unreach
[code
].fatal
;
938 case ICMPV6_PKT_TOOBIG
:
942 case ICMPV6_PARAMPROB
:
947 case ICMPV6_TIME_EXCEED
:
955 EXPORT_SYMBOL(icmpv6_err_convert
);
/*
 * sysctl template for net.ipv6.icmp: exposes "ratelimit" (the
 * icmpv6_time value consulted by icmpv6_xrlim_allow()) as a
 * millisecond-jiffies int via proc_dointvec_ms_jiffies.  The .data
 * pointer defaults to init_net and is re-pointed at each netns's own
 * field by ipv6_icmp_sysctl_init() (partially visible past this
 * chunk).
 * NOTE(review): .mode line, sentinel entry and closing "};" elided in
 * this extraction.
 */
958 ctl_table ipv6_icmp_table_template
[] = {
960 .procname
= "ratelimit",
961 .data
= &init_net
.ipv6
.sysctl
.icmpv6_time
,
962 .maxlen
= sizeof(int),
964 .proc_handler
= proc_dointvec_ms_jiffies
,
969 struct ctl_table
* __net_init
ipv6_icmp_sysctl_init(struct net
*net
)
971 struct ctl_table
*table
;
973 table
= kmemdup(ipv6_icmp_table_template
,
974 sizeof(ipv6_icmp_table_template
),
978 table
[0].data
= &net
->ipv6
.sysctl
.icmpv6_time
;