2 * Internet Control Message Protocol (ICMPv6)
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on net/ipv4/icmp.c
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
21 * Andi Kleen : exception handling
22 * Andi Kleen add rate limits. never reply to a icmp.
23 * add more length checks and other fixes.
24 * yoshfuji : ensure to send parameter problem for
26 * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit.
28 * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support
29 * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data
32 #define pr_fmt(fmt) "IPv6: " fmt
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <linux/types.h>
37 #include <linux/socket.h>
39 #include <linux/kernel.h>
40 #include <linux/sockios.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/init.h>
44 #include <linux/netfilter.h>
45 #include <linux/slab.h>
48 #include <linux/sysctl.h>
51 #include <linux/inet.h>
52 #include <linux/netdevice.h>
53 #include <linux/icmpv6.h>
59 #include <net/ip6_checksum.h>
61 #include <net/protocol.h>
63 #include <net/rawv6.h>
64 #include <net/transp_v6.h>
65 #include <net/ip6_route.h>
66 #include <net/addrconf.h>
69 #include <net/inet_common.h>
70 #include <net/dsfield.h>
71 #include <net/l3mdev.h>
73 #include <linux/uaccess.h>
76 * The ICMP socket(s). This is the most convenient way to flow control
77 * our ICMP output as well as maintain a clean interface throughout
78 * all layers. All Socketless IP sends will soon be gone.
80 * On SMP we have one ICMP socket per-cpu.
82 static inline struct sock
*icmpv6_sk(struct net
*net
)
84 return net
->ipv6
.icmp_sk
[smp_processor_id()];
/*
 * Error handler for the ICMPv6 protocol itself: called (via
 * icmpv6_protocol.err_handler) when an ICMP error arrives that refers
 * back to an ICMPv6 packet we sent earlier.
 *
 * @skb:    the received error packet
 * @opt:    IPv6 per-packet control block (unused here)
 * @type:   outer ICMPv6 error type
 * @code:   outer ICMPv6 error code
 * @offset: offset of the embedded original ICMPv6 header in skb->data
 * @info:   type-specific word (e.g. the MTU for ICMPV6_PKT_TOOBIG)
 */
static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
	struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
	struct net *net = dev_net(skb->dev);

	if (type == ICMPV6_PKT_TOOBIG)
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	else if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));

	/*
	 * Only real errors (not informational messages) about one of our
	 * echo requests are forwarded to the ping socket layer.
	 */
	if (!(type & ICMPV6_INFOMSG_MASK))
		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
			ping_err(skb, offset, ntohl(info));
}
static int icmpv6_rcv(struct sk_buff *skb);

/*
 * Protocol descriptor registered for IPPROTO_ICMPV6 (see icmpv6_init()):
 * receive and error entry points for the IPv6 input path.
 */
static const struct inet6_protocol icmpv6_protocol = {
	.handler	=	icmpv6_rcv,
	.err_handler	=	icmpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
113 /* Called with BH disabled */
114 static __inline__
struct sock
*icmpv6_xmit_lock(struct net
*net
)
119 if (unlikely(!spin_trylock(&sk
->sk_lock
.slock
))) {
120 /* This can happen if the output path (f.e. SIT or
121 * ip6ip6 tunnel) signals dst_link_failure() for an
122 * outgoing ICMP6 packet.
/*
 * Release the per-CPU ICMPv6 socket lock taken (via spin_trylock on the
 * same sk_lock.slock) by icmpv6_xmit_lock().
 */
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
	spin_unlock(&sk->sk_lock.slock);
}
135 * Figure out whether we may reply to this packet with an ICMP error.
137 * We do not reply, if:
138 * - it was icmp error message.
139 * - it is truncated, so that it is known, that protocol is ICMPV6
140 * (i.e. in the middle of some exthdr)
145 static bool is_ineligible(const struct sk_buff
*skb
)
147 int ptr
= (u8
*)(ipv6_hdr(skb
) + 1) - skb
->data
;
148 int len
= skb
->len
- ptr
;
149 __u8 nexthdr
= ipv6_hdr(skb
)->nexthdr
;
155 ptr
= ipv6_skip_exthdr(skb
, ptr
, &nexthdr
, &frag_off
);
158 if (nexthdr
== IPPROTO_ICMPV6
) {
160 tp
= skb_header_pointer(skb
,
161 ptr
+offsetof(struct icmp6hdr
, icmp6_type
),
162 sizeof(_type
), &_type
);
163 if (!tp
|| !(*tp
& ICMPV6_INFOMSG_MASK
))
169 static bool icmpv6_mask_allow(int type
)
171 /* Informational messages are not limited. */
172 if (type
& ICMPV6_INFOMSG_MASK
)
175 /* Do not limit pmtu discovery, it would break it. */
176 if (type
== ICMPV6_PKT_TOOBIG
)
182 static bool icmpv6_global_allow(int type
)
184 if (icmpv6_mask_allow(type
))
187 if (icmp_global_allow())
194 * Check the ICMP output rate limit
196 static bool icmpv6_xrlim_allow(struct sock
*sk
, u8 type
,
199 struct net
*net
= sock_net(sk
);
200 struct dst_entry
*dst
;
203 if (icmpv6_mask_allow(type
))
207 * Look up the output route.
208 * XXX: perhaps the expire for routing entries cloned by
209 * this lookup should be more aggressive (not longer than timeout).
211 dst
= ip6_route_output(net
, sk
, fl6
);
213 IP6_INC_STATS(net
, ip6_dst_idev(dst
),
214 IPSTATS_MIB_OUTNOROUTES
);
215 } else if (dst
->dev
&& (dst
->dev
->flags
&IFF_LOOPBACK
)) {
218 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
219 int tmo
= net
->ipv6
.sysctl
.icmpv6_time
;
220 struct inet_peer
*peer
;
222 /* Give more bandwidth to wider prefixes. */
223 if (rt
->rt6i_dst
.plen
< 128)
224 tmo
>>= ((128 - rt
->rt6i_dst
.plen
)>>5);
226 peer
= inet_getpeer_v6(net
->ipv6
.peers
, &fl6
->daddr
, 1);
227 res
= inet_peer_xrlim_allow(peer
, tmo
);
236 * an inline helper for the "simple" if statement below
237 * checks if parameter problem report is caused by an
238 * unrecognized IPv6 option that has the Option Type
239 * highest-order two bits set to 10
242 static bool opt_unrec(struct sk_buff
*skb
, __u32 offset
)
246 offset
+= skb_network_offset(skb
);
247 op
= skb_header_pointer(skb
, offset
, sizeof(_optval
), &_optval
);
250 return (*op
& 0xC0) == 0x80;
253 int icmpv6_push_pending_frames(struct sock
*sk
, struct flowi6
*fl6
,
254 struct icmp6hdr
*thdr
, int len
)
257 struct icmp6hdr
*icmp6h
;
260 skb
= skb_peek(&sk
->sk_write_queue
);
264 icmp6h
= icmp6_hdr(skb
);
265 memcpy(icmp6h
, thdr
, sizeof(struct icmp6hdr
));
266 icmp6h
->icmp6_cksum
= 0;
268 if (skb_queue_len(&sk
->sk_write_queue
) == 1) {
269 skb
->csum
= csum_partial(icmp6h
,
270 sizeof(struct icmp6hdr
), skb
->csum
);
271 icmp6h
->icmp6_cksum
= csum_ipv6_magic(&fl6
->saddr
,
273 len
, fl6
->flowi6_proto
,
278 skb_queue_walk(&sk
->sk_write_queue
, skb
) {
279 tmp_csum
= csum_add(tmp_csum
, skb
->csum
);
282 tmp_csum
= csum_partial(icmp6h
,
283 sizeof(struct icmp6hdr
), tmp_csum
);
284 icmp6h
->icmp6_cksum
= csum_ipv6_magic(&fl6
->saddr
,
286 len
, fl6
->flowi6_proto
,
289 ip6_push_pending_frames(sk
);
300 static int icmpv6_getfrag(void *from
, char *to
, int offset
, int len
, int odd
, struct sk_buff
*skb
)
302 struct icmpv6_msg
*msg
= (struct icmpv6_msg
*) from
;
303 struct sk_buff
*org_skb
= msg
->skb
;
306 csum
= skb_copy_and_csum_bits(org_skb
, msg
->offset
+ offset
,
308 skb
->csum
= csum_block_add(skb
->csum
, csum
, odd
);
309 if (!(msg
->type
& ICMPV6_INFOMSG_MASK
))
310 nf_ct_attach(skb
, org_skb
);
314 #if IS_ENABLED(CONFIG_IPV6_MIP6)
315 static void mip6_addr_swap(struct sk_buff
*skb
)
317 struct ipv6hdr
*iph
= ipv6_hdr(skb
);
318 struct inet6_skb_parm
*opt
= IP6CB(skb
);
319 struct ipv6_destopt_hao
*hao
;
324 off
= ipv6_find_tlv(skb
, opt
->dsthao
, IPV6_TLV_HAO
);
325 if (likely(off
>= 0)) {
326 hao
= (struct ipv6_destopt_hao
*)
327 (skb_network_header(skb
) + off
);
329 iph
->saddr
= hao
->addr
;
335 static inline void mip6_addr_swap(struct sk_buff
*skb
) {}
338 static struct dst_entry
*icmpv6_route_lookup(struct net
*net
,
343 struct dst_entry
*dst
, *dst2
;
347 err
= ip6_dst_lookup(net
, sk
, &dst
, fl6
);
352 * We won't send icmp if the destination is known
355 if (ipv6_anycast_destination(dst
, &fl6
->daddr
)) {
356 net_dbg_ratelimited("icmp6_send: acast source\n");
358 return ERR_PTR(-EINVAL
);
361 /* No need to clone since we're just using its address. */
364 dst
= xfrm_lookup(net
, dst
, flowi6_to_flowi(fl6
), sk
, 0);
369 if (PTR_ERR(dst
) == -EPERM
)
375 err
= xfrm_decode_session_reverse(skb
, flowi6_to_flowi(&fl2
), AF_INET6
);
377 goto relookup_failed
;
379 err
= ip6_dst_lookup(net
, sk
, &dst2
, &fl2
);
381 goto relookup_failed
;
383 dst2
= xfrm_lookup(net
, dst2
, flowi6_to_flowi(&fl2
), sk
, XFRM_LOOKUP_ICMP
);
393 goto relookup_failed
;
403 * Send an ICMP message in response to a packet in error
405 static void icmp6_send(struct sk_buff
*skb
, u8 type
, u8 code
, __u32 info
,
406 const struct in6_addr
*force_saddr
)
408 struct net
*net
= dev_net(skb
->dev
);
409 struct inet6_dev
*idev
= NULL
;
410 struct ipv6hdr
*hdr
= ipv6_hdr(skb
);
412 struct ipv6_pinfo
*np
;
413 const struct in6_addr
*saddr
= NULL
;
414 struct dst_entry
*dst
;
415 struct icmp6hdr tmp_hdr
;
417 struct icmpv6_msg msg
;
418 struct sockcm_cookie sockc_unused
= {0};
419 struct ipcm6_cookie ipc6
;
424 u32 mark
= IP6_REPLY_MARK(net
, skb
->mark
);
426 if ((u8
*)hdr
< skb
->head
||
427 (skb_network_header(skb
) + sizeof(*hdr
)) > skb_tail_pointer(skb
))
431 * Make sure we respect the rules
432 * i.e. RFC 1885 2.4(e)
433 * Rule (e.1) is enforced by not using icmp6_send
434 * in any code that processes icmp errors.
436 addr_type
= ipv6_addr_type(&hdr
->daddr
);
438 if (ipv6_chk_addr(net
, &hdr
->daddr
, skb
->dev
, 0) ||
439 ipv6_chk_acast_addr_src(net
, skb
->dev
, &hdr
->daddr
))
446 if (addr_type
& IPV6_ADDR_MULTICAST
|| skb
->pkt_type
!= PACKET_HOST
) {
447 if (type
!= ICMPV6_PKT_TOOBIG
&&
448 !(type
== ICMPV6_PARAMPROB
&&
449 code
== ICMPV6_UNK_OPTION
&&
450 (opt_unrec(skb
, info
))))
456 addr_type
= ipv6_addr_type(&hdr
->saddr
);
462 if (__ipv6_addr_needs_scope_id(addr_type
))
463 iif
= skb
->dev
->ifindex
;
466 iif
= l3mdev_master_ifindex(dst
? dst
->dev
: skb
->dev
);
470 * Must not send error if the source does not uniquely
471 * identify a single node (RFC2463 Section 2.4).
472 * We check unspecified / multicast addresses here,
473 * and anycast addresses will be checked later.
475 if ((addr_type
== IPV6_ADDR_ANY
) || (addr_type
& IPV6_ADDR_MULTICAST
)) {
476 net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
477 &hdr
->saddr
, &hdr
->daddr
);
482 * Never answer to a ICMP packet.
484 if (is_ineligible(skb
)) {
485 net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
486 &hdr
->saddr
, &hdr
->daddr
);
490 /* Needed by both icmp_global_allow and icmpv6_xmit_lock */
493 /* Check global sysctl_icmp_msgs_per_sec ratelimit */
494 if (!(skb
->dev
->flags
&IFF_LOOPBACK
) && !icmpv6_global_allow(type
))
499 memset(&fl6
, 0, sizeof(fl6
));
500 fl6
.flowi6_proto
= IPPROTO_ICMPV6
;
501 fl6
.daddr
= hdr
->saddr
;
506 fl6
.flowi6_mark
= mark
;
507 fl6
.flowi6_oif
= iif
;
508 fl6
.fl6_icmp_type
= type
;
509 fl6
.fl6_icmp_code
= code
;
510 fl6
.flowi6_uid
= sock_net_uid(net
, NULL
);
511 security_skb_classify_flow(skb
, flowi6_to_flowi(&fl6
));
513 sk
= icmpv6_xmit_lock(net
);
520 if (!icmpv6_xrlim_allow(sk
, type
, &fl6
))
523 tmp_hdr
.icmp6_type
= type
;
524 tmp_hdr
.icmp6_code
= code
;
525 tmp_hdr
.icmp6_cksum
= 0;
526 tmp_hdr
.icmp6_pointer
= htonl(info
);
528 if (!fl6
.flowi6_oif
&& ipv6_addr_is_multicast(&fl6
.daddr
))
529 fl6
.flowi6_oif
= np
->mcast_oif
;
530 else if (!fl6
.flowi6_oif
)
531 fl6
.flowi6_oif
= np
->ucast_oif
;
533 ipc6
.tclass
= np
->tclass
;
534 fl6
.flowlabel
= ip6_make_flowinfo(ipc6
.tclass
, fl6
.flowlabel
);
536 dst
= icmpv6_route_lookup(net
, skb
, sk
, &fl6
);
540 ipc6
.hlimit
= ip6_sk_dst_hoplimit(np
, &fl6
, dst
);
541 ipc6
.dontfrag
= np
->dontfrag
;
545 msg
.offset
= skb_network_offset(skb
);
548 len
= skb
->len
- msg
.offset
;
549 len
= min_t(unsigned int, len
, IPV6_MIN_MTU
- sizeof(struct ipv6hdr
) - sizeof(struct icmp6hdr
));
551 net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
552 &hdr
->saddr
, &hdr
->daddr
);
553 goto out_dst_release
;
557 idev
= __in6_dev_get(skb
->dev
);
559 err
= ip6_append_data(sk
, icmpv6_getfrag
, &msg
,
560 len
+ sizeof(struct icmp6hdr
),
561 sizeof(struct icmp6hdr
),
562 &ipc6
, &fl6
, (struct rt6_info
*)dst
,
563 MSG_DONTWAIT
, &sockc_unused
);
565 ICMP6_INC_STATS(net
, idev
, ICMP6_MIB_OUTERRORS
);
566 ip6_flush_pending_frames(sk
);
568 err
= icmpv6_push_pending_frames(sk
, &fl6
, &tmp_hdr
,
569 len
+ sizeof(struct icmp6hdr
));
575 icmpv6_xmit_unlock(sk
);
580 /* Slightly more convenient version of icmp6_send.
582 void icmpv6_param_prob(struct sk_buff
*skb
, u8 code
, int pos
)
584 icmp6_send(skb
, ICMPV6_PARAMPROB
, code
, pos
, NULL
);
588 /* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
589 * if sufficient data bytes are available
590 * @nhs is the size of the tunnel header(s) :
591 * Either an IPv4 header for SIT encap
592 * an IPv4 header + GRE header for GRE encap
594 int ip6_err_gen_icmpv6_unreach(struct sk_buff
*skb
, int nhs
, int type
,
595 unsigned int data_len
)
597 struct in6_addr temp_saddr
;
599 struct sk_buff
*skb2
;
602 if (!pskb_may_pull(skb
, nhs
+ sizeof(struct ipv6hdr
) + 8))
605 /* RFC 4884 (partial) support for ICMP extensions */
606 if (data_len
< 128 || (data_len
& 7) || skb
->len
< data_len
)
609 skb2
= data_len
? skb_copy(skb
, GFP_ATOMIC
) : skb_clone(skb
, GFP_ATOMIC
);
616 skb_reset_network_header(skb2
);
618 rt
= rt6_lookup(dev_net(skb
->dev
), &ipv6_hdr(skb2
)->saddr
, NULL
, 0, 0);
620 if (rt
&& rt
->dst
.dev
)
621 skb2
->dev
= rt
->dst
.dev
;
623 ipv6_addr_set_v4mapped(ip_hdr(skb
)->saddr
, &temp_saddr
);
626 /* RFC 4884 (partial) support :
627 * insert 0 padding at the end, before the extensions
629 __skb_push(skb2
, nhs
);
630 skb_reset_network_header(skb2
);
631 memmove(skb2
->data
, skb2
->data
+ nhs
, data_len
- nhs
);
632 memset(skb2
->data
+ data_len
- nhs
, 0, nhs
);
633 /* RFC 4884 4.5 : Length is measured in 64-bit words,
634 * and stored in reserved[0]
636 info
= (data_len
/8) << 24;
638 if (type
== ICMP_TIME_EXCEEDED
)
639 icmp6_send(skb2
, ICMPV6_TIME_EXCEED
, ICMPV6_EXC_HOPLIMIT
,
642 icmp6_send(skb2
, ICMPV6_DEST_UNREACH
, ICMPV6_ADDR_UNREACH
,
651 EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach
);
653 static void icmpv6_echo_reply(struct sk_buff
*skb
)
655 struct net
*net
= dev_net(skb
->dev
);
657 struct inet6_dev
*idev
;
658 struct ipv6_pinfo
*np
;
659 const struct in6_addr
*saddr
= NULL
;
660 struct icmp6hdr
*icmph
= icmp6_hdr(skb
);
661 struct icmp6hdr tmp_hdr
;
663 struct icmpv6_msg msg
;
664 struct dst_entry
*dst
;
665 struct ipcm6_cookie ipc6
;
667 u32 mark
= IP6_REPLY_MARK(net
, skb
->mark
);
668 struct sockcm_cookie sockc_unused
= {0};
670 saddr
= &ipv6_hdr(skb
)->daddr
;
672 if (!ipv6_unicast_destination(skb
) &&
673 !(net
->ipv6
.sysctl
.anycast_src_echo_reply
&&
674 ipv6_anycast_destination(skb_dst(skb
), saddr
)))
677 memcpy(&tmp_hdr
, icmph
, sizeof(tmp_hdr
));
678 tmp_hdr
.icmp6_type
= ICMPV6_ECHO_REPLY
;
680 memset(&fl6
, 0, sizeof(fl6
));
681 fl6
.flowi6_proto
= IPPROTO_ICMPV6
;
682 fl6
.daddr
= ipv6_hdr(skb
)->saddr
;
685 fl6
.flowi6_oif
= skb
->dev
->ifindex
;
686 fl6
.fl6_icmp_type
= ICMPV6_ECHO_REPLY
;
687 fl6
.flowi6_mark
= mark
;
688 fl6
.flowi6_uid
= sock_net_uid(net
, NULL
);
689 security_skb_classify_flow(skb
, flowi6_to_flowi(&fl6
));
692 sk
= icmpv6_xmit_lock(net
);
698 if (!fl6
.flowi6_oif
&& ipv6_addr_is_multicast(&fl6
.daddr
))
699 fl6
.flowi6_oif
= np
->mcast_oif
;
700 else if (!fl6
.flowi6_oif
)
701 fl6
.flowi6_oif
= np
->ucast_oif
;
703 err
= ip6_dst_lookup(net
, sk
, &dst
, &fl6
);
706 dst
= xfrm_lookup(net
, dst
, flowi6_to_flowi(&fl6
), sk
, 0);
710 idev
= __in6_dev_get(skb
->dev
);
714 msg
.type
= ICMPV6_ECHO_REPLY
;
716 ipc6
.hlimit
= ip6_sk_dst_hoplimit(np
, &fl6
, dst
);
717 ipc6
.tclass
= ipv6_get_dsfield(ipv6_hdr(skb
));
718 ipc6
.dontfrag
= np
->dontfrag
;
721 err
= ip6_append_data(sk
, icmpv6_getfrag
, &msg
, skb
->len
+ sizeof(struct icmp6hdr
),
722 sizeof(struct icmp6hdr
), &ipc6
, &fl6
,
723 (struct rt6_info
*)dst
, MSG_DONTWAIT
,
727 __ICMP6_INC_STATS(net
, idev
, ICMP6_MIB_OUTERRORS
);
728 ip6_flush_pending_frames(sk
);
730 err
= icmpv6_push_pending_frames(sk
, &fl6
, &tmp_hdr
,
731 skb
->len
+ sizeof(struct icmp6hdr
));
735 icmpv6_xmit_unlock(sk
);
740 void icmpv6_notify(struct sk_buff
*skb
, u8 type
, u8 code
, __be32 info
)
742 const struct inet6_protocol
*ipprot
;
746 struct net
*net
= dev_net(skb
->dev
);
748 if (!pskb_may_pull(skb
, sizeof(struct ipv6hdr
)))
751 nexthdr
= ((struct ipv6hdr
*)skb
->data
)->nexthdr
;
752 if (ipv6_ext_hdr(nexthdr
)) {
753 /* now skip over extension headers */
754 inner_offset
= ipv6_skip_exthdr(skb
, sizeof(struct ipv6hdr
),
755 &nexthdr
, &frag_off
);
756 if (inner_offset
< 0)
759 inner_offset
= sizeof(struct ipv6hdr
);
762 /* Checkin header including 8 bytes of inner protocol header. */
763 if (!pskb_may_pull(skb
, inner_offset
+8))
766 /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
767 Without this we will not able f.e. to make source routed
769 Corresponding argument (opt) to notifiers is already added.
773 ipprot
= rcu_dereference(inet6_protos
[nexthdr
]);
774 if (ipprot
&& ipprot
->err_handler
)
775 ipprot
->err_handler(skb
, NULL
, type
, code
, inner_offset
, info
);
777 raw6_icmp_error(skb
, nexthdr
, type
, code
, inner_offset
, info
);
781 __ICMP6_INC_STATS(net
, __in6_dev_get(skb
->dev
), ICMP6_MIB_INERRORS
);
785 * Handle icmp messages
788 static int icmpv6_rcv(struct sk_buff
*skb
)
790 struct net_device
*dev
= skb
->dev
;
791 struct inet6_dev
*idev
= __in6_dev_get(dev
);
792 const struct in6_addr
*saddr
, *daddr
;
793 struct icmp6hdr
*hdr
;
795 bool success
= false;
797 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
798 struct sec_path
*sp
= skb_sec_path(skb
);
801 if (!(sp
&& sp
->xvec
[sp
->len
- 1]->props
.flags
&
805 if (!pskb_may_pull(skb
, sizeof(*hdr
) + sizeof(struct ipv6hdr
)))
808 nh
= skb_network_offset(skb
);
809 skb_set_network_header(skb
, sizeof(*hdr
));
811 if (!xfrm6_policy_check_reverse(NULL
, XFRM_POLICY_IN
, skb
))
814 skb_set_network_header(skb
, nh
);
817 __ICMP6_INC_STATS(dev_net(dev
), idev
, ICMP6_MIB_INMSGS
);
819 saddr
= &ipv6_hdr(skb
)->saddr
;
820 daddr
= &ipv6_hdr(skb
)->daddr
;
822 if (skb_checksum_validate(skb
, IPPROTO_ICMPV6
, ip6_compute_pseudo
)) {
823 net_dbg_ratelimited("ICMPv6 checksum failed [%pI6c > %pI6c]\n",
828 if (!pskb_pull(skb
, sizeof(*hdr
)))
831 hdr
= icmp6_hdr(skb
);
833 type
= hdr
->icmp6_type
;
835 ICMP6MSGIN_INC_STATS(dev_net(dev
), idev
, type
);
838 case ICMPV6_ECHO_REQUEST
:
839 icmpv6_echo_reply(skb
);
842 case ICMPV6_ECHO_REPLY
:
843 success
= ping_rcv(skb
);
846 case ICMPV6_PKT_TOOBIG
:
847 /* BUGGG_FUTURE: if packet contains rthdr, we cannot update
848 standard destination cache. Seems, only "advanced"
849 destination cache will allow to solve this problem
852 if (!pskb_may_pull(skb
, sizeof(struct ipv6hdr
)))
854 hdr
= icmp6_hdr(skb
);
857 * Drop through to notify
860 case ICMPV6_DEST_UNREACH
:
861 case ICMPV6_TIME_EXCEED
:
862 case ICMPV6_PARAMPROB
:
863 icmpv6_notify(skb
, type
, hdr
->icmp6_code
, hdr
->icmp6_mtu
);
866 case NDISC_ROUTER_SOLICITATION
:
867 case NDISC_ROUTER_ADVERTISEMENT
:
868 case NDISC_NEIGHBOUR_SOLICITATION
:
869 case NDISC_NEIGHBOUR_ADVERTISEMENT
:
874 case ICMPV6_MGM_QUERY
:
875 igmp6_event_query(skb
);
878 case ICMPV6_MGM_REPORT
:
879 igmp6_event_report(skb
);
882 case ICMPV6_MGM_REDUCTION
:
883 case ICMPV6_NI_QUERY
:
884 case ICMPV6_NI_REPLY
:
885 case ICMPV6_MLD2_REPORT
:
886 case ICMPV6_DHAAD_REQUEST
:
887 case ICMPV6_DHAAD_REPLY
:
888 case ICMPV6_MOBILE_PREFIX_SOL
:
889 case ICMPV6_MOBILE_PREFIX_ADV
:
894 if (type
& ICMPV6_INFOMSG_MASK
)
897 net_dbg_ratelimited("icmpv6: msg of unknown type [%pI6c > %pI6c]\n",
901 * error of unknown type.
902 * must pass to upper level
905 icmpv6_notify(skb
, type
, hdr
->icmp6_code
, hdr
->icmp6_mtu
);
908 /* until the v6 path can be better sorted assume failure and
909 * preserve the status quo behaviour for the rest of the paths to here
919 __ICMP6_INC_STATS(dev_net(dev
), idev
, ICMP6_MIB_CSUMERRORS
);
921 __ICMP6_INC_STATS(dev_net(dev
), idev
, ICMP6_MIB_INERRORS
);
927 void icmpv6_flow_init(struct sock
*sk
, struct flowi6
*fl6
,
929 const struct in6_addr
*saddr
,
930 const struct in6_addr
*daddr
,
933 memset(fl6
, 0, sizeof(*fl6
));
936 fl6
->flowi6_proto
= IPPROTO_ICMPV6
;
937 fl6
->fl6_icmp_type
= type
;
938 fl6
->fl6_icmp_code
= 0;
939 fl6
->flowi6_oif
= oif
;
940 security_sk_classify_flow(sk
, flowi6_to_flowi(fl6
));
943 static int __net_init
icmpv6_sk_init(struct net
*net
)
949 kzalloc(nr_cpu_ids
* sizeof(struct sock
*), GFP_KERNEL
);
950 if (!net
->ipv6
.icmp_sk
)
953 for_each_possible_cpu(i
) {
954 err
= inet_ctl_sock_create(&sk
, PF_INET6
,
955 SOCK_RAW
, IPPROTO_ICMPV6
, net
);
957 pr_err("Failed to initialize the ICMP6 control socket (err %d)\n",
962 net
->ipv6
.icmp_sk
[i
] = sk
;
964 /* Enough space for 2 64K ICMP packets, including
965 * sk_buff struct overhead.
967 sk
->sk_sndbuf
= 2 * SKB_TRUESIZE(64 * 1024);
972 for (j
= 0; j
< i
; j
++)
973 inet_ctl_sock_destroy(net
->ipv6
.icmp_sk
[j
]);
974 kfree(net
->ipv6
.icmp_sk
);
978 static void __net_exit
icmpv6_sk_exit(struct net
*net
)
982 for_each_possible_cpu(i
) {
983 inet_ctl_sock_destroy(net
->ipv6
.icmp_sk
[i
]);
985 kfree(net
->ipv6
.icmp_sk
);
/*
 * Per-network-namespace setup/teardown of the per-CPU ICMPv6 control
 * sockets (allocated in icmpv6_sk_init, freed in icmpv6_sk_exit).
 */
static struct pernet_operations icmpv6_sk_ops = {
	.init = icmpv6_sk_init,
	.exit = icmpv6_sk_exit,
};
993 int __init
icmpv6_init(void)
997 err
= register_pernet_subsys(&icmpv6_sk_ops
);
1002 if (inet6_add_protocol(&icmpv6_protocol
, IPPROTO_ICMPV6
) < 0)
1005 err
= inet6_register_icmp_sender(icmp6_send
);
1007 goto sender_reg_err
;
1011 inet6_del_protocol(&icmpv6_protocol
, IPPROTO_ICMPV6
);
1013 pr_err("Failed to register ICMP6 protocol\n");
1014 unregister_pernet_subsys(&icmpv6_sk_ops
);
/*
 * Subsystem teardown: undo the registrations performed by icmpv6_init()
 * (ICMP sender hook, pernet socket setup, inet6 protocol handler).
 */
void icmpv6_cleanup(void)
{
	inet6_unregister_icmp_sender(icmp6_send);
	unregister_pernet_subsys(&icmpv6_sk_ops);
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}
1026 static const struct icmp6_err
{
1034 { /* ADM_PROHIBITED */
1038 { /* Was NOT_NEIGHBOUR, now reserved */
1039 .err
= EHOSTUNREACH
,
1042 { /* ADDR_UNREACH */
1043 .err
= EHOSTUNREACH
,
1046 { /* PORT_UNREACH */
1047 .err
= ECONNREFUSED
,
1054 { /* REJECT_ROUTE */
1060 int icmpv6_err_convert(u8 type
, u8 code
, int *err
)
1067 case ICMPV6_DEST_UNREACH
:
1069 if (code
< ARRAY_SIZE(tab_unreach
)) {
1070 *err
= tab_unreach
[code
].err
;
1071 fatal
= tab_unreach
[code
].fatal
;
1075 case ICMPV6_PKT_TOOBIG
:
1079 case ICMPV6_PARAMPROB
:
1084 case ICMPV6_TIME_EXCEED
:
1085 *err
= EHOSTUNREACH
;
1091 EXPORT_SYMBOL(icmpv6_err_convert
);
1093 #ifdef CONFIG_SYSCTL
1094 static struct ctl_table ipv6_icmp_table_template
[] = {
1096 .procname
= "ratelimit",
1097 .data
= &init_net
.ipv6
.sysctl
.icmpv6_time
,
1098 .maxlen
= sizeof(int),
1100 .proc_handler
= proc_dointvec_ms_jiffies
,
1105 struct ctl_table
* __net_init
ipv6_icmp_sysctl_init(struct net
*net
)
1107 struct ctl_table
*table
;
1109 table
= kmemdup(ipv6_icmp_table_template
,
1110 sizeof(ipv6_icmp_table_template
),
1114 table
[0].data
= &net
->ipv6
.sysctl
.icmpv6_time
;