/*
 *	Internet Control Message Protocol (ICMPv6)
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: icmp.c,v 1.38 2002/02/08 03:57:19 davem Exp $
 *
 *	Based on net/ipv4/icmp.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Andi Kleen		:	exception handling
 *	Andi Kleen			add rate limits. never reply to a icmp.
 *					add more length checks and other fixes.
 *	yoshfuji		:	ensure to sent parameter problem for
 *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
 *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
 *	Kazunori MIYAZAWA @USAGI:	change output process to use ip6_append_data
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/netfilter.h>
#include <linux/sysctl.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>

#include <net/ip6_checksum.h>
#include <net/protocol.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <asm/uaccess.h>
#include <asm/system.h>
DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6_statistics);
DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6msg_statistics);
/*
 *	The ICMP socket(s). This is the most convenient way to flow control
 *	our ICMP output as well as maintain a clean interface throughout
 *	all layers. All Socketless IP sends will soon be gone.
 *
 *	On SMP we have one ICMP socket per-cpu.
 */
static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
#define icmpv6_socket	__get_cpu_var(__icmpv6_socket)
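/*
 * Note: icmpv6_socket expands to the control socket of the CPU we are
 * currently executing on; in this file it is only dereferenced between
 * icmpv6_xmit_lock() and icmpv6_xmit_unlock(), which serialize use of
 * that per-CPU socket.
 */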
static int icmpv6_rcv(struct sk_buff *skb);

static struct inet6_protocol icmpv6_protocol = {
	.handler	= icmpv6_rcv,
	.flags		= INET6_PROTO_FINAL,
};
static __inline__ int icmpv6_xmit_lock(void)
{
	local_bh_disable();

	if (unlikely(!spin_trylock(&icmpv6_socket->sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		local_bh_enable();
		return 1;
	}
	return 0;
}
static __inline__ void icmpv6_xmit_unlock(void)
{
	spin_unlock_bh(&icmpv6_socket->sk->sk_lock.slock);
}
/*
 *	Slightly more convenient version of icmpv6_send.
 */
void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
{
	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
	kfree_skb(skb);
}
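/*
 * Illustrative call (hypothetical, not from this file): an extension-header
 * parser that rejects a field would typically report the offset of the bad
 * byte back to the sender, e.g.
 *
 *	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, offset_of_bad_field);
 *
 * where offset_of_bad_field is an offset computed by the caller. Note that
 * the helper consumes the skb.
 */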
/*
 *	Figure out whether we may reply to this packet with an icmp error.
 *
 *	We do not reply, if:
 *		- it was an icmp error message.
 *		- it is truncated, so that it is known that the protocol is
 *		  ICMPV6 (i.e. in the middle of some exthdr)
 */
static int is_ineligible(struct sk_buff *skb)
{
	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;

	if (len < 0)
		return 1;

	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
	if (nexthdr == IPPROTO_ICMPV6) {
		u8 _type, *tp;
		tp = skb_header_pointer(skb,
					ptr + offsetof(struct icmp6hdr, icmp6_type),
					sizeof(_type), &_type);
		if (tp == NULL ||
		    !(*tp & ICMPV6_INFOMSG_MASK))
			return 1;
	}
	return 0;
}
static int sysctl_icmpv6_time __read_mostly = 1*HZ;
/*
 * Check the ICMP output rate limit
 */
static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
				     struct flowi *fl)
{
	struct dst_entry *dst;
	int res = 0;

	/* Informational messages are not limited. */
	if (type & ICMPV6_INFOMSG_MASK)
		return 1;

	/* Do not limit pmtu discovery, it would break it. */
	if (type == ICMPV6_PKT_TOOBIG)
		return 1;

	/*
	 * Look up the output route.
	 * XXX: perhaps the expire for routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	 */
	dst = ip6_route_output(sk, fl);
	if (dst->error) {
		IP6_INC_STATS(ip6_dst_idev(dst),
			      IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags & IFF_LOOPBACK)) {
		res = 1;
	} else {
		struct rt6_info *rt = (struct rt6_info *)dst;
		int tmo = sysctl_icmpv6_time;

		/* Give more bandwidth to wider prefixes. */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen) >> 5);

		res = xrlim_allow(dst, tmo);
	}
	dst_release(dst);
	return res;
}
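/*
 * Worked example of the scaling above: with the default sysctl_icmpv6_time
 * of 1*HZ, a destination reached through a /64 route gets
 * tmo = HZ >> ((128 - 64) >> 5) = HZ >> 2, i.e. at most one error roughly
 * every 250 ms, while a /128 host route keeps the full one-second interval.
 */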
/*
 *	an inline helper for the "simple" if statement below
 *	checks if parameter problem report is caused by an
 *	unrecognized IPv6 option that has the Option Type
 *	highest-order two bits set to 10
 */
static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
{
	u8 _optval, *op;

	offset += skb_network_offset(skb);
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	if (op == NULL)
		return 1;
	return (*op & 0xC0) == 0x80;
}
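/*
 * Background: RFC 2460 uses the two high-order bits of an option type to
 * encode the action for an unrecognized option; the value 10 means "discard
 * the packet and send a Parameter Problem, Code 2, even if the destination
 * was multicast", which is why this case is exempted from the multicast
 * suppression checks in icmpv6_send() below.
 */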
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
{
	struct sk_buff *skb;
	struct icmp6hdr *icmp6h;
	int err = 0;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	icmp6h = icmp6_hdr(skb);
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	icmp6h->icmp6_cksum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		skb->csum = csum_partial((char *)icmp6h,
					 sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
						      &fl->fl6_dst,
						      len, fl->proto,
						      skb->csum);
	} else {
		__wsum tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
		}

		tmp_csum = csum_partial((char *)icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
						      &fl->fl6_dst,
						      len, fl->proto,
						      tmp_csum);
	}
	ip6_push_pending_frames(sk);
out:
	return err;
}
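/*
 * csum_ipv6_magic() folds the IPv6 pseudo-header (source address, destination
 * address, upper-layer payload length and next-header value) into the running
 * sum, which is why the addresses and protocol are taken from the flow (fl)
 * here rather than from the queued skbs themselves.
 */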
struct icmpv6_msg {
	struct sk_buff	*skb;
	int		offset;
	u8		type;
};

static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;
	__wsum csum = 0;

	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
				      to, len, csum);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
	return 0;
}
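/*
 * icmpv6_getfrag() is the getfrag callback handed to ip6_append_data(): it
 * copies "len" bytes of the offending packet (starting at msg->offset) into
 * the frame being built and accumulates the checksum as it goes, so the
 * final ICMPv6 checksum can be finished in icmpv6_push_pending_frames().
 */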
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
static void mip6_addr_swap(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6_destopt_hao *hao;
	struct in6_addr tmp;
	int off;

	if (opt->dsthao) {
		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
		if (likely(off >= 0)) {
			hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + off);
			ipv6_addr_copy(&tmp, &iph->saddr);
			ipv6_addr_copy(&iph->saddr, &hao->addr);
			ipv6_addr_copy(&hao->addr, &tmp);
		}
	}
}
#else
static inline void mip6_addr_swap(struct sk_buff *skb) {}
#endif
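/*
 * Mobile IPv6: if the offending packet carried a Home Address destination
 * option, the IPv6 source address (the care-of address) and the home address
 * stored in the option are swapped before the error is built, so that the
 * quoted packet and the error's destination refer to the mobile node's home
 * address. The empty stub keeps callers unconditional when MIPv6 support is
 * compiled out.
 */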
/*
 *	Send an ICMP message in response to a packet in error
 */
void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
		 struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct sock *sk;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct dst_entry *dst;
	struct icmp6hdr tmp_hdr;
	struct flowi fl;
	struct icmpv6_msg msg;
	int iif = 0;
	int addr_type = 0;
	int len;
	int hlimit, tclass;
	int err = 0;
	if ((u8 *)hdr < skb->head ||
	    (skb->network_header + sizeof(*hdr)) > skb->tail)
		return;

	/*
	 *	Make sure we respect the rules
	 *	i.e. RFC 1885 2.4(e)
	 *	Rule (e.1) is enforced by not using icmpv6_send
	 *	in any code that processes icmp errors.
	 */
	addr_type = ipv6_addr_type(&hdr->daddr);

	if (ipv6_chk_addr(&hdr->daddr, skb->dev, 0))
		saddr = &hdr->daddr;

	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))
			return;

		saddr = NULL;
	}

	addr_type = ipv6_addr_type(&hdr->saddr);

	if (addr_type & IPV6_ADDR_LINKLOCAL)
		iif = skb->dev->ifindex;

	/*
	 *	Must not send error if the source does not uniquely
	 *	identify a single node (RFC2463 Section 2.4).
	 *	We check unspecified / multicast addresses here,
	 *	and anycast addresses will be checked later.
	 */
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
		return;
	}

	/*
	 *	Never answer to an ICMP packet.
	 */
	if (is_ineligible(skb)) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
		return;
	}

	mip6_addr_swap(skb);
	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
	if (saddr)
		ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.oif = iif;
	fl.fl_icmp_type = type;
	fl.fl_icmp_code = code;
	security_skb_classify_flow(skb, &fl);

	if (icmpv6_xmit_lock())
		return;

	sk = icmpv6_socket->sk;
	np = inet6_sk(sk);

	if (!icmpv6_xrlim_allow(sk, type, &fl))
		goto out;

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	/*
	 *	We won't send icmp if the destination is known
	 *	anycast.
	 */
	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
		goto out_dst_release;
	}

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto out;

	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	msg.skb = skb;
	msg.offset = skb_network_offset(skb);
	msg.type = type;

	len = skb->len - msg.offset;
	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
	if (len < 0) {
		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
		goto out_dst_release;
	}

	idev = in6_dev_get(skb->dev);

	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
			      len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr),
			      hlimit, tclass, NULL, &fl, (struct rt6_info *)dst,
			      MSG_DONTWAIT);
	if (err) {
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));

out_put:
	if (likely(idev != NULL))
		in6_dev_put(idev);
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock();
}

EXPORT_SYMBOL(icmpv6_send);
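/*
 * Illustrative caller (hypothetical, not from this file): a forwarding path
 * that finds the outgoing link MTU too small for a packet would report it
 * back with something like
 *
 *	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
 *
 * where mtu is the path MTU the caller wants the sender to switch to.
 */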
static void icmpv6_echo_reply(struct sk_buff *skb)
{
	struct sock *sk;
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = icmp6_hdr(skb);
	struct icmp6hdr tmp_hdr;
	struct flowi fl;
	struct icmpv6_msg msg;
	struct dst_entry *dst;
	int err = 0;
	int hlimit;
	int tclass;

	saddr = &ipv6_hdr(skb)->daddr;

	if (!ipv6_unicast_destination(skb))
		saddr = NULL;

	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_ICMPV6;
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	if (saddr)
		ipv6_addr_copy(&fl.fl6_src, saddr);
	fl.oif = skb->dev->ifindex;
	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
	security_skb_classify_flow(skb, &fl);
	if (icmpv6_xmit_lock())
		return;

	sk = icmpv6_socket->sk;
	np = inet6_sk(sk);

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto out;

	if (ipv6_addr_is_multicast(&fl.fl6_dst))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = dst_metric(dst, RTAX_HOPLIMIT);
	if (hlimit < 0)
		hlimit = ipv6_get_hoplimit(dst->dev);

	tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	idev = in6_dev_get(skb->dev);

	msg.skb = skb;
	msg.offset = 0;
	msg.type = ICMPV6_ECHO_REPLY;

	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
			      (struct rt6_info *)dst, MSG_DONTWAIT);
	if (err) {
		ip6_flush_pending_frames(sk);
		goto out_put;
	}
	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));

out_put:
	if (likely(idev != NULL))
		in6_dev_put(idev);
	dst_release(dst);
out:
	icmpv6_xmit_unlock();
}
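/*
 * Note on the echo path: by the time this runs, icmpv6_rcv() has already
 * pulled the ICMPv6 header, so msg.offset = 0 makes the copy start at the
 * echo data; the reply reuses that data and the request's header fields
 * (identifier, sequence number), rewriting only the type to
 * ICMPV6_ECHO_REPLY and recomputing the checksum.
 */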
static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
{
	struct in6_addr *saddr, *daddr;
	struct inet6_protocol *ipprot;
	struct sock *sk;
	int inner_offset;
	int hash;
	u8 nexthdr;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return;

	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
		if (inner_offset < 0)
			return;
	} else {
		inner_offset = sizeof(struct ipv6hdr);
	}

	/* Check header, including 8 bytes of inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset + 8))
		return;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	   Without this we will not be able e.g. to make source-routed
	   pmtu discovery work.
	   Corresponding argument (opt) to notifiers is already added.
	 */

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	rcu_read_lock();
	ipprot = rcu_dereference(inet6_protos[hash]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
	rcu_read_unlock();

	read_lock(&raw_v6_lock);
	if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
		while ((sk = __raw_v6_lookup(sk, nexthdr, saddr, daddr,
					     IP6CB(skb)->iif))) {
			rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
			sk = sk_next(sk);
		}
	}
	read_unlock(&raw_v6_lock);
}
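/*
 * icmpv6_notify() fans an ICMPv6 error out to everyone interested in the
 * embedded packet: the upper-layer protocol registered for its next-header
 * value (via inet6_protos[].err_handler) and every matching raw socket, each
 * of which is given the offset of the inner transport header so it can
 * locate its own header inside the quoted packet.
 */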
/*
 *	Handle icmp messages
 */
static int icmpv6_rcv(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct inet6_dev *idev = __in6_dev_get(dev);
	struct in6_addr *saddr, *daddr;
	struct ipv6hdr *orig_hdr;
	struct icmp6hdr *hdr;
	int type;

	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	/* Perform checksum. */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
				     skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
							 IPPROTO_ICMPV6, 0));
		if (__skb_checksum_complete(skb)) {
			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
				       NIP6(*saddr), NIP6(*daddr));
			goto discard_it;
		}
	}

	if (!pskb_pull(skb, sizeof(struct icmp6hdr)))
		goto discard_it;

	hdr = icmp6_hdr(skb);

	type = hdr->icmp6_type;

	ICMP6MSGIN_INC_STATS_BH(idev, type);

	switch (type) {
	case ICMPV6_ECHO_REQUEST:
		icmpv6_echo_reply(skb);
		break;

	case ICMPV6_ECHO_REPLY:
		/* we couldn't care less */
		break;

	case ICMPV6_PKT_TOOBIG:
		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
		   standard destination cache. Seems, only "advanced"
		   destination cache will allow to solve this problem
		 */
		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto discard_it;
		hdr = icmp6_hdr(skb);
		orig_hdr = (struct ipv6hdr *) (hdr + 1);
		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
				   ntohl(hdr->icmp6_mtu));

		/*
		 *	Drop through to notify
		 */

	case ICMPV6_DEST_UNREACH:
	case ICMPV6_TIME_EXCEED:
	case ICMPV6_PARAMPROB:
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
		break;

	case NDISC_ROUTER_SOLICITATION:
	case NDISC_ROUTER_ADVERTISEMENT:
	case NDISC_NEIGHBOUR_SOLICITATION:
	case NDISC_NEIGHBOUR_ADVERTISEMENT:
		ndisc_rcv(skb);
		break;

	case ICMPV6_MGM_QUERY:
		igmp6_event_query(skb);
		break;

	case ICMPV6_MGM_REPORT:
		igmp6_event_report(skb);
		break;

	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_NI_QUERY:
	case ICMPV6_NI_REPLY:
	case ICMPV6_MLD2_REPORT:
	case ICMPV6_DHAAD_REQUEST:
	case ICMPV6_DHAAD_REPLY:
	case ICMPV6_MOBILE_PREFIX_SOL:
	case ICMPV6_MOBILE_PREFIX_ADV:
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");

		/* informational */
		if (type & ICMPV6_INFOMSG_MASK)
			break;

		/*
		 * error of unknown type.
		 * must pass to upper level
		 */
		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
	}

	kfree_skb(skb);
	return 0;

discard_it:
	ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
	kfree_skb(skb);
	return 0;
}
/*
 * Special lock-class for __icmpv6_socket:
 */
static struct lock_class_key icmpv6_socket_sk_dst_lock_key;

int __init icmpv6_init(struct net_proto_family *ops)
{
	struct sock *sk;
	int err, i, j;

	for_each_possible_cpu(i) {
		err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
				       &per_cpu(__icmpv6_socket, i));
		if (err < 0) {
			printk(KERN_ERR
			       "Failed to initialize the ICMP6 control socket "
			       "(err %d).\n", err);
			goto fail;
		}

		sk = per_cpu(__icmpv6_socket, i)->sk;
		sk->sk_allocation = GFP_ATOMIC;
		/*
		 * Split off their lock-class, because sk->sk_dst_lock
		 * gets used from softirqs, which is safe for
		 * __icmpv6_socket (because those never get directly used
		 * via userspace syscalls), but unsafe for normal sockets.
		 */
		lockdep_set_class(&sk->sk_dst_lock,
				  &icmpv6_socket_sk_dst_lock_key);

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff struct overhead.
		 */
		sk->sk_sndbuf =
			(2 * ((64 * 1024) + sizeof(struct sk_buff)));

		sk->sk_prot->unhash(sk);
	}

	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) {
		printk(KERN_ERR "Failed to register ICMP6 protocol\n");
		err = -EAGAIN;
		goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (!cpu_possible(j))
			continue;
		sock_release(per_cpu(__icmpv6_socket, j));
	}

	return err;
}
void icmpv6_cleanup(void)
{
	int i;

	for_each_possible_cpu(i) {
		sock_release(per_cpu(__icmpv6_socket, i));
	}
	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}
static const struct icmp6_err {
	int err;
	int fatal;
} tab_unreach[] = {
	{ /* NOROUTE */		.err = ENETUNREACH,	.fatal = 0 },
	{ /* ADM_PROHIBITED */	.err = EACCES,		.fatal = 1 },
	{ /* Was NOT_NEIGHBOUR, now reserved */
				.err = EHOSTUNREACH,	.fatal = 0 },
	{ /* ADDR_UNREACH */	.err = EHOSTUNREACH,	.fatal = 0 },
	{ /* PORT_UNREACH */	.err = ECONNREFUSED,	.fatal = 1 },
};

int icmpv6_err_convert(int type, int code, int *err)
{
	int fatal = 0;

	*err = EPROTO;

	switch (type) {
	case ICMPV6_DEST_UNREACH:
		fatal = 1;
		if (code <= ICMPV6_PORT_UNREACH) {
			*err = tab_unreach[code].err;
			fatal = tab_unreach[code].fatal;
		}
		break;

	case ICMPV6_PKT_TOOBIG:
		*err = EMSGSIZE;
		break;

	case ICMPV6_PARAMPROB:
		*err = EPROTO;
		fatal = 1;
		break;

	case ICMPV6_TIME_EXCEED:
		*err = EHOSTUNREACH;
		break;
	}

	return fatal;
}

EXPORT_SYMBOL(icmpv6_err_convert);
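/*
 * Typical use (illustrative): transport error handlers such as tcp_v6_err()
 * call this to translate an ICMPv6 type/code pair into an errno value and a
 * fatal/non-fatal indication, e.g.
 *
 *	fatal = icmpv6_err_convert(type, code, &err);
 *
 * and then react according to whether the error is fatal.
 */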
ctl_table ipv6_icmp_table[] = {
	{
		.ctl_name	= NET_IPV6_ICMP_RATELIMIT,
		.procname	= "ratelimit",
		.data		= &sysctl_icmpv6_time,
		.maxlen		= sizeof(int),
		.proc_handler	= &proc_dointvec