/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Adapted from linux/net/ipv4/raw.c
 *
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI,H.@USAGI	:	raw checksum (RFC2292(bis) compliance)
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#include <linux/mroute6.h>

#include <net/rawv6.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
static struct raw_hashinfo raw_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
		unsigned short num, const struct in6_addr *loc_addr,
		const struct in6_addr *rmt_addr, int dif)
{
	bool is_multicast = ipv6_addr_is_multicast(loc_addr);

	sk_for_each_from(sk)
		if (inet_sk(sk)->inet_num == num) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			if (!net_eq(sock_net(sk), net))
				continue;

			if (!ipv6_addr_any(&np->daddr) &&
			    !ipv6_addr_equal(&np->daddr, rmt_addr))
				continue;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
				continue;

			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
					goto found;
				if (is_multicast &&
				    inet6_mc_check(sk, loc_addr, rmt_addr))
					goto found;
				continue;
			}
			goto found;
		}
	sk = NULL;
found:
	return sk;
}
static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
	struct icmp6hdr _hdr;
	const struct icmp6hdr *hdr;

	hdr = skb_header_pointer(skb, skb_transport_offset(skb),
				 sizeof(_hdr), &_hdr);
	if (hdr) {
		const __u32 *data = &raw6_sk(sk)->filter.data[0];
		unsigned int type = hdr->icmp6_type;

		return (data[type >> 5] & (1U << (type & 31))) != 0;
	}
	return 1;
}
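
/*
 * Illustrative userspace sketch (not part of this file; fd is assumed to be
 * the application's socket): the bitmap consulted above is installed from
 * userspace with the RFC 3542 ICMP6_FILTER socket option, for example to
 * pass only Echo Replies on an ICMPv6 raw socket:
 *
 *	#include <netinet/icmp6.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	struct icmp6_filter flt;
 *
 *	ICMP6_FILTER_SETBLOCKALL(&flt);
 *	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &flt);
 *	setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER, &flt, sizeof(flt));
 */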
#if IS_ENABLED(CONFIG_IPV6_MIP6)
typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);

static mh_filter_t __rcu *mh_filter __read_mostly;

int rawv6_mh_filter_register(mh_filter_t filter)
{
	rcu_assign_pointer(mh_filter, filter);
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);

int rawv6_mh_filter_unregister(mh_filter_t filter)
{
	RCU_INIT_POINTER(mh_filter, NULL);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);

#endif
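
/*
 * Minimal in-kernel sketch (hypothetical caller, shown only to illustrate the
 * hook declared above): MIPv6 support code can register a Mobility Header
 * validator matching mh_filter_t; ipv6_raw_deliver() below invokes it and
 * treats a zero return as "deliver to the raw socket".
 *
 *	static int example_mh_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return 0;
 *	}
 *
 *	rawv6_mh_filter_register(example_mh_filter);
 *	...
 *	rawv6_mh_filter_unregister(example_mh_filter);
 */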
/*
 *	demultiplex raw sockets.
 *	(should consider queueing the skb in the sock receive_queue
 *	without calling rawv6.c)
 *
 *	Caller owns SKB so we must make clones.
 */
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	const struct in6_addr *saddr;
	const struct in6_addr *daddr;
	bool delivered = false;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = saddr + 1;

	hash = nexthdr & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);

	net = dev_net(skb->dev);
	sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);

	while (sk) {
		int filtered;

		delivered = true;
		switch (nexthdr) {
		case IPPROTO_ICMPV6:
			filtered = icmpv6_filter(sk, skb);
			break;

#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
			/* XXX: To validate MH only once for each packet,
			 * this is placed here. It should be after checking
			 * xfrm policy, however it doesn't. The checking xfrm
			 * policy is placed in rawv6_rcv() because it is
			 * required for each socket.
			 */
			filter = rcu_dereference(mh_filter);
			filtered = filter ? (*filter)(sk, skb) : 0;
			break;
#endif
		default:
			filtered = 0;
			break;
		}

		if (filtered == 0) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				rawv6_rcv(sk, clone);
		}
		sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
				     IP6CB(skb)->iif);
	}
	read_unlock(&raw_v6_hashinfo.lock);

	return delivered;
}
bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
	struct sock *raw_sk;

	raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
	if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
		raw_sk = NULL;

	return raw_sk != NULL;
}
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	if (sk->sk_state != TCP_CLOSE)
		goto out;

	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (__ipv6_addr_needs_scope_id(addr_type)) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out_unlock;

			dev = dev_get_by_index_rcu(sock_net(sk),
						   sk->sk_bound_dev_if);
		}

		/* ipv4 addr of the socket is invalid.  Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
					   dev, 0))
				goto out_unlock;
		}
	}

	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
	np->rcv_saddr = addr->sin6_addr;
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		np->saddr = addr->sin6_addr;
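
/*
 * Illustrative userspace sketch (not part of this file; the address,
 * interface name and fd are placeholders): binding a raw socket to a
 * link-local address, which the code above accepts only when an interface is
 * known, here supplied through sin6_scope_id:
 *
 *	#include <net/if.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6 };
 *
 *	inet_pton(AF_INET6, "fe80::1", &sa.sin6_addr);
 *	sa.sin6_scope_id = if_nametoindex("eth0");
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */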
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       u8 type, u8 code, int offset, __be32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and error is hard.
	 */
	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG) {
		ip6_sk_update_pmtu(skb, sk, info);
		harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
	}
	if (type == NDISC_REDIRECT)
		ip6_sk_redirect(skb, sk);
	if (np->recverr) {
		u8 *payload = skb->data;
		if (!inet->hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (np->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
		u8 type, u8 code, int inner_offset, __be32 info)
{
	const struct in6_addr *saddr, *daddr;

	hash = nexthdr & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);
	if (sk != NULL) {
		/* Note: ipv6_hdr(skb) != skb->data */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
		net = dev_net(skb->dev);

		while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
					     IP6CB(skb)->iif))) {
			rawv6_err(sk, skb, NULL, type, code,
				  inner_offset, info);
			sk = sk_next(sk);
		}
	}
	read_unlock(&raw_v6_hashinfo.lock);
}
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
	    skb_checksum_complete(skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Charge it to the socket. */
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}
/*
 *	This is next to useless...
 *	if we demultiplex in network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		skb_postpull_rcsum(skb, skb_network_header(skb),
				   skb_network_header_len(skb));
		if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     skb->len, inet->inet_num, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len,
							 inet->inet_num, 0));

	if (skb_checksum_complete(skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}
/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
 */

static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
		  struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;

	*addr_len = sizeof(*sin6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb_csum_unnecessary(skb)) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else if (msg->msg_flags & MSG_TRUNC) {
		if (__skb_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
	}
	if (err)
		goto out_free;

	/* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = ipv6_hdr(skb)->saddr;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
							  IP6CB(skb)->iif);
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	ip6_datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	skb_kill_datagram(sk, skb, flags);

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags & MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	goto out;
}
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
				     struct raw6_sock *rp)
{
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	total_len = inet_sk(sk)->cork.base.length;
	if (offset >= total_len - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* should be check HW csum miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		tmp_csum = skb->csum;
	} else {
		struct sk_buff *csum_skb = NULL;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
			if (csum_skb)
				continue;
			len = skb->len - skb_transport_offset(skb);
			if (len <= rp->offset)
				continue;
			csum_skb = skb;
		}
		skb = csum_skb;
	}

	offset += skb_transport_offset(skb);
	if (skb_copy_bits(skb, offset, &csum, 2))
		BUG();

	/* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));

	csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
			       total_len, fl6->flowi6_proto, tmp_csum);

	if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
		csum = CSUM_MANGLED_0;

	if (skb_store_bits(skb, offset, &csum, 2))
		BUG();

	err = ip6_push_pending_frames(sk);
out:
	return err;
}
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
			struct flowi6 *fl6, struct dst_entry **dstp,
			unsigned int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)*dstp;
	int hlen = LL_RESERVED_SPACE(rt->dst.dev);
	int tlen = rt->dst.dev->needed_tailroom;

	if (length > rt->dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
		return -EMSGSIZE;
	}

	skb = sock_alloc_send_skb(sk,
				  length + hlen + tlen + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hlen);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb_dst_set(skb, &rt->dst);

	skb_put(skb, length);
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);

	skb->ip_summed = CHECKSUM_NONE;

	skb->transport_header = skb->network_header;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
		      rt->dst.dev, dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	if (err == -ENOBUFS && !np->recverr)
		err = 0;
	return err;
}
static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg)
{
	u8 __user *type = NULL;
	u8 __user *code = NULL;

	for (i = 0; i < msg->msg_iovlen; i++) {
		iov = &msg->msg_iov[i];
		if (!iov)
			continue;

		switch (fl6->flowi6_proto) {
		case IPPROTO_ICMPV6:
			/* check if one-byte field is readable or not. */
			if (iov->iov_base && iov->iov_len < 1)
				break;

			if (!type) {
				type = iov->iov_base;
				/* check if code field is readable or not. */
				if (iov->iov_len > 1)
					code = type + 1;
			} else if (!code)
				code = iov->iov_base;

			if (type && code) {
				if (get_user(fl6->fl6_icmp_type, type) ||
				    get_user(fl6->fl6_icmp_code, code))
					return -EFAULT;
				probed = 1;
			}
			break;
		case IPPROTO_MH:
			if (iov->iov_base && iov->iov_len < 1)
				break;
			/* check if type field is readable or not. */
			if (iov->iov_len > 2 - len) {
				u8 __user *p = iov->iov_base;
				if (get_user(fl6->fl6_mh_type, &p[2 - len]))
					return -EFAULT;
				probed = 1;
			}
			break;
		}

		if (probed)
			break;
	}
	return 0;
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
		   struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
	struct in6_addr *daddr, *final_p, final;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	int addr_len = msg->msg_namelen;
	int hlimit = -1;
	int tclass = -1;
	int dontfrag = -1;
	u16 proto;
	int err;

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	 */
	if (len > INT_MAX)
		return -EMSGSIZE;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 *	Get and verify the address.
	 */
	memset(&fl6, 0, sizeof(fl6));

	fl6.flowi6_mark = sk->sk_mark;

	if (sin6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
			return -EAFNOSUPPORT;

		/* port is the proto value [0..255] carried in nexthdr */
		proto = ntohs(sin6->sin6_port);

		if (!proto)
			proto = inet->inet_num;
		else if (proto != inet->inet_num)
			return -EINVAL;

		daddr = &sin6->sin6_addr;
		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &np->daddr))
			daddr = &np->daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		proto = inet->inet_num;
		daddr = &np->daddr;
		fl6.flowlabel = np->flow_label;
	}

	if (fl6.flowi6_oif == 0)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(struct ipv6_txoptions);

		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
					    &hlimit, &tclass, &dontfrag);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);

	fl6.flowi6_proto = proto;
	err = rawv6_probe_proto_opt(&fl6, msg);
	if (err)
		goto out;

	if (!ipv6_addr_any(daddr))
		fl6.daddr = *daddr;
	else
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;

	final_p = fl6_update_dst(&fl6, opt, &final);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto out;
	}
	if (hlimit < 0) {
		if (ipv6_addr_is_multicast(&fl6.daddr))
			hlimit = np->mcast_hops;
		else
			hlimit = np->hop_limit;
		if (hlimit < 0)
			hlimit = ip6_dst_hoplimit(dst);
	}

	if (dontfrag < 0)
		dontfrag = np->dontfrag;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;

back_from_confirm:
	if (inet->hdrincl)
		err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl6, &dst, msg->msg_flags);
	else {
		lock_sock(sk);
		err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
			len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst,
			msg->msg_flags, dontfrag);

		if (err)
			ip6_flush_pending_frames(sk);
		else if (!(msg->msg_flags & MSG_MORE))
			err = rawv6_push_pending_frames(sk, &fl6, rp);
		release_sock(sk);
	}
done:
	dst_release(dst);
out:
	fl6_sock_release(flowlabel);
	return err < 0 ? err : len;

do_confirm:
	dst_confirm(dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
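
/*
 * Illustrative userspace sketch (not part of this file; fd and the
 * destination address are placeholders): sending on an ICMPv6 raw socket.
 * As the code above checks, sin6_port, if nonzero, must carry the protocol
 * number already bound to the socket; the kernel fills in the ICMPv6
 * checksum itself, since checksumming is enabled by default for
 * IPPROTO_ICMPV6 raw sockets.
 *
 *	#include <netinet/icmp6.h>
 *	#include <arpa/inet.h>
 *
 *	struct icmp6_hdr hdr = { .icmp6_type = ICMP6_ECHO_REQUEST };
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6 };
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *	sendto(fd, &hdr, sizeof(hdr), 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */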
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
			char __user *optval, int optlen)
{
	switch (optname) {
	case ICMPV6_FILTER:
		if (optlen > sizeof(struct icmp6_filter))
			optlen = sizeof(struct icmp6_filter);
		if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}

static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
			char __user *optval, int __user *optlen)
{
	int len;

	switch (optname) {
	case ICMPV6_FILTER:
		if (get_user(len, optlen))
			return -EFAULT;
		if (len > sizeof(struct icmp6_filter))
			len = sizeof(struct icmp6_filter);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}
static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
		    level == IPPROTO_IPV6) {
			/*
			 * RFC3542 tells that IPV6_CHECKSUM socket
			 * option in the IPPROTO_IPV6 level is not
			 * allowed on ICMPv6 sockets.
			 * If you want to set it, use IPPROTO_RAW
			 * level IPV6_CHECKSUM socket option
			 */
			return -EINVAL;
		}

		/* You may get strange result with a positive odd offset;
		   RFC2292bis agrees with me. */
		if (val > 0 && (val & 1))
			return -EINVAL;
		if (val < 0) {
			rp->checksum = 0;
		} else {
			rp->checksum = 1;
			rp->offset = val;
		}
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

static int rawv6_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_setsockopt(sk, level, optname, optval, optlen);
	}

	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
				   char __user *optval, unsigned int optlen)
{
	switch (level) {
	case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return compat_ipv6_setsockopt(sk, level, optname,
					      optval, optlen);
	}
	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
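
/*
 * Illustrative userspace sketch (not part of this file; protocol 89/OSPF and
 * the offset are example values): for a non-ICMPv6 raw socket, IPV6_CHECKSUM
 * at the IPPROTO_IPV6 level asks the kernel to compute and verify a checksum
 * at the given even offset into the payload, e.g. offset 12 for the checksum
 * field of the OSPFv3 packet header:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, 89);
 *	int offset = 12;
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM, &offset, sizeof(offset));
 */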
static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		/*
		 * We allow getsockopt() for IPPROTO_IPV6-level
		 * IPV6_CHECKSUM socket option on ICMPv6 sockets
		 * since RFC3542 is silent about it.
		 */
		if (rp->checksum == 0)
			val = -1;
		else
			val = rp->offset;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

static int rawv6_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_getsockopt(sk, level, optname, optval, optlen);
	}

	return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	switch (level) {
	case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return compat_ipv6_getsockopt(sk, level, optname,
					      optval, optlen);
	}
	return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ: {
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ: {
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL)
			amount = skb_tail_pointer(skb) -
				 skb_transport_header(skb);
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
#ifdef CONFIG_IPV6_MROUTE
		return ip6mr_ioctl(sk, cmd, (void __user *)arg);
#else
		return -ENOIOCTLCMD;
#endif
	}
}

#ifdef CONFIG_COMPAT
static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	case SIOCINQ:
		return -ENOIOCTLCMD;
	default:
#ifdef CONFIG_IPV6_MROUTE
		return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
		return -ENOIOCTLCMD;
#endif
	}
}
#endif
static void rawv6_close(struct sock *sk, long timeout)
{
	if (inet_sk(sk)->inet_num == IPPROTO_RAW)
		ip6_ra_control(sk, -1);
	sk_common_release(sk);
}

static void raw6_destroy(struct sock *sk)
{
	lock_sock(sk);
	ip6_flush_pending_frames(sk);
	release_sock(sk);

	inet6_destroy_sock(sk);
}

static int rawv6_init_sk(struct sock *sk)
{
	struct raw6_sock *rp = raw6_sk(sk);

	switch (inet_sk(sk)->inet_num) {
	case IPPROTO_ICMPV6:
		rp->checksum = 1;
		rp->offset   = 2;
		break;
	default:
		break;
	}
	return 0;
}
struct proto rawv6_prot = {
	.name		   = "RAWv6",
	.owner		   = THIS_MODULE,
	.close		   = rawv6_close,
	.destroy	   = raw6_destroy,
	.connect	   = ip6_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = rawv6_ioctl,
	.init		   = rawv6_init_sk,
	.setsockopt	   = rawv6_setsockopt,
	.getsockopt	   = rawv6_getsockopt,
	.sendmsg	   = rawv6_sendmsg,
	.recvmsg	   = rawv6_recvmsg,
	.bind		   = rawv6_bind,
	.backlog_rcv	   = rawv6_rcv_skb,
	.hash		   = raw_hash_sk,
	.unhash		   = raw_unhash_sk,
	.obj_size	   = sizeof(struct raw6_sock),
	.h.raw_hash	   = &raw_v6_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_rawv6_setsockopt,
	.compat_getsockopt = compat_rawv6_getsockopt,
	.compat_ioctl	   = compat_rawv6_ioctl,
#endif
};
#ifdef CONFIG_PROC_FS
static int raw6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		struct sock *sp = v;
		__u16 srcp = inet_sk(sp)->inet_num;

		ip6_dgram_sock_seq_show(seq, v, srcp, 0,
					raw_seq_private(seq)->bucket);
	}
	return 0;
}

static const struct seq_operations raw6_seq_ops = {
	.start =	raw_seq_start,
	.next =		raw_seq_next,
	.stop =		raw_seq_stop,
	.show =		raw6_seq_show,
};

static int raw6_seq_open(struct inode *inode, struct file *file)
{
	return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops);
}

static const struct file_operations raw6_seq_fops = {
	.owner =	THIS_MODULE,
	.open =		raw6_seq_open,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	seq_release_net,
};

static int __net_init raw6_init_net(struct net *net)
{
	if (!proc_create("raw6", S_IRUGO, net->proc_net, &raw6_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit raw6_exit_net(struct net *net)
{
	remove_proc_entry("raw6", net->proc_net);
}

static struct pernet_operations raw6_net_ops = {
	.init = raw6_init_net,
	.exit = raw6_exit_net,
};

int __init raw6_proc_init(void)
{
	return register_pernet_subsys(&raw6_net_ops);
}

void raw6_proc_exit(void)
{
	unregister_pernet_subsys(&raw6_net_ops);
}
#endif	/* CONFIG_PROC_FS */
/* Same as inet6_dgram_ops, sans udp_poll.  */
static const struct proto_ops inet6_sockraw_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_dgram_connect,	/* ok		*/
	.socketpair	   = sock_no_socketpair,	/* a do nothing	*/
	.accept		   = sock_no_accept,		/* a do nothing	*/
	.getname	   = inet6_getname,
	.poll		   = datagram_poll,		/* ok		*/
	.ioctl		   = inet6_ioctl,		/* must change	*/
	.listen		   = sock_no_listen,		/* ok		*/
	.shutdown	   = inet_shutdown,		/* ok		*/
	.setsockopt	   = sock_common_setsockopt,	/* ok		*/
	.getsockopt	   = sock_common_getsockopt,	/* ok		*/
	.sendmsg	   = inet_sendmsg,		/* ok		*/
	.recvmsg	   = sock_common_recvmsg,	/* ok		*/
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw rawv6_protosw = {
	.type		= SOCK_RAW,
	.protocol	= IPPROTO_IP,	/* wild card */
	.prot		= &rawv6_prot,
	.ops		= &inet6_sockraw_ops,
	.no_check	= UDP_CSUM_DEFAULT,
	.flags		= INET_PROTOSW_REUSE,
};

int __init rawv6_init(void)
{
	int ret;

	ret = inet6_register_protosw(&rawv6_protosw);
	return ret;
}

void rawv6_exit(void)
{
	inet6_unregister_protosw(&rawv6_protosw);
}