/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Adapted from linux/net/ipv4/raw.c
 *
 *	$Id: raw.c,v 1.51 2002/02/01 22:01:04 davem Exp $
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI,H.@USAGI	:	raw checksum (RFC2292(bis) compliance)
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
#include <net/mip6.h>
#endif

#include <net/rawv6.h>
#include <net/xfrm.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE];
DEFINE_RWLOCK(raw_v6_lock);
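
/*
 * Raw IPv6 sockets are hashed purely on the protocol number carried in
 * inet_sk(sk)->num: its low bits pick one of the RAWV6_HTABLE_SIZE
 * buckets of raw_v6_htable, and additions/removals are serialised by
 * raw_v6_lock.
 */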
static void raw_v6_hash(struct sock *sk)
{
	struct hlist_head *list = &raw_v6_htable[inet_sk(sk)->num &
						 (RAWV6_HTABLE_SIZE - 1)];

	write_lock_bh(&raw_v6_lock);
	sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock_bh(&raw_v6_lock);
}

static void raw_v6_unhash(struct sock *sk)
{
	write_lock_bh(&raw_v6_lock);
	if (sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(&raw_v6_lock);
}

/* Grumble... icmp and ip_input want to get at this... */
struct sock *__raw_v6_lookup(struct sock *sk, unsigned short num,
			     struct in6_addr *loc_addr, struct in6_addr *rmt_addr,
			     int dif)
{
	struct hlist_node *node;
	int is_multicast = ipv6_addr_is_multicast(loc_addr);

	sk_for_each_from(sk, node)
		if (inet_sk(sk)->num == num) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			if (!ipv6_addr_any(&np->daddr) &&
			    !ipv6_addr_equal(&np->daddr, rmt_addr))
				continue;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
				continue;

			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
					goto found;
				if (is_multicast &&
				    inet6_mc_check(sk, loc_addr, rmt_addr))
					goto found;
				continue;
			}
			goto found;
		}
	sk = NULL;
found:
	return sk;
}
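
/*
 * The per-socket ICMP6_FILTER is a 256-bit bitmap indexed by ICMPv6 type.
 * icmpv6_filter() below returns non-zero when the bit for skb's icmp6_type
 * is set, i.e. when the socket asked for that type to be filtered out, and
 * the caller then skips delivery to this socket.
 */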
static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
{
	struct icmp6hdr *icmph;
	struct raw6_sock *rp = raw6_sk(sk);

	if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
		__u32 *data = &rp->filter.data[0];
		int bit_nr;

		icmph = (struct icmp6hdr *) skb->data;
		bit_nr = icmph->icmp6_type;

		return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
	}
	return 0;
}

#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);

int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
					   struct sk_buff *skb))
{
	rcu_assign_pointer(mh_filter, filter);
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);

int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
					     struct sk_buff *skb))
{
	rcu_assign_pointer(mh_filter, NULL);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);

#endif

/*
 *	demultiplex raw sockets.
 *	(should consider queueing the skb in the sock receive_queue
 *	without calling rawv6.c)
 *
 *	Caller owns SKB so we must make clones.
 */
int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	struct in6_addr *saddr;
	struct in6_addr *daddr;
	struct sock *sk;
	int delivered = 0;
	__u8 hash;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	read_lock(&raw_v6_lock);
	sk = sk_head(&raw_v6_htable[hash]);

	/*
	 *	The first socket found will be delivered after
	 *	delivery to transport protocols.
	 */

	if (sk == NULL)
		goto out;

	sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);

	while (sk) {
		int filtered;

		delivered = 1;
		switch (nexthdr) {
		case IPPROTO_ICMPV6:
			filtered = icmpv6_filter(sk, skb);
			break;
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		case IPPROTO_MH:
		{
			/* XXX: To validate MH only once for each packet,
			 * this is placed here. It should be after checking
			 * xfrm policy, however it doesn't. The checking xfrm
			 * policy is placed in rawv6_rcv() because it is
			 * required for each socket.
			 */
			int (*filter)(struct sock *sock, struct sk_buff *skb);

			filter = rcu_dereference(mh_filter);
			filtered = filter ? filter(sk, skb) : 0;
			break;
		}
#endif
		default:
			filtered = 0;
			break;
		}

		if (filtered < 0)
			break;
		if (filtered == 0) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				rawv6_rcv(sk, clone);
		}
		sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr,
				     IP6CB(skb)->iif);
	}
out:
	read_unlock(&raw_v6_lock);
	return delivered;
}
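
/*
 * Bind a raw IPv6 socket to a local address.  Mapped IPv4 addresses are
 * rejected, and binding to a link-local address requires a scope: in
 * practice either sin6_scope_id here or an earlier SO_BINDTODEVICE.
 */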
/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__be32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;
	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return(-EADDRNOTAVAIL);

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE)
		goto out;

	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (addr_type & IPV6_ADDR_LINKLOCAL) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out;

			dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
			if (!dev) {
				err = -ENODEV;
				goto out;
			}
		}

		/* ipv4 addr of the socket is invalid.  Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(&addr->sin6_addr, dev, 0)) {
				if (dev)
					dev_put(dev);
				goto out;
			}
		}
		if (dev)
			dev_put(dev);
	}

	inet->rcv_saddr = inet->saddr = v4addr;
	ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
	err = 0;
out:
	release_sock(sk);
	return err;
}

void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       int type, int code, int offset, __be32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and error is hard).
	 */
	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG)
		harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);

	if (np->recverr) {
		u8 *payload = skb->data;
		if (!inet->hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (np->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}

static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
	    skb_checksum_complete(skb)) {
		/* FIXME: increment a raw6 drops counter here */
		kfree_skb(skb);
		return 0;
	}

	/* Charge it to the socket. */
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		/* FIXME: increment a raw6 drops counter here */
		kfree_skb(skb);
		return 0;
	}

	return 0;
}

/*
 *	This is next to useless...
 *	if we demultiplex in network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		skb_postpull_rcsum(skb, skb_network_header(skb),
				   skb_network_header_len(skb));
		if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     skb->len, inet->num, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len,
							 inet->num, 0));

	if (inet->hdrincl) {
		if (skb_checksum_complete(skb)) {
			/* FIXME: increment a raw6 drops counter here */
			kfree_skb(skb);
			return 0;
		}
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}

/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
 */

static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
			 struct msghdr *msg, size_t len,
			 int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
	struct sk_buff *skb;
	size_t copied;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (addr_len)
		*addr_len = sizeof(*sin6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb_csum_unnecessary(skb)) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else if (msg->msg_flags & MSG_TRUNC) {
		if (__skb_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	} else {
		err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	/* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = 0;
		ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr);
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;
		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
			sin6->sin6_scope_id = IP6CB(skb)->iif;
	}

	sock_recv_timestamp(msg, sk, skb);

	if (np->rxopt.all)
		datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	skb_kill_datagram(sk, skb, flags);

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags & MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	/* FIXME: increment a raw6 drops counter here */
	goto out;
}
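
/*
 * When IPV6_CHECKSUM is enabled on the socket (rp->checksum), the queued
 * fragments are summed, the pseudo-header checksum is folded in, and the
 * result is written back at rp->offset inside the transport header before
 * the pending frames are pushed out.
 */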
static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
				     struct raw6_sock *rp)
{
	struct sk_buff *skb;
	int err = 0;
	int offset;
	int len;
	int total_len;
	__wsum tmp_csum;
	__sum16 csum;

	if (!rp->checksum)
		goto send;

	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	offset = rp->offset;
	total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) -
						skb->data);
	if (offset >= total_len - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* should be check HW csum miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		tmp_csum = skb->csum;
	} else {
		struct sk_buff *csum_skb = NULL;
		tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

			if (csum_skb)
				continue;

			len = skb->len - skb_transport_offset(skb);
			if (offset >= len) {
				offset -= len;
				continue;
			}

			csum_skb = skb;
		}

		skb = csum_skb;
	}

	offset += skb_transport_offset(skb);
	if (skb_copy_bits(skb, offset, &csum, 2))
		BUG();

	/* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));

	csum = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
			       total_len, fl->proto, tmp_csum);

	if (csum == 0 && fl->proto == IPPROTO_UDP)
		csum = CSUM_MANGLED_0;

	if (skb_store_bits(skb, offset, &csum, 2))
		BUG();

send:
	err = ip6_push_pending_frames(sk);
out:
	return err;
}
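
/*
 * IPV6_HDRINCL path: the caller supplies the complete IPv6 header in the
 * payload, so the packet only needs to be copied into a freshly allocated
 * skb and handed to dst_output() through the NF_IP6_LOCAL_OUT netfilter
 * hook; no header is built and no checksum is filled in by the kernel.
 */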
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
			     struct flowi *fl, struct rt6_info *rt,
			     unsigned int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	unsigned int hh_len;
	int err;

	if (length > rt->u.dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags & MSG_PROBE)
		goto out;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	skb = sock_alloc_send_skb(sk, length + hh_len + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hh_len);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	skb_put(skb, length);
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);

	skb->ip_summed = CHECKSUM_NONE;

	skb->transport_header = skb->network_header;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		      dst_output);
	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
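
/*
 * Peek at the start of the user's iovec so that the ICMPv6 type/code (or,
 * with MIPv6, the MH type) about to be sent can be copied into the flowi;
 * this lets the xfrm policy lookup key on those fields before the data is
 * actually copied into the packet.
 */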
static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
{
	struct iovec *iov;
	u8 __user *type = NULL;
	u8 __user *code = NULL;
	u8 len = 0;
	int probed = 0;
	int i;

	if (!msg->msg_iov)
		return 0;

	for (i = 0; i < msg->msg_iovlen; i++) {
		iov = &msg->msg_iov[i];
		if (!iov)
			continue;

		switch (fl->proto) {
		case IPPROTO_ICMPV6:
			/* check if one-byte field is readable or not. */
			if (iov->iov_base && iov->iov_len < 1)
				break;

			if (!type) {
				type = iov->iov_base;
				/* check if code field is readable or not. */
				if (iov->iov_len > 1)
					code = type + 1;
			} else if (!code)
				code = iov->iov_base;

			if (type && code) {
				if (get_user(fl->fl_icmp_type, type) ||
				    get_user(fl->fl_icmp_code, code))
					return -EFAULT;
				probed = 1;
			}
			break;
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		case IPPROTO_MH:
			if (iov->iov_base && iov->iov_len < 1)
				break;
			/* check if type field is readable or not. */
			if (iov->iov_len > 2 - len) {
				u8 __user *p = iov->iov_base;
				if (get_user(fl->fl_mh_type, &p[2 - len]))
					return -EFAULT;
				probed = 1;
			} else
				len += iov->iov_len;

			break;
#endif
		default:
			probed = 1;
			break;
		}

		if (probed)
			break;
	}
	return 0;
}
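
/*
 * sendmsg() for raw IPv6 sockets.  The destination protocol rides in
 * sin6_port (it must be 0 or match the socket's protocol), and the payload
 * is either passed through verbatim when IPV6_HDRINCL is set
 * (rawv6_send_hdrinc) or appended via ip6_append_data() with an optional
 * checksum fix-up in rawv6_push_pending_frames().
 */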
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
			 struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
	struct in6_addr *daddr, *final_p = NULL, final;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct dst_entry *dst = NULL;
	struct flowi fl;
	int addr_len = msg->msg_namelen;
	int hlimit = -1;
	int tclass = -1;
	u16 proto;
	int err;

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	 */
	if (len > INT_MAX)
		return -EMSGSIZE;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 *	Get and verify the address.
	 */
	memset(&fl, 0, sizeof(fl));

	if (sin6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
			return(-EAFNOSUPPORT);

		/* port is the proto value [0..255] carried in nexthdr */
		proto = ntohs(sin6->sin6_port);

		if (!proto)
			proto = inet->num;
		else if (proto != inet->num)
			return(-EINVAL);

		daddr = &sin6->sin6_addr;
		if (np->sndflow) {
			fl.fl6_flowlabel = sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &np->daddr))
			daddr = &np->daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
			fl.oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		proto = inet->num;
		daddr = &np->daddr;
		fl.fl6_flowlabel = np->flow_label;
	}

	if (ipv6_addr_any(daddr)) {
		/*
		 * unspecified destination address
		 * treated as error... is this correct ?
		 */
		fl6_sock_release(flowlabel);
		return(-EINVAL);
	}

	if (fl.oif == 0)
		fl.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(struct ipv6_txoptions);

		err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
	}
	if (opt == NULL)
		opt = np->opt;
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);

	fl.proto = proto;
	err = rawv6_probe_proto_opt(&fl, msg);
	if (err)
		goto out;

	ipv6_addr_copy(&fl.fl6_dst, daddr);
	if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
		ipv6_addr_copy(&fl.fl6_src, &np->saddr);

	/* merge ip6_build_xmit from ip6_output */
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
		fl.oif = np->mcast_oif;
	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = __xfrm_lookup(&dst, &fl, sk, 1)) < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto out;
	}

	if (hlimit < 0) {
		if (ipv6_addr_is_multicast(&fl.fl6_dst))
			hlimit = np->mcast_hops;
		else
			hlimit = np->hop_limit;
		if (hlimit < 0)
			hlimit = dst_metric(dst, RTAX_HOPLIMIT);
		if (hlimit < 0)
			hlimit = ipv6_get_hoplimit(dst->dev);
	}

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;

back_from_confirm:
	if (inet->hdrincl) {
		err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl,
					(struct rt6_info *)dst, msg->msg_flags);
	} else {
		lock_sock(sk);
		err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
				      len, 0, hlimit, tclass, opt, &fl,
				      (struct rt6_info *)dst, msg->msg_flags);

		if (err)
			ip6_flush_pending_frames(sk);
		else if (!(msg->msg_flags & MSG_MORE))
			err = rawv6_push_pending_frames(sk, &fl, rp);
		release_sock(sk);
	}
done:
	dst_release(dst);
out:
	fl6_sock_release(flowlabel);
	return err < 0 ? err : len;

do_confirm:
	dst_confirm(dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
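
/*
 * The filter manipulated below is the one userspace programs with the
 * RFC 3542 ICMP6_FILTER socket option, roughly:
 *
 *	struct icmp6_filter filt;
 *	ICMP6_FILTER_SETBLOCKALL(&filt);
 *	ICMP6_FILTER_SETPASS(ND_NEIGHBOR_ADVERT, &filt);
 *	setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt));
 *
 * (illustrative only; those macro names come from <netinet/icmp6.h>, not
 * from this file)
 */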
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
			       char __user *optval, int optlen)
{
	switch (optname) {
	case ICMPV6_FILTER:
		if (optlen > sizeof(struct icmp6_filter))
			optlen = sizeof(struct icmp6_filter);
		if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}

static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	int len;

	switch (optname) {
	case ICMPV6_FILTER:
		if (get_user(len, optlen))
			return -EFAULT;
		if (len < 0)
			return -EINVAL;
		if (len > sizeof(struct icmp6_filter))
			len = sizeof(struct icmp6_filter);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	return 0;
}

static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		/* You may get strange result with a positive odd offset;
		   RFC2292bis agrees with me. */
		if (val > 0 && (val & 1))
			return(-EINVAL);
		if (val < 0) {
			rp->checksum = 0;
		} else {
			rp->checksum = 1;
			rp->offset = val;
		}

		return 0;

	default:
		return(-ENOPROTOOPT);
	}
}

static int rawv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int optlen)
{
	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, level, optname, optval,
					   optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_setsockopt(sk, level, optname, optval,
				       optlen);
	}

	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
				   char __user *optval, int optlen)
{
	switch (level) {
	case SOL_RAW:
		break;
	case SOL_ICMPV6:
		if (inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return compat_ipv6_setsockopt(sk, level, optname,
					      optval, optlen);
	}
	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif

static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case IPV6_CHECKSUM:
		if (rp->checksum == 0)
			val = -1;
		else
			val = rp->offset;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

static int rawv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_geticmpfilter(sk, level, optname, optval,
					   optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return ipv6_getsockopt(sk, level, optname, optval,
				       optlen);
	}

	return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	switch (level) {
	case SOL_RAW:
		break;
	case SOL_ICMPV6:
		if (inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM)
			break;
	default:
		return compat_ipv6_getsockopt(sk, level, optname,
					      optval, optlen);
	}
	return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif

static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL)
			amount = skb->tail - skb->transport_header;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}
}

static void rawv6_close(struct sock *sk, long timeout)
{
	if (inet_sk(sk)->num == IPPROTO_RAW)
		ip6_ra_control(sk, -1, NULL);

	sk_common_release(sk);
}
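
/*
 * New-socket defaults: for protocols whose header carries a mandatory
 * checksum at a fixed offset (offset 2 for ICMPv6), checksumming is
 * switched on automatically, exactly as if userspace had issued
 * setsockopt(IPV6_CHECKSUM) with that offset.
 */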
static int rawv6_init_sk(struct sock *sk)
{
	struct raw6_sock *rp = raw6_sk(sk);

	switch (inet_sk(sk)->num) {
	case IPPROTO_ICMPV6:
		rp->checksum = 1;
		rp->offset   = 2;
		break;
	case IPPROTO_MH:
		rp->checksum = 1;
		rp->offset   = 4;
		break;
	default:
		break;
	}
	return(0);
}

DEFINE_PROTO_INUSE(rawv6)

struct proto rawv6_prot = {
	.name		   = "RAWv6",
	.owner		   = THIS_MODULE,
	.close		   = rawv6_close,
	.connect	   = ip6_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = rawv6_ioctl,
	.init		   = rawv6_init_sk,
	.destroy	   = inet6_destroy_sock,
	.setsockopt	   = rawv6_setsockopt,
	.getsockopt	   = rawv6_getsockopt,
	.sendmsg	   = rawv6_sendmsg,
	.recvmsg	   = rawv6_recvmsg,
	.bind		   = rawv6_bind,
	.backlog_rcv	   = rawv6_rcv_skb,
	.hash		   = raw_v6_hash,
	.unhash		   = raw_v6_unhash,
	.obj_size	   = sizeof(struct raw6_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_rawv6_setsockopt,
	.compat_getsockopt = compat_rawv6_getsockopt,
#endif
	REF_PROTO_INUSE(rawv6)
};
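
/*
 * The seq_file code below exposes /proc/net/raw6, one line per raw IPv6
 * socket, in essentially the same column layout as the IPv4 /proc/net/raw
 * file.
 */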
#ifdef CONFIG_PROC_FS
struct raw6_iter_state {
	int bucket;
};

#define raw6_seq_private(seq)	((struct raw6_iter_state *)(seq)->private)

static struct sock *raw6_get_first(struct seq_file *seq)
{
	struct sock *sk = NULL;
	struct hlist_node *node;
	struct raw6_iter_state *state = raw6_seq_private(seq);

	for (state->bucket = 0; state->bucket < RAWV6_HTABLE_SIZE; ++state->bucket)
		sk_for_each(sk, node, &raw_v6_htable[state->bucket])
			if (sk->sk_family == PF_INET6)
				goto out;
	sk = NULL;
out:
	return sk;
}

static struct sock *raw6_get_next(struct seq_file *seq, struct sock *sk)
{
	struct raw6_iter_state *state = raw6_seq_private(seq);

	do {
		sk = sk_next(sk);
try_again:
		;
	} while (sk && sk->sk_family != PF_INET6);

	if (!sk && ++state->bucket < RAWV6_HTABLE_SIZE) {
		sk = sk_head(&raw_v6_htable[state->bucket]);
		goto try_again;
	}
	return sk;
}

static struct sock *raw6_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = raw6_get_first(seq);

	if (sk)
		while (pos && (sk = raw6_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *raw6_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&raw_v6_lock);
	return *pos ? raw6_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *raw6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = raw6_get_first(seq);
	else
		sk = raw6_get_next(seq, v);
	++*pos;
	return sk;
}

static void raw6_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&raw_v6_lock);
}

static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
	struct ipv6_pinfo *np = inet6_sk(sp);
	struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = 0;
	srcp  = inet_sk(sp)->num;
	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   atomic_read(&sp->sk_wmem_alloc),
		   atomic_read(&sp->sk_rmem_alloc),
		   0, 0L, 0,
		   sock_i_uid(sp), 0,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp);
}

static int raw6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "  sl  "
			   "local_address                         "
			   "remote_address                        "
			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   "   uid  timeout inode\n");
	else
		raw6_sock_seq_show(seq, v, raw6_seq_private(seq)->bucket);
	return 0;
}

static const struct seq_operations raw6_seq_ops = {
	.start =	raw6_seq_start,
	.next =		raw6_seq_next,
	.stop =		raw6_seq_stop,
	.show =		raw6_seq_show,
};

static int raw6_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &raw6_seq_ops,
				sizeof(struct raw6_iter_state));
}

static const struct file_operations raw6_seq_fops = {
	.owner =	THIS_MODULE,
	.open =		raw6_seq_open,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	seq_release_private,
};

int __init raw6_proc_init(void)
{
	if (!proc_net_fops_create(&init_net, "raw6", S_IRUGO, &raw6_seq_fops))
		return -ENOMEM;

	return 0;
}

void raw6_proc_exit(void)
{
	proc_net_remove(&init_net, "raw6");
}
#endif	/* CONFIG_PROC_FS */