/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Fixes:
 *		Many		:	Split from ip.c, see ip.c for history.
 *		Martin Mares	:	TOS setting fixed.
 *		Alan Cox	:	Fixed a couple of oopses in Martin's
 *					TOS tweaks.
 *		Mike McLagan	:	Routing by source
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/compat.h>
#include <net/checksum.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/ip_fib.h>
#include <linux/errqueue.h>
#include <asm/uaccess.h>
/*
 *	SOL_IP control messages.
 */

static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct in_pktinfo info = *PKTINFO_SKB_CB(skb);

	info.ipi_addr.s_addr = ip_hdr(skb)->daddr;

	put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
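/*
 * Usage sketch (user space, not part of this file): the IP_PKTINFO cmsg
 * built above is requested with setsockopt() and read back from the control
 * buffer returned by recvmsg().  A minimal, assumed receive path (socket
 * descriptor "fd", payload iovec omitted for brevity):
 *
 *	int on = 1;
 *	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
 *	struct msghdr mh = { .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
 *	recvmsg(fd, &mh, 0);
 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c))
 *		if (c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_PKTINFO) {
 *			struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(c);
 *			 ... pi->ipi_ifindex and pi->ipi_addr describe the packet ...
 *		}
 */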
static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
{
	int ttl = ip_hdr(skb)->ttl;

	put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
}

static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
{
	put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
}

static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
	if (IPCB(skb)->opt.optlen == 0)
		return;

	put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
		 IPCB(skb)->opt.__data);
}

static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;

	if (IPCB(skb)->opt.optlen == 0)
		return;

	if (ip_options_echo(opt, skb)) {
		msg->msg_flags |= MSG_CTRUNC;
		return;
	}

	put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}

static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
				  int offset)
{
	__wsum csum = skb->csum;

	if (skb->ip_summed != CHECKSUM_COMPLETE)
		return;

	csum = csum_sub(csum, csum_partial(skb->data, offset, 0));

	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}

static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
	char *secdata;
	u32 seclen, secid;
	int err;

	err = security_socket_getpeersec_dgram(NULL, skb, &secid);
	if (err)
		return;

	err = security_secid_to_secctx(secid, &secdata, &seclen);
	if (err)
		return;

	put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
	security_release_secctx(secdata, seclen);
}

static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
	struct sockaddr_in sin;
	const struct iphdr *iph = ip_hdr(skb);
	__be16 *ports = (__be16 *)skb_transport_header(skb);

	if (skb_transport_offset(skb) + 4 > skb->len)
		return;

	/* All current transport protocols have the port numbers in the
	 * first four bytes of the transport header and this function is
	 * written with this assumption in mind.
	 */

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = iph->daddr;
	sin.sin_port = ports[1];
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
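/*
 * Usage sketch (user space, not part of this file): IP_ORIGDSTADDR is the
 * cmsg a transparent UDP proxy reads to learn the original destination of a
 * redirected datagram.  Assumed consumer loop, with IP_RECVORIGDSTADDR
 * already enabled on the socket and "mh" filled in by recvmsg():
 *
 *	struct sockaddr_in orig_dst;
 *	struct cmsghdr *c;
 *
 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c))
 *		if (c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_ORIGDSTADDR)
 *			memcpy(&orig_dst, CMSG_DATA(c), sizeof(orig_dst));
 */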
void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
			 int offset)
{
	struct inet_sock *inet = inet_sk(skb->sk);
	unsigned int flags = inet->cmsg_flags;

	/* Ordered by supposed usage frequency */
	if (flags & IP_CMSG_PKTINFO) {
		ip_cmsg_recv_pktinfo(msg, skb);
		flags &= ~IP_CMSG_PKTINFO;
	}

	if (flags & IP_CMSG_TTL) {
		ip_cmsg_recv_ttl(msg, skb);
		flags &= ~IP_CMSG_TTL;
	}

	if (flags & IP_CMSG_TOS) {
		ip_cmsg_recv_tos(msg, skb);
		flags &= ~IP_CMSG_TOS;
	}

	if (flags & IP_CMSG_RECVOPTS) {
		ip_cmsg_recv_opts(msg, skb);
		flags &= ~IP_CMSG_RECVOPTS;
	}

	if (flags & IP_CMSG_RETOPTS) {
		ip_cmsg_recv_retopts(msg, skb);
		flags &= ~IP_CMSG_RETOPTS;
	}

	if (flags & IP_CMSG_PASSSEC) {
		ip_cmsg_recv_security(msg, skb);
		flags &= ~IP_CMSG_PASSSEC;
	}

	if (flags & IP_CMSG_ORIGDSTADDR) {
		ip_cmsg_recv_dstaddr(msg, skb);
		flags &= ~IP_CMSG_ORIGDSTADDR;
	}

	if (flags & IP_CMSG_CHECKSUM)
		ip_cmsg_recv_checksum(msg, skb, offset);
}
EXPORT_SYMBOL(ip_cmsg_recv_offset);
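/*
 * Usage sketch (user space, not part of this file): the per-packet TTL and
 * TOS that ip_cmsg_recv_offset() emits are opted into with IP_RECVTTL and
 * IP_RECVTOS; they then arrive as IP_TTL (an int) and IP_TOS (a single
 * byte) control messages:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_IP, IP_RECVTOS, &on, sizeof(on));
 *	...
 *	if (c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_TTL)
 *		ttl = *(int *)CMSG_DATA(c);
 */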
int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
		 bool allow_ipv6)
{
	int err, val;
	struct cmsghdr *cmsg;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
#if IS_ENABLED(CONFIG_IPV6)
		if (allow_ipv6 &&
		    cmsg->cmsg_level == SOL_IPV6 &&
		    cmsg->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo *src_info;

			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
				return -EINVAL;
			src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
				return -EINVAL;
			ipc->oif = src_info->ipi6_ifindex;
			ipc->addr = src_info->ipi6_addr.s6_addr32[3];
			continue;
		}
#endif
		if (cmsg->cmsg_level != SOL_IP)
			continue;
		switch (cmsg->cmsg_type) {
		case IP_RETOPTS:
			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
					     err < 40 ? err : 40);
			if (err)
				return err;
			break;
		case IP_PKTINFO:
		{
			struct in_pktinfo *info;

			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
				return -EINVAL;
			info = (struct in_pktinfo *)CMSG_DATA(cmsg);
			ipc->oif = info->ipi_ifindex;
			ipc->addr = info->ipi_spec_dst.s_addr;
			break;
		}
		case IP_TTL:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				return -EINVAL;
			val = *(int *)CMSG_DATA(cmsg);
			if (val < 1 || val > 255)
				return -EINVAL;
			ipc->ttl = val;
			break;
		case IP_TOS:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				return -EINVAL;
			val = *(int *)CMSG_DATA(cmsg);
			if (val < 0 || val > 255)
				return -EINVAL;
			ipc->tos = val;
			ipc->priority = rt_tos2priority(ipc->tos);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
/* Special input handler for packets caught by the router alert option.
 * They are selected only by the protocol field, and then processed like
 * local ones; but only if someone wants them!  Otherwise, a router
 * not running rsvpd will kill RSVP.
 *
 * What user level does with them is its own problem.  I have no idea how it
 * will masquerade or NAT them (it is a joke, joke :-)), but the receiver
 * should be clever enough e.g. to forward mtrace requests sent to a
 * multicast group so that they reach the destination designated router.
 */
struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);

static void ip_ra_destroy_rcu(struct rcu_head *head)
{
	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);

	sock_put(ra->saved_sk);
	kfree(ra);
}

int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *))
{
	struct ip_ra_chain *ra, *new_ra;
	struct ip_ra_chain __rcu **rap;

	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
		return -EINVAL;

	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;

	spin_lock_bh(&ip_ra_lock);
	for (rap = &ip_ra_chain;
	     (ra = rcu_dereference_protected(*rap,
			lockdep_is_held(&ip_ra_lock))) != NULL;
	     rap = &ra->next) {
		if (ra->sk == sk) {
			if (on) {
				spin_unlock_bh(&ip_ra_lock);
				kfree(new_ra);
				return -EADDRINUSE;
			}
			/* don't let ip_call_ra_chain() use sk again */
			ra->sk = NULL;
			RCU_INIT_POINTER(*rap, ra->next);
			spin_unlock_bh(&ip_ra_lock);

			if (ra->destructor)
				ra->destructor(sk);
			/*
			 * Delay sock_put(sk) and kfree(ra) after one rcu grace
			 * period.  This guarantees ip_call_ra_chain() doesn't
			 * need to mess with socket refcounts.
			 */
			ra->saved_sk = sk;
			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
			return 0;
		}
	}
	if (new_ra == NULL) {
		spin_unlock_bh(&ip_ra_lock);
		return -ENOBUFS;
	}
	new_ra->sk = sk;
	new_ra->destructor = destructor;

	RCU_INIT_POINTER(new_ra->next, ra);
	rcu_assign_pointer(*rap, new_ra);

	spin_unlock_bh(&ip_ra_lock);
	return 0;
}
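/*
 * Usage sketch (user space, not part of this file): ip_ra_control() backs
 * the IP_ROUTER_ALERT socket option handled further below.  An RSVP-style
 * daemon typically opens a raw socket for its protocol and then asks to see
 * every packet that carries the router alert option:
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 */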
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
		   __be16 port, u32 info, u8 *payload)
{
	struct sock_exterr_skb *serr;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
	serr->ee.ee_type = icmp_hdr(skb)->type;
	serr->ee.ee_code = icmp_hdr(skb)->code;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
			    skb_network_header(skb);
	serr->port = port;

	if (skb_pull(skb, payload - skb->data) != NULL) {
		skb_reset_transport_header(skb);
		if (sock_queue_err_skb(sk, skb) == 0)
			return;
	}
	kfree_skb(skb);
}

void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sock_exterr_skb *serr;
	struct iphdr *iph;
	struct sk_buff *skb;

	if (!inet->recverr)
		return;

	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->daddr = daddr;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = port;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}
/* IPv4 supports cmsg on all ICMP errors and some timestamps
 *
 * Timestamp code paths do not initialize the fields expected by cmsg:
 * the PKTINFO fields in skb->cb[].  Fill those in here.
 */
static bool ipv4_datagram_support_cmsg(const struct sock *sk,
				       struct sk_buff *skb,
				       int ee_origin)
{
	struct in_pktinfo *info;

	if (ee_origin == SO_EE_ORIGIN_ICMP)
		return true;

	if (ee_origin == SO_EE_ORIGIN_LOCAL)
		return false;

	/* Support IP_PKTINFO on tstamp packets if requested, to correlate
	 * timestamp with egress dev.  Not possible for packets without dev
	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
	 */
	if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
	    (!skb->dev))
		return false;

	info = PKTINFO_SKB_CB(skb);
	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
	info->ipi_ifindex = skb->dev->ifindex;
	return true;
}

/*
 *	Handle MSG_ERRQUEUE
 */
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in	 offender;
	} errhdr;
	int err;
	int copied;

	WARN_ON_ONCE(sk->sk_family == AF_INET6);

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	if (sin && serr->port) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
						   serr->addr_offset);
		sin->sin_port = serr->port;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	sin = &errhdr.offender;
	memset(sin, 0, sizeof(*sin));

	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		if (inet_sk(sk)->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	}

	put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
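/*
 * Usage sketch (user space, not part of this file): errors queued by
 * ip_icmp_error()/ip_local_error() are drained with MSG_ERRQUEUE once
 * IP_RECVERR is enabled on the socket.  Assumed consumer:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	...
 *	recvmsg(fd, &mh, MSG_ERRQUEUE);
 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c))
 *		if (c->cmsg_level == IPPROTO_IP && c->cmsg_type == IP_RECVERR) {
 *			struct sock_extended_err *ee = (void *)CMSG_DATA(c);
 *			 ... ee->ee_errno, ee->ee_type, SO_EE_OFFENDER(ee) ...
 *		}
 */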
/*
 *	Socket option code for IP.  This is the end of the line after any
 *	TCP, UDP etc options on an IP socket.
 */

static int do_ip_setsockopt(struct sock *sk, int level,
			    int optname, char __user *optval, unsigned int optlen)
{
	struct inet_sock *inet = inet_sk(sk);
	int val = 0, err;

	switch (optname) {
	case IP_MTU_DISCOVER:
	case IP_ROUTER_ALERT:
	case IP_MULTICAST_TTL:
	case IP_MULTICAST_ALL:
	case IP_MULTICAST_LOOP:
	case IP_RECVORIGDSTADDR:
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else if (optlen >= sizeof(char)) {
			unsigned char ucval;

			if (get_user(ucval, (unsigned char __user *) optval))
				return -EFAULT;
			val = (int) ucval;
		}
	}

	/* If optlen==0, it is equivalent to val == 0 */

	if (ip_mroute_opt(optname))
		return ip_mroute_setsockopt(sk, optname, optval, optlen);

	err = 0;

	switch (optname) {
	case IP_OPTIONS:
	{
		struct ip_options_rcu *old, *opt = NULL;

		err = ip_options_get_from_user(sock_net(sk), &opt,
					       optval, optlen);
		if (err)
			break;
		old = rcu_dereference_protected(inet->inet_opt,
						sock_owned_by_user(sk));
		if (inet->is_icsk) {
			struct inet_connection_sock *icsk = inet_csk(sk);
#if IS_ENABLED(CONFIG_IPV6)
			if (sk->sk_family == PF_INET ||
			    (!((1 << sk->sk_state) &
			       (TCPF_LISTEN | TCPF_CLOSE)) &&
			     inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
				if (old)
					icsk->icsk_ext_hdr_len -= old->opt.optlen;
				if (opt)
					icsk->icsk_ext_hdr_len += opt->opt.optlen;
				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if IS_ENABLED(CONFIG_IPV6)
			}
#endif
		}
		rcu_assign_pointer(inet->inet_opt, opt);
		if (old)
			kfree_rcu(old, rcu);
		break;
	}
	case IP_PKTINFO:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PKTINFO;
		else
			inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
		break;
	case IP_RECVTTL:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TTL;
		else
			inet->cmsg_flags &= ~IP_CMSG_TTL;
		break;
	case IP_RECVTOS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TOS;
		else
			inet->cmsg_flags &= ~IP_CMSG_TOS;
		break;
	case IP_RECVOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RECVOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
		break;
	case IP_RETOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RETOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
		break;
	case IP_PASSSEC:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PASSSEC;
		else
			inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
		break;
	case IP_RECVORIGDSTADDR:
		if (val)
			inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
		else
			inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
		break;
	case IP_CHECKSUM:
		if (val) {
			if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
				inet_inc_convert_csum(sk);
				inet->cmsg_flags |= IP_CMSG_CHECKSUM;
			}
		} else {
			if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
				inet_dec_convert_csum(sk);
				inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
			}
		}
		break;
	case IP_TOS:	/* This sets both TOS and Precedence */
		if (sk->sk_type == SOCK_STREAM) {
			val &= ~INET_ECN_MASK;
			val |= inet->tos & INET_ECN_MASK;
		}
		if (inet->tos != val) {
			inet->tos = val;
			sk->sk_priority = rt_tos2priority(val);
			sk_dst_reset(sk);
		}
		break;
	case IP_TTL:
		if (val != -1 && (val < 1 || val > 255))
			goto e_inval;
		inet->uc_ttl = val;
		break;
	case IP_HDRINCL:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->hdrincl = val ? 1 : 0;
		break;
	case IP_NODEFRAG:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->nodefrag = val ? 1 : 0;
		break;
	case IP_MTU_DISCOVER:
		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
			goto e_inval;
		inet->pmtudisc = val;
		break;
	case IP_RECVERR:
		inet->recverr = !!val;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		break;
	case IP_MULTICAST_TTL:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->mc_ttl = val;
		break;
	case IP_MULTICAST_LOOP:
		inet->mc_loop = !!val;
		break;
	case IP_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;

		if (optlen != sizeof(int))
			goto e_inval;

		ifindex = (__force int)ntohl((__force __be32)val);
		dev = dev_get_by_index(sock_net(sk), ifindex);
		err = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if)
			break;

		inet->uc_index = ifindex;
		err = 0;
		break;
	}
	case IP_MULTICAST_IF:
	{
		struct ip_mreqn mreq;
		struct net_device *dev = NULL;

		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		/*
		 *	Check the arguments are allowable
		 */
		if (optlen < sizeof(struct in_addr))
			goto e_inval;

		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (optlen >= sizeof(struct ip_mreq)) {
				if (copy_from_user(&mreq, optval,
						   sizeof(struct ip_mreq)))
					break;
			} else if (optlen >= sizeof(struct in_addr)) {
				if (copy_from_user(&mreq.imr_address, optval,
						   sizeof(struct in_addr)))
					break;
			}
		}

		if (!mreq.imr_ifindex) {
			if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
				inet->mc_index = 0;
				inet->mc_addr  = 0;
				err = 0;
				break;
			}
			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
			if (dev)
				mreq.imr_ifindex = dev->ifindex;
		} else
			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);

		err = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if &&
		    mreq.imr_ifindex != sk->sk_bound_dev_if)
			break;

		inet->mc_index = mreq.imr_ifindex;
		inet->mc_addr  = mreq.imr_address.s_addr;
		err = 0;
		break;
	}

	case IP_ADD_MEMBERSHIP:
	case IP_DROP_MEMBERSHIP:
	{
		struct ip_mreqn mreq;

		err = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		if (optlen < sizeof(struct ip_mreq))
			goto e_inval;
		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
				break;
		}

		if (optname == IP_ADD_MEMBERSHIP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
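	/*
	 * Usage sketch (user space, not part of this file): joining a
	 * multicast group on a specific interface goes through the
	 * IP_ADD_MEMBERSHIP path above.  Group address and interface name
	 * are illustrative assumptions:
	 *
	 *	struct ip_mreqn mreq = {
	 *		.imr_multiaddr.s_addr = inet_addr("239.1.2.3"),
	 *		.imr_ifindex = if_nametoindex("eth0"),
	 *	};
	 *
	 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
	 */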
	case IP_MSFILTER:
	{
		struct ip_msfilter *msf;

		if (optlen < IP_MSFILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		msf = kmalloc(optlen, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(msf, optval, optlen)) {
			kfree(msf);
			break;
		}
		/* numsrc >= (1G-4) overflow in 32 bits */
		if (msf->imsf_numsrc >= 0x3ffffffcU ||
		    msf->imsf_numsrc > sysctl_igmp_max_msf) {
			kfree(msf);
			err = -ENOBUFS;
			break;
		}
		if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
			kfree(msf);
			err = -EINVAL;
			break;
		}
		err = ip_mc_msfilter(sk, msf, 0);
		kfree(msf);
		break;
	}
	case IP_BLOCK_SOURCE:
	case IP_UNBLOCK_SOURCE:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	{
		struct ip_mreq_source mreqs;
		int omode, add;

		if (optlen != sizeof(struct ip_mreq_source))
			goto e_inval;
		if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
			err = -EFAULT;
			break;
		}
		if (optname == IP_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == IP_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
			struct ip_mreqn mreq;

			mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
			mreq.imr_address.s_addr = mreqs.imr_interface;
			mreq.imr_ifindex = 0;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs, 0);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in *psin;
		struct ip_mreqn mreq;

		if (optlen < sizeof(struct group_req))
			goto e_inval;
		err = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(greq)))
			break;
		psin = (struct sockaddr_in *)&greq.gr_group;
		if (psin->sin_family != AF_INET)
			goto e_inval;
		memset(&mreq, 0, sizeof(mreq));
		mreq.imr_multiaddr = psin->sin_addr;
		mreq.imr_ifindex = greq.gr_interface;

		if (optname == MCAST_JOIN_GROUP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		struct ip_mreq_source mreqs;
		struct sockaddr_in *psin;
		int omode, add;

		if (optlen != sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			err = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET ||
		    greqs.gsr_source.ss_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			break;
		}
		psin = (struct sockaddr_in *)&greqs.gsr_group;
		mreqs.imr_multiaddr = psin->sin_addr.s_addr;
		psin = (struct sockaddr_in *)&greqs.gsr_source;
		mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
		mreqs.imr_interface = 0; /* use index for mc_source */

		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct ip_mreqn mreq;

			psin = (struct sockaddr_in *)&greqs.gsr_group;
			mreq.imr_multiaddr = psin->sin_addr;
			mreq.imr_address.s_addr = 0;
			mreq.imr_ifindex = greqs.gsr_interface;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			greqs.gsr_interface = mreq.imr_ifindex;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs,
				   greqs.gsr_interface);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct sockaddr_in *psin;
		struct ip_msfilter *msf = NULL;
		struct group_filter *gsf = NULL;
		int msize, i, ifindex;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen, GFP_KERNEL);
		if (!gsf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(gsf, optval, optlen))
			goto mc_msf_out;

		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffff ||
		    gsf->gf_numsrc > sysctl_igmp_max_msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			err = -EINVAL;
			goto mc_msf_out;
		}
		msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
		msf = kmalloc(msize, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		ifindex = gsf->gf_interface;
		psin = (struct sockaddr_in *)&gsf->gf_group;
		if (psin->sin_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			goto mc_msf_out;
		}
		msf->imsf_multiaddr = psin->sin_addr.s_addr;
		msf->imsf_interface = 0;
		msf->imsf_fmode = gsf->gf_fmode;
		msf->imsf_numsrc = gsf->gf_numsrc;
		err = -EADDRNOTAVAIL;
		for (i = 0; i < gsf->gf_numsrc; ++i) {
			psin = (struct sockaddr_in *)&gsf->gf_slist[i];

			if (psin->sin_family != AF_INET)
				goto mc_msf_out;
			msf->imsf_slist[i] = psin->sin_addr.s_addr;
		}
		kfree(gsf);
		gsf = NULL;

		err = ip_mc_msfilter(sk, msf, ifindex);
mc_msf_out:
		kfree(msf);
		kfree(gsf);
		break;
	}
	case IP_MULTICAST_ALL:
		if (val != 0 && val != 1)
			goto e_inval;
		inet->mc_all = val;
		break;
	case IP_ROUTER_ALERT:
		err = ip_ra_control(sk, val ? 1 : 0, NULL);
		break;
	case IP_FREEBIND:
		inet->freebind = !!val;
		break;
	case IP_IPSEC_POLICY:
	case IP_XFRM_POLICY:
		err = -EPERM;
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			break;
		err = xfrm_user_policy(sk, optname, optval, optlen);
		break;
	case IP_TRANSPARENT:
		if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		inet->transparent = !!val;
		break;
	case IP_MINTTL:
		if (val < 0 || val > 255)
			goto e_inval;
		inet->min_ttl = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	return err;

e_inval:
	return -EINVAL;
}
/*
 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
 *
 * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
 * destination in skb->cb[] before dst drop.
 * This way, the receiver doesn't take cache line misses to read the rtable.
 */
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
{
	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
	bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
		       ipv6_sk_rxinfo(sk);

	if (prepare && skb_rtable(skb)) {
		pktinfo->ipi_ifindex = inet_iif(skb);
		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
	} else {
		pktinfo->ipi_ifindex = 0;
		pktinfo->ipi_spec_dst.s_addr = 0;
	}
}
int ip_setsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
	    optname != IP_IPSEC_POLICY &&
	    optname != IP_XFRM_POLICY &&
	    !ip_mroute_opt(optname)) {
		err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_setsockopt);
#ifdef CONFIG_COMPAT
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
		return compat_mc_setsockopt(sk, level, optname, optval, optlen,
					    ip_setsockopt);

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
	    optname != IP_IPSEC_POLICY &&
	    optname != IP_XFRM_POLICY &&
	    !ip_mroute_opt(optname)) {
		err = compat_nf_setsockopt(sk, PF_INET, optname,
					   optval, optlen);
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_setsockopt);
#endif
/*
 *	Get the options.  Note for future reference.  The GET of IP options gets
 *	the _received_ ones.  The set sets the _sent_ ones.
 */
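/*
 * Usage sketch (user space, not part of this file): reading back the
 * received options mentioned above is a plain getsockopt() call; at most
 * 40 bytes of (undone) IP options are copied out:
 *
 *	unsigned char opts[40];
 *	socklen_t optlen = sizeof(opts);
 *
 *	if (getsockopt(fd, IPPROTO_IP, IP_OPTIONS, opts, &optlen) == 0)
 *		 ... optlen bytes of options are now in opts ...
 */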
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int val;
	int len;
	int err;

	if (level != SOL_IP)
		return -EOPNOTSUPP;

	if (ip_mroute_opt(optname))
		return ip_mroute_getsockopt(sk, optname, optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case IP_OPTIONS:
	{
		unsigned char optbuf[sizeof(struct ip_options)+40];
		struct ip_options *opt = (struct ip_options *)optbuf;
		struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference_protected(inet->inet_opt,
						     sock_owned_by_user(sk));
		opt->optlen = 0;
		if (inet_opt)
			memcpy(optbuf, &inet_opt->opt,
			       sizeof(struct ip_options) +
			       inet_opt->opt.optlen);

		if (opt->optlen == 0)
			return put_user(0, optlen);

		ip_options_undo(opt);

		len = min_t(unsigned int, len, opt->optlen);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, opt->__data, len))
			return -EFAULT;
		return 0;
	}
	case IP_PKTINFO:
		val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
		break;
	case IP_RECVTTL:
		val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
		break;
	case IP_RECVTOS:
		val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
		break;
	case IP_RECVOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
		break;
	case IP_RETOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
		break;
	case IP_PASSSEC:
		val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
		break;
	case IP_RECVORIGDSTADDR:
		val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
		break;
	case IP_CHECKSUM:
		val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
		break;
	case IP_TTL:
		val = (inet->uc_ttl == -1 ?
		       sysctl_ip_default_ttl :
		       inet->uc_ttl);
		break;
	case IP_HDRINCL:
		val = inet->hdrincl;
		break;
	case IP_NODEFRAG:
		val = inet->nodefrag;
		break;
	case IP_MTU_DISCOVER:
		val = inet->pmtudisc;
		break;
	case IP_MTU:
	{
		struct dst_entry *dst;

		val = 0;
		dst = sk_dst_get(sk);
		if (dst) {
			val = dst_mtu(dst);
			dst_release(dst);
		}
		if (!val)
			return -ENOTCONN;
		break;
	}
	case IP_RECVERR:
		val = inet->recverr;
		break;
	case IP_MULTICAST_TTL:
		val = inet->mc_ttl;
		break;
	case IP_MULTICAST_LOOP:
		val = inet->mc_loop;
		break;
	case IP_UNICAST_IF:
		val = (__force int)htonl((__u32) inet->uc_index);
		break;
	case IP_MULTICAST_IF:
	{
		struct in_addr addr;

		len = min_t(unsigned int, len, sizeof(struct in_addr));
		addr.s_addr = inet->mc_addr;

		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &addr, len))
			return -EFAULT;
		return 0;
	}
	case IP_MSFILTER:
	{
		struct ip_msfilter msf;

		if (len < IP_MSFILTER_SIZE(0)) {
			return -EINVAL;
		}
		if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
			return -EFAULT;
		}
		err = ip_mc_msfget(sk, &msf,
				   (struct ip_msfilter __user *)optval, optlen);
		return err;
	}
	case MCAST_MSFILTER:
	{
		struct group_filter gsf;

		if (len < GROUP_FILTER_SIZE(0)) {
			return -EINVAL;
		}
		if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
			return -EFAULT;
		}
		err = ip_mc_gsfget(sk, &gsf,
				   (struct group_filter __user *)optval,
				   optlen);
		return err;
	}
	case IP_MULTICAST_ALL:
		val = inet->mc_all;
		break;
	case IP_PKTOPTIONS:
	{
		struct msghdr msg;

		if (sk->sk_type != SOCK_STREAM)
			return -ENOPROTOOPT;

		msg.msg_control = (__force void *) optval;
		msg.msg_controllen = len;
		msg.msg_flags = flags;

		if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
			struct in_pktinfo info;

			info.ipi_addr.s_addr = inet->inet_rcv_saddr;
			info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
			info.ipi_ifindex = inet->mc_index;
			put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
		}
		if (inet->cmsg_flags & IP_CMSG_TTL) {
			int hlim = inet->mc_ttl;

			put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
		}
		if (inet->cmsg_flags & IP_CMSG_TOS) {
			int tos = inet->rcv_tos;

			put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
		}
		len -= msg.msg_controllen;
		return put_user(len, optlen);
	}
	case IP_FREEBIND:
		val = inet->freebind;
		break;
	case IP_TRANSPARENT:
		val = inet->transparent;
		break;
	case IP_MINTTL:
		val = inet->min_ttl;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
		unsigned char ucval = (unsigned char)val;

		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &ucval, 1))
			return -EFAULT;
	} else {
		len = min_t(unsigned int, sizeof(int), len);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &val, len))
			return -EFAULT;
	}
	return 0;
}
int ip_getsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, int __user *optlen)
{
	int err;

	err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
	    !ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		err = nf_getsockopt(sk, PF_INET, optname, optval,
				    &len);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_getsockopt);
#ifdef CONFIG_COMPAT
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen)
{
	int err;

	if (optname == MCAST_MSFILTER)
		return compat_mc_getsockopt(sk, level, optname, optval, optlen,
					    ip_getsockopt);

	err = do_ip_getsockopt(sk, level, optname, optval, optlen,
			       MSG_CMSG_COMPAT);

#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
	    !ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_getsockopt);
#endif