/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Fixes:
 *		Many		:	Split from ip.c, see ip.c for history.
 *		Martin Mares	:	TOS setting fixed.
 *		Alan Cox	:	Fixed a couple of oopses in Martin's
 *					TOS tweaks.
 *		Mike McLagan	:	Routing by source
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/compat.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/ip_fib.h>

#include <linux/errqueue.h>
#include <asm/uaccess.h>
#define IP_CMSG_PKTINFO		1
#define IP_CMSG_TTL		2
#define IP_CMSG_TOS		4
#define IP_CMSG_RECVOPTS	8
#define IP_CMSG_RETOPTS		16
#define IP_CMSG_PASSSEC		32
#define IP_CMSG_ORIGDSTADDR	64
/*
 *	SOL_IP control messages.
 */
#define PKTINFO_SKB_CB(__skb) ((struct in_pktinfo *)((__skb)->cb))
static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct in_pktinfo info = *PKTINFO_SKB_CB(skb);

	info.ipi_addr.s_addr = ip_hdr(skb)->daddr;

	put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
{
	int ttl = ip_hdr(skb)->ttl;

	put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
}
static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
{
	put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
}
static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
	if (IPCB(skb)->opt.optlen == 0)
		return;

	put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
		 ip_hdr(skb) + 1);
}
static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;

	if (IPCB(skb)->opt.optlen == 0)
		return;

	if (ip_options_echo(opt, skb)) {
		msg->msg_flags |= MSG_CTRUNC;
		return;
	}
	ip_options_undo(opt);

	put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}
static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
	char *secdata;
	u32 seclen, secid;
	int err;

	err = security_socket_getpeersec_dgram(NULL, skb, &secid);
	if (err)
		return;

	err = security_secid_to_secctx(secid, &secdata, &seclen);
	if (err)
		return;

	put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
	security_release_secctx(secdata, seclen);
}
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
	struct sockaddr_in sin;
	const struct iphdr *iph = ip_hdr(skb);
	__be16 *ports = (__be16 *)skb_transport_header(skb);

	if (skb_transport_offset(skb) + 4 > skb->len)
		return;

	/* All current transport protocols have the port numbers in the
	 * first four bytes of the transport header and this function is
	 * written with this assumption in mind.
	 */

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = iph->daddr;
	sin.sin_port = ports[1];
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(skb->sk);
	unsigned int flags = inet->cmsg_flags;

	/* Ordered by supposed usage frequency */
	if (flags & 1)
		ip_cmsg_recv_pktinfo(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_ttl(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_tos(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_opts(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_retopts(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_security(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_dstaddr(msg, skb);
}
EXPORT_SYMBOL(ip_cmsg_recv);
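
/*
 * Illustrative userspace sketch (not part of this file): the consumer side
 * of ip_cmsg_recv() above.  A receiver that enables IP_PKTINFO and
 * IP_RECVTTL gets the ancillary data built here back from recvmsg().
 * Assumes a bound UDP socket "fd"; needs <sys/socket.h>, <netinet/in.h>,
 * <string.h> and _GNU_SOURCE for struct in_pktinfo.  Wrapped in #if 0 so it
 * never builds as part of the kernel.
 */
#if 0	/* example only */
static ssize_t recv_with_pktinfo(int fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo)) + CMSG_SPACE(sizeof(int))];
	struct sockaddr_in src;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= &src,
		.msg_namelen	= sizeof(src),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int on = 1;
	ssize_t n;

	/* ask the kernel to attach the control messages */
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));

	n = recvmsg(fd, &msg, 0);
	if (n < 0)
		return n;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level != IPPROTO_IP)
			continue;
		if (cmsg->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo info;

			/* ipi_addr is the header daddr, ipi_ifindex the arrival device */
			memcpy(&info, CMSG_DATA(cmsg), sizeof(info));
		} else if (cmsg->cmsg_type == IP_TTL) {
			int ttl;

			memcpy(&ttl, CMSG_DATA(cmsg), sizeof(ttl));
		}
	}
	return n;
}
#endif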
int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
{
	int err;
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_IP)
			continue;
		switch (cmsg->cmsg_type) {
		case IP_RETOPTS:
			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
					     err < 40 ? err : 40);
			if (err)
				return err;
			break;
		case IP_PKTINFO:
		{
			struct in_pktinfo *info;

			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
				return -EINVAL;
			info = (struct in_pktinfo *)CMSG_DATA(cmsg);
			ipc->oif = info->ipi_ifindex;
			ipc->addr = info->ipi_spec_dst.s_addr;
			break;
		}
		default:
			return -EINVAL;
		}
	}
	return 0;
}
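
/*
 * Illustrative userspace sketch (not part of this file): the kind of
 * ancillary data ip_cmsg_send() parses.  A sender can pick the preferred
 * source address and output interface per datagram by attaching an
 * IP_PKTINFO control message to sendmsg().  Needs <sys/socket.h>,
 * <netinet/in.h>, <string.h> and _GNU_SOURCE for struct in_pktinfo.
 * Wrapped in #if 0 so it never builds as part of the kernel.
 */
#if 0	/* example only */
static ssize_t send_with_pktinfo(int fd, const struct sockaddr_in *dst,
				 const void *buf, size_t len,
				 struct in_addr src, int ifindex)
{
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= sizeof(*dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	struct in_pktinfo info;

	memset(cbuf, 0, sizeof(cbuf));
	memset(&info, 0, sizeof(info));
	info.ipi_spec_dst = src;	/* preferred source address */
	info.ipi_ifindex = ifindex;	/* 0 lets the kernel choose */

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type  = IP_PKTINFO;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(info));
	memcpy(CMSG_DATA(cmsg), &info, sizeof(info));

	return sendmsg(fd, &msg, 0);
}
#endif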
/* Special input handler for packets caught by router alert option.
   They are selected only by the protocol field, and then processed like
   local ones; but only if someone wants them!  Otherwise, a router
   not running rsvpd would kill RSVP.

   What user level does with them is a user level problem.
   I have no idea how it will masquerade or NAT them (it is a joke, joke :-)),
   but the receiver should be clever enough e.g. to forward mtrace requests
   sent to a multicast group to reach the destination designated router.
 */
struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);
static void ip_ra_destroy_rcu(struct rcu_head *head)
{
	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);

	sock_put(ra->saved_sk);
	kfree(ra);
}
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *))
{
	struct ip_ra_chain *ra, *new_ra;
	struct ip_ra_chain __rcu **rap;

	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
		return -EINVAL;

	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;

	spin_lock_bh(&ip_ra_lock);
	for (rap = &ip_ra_chain;
	     (ra = rcu_dereference_protected(*rap,
			lockdep_is_held(&ip_ra_lock))) != NULL;
	     rap = &ra->next) {
		if (ra->sk == sk) {
			if (on) {
				spin_unlock_bh(&ip_ra_lock);
				kfree(new_ra);
				return -EADDRINUSE;
			}
			/* dont let ip_call_ra_chain() use sk again */
			ra->sk = NULL;
			rcu_assign_pointer(*rap, ra->next);
			spin_unlock_bh(&ip_ra_lock);

			if (ra->destructor)
				ra->destructor(sk);
			/*
			 * Delay sock_put(sk) and kfree(ra) after one rcu grace
			 * period.  This guarantees ip_call_ra_chain() does not
			 * need to mess with socket refcounts.
			 */
			ra->saved_sk = sk;
			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
			return 0;
		}
	}
	if (new_ra == NULL) {
		spin_unlock_bh(&ip_ra_lock);
		return -ENOBUFS;
	}
	new_ra->sk = sk;
	new_ra->destructor = destructor;

	new_ra->next = ra;
	rcu_assign_pointer(*rap, new_ra);
	sock_hold(sk);
	spin_unlock_bh(&ip_ra_lock);

	return 0;
}
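
/*
 * Illustrative userspace sketch (not part of this file): ip_ra_control()
 * is what services the IP_ROUTER_ALERT socket option.  An RSVP-style daemon
 * opens a raw socket for its protocol (needs CAP_NET_RAW) and asks for the
 * router-alert packets; locally delivered copies then arrive via normal
 * reads.  Needs <sys/socket.h>, <netinet/in.h>, <unistd.h>.  Wrapped in
 * #if 0 so it never builds as part of the kernel.
 */
#if 0	/* example only */
static int open_router_alert_socket(int protocol)	/* e.g. IPPROTO_RSVP */
{
	int fd = socket(AF_INET, SOCK_RAW, protocol);
	int on = 1;

	if (fd < 0)
		return -1;
	/* fails with EINVAL unless the socket is SOCK_RAW (see above) */
	if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif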
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
		   __be16 port, u32 info, u8 *payload)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sock_exterr_skb *serr;

	if (!inet->recverr)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
	serr->ee.ee_type = icmp_hdr(skb)->type;
	serr->ee.ee_code = icmp_hdr(skb)->code;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
				   skb_network_header(skb);
	serr->port = port;

	if (skb_pull(skb, payload - skb->data) != NULL) {
		skb_reset_transport_header(skb);
		if (sock_queue_err_skb(sk, skb) == 0)
			return;
	}
	kfree_skb(skb);
}
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sock_exterr_skb *serr;
	struct iphdr *iph;
	struct sk_buff *skb;

	if (!inet->recverr)
		return;

	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->daddr = daddr;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = port;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}
/*
 *	Handle MSG_ERRQUEUE
 */
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	struct sockaddr_in *sin;
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in	 offender;
	} errhdr;
	int err;
	int copied;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	sin = (struct sockaddr_in *)msg->msg_name;
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
						   serr->addr_offset);
		sin->sin_port = serr->port;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	sin = &errhdr.offender;
	sin->sin_family = AF_UNSPEC;
	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
		struct inet_sock *inet = inet_sk(sk);

		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sin->sin_port = 0;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		if (inet->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	}

	put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	skb2 = skb_peek(&sk->sk_error_queue);
	if (skb2 != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
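
/*
 * Illustrative userspace sketch (not part of this file): the consumer side
 * of ip_recv_error().  With IP_RECVERR enabled, ICMP and local errors are
 * queued per packet and drained with recvmsg(MSG_ERRQUEUE); the IP_RECVERR
 * control message carries the sock_extended_err built above, followed by
 * the offender address.  Needs <sys/socket.h>, <netinet/in.h>,
 * <linux/errqueue.h>, <string.h>.  Wrapped in #if 0 so it never builds as
 * part of the kernel.
 */
#if 0	/* example only */
static int drain_errqueue(int fd)
{
	char cbuf[512], payload[1280];
	struct sockaddr_in remote;
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg = {
		.msg_name	= &remote,
		.msg_namelen	= sizeof(remote),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int on = 1;

	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return -1;	/* EAGAIN: nothing queued */

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP &&
		    cmsg->cmsg_type == IP_RECVERR) {
			struct sock_extended_err ee;

			memcpy(&ee, CMSG_DATA(cmsg), sizeof(ee));
			/* ee.ee_origin is SO_EE_ORIGIN_ICMP or _LOCAL and
			 * ee.ee_errno holds the error; the offender address
			 * follows the struct in the cmsg payload, and
			 * msg_name is the original destination.
			 */
		}
	}
	return 0;
}
#endif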
/*
 *	Socket option code for IP.  This is the end of the line after any
 *	TCP, UDP etc options on an IP socket.
 */
static int do_ip_setsockopt(struct sock *sk, int level,
			    int optname, char __user *optval, unsigned int optlen)
{
	struct inet_sock *inet = inet_sk(sk);
	int val = 0, err;
	switch (optname) {
	case IP_MTU_DISCOVER:
	case IP_ROUTER_ALERT:
	case IP_MULTICAST_TTL:
	case IP_MULTICAST_ALL:
	case IP_MULTICAST_LOOP:
	case IP_RECVORIGDSTADDR:
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else if (optlen >= sizeof(char)) {
			unsigned char ucval;

			if (get_user(ucval, (unsigned char __user *) optval))
				return -EFAULT;
			val = (int) ucval;
		}
	}

	/* If optlen==0, it is equivalent to val == 0 */

	if (ip_mroute_opt(optname))
		return ip_mroute_setsockopt(sk, optname, optval, optlen);
	err = 0;
	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		struct ip_options_rcu *old, *opt = NULL;

		if (optlen > 40)
			goto e_inval;
		err = ip_options_get_from_user(sock_net(sk), &opt,
					       optval, optlen);
		if (err)
			break;
		old = rcu_dereference_protected(inet->inet_opt,
						sock_owned_by_user(sk));
		if (inet->is_icsk) {
			struct inet_connection_sock *icsk = inet_csk(sk);
#if IS_ENABLED(CONFIG_IPV6)
			if (sk->sk_family == PF_INET ||
			    (!((1 << sk->sk_state) &
			       (TCPF_LISTEN | TCPF_CLOSE)) &&
			     inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
				if (old)
					icsk->icsk_ext_hdr_len -= old->opt.optlen;
				if (opt)
					icsk->icsk_ext_hdr_len += opt->opt.optlen;
				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if IS_ENABLED(CONFIG_IPV6)
			}
#endif
		}
		rcu_assign_pointer(inet->inet_opt, opt);
		if (old)
			kfree_rcu(old, rcu);
		break;
	}
	case IP_PKTINFO:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PKTINFO;
		else
			inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
		break;
	case IP_RECVTTL:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TTL;
		else
			inet->cmsg_flags &= ~IP_CMSG_TTL;
		break;
	case IP_RECVTOS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TOS;
		else
			inet->cmsg_flags &= ~IP_CMSG_TOS;
		break;
	case IP_RECVOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RECVOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
		break;
	case IP_RETOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RETOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
		break;
	case IP_PASSSEC:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PASSSEC;
		else
			inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
		break;
	case IP_RECVORIGDSTADDR:
		if (val)
			inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
		else
			inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
		break;
	case IP_TOS:	/* This sets both TOS and Precedence */
		if (sk->sk_type == SOCK_STREAM) {
			val &= ~INET_ECN_MASK;
			val |= inet->tos & INET_ECN_MASK;
		}
		if (inet->tos != val) {
			inet->tos = val;
			sk->sk_priority = rt_tos2priority(val);
			sk_dst_reset(sk);
		}
		break;
	case IP_TTL:
		if (optlen < 1)
			goto e_inval;
		if (val != -1 && (val < 1 || val > 255))
			goto e_inval;
		inet->uc_ttl = val;
		break;
	case IP_HDRINCL:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->hdrincl = val ? 1 : 0;
		break;
	case IP_NODEFRAG:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->nodefrag = val ? 1 : 0;
		break;
	case IP_MTU_DISCOVER:
		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
			goto e_inval;
		inet->pmtudisc = val;
		break;
	case IP_RECVERR:
		inet->recverr = !!val;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		break;
	case IP_MULTICAST_TTL:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (optlen < 1)
			goto e_inval;
		if (val == -1)
			val = 1;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->mc_ttl = val;
		break;
	case IP_MULTICAST_LOOP:
		if (optlen < 1)
			goto e_inval;
		inet->mc_loop = !!val;
		break;
	case IP_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;

		if (optlen != sizeof(int))
			goto e_inval;

		ifindex = (__force int)ntohl((__force __be32)val);
		if (ifindex == 0) {
			inet->uc_index = 0;
			err = 0;
			break;
		}

		dev = dev_get_by_index(sock_net(sk), ifindex);
		err = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if)
			break;

		inet->uc_index = ifindex;
		err = 0;
		break;
	}
	case IP_MULTICAST_IF:
	{
		struct ip_mreqn mreq;
		struct net_device *dev = NULL;

		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		/*
		 *	Check the arguments are allowable
		 */

		if (optlen < sizeof(struct in_addr))
			goto e_inval;

		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (optlen >= sizeof(struct ip_mreq)) {
				if (copy_from_user(&mreq, optval,
						   sizeof(struct ip_mreq)))
					break;
			} else if (optlen >= sizeof(struct in_addr)) {
				if (copy_from_user(&mreq.imr_address, optval,
						   sizeof(struct in_addr)))
					break;
			}
		}

		if (!mreq.imr_ifindex) {
			if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
				inet->mc_index = 0;
				inet->mc_addr  = 0;
				err = 0;
				break;
			}
			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
			if (dev)
				mreq.imr_ifindex = dev->ifindex;
		} else
			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);

		err = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if &&
		    mreq.imr_ifindex != sk->sk_bound_dev_if)
			break;

		inet->mc_index = mreq.imr_ifindex;
		inet->mc_addr  = mreq.imr_address.s_addr;
		err = 0;
		break;
	}
	case IP_ADD_MEMBERSHIP:
	case IP_DROP_MEMBERSHIP:
	{
		struct ip_mreqn mreq;

		err = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		if (optlen < sizeof(struct ip_mreq))
			goto e_inval;
		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
				break;
		}

		if (optname == IP_ADD_MEMBERSHIP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
	case IP_MSFILTER:
	{
		struct ip_msfilter *msf;

		if (optlen < IP_MSFILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		msf = kmalloc(optlen, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(msf, optval, optlen)) {
			kfree(msf);
			break;
		}
		/* numsrc >= (1G-4) overflow in 32 bits */
		if (msf->imsf_numsrc >= 0x3ffffffcU ||
		    msf->imsf_numsrc > sysctl_igmp_max_msf) {
			kfree(msf);
			err = -ENOBUFS;
			break;
		}
		if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
			kfree(msf);
			err = -EINVAL;
			break;
		}
		err = ip_mc_msfilter(sk, msf, 0);
		kfree(msf);
		break;
	}
	case IP_BLOCK_SOURCE:
	case IP_UNBLOCK_SOURCE:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	{
		struct ip_mreq_source mreqs;
		int omode, add;

		if (optlen != sizeof(struct ip_mreq_source))
			goto e_inval;
		if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
			err = -EFAULT;
			break;
		}
		if (optname == IP_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == IP_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
			struct ip_mreqn mreq;

			mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
			mreq.imr_address.s_addr = mreqs.imr_interface;
			mreq.imr_ifindex = 0;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs, 0);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in *psin;
		struct ip_mreqn mreq;

		if (optlen < sizeof(struct group_req))
			goto e_inval;
		err = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(greq)))
			break;
		psin = (struct sockaddr_in *)&greq.gr_group;
		if (psin->sin_family != AF_INET)
			goto e_inval;
		memset(&mreq, 0, sizeof(mreq));
		mreq.imr_multiaddr = psin->sin_addr;
		mreq.imr_ifindex = greq.gr_interface;

		if (optname == MCAST_JOIN_GROUP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		struct ip_mreq_source mreqs;
		struct sockaddr_in *psin;
		int omode, add;

		if (optlen != sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			err = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET ||
		    greqs.gsr_source.ss_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			break;
		}
		psin = (struct sockaddr_in *)&greqs.gsr_group;
		mreqs.imr_multiaddr = psin->sin_addr.s_addr;
		psin = (struct sockaddr_in *)&greqs.gsr_source;
		mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
		mreqs.imr_interface = 0; /* use index for mc_source */

		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct ip_mreqn mreq;

			psin = (struct sockaddr_in *)&greqs.gsr_group;
			mreq.imr_multiaddr = psin->sin_addr;
			mreq.imr_address.s_addr = 0;
			mreq.imr_ifindex = greqs.gsr_interface;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			greqs.gsr_interface = mreq.imr_ifindex;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs,
				   greqs.gsr_interface);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct sockaddr_in *psin;
		struct ip_msfilter *msf = NULL;
		struct group_filter *gsf = NULL;
		int msize, i, ifindex;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen, GFP_KERNEL);
		if (!gsf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(gsf, optval, optlen))
			goto mc_msf_out;

		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffff ||
		    gsf->gf_numsrc > sysctl_igmp_max_msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			err = -EINVAL;
			goto mc_msf_out;
		}
		msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
		msf = kmalloc(msize, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		ifindex = gsf->gf_interface;
		psin = (struct sockaddr_in *)&gsf->gf_group;
		if (psin->sin_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			goto mc_msf_out;
		}
		msf->imsf_multiaddr = psin->sin_addr.s_addr;
		msf->imsf_interface = 0;
		msf->imsf_fmode = gsf->gf_fmode;
		msf->imsf_numsrc = gsf->gf_numsrc;
		err = -EADDRNOTAVAIL;
		for (i = 0; i < gsf->gf_numsrc; ++i) {
			psin = (struct sockaddr_in *)&gsf->gf_slist[i];

			if (psin->sin_family != AF_INET)
				goto mc_msf_out;
			msf->imsf_slist[i] = psin->sin_addr.s_addr;
		}
		kfree(gsf);
		gsf = NULL;

		err = ip_mc_msfilter(sk, msf, ifindex);
mc_msf_out:
		kfree(msf);
		kfree(gsf);
		break;
	}
	case IP_MULTICAST_ALL:
		if (optlen < 1)
			goto e_inval;
		if (val != 0 && val != 1)
			goto e_inval;
		inet->mc_all = val;
		break;
	case IP_ROUTER_ALERT:
		err = ip_ra_control(sk, val ? 1 : 0, NULL);
		break;

	case IP_FREEBIND:
		if (optlen < 1)
			goto e_inval;
		inet->freebind = !!val;
		break;

	case IP_IPSEC_POLICY:
	case IP_XFRM_POLICY:
		err = -EPERM;
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			break;
		err = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IP_TRANSPARENT:
		if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (optlen < 1)
			goto e_inval;
		inet->transparent = !!val;
		break;

	case IP_MINTTL:
		if (optlen < 1)
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->min_ttl = val;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return err;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
/**
 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
 *
 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
 * destination in skb->cb[] before the dst is dropped.
 * This way, the receiver doesn't take cache line misses to read the rtable.
 */
void ipv4_pktinfo_prepare(struct sk_buff *skb)
{
	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);

	if (skb_rtable(skb)) {
		pktinfo->ipi_ifindex = inet_iif(skb);
		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
	} else {
		pktinfo->ipi_ifindex = 0;
		pktinfo->ipi_spec_dst.s_addr = 0;
	}
	skb_dst_drop(skb);
}
int ip_setsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
			optname != IP_IPSEC_POLICY &&
			optname != IP_XFRM_POLICY &&
			!ip_mroute_opt(optname)) {
		lock_sock(sk);
		err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
		release_sock(sk);
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_setsockopt);
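
/*
 * Illustrative userspace sketch (not part of this file): typical callers of
 * the path above.  Every option handled by do_ip_setsockopt() is reached
 * with a plain setsockopt(fd, IPPROTO_IP, ...); an int is accepted for the
 * boolean and small-value options, struct ip_mreqn for the multicast ones.
 * Needs <sys/socket.h>, <netinet/in.h>, <arpa/inet.h>, <string.h>; assumes
 * a glibc that exposes struct ip_mreqn.  Wrapped in #if 0 so it never
 * builds as part of the kernel.
 */
#if 0	/* example only */
static int tune_ipv4_socket(int fd, int ifindex)
{
	int tos = 0x10;				/* IPTOS_LOWDELAY */
	int mtud = IP_PMTUDISC_DO;		/* always set DF, track PMTU */
	struct ip_mreqn mreq;

	if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &mtud, sizeof(mtud)) < 0)
		return -1;

	/* join 239.1.1.1 on a specific interface (IP_ADD_MEMBERSHIP case) */
	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = htonl(0xef010101);
	mreq.imr_ifindex = ifindex;
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
		return -1;

	return 0;
}
#endif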
#ifdef CONFIG_COMPAT
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
		return compat_mc_setsockopt(sk, level, optname, optval, optlen,
			ip_setsockopt);

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
			optname != IP_IPSEC_POLICY &&
			optname != IP_XFRM_POLICY &&
			!ip_mroute_opt(optname)) {
		lock_sock(sk);
		err = compat_nf_setsockopt(sk, PF_INET, optname,
					   optval, optlen);
		release_sock(sk);
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_setsockopt);
#endif
/*
 *	Get the options.  Note for future reference: the GET of IP options
 *	gets the _received_ ones.  The set sets the _sent_ ones.
 */
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int val;
	int len;

	if (level != SOL_IP)
		return -EOPNOTSUPP;

	if (ip_mroute_opt(optname))
		return ip_mroute_getsockopt(sk, optname, optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		unsigned char optbuf[sizeof(struct ip_options)+40];
		struct ip_options *opt = (struct ip_options *)optbuf;
		struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference_protected(inet->inet_opt,
						     sock_owned_by_user(sk));
		opt->optlen = 0;
		if (inet_opt)
			memcpy(optbuf, &inet_opt->opt,
			       sizeof(struct ip_options) +
			       inet_opt->opt.optlen);
		release_sock(sk);

		if (opt->optlen == 0)
			return put_user(0, optlen);

		ip_options_undo(opt);

		len = min_t(unsigned int, len, opt->optlen);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, opt->__data, len))
			return -EFAULT;
		return 0;
	}
= (inet
->cmsg_flags
& IP_CMSG_PKTINFO
) != 0;
1164 val
= (inet
->cmsg_flags
& IP_CMSG_TTL
) != 0;
1167 val
= (inet
->cmsg_flags
& IP_CMSG_TOS
) != 0;
1170 val
= (inet
->cmsg_flags
& IP_CMSG_RECVOPTS
) != 0;
1173 val
= (inet
->cmsg_flags
& IP_CMSG_RETOPTS
) != 0;
1176 val
= (inet
->cmsg_flags
& IP_CMSG_PASSSEC
) != 0;
1178 case IP_RECVORIGDSTADDR
:
1179 val
= (inet
->cmsg_flags
& IP_CMSG_ORIGDSTADDR
) != 0;
	case IP_TTL:
		val = (inet->uc_ttl == -1 ?
		       sysctl_ip_default_ttl :
		       inet->uc_ttl);
		break;
	case IP_HDRINCL:
		val = inet->hdrincl;
		break;
	case IP_NODEFRAG:
		val = inet->nodefrag;
		break;
	case IP_MTU_DISCOVER:
		val = inet->pmtudisc;
		break;
	case IP_MTU:
	{
		struct dst_entry *dst;

		val = 0;
		dst = sk_dst_get(sk);
		if (dst) {
			val = dst_mtu(dst);
			dst_release(dst);
		}
		if (!val) {
			release_sock(sk);
			return -ENOTCONN;
		}
		break;
	}
	case IP_RECVERR:
		val = inet->recverr;
		break;
	case IP_MULTICAST_TTL:
		val = inet->mc_ttl;
		break;
	case IP_MULTICAST_LOOP:
		val = inet->mc_loop;
		break;
	case IP_UNICAST_IF:
		val = (__force int)htonl((__u32) inet->uc_index);
		break;
:
1227 struct in_addr addr
;
1228 len
= min_t(unsigned int, len
, sizeof(struct in_addr
));
1229 addr
.s_addr
= inet
->mc_addr
;
1232 if (put_user(len
, optlen
))
1234 if (copy_to_user(optval
, &addr
, len
))
	case IP_MSFILTER:
	{
		struct ip_msfilter msf;
		int err;

		if (len < IP_MSFILTER_SIZE(0)) {
			release_sock(sk);
			return -EINVAL;
		}
		if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
			release_sock(sk);
			return -EFAULT;
		}
		err = ip_mc_msfget(sk, &msf,
				   (struct ip_msfilter __user *)optval, optlen);
		release_sock(sk);
		return err;
	}
	case MCAST_MSFILTER:
	{
		struct group_filter gsf;
		int err;

		if (len < GROUP_FILTER_SIZE(0)) {
			release_sock(sk);
			return -EINVAL;
		}
		if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
			release_sock(sk);
			return -EFAULT;
		}
		err = ip_mc_gsfget(sk, &gsf,
				   (struct group_filter __user *)optval,
				   optlen);
		release_sock(sk);
		return err;
	}
	case IP_MULTICAST_ALL:
		val = inet->mc_all;
		break;
	case IP_PKTOPTIONS:
	{
		struct msghdr msg;

		release_sock(sk);

		if (sk->sk_type != SOCK_STREAM)
			return -ENOPROTOOPT;

		msg.msg_control = optval;
		msg.msg_controllen = len;
		msg.msg_flags = flags;

		if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
			struct in_pktinfo info;

			info.ipi_addr.s_addr = inet->inet_rcv_saddr;
			info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
			info.ipi_ifindex = inet->mc_index;
			put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
		}
		if (inet->cmsg_flags & IP_CMSG_TTL) {
			int hlim = inet->mc_ttl;

			put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
		}
		if (inet->cmsg_flags & IP_CMSG_TOS) {
			int tos = inet->rcv_tos;

			put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
		}
		len -= msg.msg_controllen;
		return put_user(len, optlen);
	}
	case IP_FREEBIND:
		val = inet->freebind;
		break;
	case IP_TRANSPARENT:
		val = inet->transparent;
		break;
	case IP_MINTTL:
		val = inet->min_ttl;
		break;
	default:
		release_sock(sk);
		return -ENOPROTOOPT;
	}
	release_sock(sk);

	if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
		unsigned char ucval = (unsigned char)val;

		len = 1;
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &ucval, 1))
			return -EFAULT;
	} else {
		len = min_t(unsigned int, sizeof(int), len);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &val, len))
			return -EFAULT;
	}
	return 0;
}
int ip_getsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, int __user *optlen)
{
	int err;

	err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
			!ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		err = nf_getsockopt(sk, PF_INET, optname, optval,
				    &len);
		release_sock(sk);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_getsockopt);
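
/*
 * Illustrative userspace sketch (not part of this file): reading options
 * back through the path above.  Note the asymmetry documented earlier:
 * getsockopt(IP_OPTIONS) returns the options *received* on the socket,
 * while setsockopt(IP_OPTIONS) sets the ones to be *sent*.  Needs
 * <sys/socket.h> and <netinet/in.h>.  Wrapped in #if 0 so it never builds
 * as part of the kernel.
 */
#if 0	/* example only */
static int query_ipv4_socket(int fd)
{
	unsigned char opts[40];
	socklen_t optslen = sizeof(opts);
	int ttl;
	socklen_t ttllen = sizeof(ttl);

	/* effective unicast TTL (the sysctl default if never set) */
	if (getsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, &ttllen) < 0)
		return -1;

	/* received IP options, at most 40 bytes (see comment above) */
	if (getsockopt(fd, IPPROTO_IP, IP_OPTIONS, opts, &optslen) < 0)
		return -1;

	return (int)optslen;	/* 0 if no options were received */
}
#endif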
#ifdef CONFIG_COMPAT
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen)
{
	int err;

	if (optname == MCAST_MSFILTER)
		return compat_mc_getsockopt(sk, level, optname, optval, optlen,
			ip_getsockopt);

	err = do_ip_getsockopt(sk, level, optname, optval, optlen,
		MSG_CMSG_COMPAT);

#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
			!ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
		release_sock(sk);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_getsockopt);
#endif