net/ipv4/ip_sockglue.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * The IP to API glue.
9 * Authors: see ip.c
11 * Fixes:
12 * Many : Split from ip.c , see ip.c for history.
13 * Martin Mares : TOS setting fixed.
14 * Alan Cox : Fixed a couple of oopses in Martin's
15 * TOS tweaks.
16 * Mike McLagan : Routing by source
17 */
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/mm.h>
22 #include <linux/skbuff.h>
23 #include <linux/ip.h>
24 #include <linux/icmp.h>
25 #include <linux/inetdevice.h>
26 #include <linux/netdevice.h>
27 #include <linux/slab.h>
28 #include <net/sock.h>
29 #include <net/ip.h>
30 #include <net/icmp.h>
31 #include <net/tcp_states.h>
32 #include <linux/udp.h>
33 #include <linux/igmp.h>
34 #include <linux/netfilter.h>
35 #include <linux/route.h>
36 #include <linux/mroute.h>
37 #include <net/inet_ecn.h>
38 #include <net/route.h>
39 #include <net/xfrm.h>
40 #include <net/compat.h>
41 #include <net/checksum.h>
42 #if IS_ENABLED(CONFIG_IPV6)
43 #include <net/transp_v6.h>
44 #endif
45 #include <net/ip_fib.h>
47 #include <linux/errqueue.h>
48 #include <linux/uaccess.h>
50 #include <linux/bpfilter.h>
52 /*
53 * SOL_IP control messages.
54 */
56 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
58 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
60 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
62 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
65 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
67 int ttl = ip_hdr(skb)->ttl;
68 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
71 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
73 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
76 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
78 if (IPCB(skb)->opt.optlen == 0)
79 return;
81 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
82 ip_hdr(skb) + 1);
86 static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg,
87 struct sk_buff *skb)
89 unsigned char optbuf[sizeof(struct ip_options) + 40];
90 struct ip_options *opt = (struct ip_options *)optbuf;
92 if (IPCB(skb)->opt.optlen == 0)
93 return;
95 if (ip_options_echo(net, opt, skb)) {
96 msg->msg_flags |= MSG_CTRUNC;
97 return;
99 ip_options_undo(opt);
101 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
104 static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
106 int val;
108 if (IPCB(skb)->frag_max_size == 0)
109 return;
111 val = IPCB(skb)->frag_max_size;
112 put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
115 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
116 int tlen, int offset)
118 __wsum csum = skb->csum;
120 if (skb->ip_summed != CHECKSUM_COMPLETE)
121 return;
123 if (offset != 0) {
124 int tend_off = skb_transport_offset(skb) + tlen;
125 csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
128 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
131 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
133 char *secdata;
134 u32 seclen, secid;
135 int err;
137 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
138 if (err)
139 return;
141 err = security_secid_to_secctx(secid, &secdata, &seclen);
142 if (err)
143 return;
145 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
146 security_release_secctx(secdata, seclen);
149 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
151 __be16 _ports[2], *ports;
152 struct sockaddr_in sin;
154 /* All current transport protocols have the port numbers in the
155 * first four bytes of the transport header and this function is
156 * written with this assumption in mind.
158 ports = skb_header_pointer(skb, skb_transport_offset(skb),
159 sizeof(_ports), &_ports);
160 if (!ports)
161 return;
163 sin.sin_family = AF_INET;
164 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
165 sin.sin_port = ports[1];
166 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
168 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
171 void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
172 struct sk_buff *skb, int tlen, int offset)
174 struct inet_sock *inet = inet_sk(sk);
175 unsigned int flags = inet->cmsg_flags;
177 /* Ordered by supposed usage frequency */
178 if (flags & IP_CMSG_PKTINFO) {
179 ip_cmsg_recv_pktinfo(msg, skb);
181 flags &= ~IP_CMSG_PKTINFO;
182 if (!flags)
183 return;
186 if (flags & IP_CMSG_TTL) {
187 ip_cmsg_recv_ttl(msg, skb);
189 flags &= ~IP_CMSG_TTL;
190 if (!flags)
191 return;
194 if (flags & IP_CMSG_TOS) {
195 ip_cmsg_recv_tos(msg, skb);
197 flags &= ~IP_CMSG_TOS;
198 if (!flags)
199 return;
202 if (flags & IP_CMSG_RECVOPTS) {
203 ip_cmsg_recv_opts(msg, skb);
205 flags &= ~IP_CMSG_RECVOPTS;
206 if (!flags)
207 return;
210 if (flags & IP_CMSG_RETOPTS) {
211 ip_cmsg_recv_retopts(sock_net(sk), msg, skb);
213 flags &= ~IP_CMSG_RETOPTS;
214 if (!flags)
215 return;
218 if (flags & IP_CMSG_PASSSEC) {
219 ip_cmsg_recv_security(msg, skb);
221 flags &= ~IP_CMSG_PASSSEC;
222 if (!flags)
223 return;
226 if (flags & IP_CMSG_ORIGDSTADDR) {
227 ip_cmsg_recv_dstaddr(msg, skb);
229 flags &= ~IP_CMSG_ORIGDSTADDR;
230 if (!flags)
231 return;
234 if (flags & IP_CMSG_CHECKSUM)
235 ip_cmsg_recv_checksum(msg, skb, tlen, offset);
237 if (flags & IP_CMSG_RECVFRAGSIZE)
238 ip_cmsg_recv_fragsize(msg, skb);
240 EXPORT_SYMBOL(ip_cmsg_recv_offset);
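/* Usage sketch (not part of this file): a minimal userspace receiver for the
 * SOL_IP ancillary data that ip_cmsg_recv_offset() fills in above. It assumes
 * an already-bound UDP socket fd; the helper name and buffer sizes are
 * illustrative only.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static void recv_with_pktinfo(int fd)
{
	int on = 1;
	char data[1500];
	union {
		char buf[CMSG_SPACE(sizeof(struct in_pktinfo)) +
			 CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg;

	/* Ask the kernel to attach IP_PKTINFO and IP_TTL control messages. */
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level != IPPROTO_IP)
			continue;
		if (cmsg->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo info;

			memcpy(&info, CMSG_DATA(cmsg), sizeof(info));
			printf("dst %s ifindex %d\n",
			       inet_ntoa(info.ipi_addr), info.ipi_ifindex);
		} else if (cmsg->cmsg_type == IP_TTL) {
			int ttl;

			memcpy(&ttl, CMSG_DATA(cmsg), sizeof(ttl));
			printf("ttl %d\n", ttl);
		}
	}
}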
242 int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
243 bool allow_ipv6)
245 int err, val;
246 struct cmsghdr *cmsg;
247 struct net *net = sock_net(sk);
249 for_each_cmsghdr(cmsg, msg) {
250 if (!CMSG_OK(msg, cmsg))
251 return -EINVAL;
252 #if IS_ENABLED(CONFIG_IPV6)
253 if (allow_ipv6 &&
254 cmsg->cmsg_level == SOL_IPV6 &&
255 cmsg->cmsg_type == IPV6_PKTINFO) {
256 struct in6_pktinfo *src_info;
258 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
259 return -EINVAL;
260 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
261 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
262 return -EINVAL;
263 if (src_info->ipi6_ifindex)
264 ipc->oif = src_info->ipi6_ifindex;
265 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
266 continue;
268 #endif
269 if (cmsg->cmsg_level == SOL_SOCKET) {
270 err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
271 if (err)
272 return err;
273 continue;
276 if (cmsg->cmsg_level != SOL_IP)
277 continue;
278 switch (cmsg->cmsg_type) {
279 case IP_RETOPTS:
280 err = cmsg->cmsg_len - sizeof(struct cmsghdr);
282 /* Our caller is responsible for freeing ipc->opt */
283 err = ip_options_get(net, &ipc->opt,
284 KERNEL_SOCKPTR(CMSG_DATA(cmsg)),
285 err < 40 ? err : 40);
286 if (err)
287 return err;
288 break;
289 case IP_PKTINFO:
291 struct in_pktinfo *info;
292 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
293 return -EINVAL;
294 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
295 if (info->ipi_ifindex)
296 ipc->oif = info->ipi_ifindex;
297 ipc->addr = info->ipi_spec_dst.s_addr;
298 break;
300 case IP_TTL:
301 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
302 return -EINVAL;
303 val = *(int *)CMSG_DATA(cmsg);
304 if (val < 1 || val > 255)
305 return -EINVAL;
306 ipc->ttl = val;
307 break;
308 case IP_TOS:
309 if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
310 val = *(int *)CMSG_DATA(cmsg);
311 else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
312 val = *(u8 *)CMSG_DATA(cmsg);
313 else
314 return -EINVAL;
315 if (val < 0 || val > 255)
316 return -EINVAL;
317 ipc->tos = val;
318 ipc->priority = rt_tos2priority(ipc->tos);
319 break;
321 default:
322 return -EINVAL;
325 return 0;
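/* Usage sketch (not part of this file): the sending-side counterpart of
 * ip_cmsg_send() above - userspace attaching IP_PKTINFO and IP_TOS control
 * messages to a sendmsg() call. The helper name, ifindex and TOS value are
 * illustrative only.
 */
#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int send_with_cmsg(int fd, const struct sockaddr_in *dst,
			  const void *buf, size_t len, int ifindex)
{
	union {
		char buf[CMSG_SPACE(sizeof(struct in_pktinfo)) +
			 CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name = (void *)dst, .msg_namelen = sizeof(*dst),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
	};
	struct in_pktinfo info = { .ipi_ifindex = ifindex };
	int tos = 0x10;			/* IPTOS_LOWDELAY, arbitrary */
	struct cmsghdr *cmsg;

	memset(u.buf, 0, sizeof(u.buf));

	/* IP_PKTINFO selects the egress interface for this datagram only. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type = IP_PKTINFO;
	cmsg->cmsg_len = CMSG_LEN(sizeof(info));
	memcpy(CMSG_DATA(cmsg), &info, sizeof(info));

	/* IP_TOS as a cmsg overrides the socket's TOS for this datagram. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type = IP_TOS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(tos));
	memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos));

	return sendmsg(fd, &msg, 0);
}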
328 static void ip_ra_destroy_rcu(struct rcu_head *head)
330 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
332 sock_put(ra->saved_sk);
333 kfree(ra);
336 int ip_ra_control(struct sock *sk, unsigned char on,
337 void (*destructor)(struct sock *))
339 struct ip_ra_chain *ra, *new_ra;
340 struct ip_ra_chain __rcu **rap;
341 struct net *net = sock_net(sk);
343 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
344 return -EINVAL;
346 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
347 if (on && !new_ra)
348 return -ENOMEM;
350 mutex_lock(&net->ipv4.ra_mutex);
351 for (rap = &net->ipv4.ra_chain;
352 (ra = rcu_dereference_protected(*rap,
353 lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
354 rap = &ra->next) {
355 if (ra->sk == sk) {
356 if (on) {
357 mutex_unlock(&net->ipv4.ra_mutex);
358 kfree(new_ra);
359 return -EADDRINUSE;
361 /* don't let ip_call_ra_chain() use sk again */
362 ra->sk = NULL;
363 RCU_INIT_POINTER(*rap, ra->next);
364 mutex_unlock(&net->ipv4.ra_mutex);
366 if (ra->destructor)
367 ra->destructor(sk);
368 /*
369 * Delay sock_put(sk) and kfree(ra) after one RCU grace
370 * period. This guarantees that ip_call_ra_chain() doesn't need
371 * to mess with socket refcounts.
372 */
373 ra->saved_sk = sk;
374 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
375 return 0;
378 if (!new_ra) {
379 mutex_unlock(&net->ipv4.ra_mutex);
380 return -ENOBUFS;
382 new_ra->sk = sk;
383 new_ra->destructor = destructor;
385 RCU_INIT_POINTER(new_ra->next, ra);
386 rcu_assign_pointer(*rap, new_ra);
387 sock_hold(sk);
388 mutex_unlock(&net->ipv4.ra_mutex);
390 return 0;
393 static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb,
394 struct sock_ee_data_rfc4884 *out)
396 switch (icmp_hdr(skb)->type) {
397 case ICMP_DEST_UNREACH:
398 case ICMP_TIME_EXCEEDED:
399 case ICMP_PARAMETERPROB:
400 ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr),
401 icmp_hdr(skb)->un.reserved[1] * 4);
405 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
406 __be16 port, u32 info, u8 *payload)
408 struct sock_exterr_skb *serr;
410 skb = skb_clone(skb, GFP_ATOMIC);
411 if (!skb)
412 return;
414 serr = SKB_EXT_ERR(skb);
415 serr->ee.ee_errno = err;
416 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
417 serr->ee.ee_type = icmp_hdr(skb)->type;
418 serr->ee.ee_code = icmp_hdr(skb)->code;
419 serr->ee.ee_pad = 0;
420 serr->ee.ee_info = info;
421 serr->ee.ee_data = 0;
422 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
423 skb_network_header(skb);
424 serr->port = port;
426 if (skb_pull(skb, payload - skb->data)) {
427 if (inet_sk(sk)->recverr_rfc4884)
428 ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
430 skb_reset_transport_header(skb);
431 if (sock_queue_err_skb(sk, skb) == 0)
432 return;
434 kfree_skb(skb);
437 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
439 struct inet_sock *inet = inet_sk(sk);
440 struct sock_exterr_skb *serr;
441 struct iphdr *iph;
442 struct sk_buff *skb;
444 if (!inet->recverr)
445 return;
447 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
448 if (!skb)
449 return;
451 skb_put(skb, sizeof(struct iphdr));
452 skb_reset_network_header(skb);
453 iph = ip_hdr(skb);
454 iph->daddr = daddr;
456 serr = SKB_EXT_ERR(skb);
457 serr->ee.ee_errno = err;
458 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
459 serr->ee.ee_type = 0;
460 serr->ee.ee_code = 0;
461 serr->ee.ee_pad = 0;
462 serr->ee.ee_info = info;
463 serr->ee.ee_data = 0;
464 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
465 serr->port = port;
467 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
468 skb_reset_transport_header(skb);
470 if (sock_queue_err_skb(sk, skb))
471 kfree_skb(skb);
474 /* For some errors we have valid addr_offset even with zero payload and
475 * zero port. Also, addr_offset should be supported if port is set.
477 static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
479 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
480 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
483 /* IPv4 supports cmsg on all icmp errors and some timestamps
484 *
485 * Timestamp code paths do not initialize the fields expected by cmsg:
486 * the PKTINFO fields in skb->cb[]. Fill those in here.
487 */
488 static bool ipv4_datagram_support_cmsg(const struct sock *sk,
489 struct sk_buff *skb,
490 int ee_origin)
492 struct in_pktinfo *info;
494 if (ee_origin == SO_EE_ORIGIN_ICMP)
495 return true;
497 if (ee_origin == SO_EE_ORIGIN_LOCAL)
498 return false;
500 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
501 * timestamp with egress dev. Not possible for packets without iif
502 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
504 info = PKTINFO_SKB_CB(skb);
505 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
506 !info->ipi_ifindex)
507 return false;
509 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
510 return true;
514 * Handle MSG_ERRQUEUE
516 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
518 struct sock_exterr_skb *serr;
519 struct sk_buff *skb;
520 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
521 struct {
522 struct sock_extended_err ee;
523 struct sockaddr_in offender;
524 } errhdr;
525 int err;
526 int copied;
528 err = -EAGAIN;
529 skb = sock_dequeue_err_skb(sk);
530 if (!skb)
531 goto out;
533 copied = skb->len;
534 if (copied > len) {
535 msg->msg_flags |= MSG_TRUNC;
536 copied = len;
538 err = skb_copy_datagram_msg(skb, 0, msg, copied);
539 if (unlikely(err)) {
540 kfree_skb(skb);
541 return err;
543 sock_recv_timestamp(msg, sk, skb);
545 serr = SKB_EXT_ERR(skb);
547 if (sin && ipv4_datagram_support_addr(serr)) {
548 sin->sin_family = AF_INET;
549 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
550 serr->addr_offset);
551 sin->sin_port = serr->port;
552 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
553 *addr_len = sizeof(*sin);
556 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
557 sin = &errhdr.offender;
558 memset(sin, 0, sizeof(*sin));
560 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
561 sin->sin_family = AF_INET;
562 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
563 if (inet_sk(sk)->cmsg_flags)
564 ip_cmsg_recv(msg, skb);
567 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
569 /* Now we could try to dump the offending packet's options */
571 msg->msg_flags |= MSG_ERRQUEUE;
572 err = copied;
574 consume_skb(skb);
575 out:
576 return err;
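/* Usage sketch (not part of this file): draining the per-socket error queue
 * that ip_recv_error() above serves. IP_RECVERR must be enabled first; the
 * helper name and buffer sizes are illustrative only.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

static void drain_errqueue(int fd)
{
	int on = 1;
	char data[1500], cbuf[512];

	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

	for (;;) {
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cmsg;

		if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
			break;	/* EAGAIN: the error queue is empty */

		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == IPPROTO_IP &&
			    cmsg->cmsg_type == IP_RECVERR) {
				struct sock_extended_err err;

				memcpy(&err, CMSG_DATA(cmsg), sizeof(err));
				printf("errno %u origin %u type %u code %u\n",
				       err.ee_errno, err.ee_origin,
				       err.ee_type, err.ee_code);
			}
		}
	}
}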
579 static void __ip_sock_set_tos(struct sock *sk, int val)
581 if (sk->sk_type == SOCK_STREAM) {
582 val &= ~INET_ECN_MASK;
583 val |= inet_sk(sk)->tos & INET_ECN_MASK;
585 if (inet_sk(sk)->tos != val) {
586 inet_sk(sk)->tos = val;
587 sk->sk_priority = rt_tos2priority(val);
588 sk_dst_reset(sk);
592 void ip_sock_set_tos(struct sock *sk, int val)
594 lock_sock(sk);
595 __ip_sock_set_tos(sk, val);
596 release_sock(sk);
598 EXPORT_SYMBOL(ip_sock_set_tos);
600 void ip_sock_set_freebind(struct sock *sk)
602 lock_sock(sk);
603 inet_sk(sk)->freebind = true;
604 release_sock(sk);
606 EXPORT_SYMBOL(ip_sock_set_freebind);
608 void ip_sock_set_recverr(struct sock *sk)
610 lock_sock(sk);
611 inet_sk(sk)->recverr = true;
612 release_sock(sk);
614 EXPORT_SYMBOL(ip_sock_set_recverr);
616 int ip_sock_set_mtu_discover(struct sock *sk, int val)
618 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
619 return -EINVAL;
620 lock_sock(sk);
621 inet_sk(sk)->pmtudisc = val;
622 release_sock(sk);
623 return 0;
625 EXPORT_SYMBOL(ip_sock_set_mtu_discover);
627 void ip_sock_set_pktinfo(struct sock *sk)
629 lock_sock(sk);
630 inet_sk(sk)->cmsg_flags |= IP_CMSG_PKTINFO;
631 release_sock(sk);
633 EXPORT_SYMBOL(ip_sock_set_pktinfo);
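/* Usage sketch (not part of this file): the userspace-visible counterparts of
 * the in-kernel ip_sock_set_*() helpers above - ordinary setsockopt() calls
 * handled by do_ip_setsockopt() below. The helper name and chosen values are
 * examples only.
 */
#include <sys/socket.h>
#include <netinet/in.h>

static void set_ip_socket_options(int fd)
{
	int on = 1;
	int tos = 0x10;			/* IPTOS_LOWDELAY, arbitrary */
	int mtu_disc = IP_PMTUDISC_DO;

	setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
	setsockopt(fd, IPPROTO_IP, IP_FREEBIND, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &mtu_disc, sizeof(mtu_disc));
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
}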
635 /*
636 * Socket option code for IP. This is the end of the line after any
637 * TCP, UDP, etc. options on an IP socket.
638 */
639 static bool setsockopt_needs_rtnl(int optname)
641 switch (optname) {
642 case IP_ADD_MEMBERSHIP:
643 case IP_ADD_SOURCE_MEMBERSHIP:
644 case IP_BLOCK_SOURCE:
645 case IP_DROP_MEMBERSHIP:
646 case IP_DROP_SOURCE_MEMBERSHIP:
647 case IP_MSFILTER:
648 case IP_UNBLOCK_SOURCE:
649 case MCAST_BLOCK_SOURCE:
650 case MCAST_MSFILTER:
651 case MCAST_JOIN_GROUP:
652 case MCAST_JOIN_SOURCE_GROUP:
653 case MCAST_LEAVE_GROUP:
654 case MCAST_LEAVE_SOURCE_GROUP:
655 case MCAST_UNBLOCK_SOURCE:
656 return true;
658 return false;
661 static int set_mcast_msfilter(struct sock *sk, int ifindex,
662 int numsrc, int fmode,
663 struct sockaddr_storage *group,
664 struct sockaddr_storage *list)
666 int msize = IP_MSFILTER_SIZE(numsrc);
667 struct ip_msfilter *msf;
668 struct sockaddr_in *psin;
669 int err, i;
671 msf = kmalloc(msize, GFP_KERNEL);
672 if (!msf)
673 return -ENOBUFS;
675 psin = (struct sockaddr_in *)group;
676 if (psin->sin_family != AF_INET)
677 goto Eaddrnotavail;
678 msf->imsf_multiaddr = psin->sin_addr.s_addr;
679 msf->imsf_interface = 0;
680 msf->imsf_fmode = fmode;
681 msf->imsf_numsrc = numsrc;
682 for (i = 0; i < numsrc; ++i) {
683 psin = (struct sockaddr_in *)&list[i];
685 if (psin->sin_family != AF_INET)
686 goto Eaddrnotavail;
687 msf->imsf_slist[i] = psin->sin_addr.s_addr;
689 err = ip_mc_msfilter(sk, msf, ifindex);
690 kfree(msf);
691 return err;
693 Eaddrnotavail:
694 kfree(msf);
695 return -EADDRNOTAVAIL;
698 static int copy_group_source_from_sockptr(struct group_source_req *greqs,
699 sockptr_t optval, int optlen)
701 if (in_compat_syscall()) {
702 struct compat_group_source_req gr32;
704 if (optlen != sizeof(gr32))
705 return -EINVAL;
706 if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
707 return -EFAULT;
708 greqs->gsr_interface = gr32.gsr_interface;
709 greqs->gsr_group = gr32.gsr_group;
710 greqs->gsr_source = gr32.gsr_source;
711 } else {
712 if (optlen != sizeof(*greqs))
713 return -EINVAL;
714 if (copy_from_sockptr(greqs, optval, sizeof(*greqs)))
715 return -EFAULT;
718 return 0;
721 static int do_mcast_group_source(struct sock *sk, int optname,
722 sockptr_t optval, int optlen)
724 struct group_source_req greqs;
725 struct ip_mreq_source mreqs;
726 struct sockaddr_in *psin;
727 int omode, add, err;
729 err = copy_group_source_from_sockptr(&greqs, optval, optlen);
730 if (err)
731 return err;
733 if (greqs.gsr_group.ss_family != AF_INET ||
734 greqs.gsr_source.ss_family != AF_INET)
735 return -EADDRNOTAVAIL;
737 psin = (struct sockaddr_in *)&greqs.gsr_group;
738 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
739 psin = (struct sockaddr_in *)&greqs.gsr_source;
740 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
741 mreqs.imr_interface = 0; /* use index for mc_source */
743 if (optname == MCAST_BLOCK_SOURCE) {
744 omode = MCAST_EXCLUDE;
745 add = 1;
746 } else if (optname == MCAST_UNBLOCK_SOURCE) {
747 omode = MCAST_EXCLUDE;
748 add = 0;
749 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
750 struct ip_mreqn mreq;
752 psin = (struct sockaddr_in *)&greqs.gsr_group;
753 mreq.imr_multiaddr = psin->sin_addr;
754 mreq.imr_address.s_addr = 0;
755 mreq.imr_ifindex = greqs.gsr_interface;
756 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
757 if (err && err != -EADDRINUSE)
758 return err;
759 greqs.gsr_interface = mreq.imr_ifindex;
760 omode = MCAST_INCLUDE;
761 add = 1;
762 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
763 omode = MCAST_INCLUDE;
764 add = 0;
766 return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface);
769 static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
771 struct group_filter *gsf = NULL;
772 int err;
774 if (optlen < GROUP_FILTER_SIZE(0))
775 return -EINVAL;
776 if (optlen > sysctl_optmem_max)
777 return -ENOBUFS;
779 gsf = memdup_sockptr(optval, optlen);
780 if (IS_ERR(gsf))
781 return PTR_ERR(gsf);
783 /* numsrc >= (4G-140)/128 overflow in 32 bits */
784 err = -ENOBUFS;
785 if (gsf->gf_numsrc >= 0x1ffffff ||
786 gsf->gf_numsrc > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
787 goto out_free_gsf;
789 err = -EINVAL;
790 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen)
791 goto out_free_gsf;
793 err = set_mcast_msfilter(sk, gsf->gf_interface, gsf->gf_numsrc,
794 gsf->gf_fmode, &gsf->gf_group, gsf->gf_slist);
795 out_free_gsf:
796 kfree(gsf);
797 return err;
800 static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
801 int optlen)
803 const int size0 = offsetof(struct compat_group_filter, gf_slist);
804 struct compat_group_filter *gf32;
805 unsigned int n;
806 void *p;
807 int err;
809 if (optlen < size0)
810 return -EINVAL;
811 if (optlen > sysctl_optmem_max - 4)
812 return -ENOBUFS;
814 p = kmalloc(optlen + 4, GFP_KERNEL);
815 if (!p)
816 return -ENOMEM;
817 gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */
819 err = -EFAULT;
820 if (copy_from_sockptr(gf32, optval, optlen))
821 goto out_free_gsf;
823 /* numsrc >= (4G-140)/128 overflow in 32 bits */
824 n = gf32->gf_numsrc;
825 err = -ENOBUFS;
826 if (n >= 0x1ffffff)
827 goto out_free_gsf;
829 err = -EINVAL;
830 if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen)
831 goto out_free_gsf;
833 /* numsrc >= (4G-140)/128 overflow in 32 bits */
834 err = -ENOBUFS;
835 if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
836 goto out_free_gsf;
837 err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
838 &gf32->gf_group, gf32->gf_slist);
839 out_free_gsf:
840 kfree(p);
841 return err;
844 static int ip_mcast_join_leave(struct sock *sk, int optname,
845 sockptr_t optval, int optlen)
847 struct ip_mreqn mreq = { };
848 struct sockaddr_in *psin;
849 struct group_req greq;
851 if (optlen < sizeof(struct group_req))
852 return -EINVAL;
853 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
854 return -EFAULT;
856 psin = (struct sockaddr_in *)&greq.gr_group;
857 if (psin->sin_family != AF_INET)
858 return -EINVAL;
859 mreq.imr_multiaddr = psin->sin_addr;
860 mreq.imr_ifindex = greq.gr_interface;
861 if (optname == MCAST_JOIN_GROUP)
862 return ip_mc_join_group(sk, &mreq);
863 return ip_mc_leave_group(sk, &mreq);
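/* Usage sketch (not part of this file): the userspace call that lands in
 * ip_mcast_join_leave() above - the protocol-independent MCAST_JOIN_GROUP
 * option with a struct group_req. The helper name, group address string and
 * interface index are illustrative only.
 */
#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static int join_group(int fd, const char *group, int ifindex)
{
	struct group_req greq;
	struct sockaddr_in *sin = (struct sockaddr_in *)&greq.gr_group;

	memset(&greq, 0, sizeof(greq));
	greq.gr_interface = ifindex;
	sin->sin_family = AF_INET;
	if (inet_pton(AF_INET, group, &sin->sin_addr) != 1)
		return -1;

	return setsockopt(fd, IPPROTO_IP, MCAST_JOIN_GROUP,
			  &greq, sizeof(greq));
}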
866 static int compat_ip_mcast_join_leave(struct sock *sk, int optname,
867 sockptr_t optval, int optlen)
869 struct compat_group_req greq;
870 struct ip_mreqn mreq = { };
871 struct sockaddr_in *psin;
873 if (optlen < sizeof(struct compat_group_req))
874 return -EINVAL;
875 if (copy_from_sockptr(&greq, optval, sizeof(greq)))
876 return -EFAULT;
878 psin = (struct sockaddr_in *)&greq.gr_group;
879 if (psin->sin_family != AF_INET)
880 return -EINVAL;
881 mreq.imr_multiaddr = psin->sin_addr;
882 mreq.imr_ifindex = greq.gr_interface;
884 if (optname == MCAST_JOIN_GROUP)
885 return ip_mc_join_group(sk, &mreq);
886 return ip_mc_leave_group(sk, &mreq);
889 static int do_ip_setsockopt(struct sock *sk, int level, int optname,
890 sockptr_t optval, unsigned int optlen)
892 struct inet_sock *inet = inet_sk(sk);
893 struct net *net = sock_net(sk);
894 int val = 0, err;
895 bool needs_rtnl = setsockopt_needs_rtnl(optname);
897 switch (optname) {
898 case IP_PKTINFO:
899 case IP_RECVTTL:
900 case IP_RECVOPTS:
901 case IP_RECVTOS:
902 case IP_RETOPTS:
903 case IP_TOS:
904 case IP_TTL:
905 case IP_HDRINCL:
906 case IP_MTU_DISCOVER:
907 case IP_RECVERR:
908 case IP_ROUTER_ALERT:
909 case IP_FREEBIND:
910 case IP_PASSSEC:
911 case IP_TRANSPARENT:
912 case IP_MINTTL:
913 case IP_NODEFRAG:
914 case IP_BIND_ADDRESS_NO_PORT:
915 case IP_UNICAST_IF:
916 case IP_MULTICAST_TTL:
917 case IP_MULTICAST_ALL:
918 case IP_MULTICAST_LOOP:
919 case IP_RECVORIGDSTADDR:
920 case IP_CHECKSUM:
921 case IP_RECVFRAGSIZE:
922 case IP_RECVERR_RFC4884:
923 if (optlen >= sizeof(int)) {
924 if (copy_from_sockptr(&val, optval, sizeof(val)))
925 return -EFAULT;
926 } else if (optlen >= sizeof(char)) {
927 unsigned char ucval;
929 if (copy_from_sockptr(&ucval, optval, sizeof(ucval)))
930 return -EFAULT;
931 val = (int) ucval;
935 /* If optlen==0, it is equivalent to val == 0 */
937 if (optname == IP_ROUTER_ALERT)
938 return ip_ra_control(sk, val ? 1 : 0, NULL);
939 if (ip_mroute_opt(optname))
940 return ip_mroute_setsockopt(sk, optname, optval, optlen);
942 err = 0;
943 if (needs_rtnl)
944 rtnl_lock();
945 lock_sock(sk);
947 switch (optname) {
948 case IP_OPTIONS:
950 struct ip_options_rcu *old, *opt = NULL;
952 if (optlen > 40)
953 goto e_inval;
954 err = ip_options_get(sock_net(sk), &opt, optval, optlen);
955 if (err)
956 break;
957 old = rcu_dereference_protected(inet->inet_opt,
958 lockdep_sock_is_held(sk));
959 if (inet->is_icsk) {
960 struct inet_connection_sock *icsk = inet_csk(sk);
961 #if IS_ENABLED(CONFIG_IPV6)
962 if (sk->sk_family == PF_INET ||
963 (!((1 << sk->sk_state) &
964 (TCPF_LISTEN | TCPF_CLOSE)) &&
965 inet->inet_daddr != LOOPBACK4_IPV6)) {
966 #endif
967 if (old)
968 icsk->icsk_ext_hdr_len -= old->opt.optlen;
969 if (opt)
970 icsk->icsk_ext_hdr_len += opt->opt.optlen;
971 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
972 #if IS_ENABLED(CONFIG_IPV6)
974 #endif
976 rcu_assign_pointer(inet->inet_opt, opt);
977 if (old)
978 kfree_rcu(old, rcu);
979 break;
981 case IP_PKTINFO:
982 if (val)
983 inet->cmsg_flags |= IP_CMSG_PKTINFO;
984 else
985 inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
986 break;
987 case IP_RECVTTL:
988 if (val)
989 inet->cmsg_flags |= IP_CMSG_TTL;
990 else
991 inet->cmsg_flags &= ~IP_CMSG_TTL;
992 break;
993 case IP_RECVTOS:
994 if (val)
995 inet->cmsg_flags |= IP_CMSG_TOS;
996 else
997 inet->cmsg_flags &= ~IP_CMSG_TOS;
998 break;
999 case IP_RECVOPTS:
1000 if (val)
1001 inet->cmsg_flags |= IP_CMSG_RECVOPTS;
1002 else
1003 inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
1004 break;
1005 case IP_RETOPTS:
1006 if (val)
1007 inet->cmsg_flags |= IP_CMSG_RETOPTS;
1008 else
1009 inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
1010 break;
1011 case IP_PASSSEC:
1012 if (val)
1013 inet->cmsg_flags |= IP_CMSG_PASSSEC;
1014 else
1015 inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
1016 break;
1017 case IP_RECVORIGDSTADDR:
1018 if (val)
1019 inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
1020 else
1021 inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
1022 break;
1023 case IP_CHECKSUM:
1024 if (val) {
1025 if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
1026 inet_inc_convert_csum(sk);
1027 inet->cmsg_flags |= IP_CMSG_CHECKSUM;
1029 } else {
1030 if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
1031 inet_dec_convert_csum(sk);
1032 inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
1035 break;
1036 case IP_RECVFRAGSIZE:
1037 if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
1038 goto e_inval;
1039 if (val)
1040 inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE;
1041 else
1042 inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE;
1043 break;
1044 case IP_TOS: /* This sets both TOS and Precedence */
1045 __ip_sock_set_tos(sk, val);
1046 break;
1047 case IP_TTL:
1048 if (optlen < 1)
1049 goto e_inval;
1050 if (val != -1 && (val < 1 || val > 255))
1051 goto e_inval;
1052 inet->uc_ttl = val;
1053 break;
1054 case IP_HDRINCL:
1055 if (sk->sk_type != SOCK_RAW) {
1056 err = -ENOPROTOOPT;
1057 break;
1059 inet->hdrincl = val ? 1 : 0;
1060 break;
1061 case IP_NODEFRAG:
1062 if (sk->sk_type != SOCK_RAW) {
1063 err = -ENOPROTOOPT;
1064 break;
1066 inet->nodefrag = val ? 1 : 0;
1067 break;
1068 case IP_BIND_ADDRESS_NO_PORT:
1069 inet->bind_address_no_port = val ? 1 : 0;
1070 break;
1071 case IP_MTU_DISCOVER:
1072 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
1073 goto e_inval;
1074 inet->pmtudisc = val;
1075 break;
1076 case IP_RECVERR:
1077 inet->recverr = !!val;
1078 if (!val)
1079 skb_queue_purge(&sk->sk_error_queue);
1080 break;
1081 case IP_RECVERR_RFC4884:
1082 if (val < 0 || val > 1)
1083 goto e_inval;
1084 inet->recverr_rfc4884 = !!val;
1085 break;
1086 case IP_MULTICAST_TTL:
1087 if (sk->sk_type == SOCK_STREAM)
1088 goto e_inval;
1089 if (optlen < 1)
1090 goto e_inval;
1091 if (val == -1)
1092 val = 1;
1093 if (val < 0 || val > 255)
1094 goto e_inval;
1095 inet->mc_ttl = val;
1096 break;
1097 case IP_MULTICAST_LOOP:
1098 if (optlen < 1)
1099 goto e_inval;
1100 inet->mc_loop = !!val;
1101 break;
1102 case IP_UNICAST_IF:
1104 struct net_device *dev = NULL;
1105 int ifindex;
1106 int midx;
1108 if (optlen != sizeof(int))
1109 goto e_inval;
1111 ifindex = (__force int)ntohl((__force __be32)val);
1112 if (ifindex == 0) {
1113 inet->uc_index = 0;
1114 err = 0;
1115 break;
1118 dev = dev_get_by_index(sock_net(sk), ifindex);
1119 err = -EADDRNOTAVAIL;
1120 if (!dev)
1121 break;
1123 midx = l3mdev_master_ifindex(dev);
1124 dev_put(dev);
1126 err = -EINVAL;
1127 if (sk->sk_bound_dev_if && midx != sk->sk_bound_dev_if)
1128 break;
1130 inet->uc_index = ifindex;
1131 err = 0;
1132 break;
1134 case IP_MULTICAST_IF:
1136 struct ip_mreqn mreq;
1137 struct net_device *dev = NULL;
1138 int midx;
1140 if (sk->sk_type == SOCK_STREAM)
1141 goto e_inval;
1143 * Check the arguments are allowable
1146 if (optlen < sizeof(struct in_addr))
1147 goto e_inval;
1149 err = -EFAULT;
1150 if (optlen >= sizeof(struct ip_mreqn)) {
1151 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1152 break;
1153 } else {
1154 memset(&mreq, 0, sizeof(mreq));
1155 if (optlen >= sizeof(struct ip_mreq)) {
1156 if (copy_from_sockptr(&mreq, optval,
1157 sizeof(struct ip_mreq)))
1158 break;
1159 } else if (optlen >= sizeof(struct in_addr)) {
1160 if (copy_from_sockptr(&mreq.imr_address, optval,
1161 sizeof(struct in_addr)))
1162 break;
1166 if (!mreq.imr_ifindex) {
1167 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
1168 inet->mc_index = 0;
1169 inet->mc_addr = 0;
1170 err = 0;
1171 break;
1173 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
1174 if (dev)
1175 mreq.imr_ifindex = dev->ifindex;
1176 } else
1177 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
1180 err = -EADDRNOTAVAIL;
1181 if (!dev)
1182 break;
1184 midx = l3mdev_master_ifindex(dev);
1186 dev_put(dev);
1188 err = -EINVAL;
1189 if (sk->sk_bound_dev_if &&
1190 mreq.imr_ifindex != sk->sk_bound_dev_if &&
1191 midx != sk->sk_bound_dev_if)
1192 break;
1194 inet->mc_index = mreq.imr_ifindex;
1195 inet->mc_addr = mreq.imr_address.s_addr;
1196 err = 0;
1197 break;
1200 case IP_ADD_MEMBERSHIP:
1201 case IP_DROP_MEMBERSHIP:
1203 struct ip_mreqn mreq;
1205 err = -EPROTO;
1206 if (inet_sk(sk)->is_icsk)
1207 break;
1209 if (optlen < sizeof(struct ip_mreq))
1210 goto e_inval;
1211 err = -EFAULT;
1212 if (optlen >= sizeof(struct ip_mreqn)) {
1213 if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
1214 break;
1215 } else {
1216 memset(&mreq, 0, sizeof(mreq));
1217 if (copy_from_sockptr(&mreq, optval,
1218 sizeof(struct ip_mreq)))
1219 break;
1222 if (optname == IP_ADD_MEMBERSHIP)
1223 err = ip_mc_join_group(sk, &mreq);
1224 else
1225 err = ip_mc_leave_group(sk, &mreq);
1226 break;
1228 case IP_MSFILTER:
1230 struct ip_msfilter *msf;
1232 if (optlen < IP_MSFILTER_SIZE(0))
1233 goto e_inval;
1234 if (optlen > sysctl_optmem_max) {
1235 err = -ENOBUFS;
1236 break;
1238 msf = memdup_sockptr(optval, optlen);
1239 if (IS_ERR(msf)) {
1240 err = PTR_ERR(msf);
1241 break;
1243 /* numsrc >= (1G-4) overflow in 32 bits */
1244 if (msf->imsf_numsrc >= 0x3ffffffcU ||
1245 msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
1246 kfree(msf);
1247 err = -ENOBUFS;
1248 break;
1250 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
1251 kfree(msf);
1252 err = -EINVAL;
1253 break;
1255 err = ip_mc_msfilter(sk, msf, 0);
1256 kfree(msf);
1257 break;
1259 case IP_BLOCK_SOURCE:
1260 case IP_UNBLOCK_SOURCE:
1261 case IP_ADD_SOURCE_MEMBERSHIP:
1262 case IP_DROP_SOURCE_MEMBERSHIP:
1264 struct ip_mreq_source mreqs;
1265 int omode, add;
1267 if (optlen != sizeof(struct ip_mreq_source))
1268 goto e_inval;
1269 if (copy_from_sockptr(&mreqs, optval, sizeof(mreqs))) {
1270 err = -EFAULT;
1271 break;
1273 if (optname == IP_BLOCK_SOURCE) {
1274 omode = MCAST_EXCLUDE;
1275 add = 1;
1276 } else if (optname == IP_UNBLOCK_SOURCE) {
1277 omode = MCAST_EXCLUDE;
1278 add = 0;
1279 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
1280 struct ip_mreqn mreq;
1282 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
1283 mreq.imr_address.s_addr = mreqs.imr_interface;
1284 mreq.imr_ifindex = 0;
1285 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
1286 if (err && err != -EADDRINUSE)
1287 break;
1288 omode = MCAST_INCLUDE;
1289 add = 1;
1290 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
1291 omode = MCAST_INCLUDE;
1292 add = 0;
1294 err = ip_mc_source(add, omode, sk, &mreqs, 0);
1295 break;
1297 case MCAST_JOIN_GROUP:
1298 case MCAST_LEAVE_GROUP:
1299 if (in_compat_syscall())
1300 err = compat_ip_mcast_join_leave(sk, optname, optval,
1301 optlen);
1302 else
1303 err = ip_mcast_join_leave(sk, optname, optval, optlen);
1304 break;
1305 case MCAST_JOIN_SOURCE_GROUP:
1306 case MCAST_LEAVE_SOURCE_GROUP:
1307 case MCAST_BLOCK_SOURCE:
1308 case MCAST_UNBLOCK_SOURCE:
1309 err = do_mcast_group_source(sk, optname, optval, optlen);
1310 break;
1311 case MCAST_MSFILTER:
1312 if (in_compat_syscall())
1313 err = compat_ip_set_mcast_msfilter(sk, optval, optlen);
1314 else
1315 err = ip_set_mcast_msfilter(sk, optval, optlen);
1316 break;
1317 case IP_MULTICAST_ALL:
1318 if (optlen < 1)
1319 goto e_inval;
1320 if (val != 0 && val != 1)
1321 goto e_inval;
1322 inet->mc_all = val;
1323 break;
1325 case IP_FREEBIND:
1326 if (optlen < 1)
1327 goto e_inval;
1328 inet->freebind = !!val;
1329 break;
1331 case IP_IPSEC_POLICY:
1332 case IP_XFRM_POLICY:
1333 err = -EPERM;
1334 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1335 break;
1336 err = xfrm_user_policy(sk, optname, optval, optlen);
1337 break;
1339 case IP_TRANSPARENT:
1340 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1341 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1342 err = -EPERM;
1343 break;
1345 if (optlen < 1)
1346 goto e_inval;
1347 inet->transparent = !!val;
1348 break;
1350 case IP_MINTTL:
1351 if (optlen < 1)
1352 goto e_inval;
1353 if (val < 0 || val > 255)
1354 goto e_inval;
1355 inet->min_ttl = val;
1356 break;
1358 default:
1359 err = -ENOPROTOOPT;
1360 break;
1362 release_sock(sk);
1363 if (needs_rtnl)
1364 rtnl_unlock();
1365 return err;
1367 e_inval:
1368 release_sock(sk);
1369 if (needs_rtnl)
1370 rtnl_unlock();
1371 return -EINVAL;
1374 /**
1375 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1376 * @sk: socket
1377 * @skb: buffer
1378 *
1379 * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
1380 * destination in skb->cb[] before dst drop.
1381 * This way, the receiver doesn't take cache line misses to read the rtable.
1382 */
1383 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1385 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1386 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
1387 ipv6_sk_rxinfo(sk);
1389 if (prepare && skb_rtable(skb)) {
1390 /* skb->cb is overloaded: prior to this point it is IP{6}CB
1391 * which has interface index (iif) as the first member of the
1392 * underlying inet{6}_skb_parm struct. This code then overlays
1393 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
1394 * element so the iif is picked up from the prior IPCB. If iif
1395 * is the loopback interface, then return the sending interface
1396 * (e.g., process binds socket to eth0 for Tx which is
1397 * redirected to loopback in the rtable/dst).
1399 struct rtable *rt = skb_rtable(skb);
1400 bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
1402 if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
1403 pktinfo->ipi_ifindex = inet_iif(skb);
1404 else if (l3slave && rt && rt->rt_iif)
1405 pktinfo->ipi_ifindex = rt->rt_iif;
1407 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1408 } else {
1409 pktinfo->ipi_ifindex = 0;
1410 pktinfo->ipi_spec_dst.s_addr = 0;
1412 skb_dst_drop(skb);
1415 int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1416 unsigned int optlen)
1418 int err;
1420 if (level != SOL_IP)
1421 return -ENOPROTOOPT;
1423 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1424 #if IS_ENABLED(CONFIG_BPFILTER_UMH)
1425 if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
1426 optname < BPFILTER_IPT_SET_MAX)
1427 err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
1428 #endif
1429 #ifdef CONFIG_NETFILTER
1430 /* we need to exclude all possible ENOPROTOOPTs except default case */
1431 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1432 optname != IP_IPSEC_POLICY &&
1433 optname != IP_XFRM_POLICY &&
1434 !ip_mroute_opt(optname))
1435 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1436 #endif
1437 return err;
1439 EXPORT_SYMBOL(ip_setsockopt);
1441 /*
1442 * Get the options. Note for future reference: the GET of IP options gets
1443 * the _received_ ones, while the SET sets the _sent_ ones.
1444 */
1446 static bool getsockopt_needs_rtnl(int optname)
1448 switch (optname) {
1449 case IP_MSFILTER:
1450 case MCAST_MSFILTER:
1451 return true;
1453 return false;
1456 static int ip_get_mcast_msfilter(struct sock *sk, void __user *optval,
1457 int __user *optlen, int len)
1459 const int size0 = offsetof(struct group_filter, gf_slist);
1460 struct group_filter __user *p = optval;
1461 struct group_filter gsf;
1462 int num;
1463 int err;
1465 if (len < size0)
1466 return -EINVAL;
1467 if (copy_from_user(&gsf, p, size0))
1468 return -EFAULT;
1470 num = gsf.gf_numsrc;
1471 err = ip_mc_gsfget(sk, &gsf, p->gf_slist);
1472 if (err)
1473 return err;
1474 if (gsf.gf_numsrc < num)
1475 num = gsf.gf_numsrc;
1476 if (put_user(GROUP_FILTER_SIZE(num), optlen) ||
1477 copy_to_user(p, &gsf, size0))
1478 return -EFAULT;
1479 return 0;
1482 static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval,
1483 int __user *optlen, int len)
1485 const int size0 = offsetof(struct compat_group_filter, gf_slist);
1486 struct compat_group_filter __user *p = optval;
1487 struct compat_group_filter gf32;
1488 struct group_filter gf;
1489 int num;
1490 int err;
1492 if (len < size0)
1493 return -EINVAL;
1494 if (copy_from_user(&gf32, p, size0))
1495 return -EFAULT;
1497 gf.gf_interface = gf32.gf_interface;
1498 gf.gf_fmode = gf32.gf_fmode;
1499 num = gf.gf_numsrc = gf32.gf_numsrc;
1500 gf.gf_group = gf32.gf_group;
1502 err = ip_mc_gsfget(sk, &gf, p->gf_slist);
1503 if (err)
1504 return err;
1505 if (gf.gf_numsrc < num)
1506 num = gf.gf_numsrc;
1507 len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
1508 if (put_user(len, optlen) ||
1509 put_user(gf.gf_fmode, &p->gf_fmode) ||
1510 put_user(gf.gf_numsrc, &p->gf_numsrc))
1511 return -EFAULT;
1512 return 0;
1515 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1516 char __user *optval, int __user *optlen)
1518 struct inet_sock *inet = inet_sk(sk);
1519 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1520 int val, err = 0;
1521 int len;
1523 if (level != SOL_IP)
1524 return -EOPNOTSUPP;
1526 if (ip_mroute_opt(optname))
1527 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1529 if (get_user(len, optlen))
1530 return -EFAULT;
1531 if (len < 0)
1532 return -EINVAL;
1534 if (needs_rtnl)
1535 rtnl_lock();
1536 lock_sock(sk);
1538 switch (optname) {
1539 case IP_OPTIONS:
1541 unsigned char optbuf[sizeof(struct ip_options)+40];
1542 struct ip_options *opt = (struct ip_options *)optbuf;
1543 struct ip_options_rcu *inet_opt;
1545 inet_opt = rcu_dereference_protected(inet->inet_opt,
1546 lockdep_sock_is_held(sk));
1547 opt->optlen = 0;
1548 if (inet_opt)
1549 memcpy(optbuf, &inet_opt->opt,
1550 sizeof(struct ip_options) +
1551 inet_opt->opt.optlen);
1552 release_sock(sk);
1554 if (opt->optlen == 0)
1555 return put_user(0, optlen);
1557 ip_options_undo(opt);
1559 len = min_t(unsigned int, len, opt->optlen);
1560 if (put_user(len, optlen))
1561 return -EFAULT;
1562 if (copy_to_user(optval, opt->__data, len))
1563 return -EFAULT;
1564 return 0;
1566 case IP_PKTINFO:
1567 val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
1568 break;
1569 case IP_RECVTTL:
1570 val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
1571 break;
1572 case IP_RECVTOS:
1573 val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
1574 break;
1575 case IP_RECVOPTS:
1576 val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
1577 break;
1578 case IP_RETOPTS:
1579 val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
1580 break;
1581 case IP_PASSSEC:
1582 val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
1583 break;
1584 case IP_RECVORIGDSTADDR:
1585 val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
1586 break;
1587 case IP_CHECKSUM:
1588 val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
1589 break;
1590 case IP_RECVFRAGSIZE:
1591 val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0;
1592 break;
1593 case IP_TOS:
1594 val = inet->tos;
1595 break;
1596 case IP_TTL:
1598 struct net *net = sock_net(sk);
1599 val = (inet->uc_ttl == -1 ?
1600 net->ipv4.sysctl_ip_default_ttl :
1601 inet->uc_ttl);
1602 break;
1604 case IP_HDRINCL:
1605 val = inet->hdrincl;
1606 break;
1607 case IP_NODEFRAG:
1608 val = inet->nodefrag;
1609 break;
1610 case IP_BIND_ADDRESS_NO_PORT:
1611 val = inet->bind_address_no_port;
1612 break;
1613 case IP_MTU_DISCOVER:
1614 val = inet->pmtudisc;
1615 break;
1616 case IP_MTU:
1618 struct dst_entry *dst;
1619 val = 0;
1620 dst = sk_dst_get(sk);
1621 if (dst) {
1622 val = dst_mtu(dst);
1623 dst_release(dst);
1625 if (!val) {
1626 release_sock(sk);
1627 return -ENOTCONN;
1629 break;
1631 case IP_RECVERR:
1632 val = inet->recverr;
1633 break;
1634 case IP_RECVERR_RFC4884:
1635 val = inet->recverr_rfc4884;
1636 break;
1637 case IP_MULTICAST_TTL:
1638 val = inet->mc_ttl;
1639 break;
1640 case IP_MULTICAST_LOOP:
1641 val = inet->mc_loop;
1642 break;
1643 case IP_UNICAST_IF:
1644 val = (__force int)htonl((__u32) inet->uc_index);
1645 break;
1646 case IP_MULTICAST_IF:
1648 struct in_addr addr;
1649 len = min_t(unsigned int, len, sizeof(struct in_addr));
1650 addr.s_addr = inet->mc_addr;
1651 release_sock(sk);
1653 if (put_user(len, optlen))
1654 return -EFAULT;
1655 if (copy_to_user(optval, &addr, len))
1656 return -EFAULT;
1657 return 0;
1659 case IP_MSFILTER:
1661 struct ip_msfilter msf;
1663 if (len < IP_MSFILTER_SIZE(0)) {
1664 err = -EINVAL;
1665 goto out;
1667 if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
1668 err = -EFAULT;
1669 goto out;
1671 err = ip_mc_msfget(sk, &msf,
1672 (struct ip_msfilter __user *)optval, optlen);
1673 goto out;
1675 case MCAST_MSFILTER:
1676 if (in_compat_syscall())
1677 err = compat_ip_get_mcast_msfilter(sk, optval, optlen,
1678 len);
1679 else
1680 err = ip_get_mcast_msfilter(sk, optval, optlen, len);
1681 goto out;
1682 case IP_MULTICAST_ALL:
1683 val = inet->mc_all;
1684 break;
1685 case IP_PKTOPTIONS:
1687 struct msghdr msg;
1689 release_sock(sk);
1691 if (sk->sk_type != SOCK_STREAM)
1692 return -ENOPROTOOPT;
1694 msg.msg_control_is_user = true;
1695 msg.msg_control_user = optval;
1696 msg.msg_controllen = len;
1697 msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
1699 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1700 struct in_pktinfo info;
1702 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1703 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1704 info.ipi_ifindex = inet->mc_index;
1705 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1707 if (inet->cmsg_flags & IP_CMSG_TTL) {
1708 int hlim = inet->mc_ttl;
1709 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1711 if (inet->cmsg_flags & IP_CMSG_TOS) {
1712 int tos = inet->rcv_tos;
1713 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1715 len -= msg.msg_controllen;
1716 return put_user(len, optlen);
1718 case IP_FREEBIND:
1719 val = inet->freebind;
1720 break;
1721 case IP_TRANSPARENT:
1722 val = inet->transparent;
1723 break;
1724 case IP_MINTTL:
1725 val = inet->min_ttl;
1726 break;
1727 default:
1728 release_sock(sk);
1729 return -ENOPROTOOPT;
1731 release_sock(sk);
1733 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1734 unsigned char ucval = (unsigned char)val;
1735 len = 1;
1736 if (put_user(len, optlen))
1737 return -EFAULT;
1738 if (copy_to_user(optval, &ucval, 1))
1739 return -EFAULT;
1740 } else {
1741 len = min_t(unsigned int, sizeof(int), len);
1742 if (put_user(len, optlen))
1743 return -EFAULT;
1744 if (copy_to_user(optval, &val, len))
1745 return -EFAULT;
1747 return 0;
1749 out:
1750 release_sock(sk);
1751 if (needs_rtnl)
1752 rtnl_unlock();
1753 return err;
1756 int ip_getsockopt(struct sock *sk, int level,
1757 int optname, char __user *optval, int __user *optlen)
1759 int err;
1761 err = do_ip_getsockopt(sk, level, optname, optval, optlen);
1763 #if IS_ENABLED(CONFIG_BPFILTER_UMH)
1764 if (optname >= BPFILTER_IPT_SO_GET_INFO &&
1765 optname < BPFILTER_IPT_GET_MAX)
1766 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
1767 #endif
1768 #ifdef CONFIG_NETFILTER
1769 /* we need to exclude all possible ENOPROTOOPTs except default case */
1770 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1771 !ip_mroute_opt(optname)) {
1772 int len;
1774 if (get_user(len, optlen))
1775 return -EFAULT;
1777 err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
1778 if (err >= 0)
1779 err = put_user(len, optlen);
1780 return err;
1782 #endif
1783 return err;
1785 EXPORT_SYMBOL(ip_getsockopt);
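/* Usage sketch (not part of this file): reading two of the values that
 * do_ip_getsockopt() above reports - the route MTU of a connected socket and
 * the configured unicast TTL. The helper name and minimal error handling are
 * illustrative only.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

static void show_ip_info(int fd)
{
	int mtu, ttl;
	socklen_t len;

	len = sizeof(mtu);
	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
		printf("path mtu: %d\n", mtu);	/* fails with ENOTCONN if not connected */

	len = sizeof(ttl);
	if (getsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, &len) == 0)
		printf("unicast ttl: %d\n", ttl);
}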