net/ipv4/ip_sockglue.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
7 * The IP to API glue.
9 * Authors: see ip.c
11 * Fixes:
12 * Many : Split from ip.c , see ip.c for history.
13 * Martin Mares : TOS setting fixed.
14 * Alan Cox : Fixed a couple of oopses in Martin's
15 * TOS tweaks.
16 * Mike McLagan : Routing by source
17 */
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/mm.h>
22 #include <linux/skbuff.h>
23 #include <linux/ip.h>
24 #include <linux/icmp.h>
25 #include <linux/inetdevice.h>
26 #include <linux/netdevice.h>
27 #include <linux/slab.h>
28 #include <net/sock.h>
29 #include <net/ip.h>
30 #include <net/icmp.h>
31 #include <net/tcp_states.h>
32 #include <linux/udp.h>
33 #include <linux/igmp.h>
34 #include <linux/netfilter.h>
35 #include <linux/route.h>
36 #include <linux/mroute.h>
37 #include <net/inet_ecn.h>
38 #include <net/route.h>
39 #include <net/xfrm.h>
40 #include <net/compat.h>
41 #include <net/checksum.h>
42 #if IS_ENABLED(CONFIG_IPV6)
43 #include <net/transp_v6.h>
44 #endif
45 #include <net/ip_fib.h>
47 #include <linux/errqueue.h>
48 #include <linux/uaccess.h>
50 #include <linux/bpfilter.h>
52 /*
53 * SOL_IP control messages.
54 */
56 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
58 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
60 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
62 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
65 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
67 int ttl = ip_hdr(skb)->ttl;
68 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
71 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
73 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
76 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
78 if (IPCB(skb)->opt.optlen == 0)
79 return;
81 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
82 ip_hdr(skb) + 1);
86 static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg,
87 struct sk_buff *skb)
89 unsigned char optbuf[sizeof(struct ip_options) + 40];
90 struct ip_options *opt = (struct ip_options *)optbuf;
92 if (IPCB(skb)->opt.optlen == 0)
93 return;
95 if (ip_options_echo(net, opt, skb)) {
96 msg->msg_flags |= MSG_CTRUNC;
97 return;
99 ip_options_undo(opt);
101 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
104 static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
106 int val;
108 if (IPCB(skb)->frag_max_size == 0)
109 return;
111 val = IPCB(skb)->frag_max_size;
112 put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
115 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
116 int tlen, int offset)
118 __wsum csum = skb->csum;
120 if (skb->ip_summed != CHECKSUM_COMPLETE)
121 return;
123 if (offset != 0) {
124 int tend_off = skb_transport_offset(skb) + tlen;
125 csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
128 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
131 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
133 char *secdata;
134 u32 seclen, secid;
135 int err;
137 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
138 if (err)
139 return;
141 err = security_secid_to_secctx(secid, &secdata, &seclen);
142 if (err)
143 return;
145 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
146 security_release_secctx(secdata, seclen);
149 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
151 struct sockaddr_in sin;
152 const struct iphdr *iph = ip_hdr(skb);
153 __be16 *ports;
154 int end;
156 end = skb_transport_offset(skb) + 4;
157 if (end > 0 && !pskb_may_pull(skb, end))
158 return;
160 /* All current transport protocols have the port numbers in the
161 * first four bytes of the transport header and this function is
162 * written with this assumption in mind.
163 */
164 ports = (__be16 *)skb_transport_header(skb);
166 sin.sin_family = AF_INET;
167 sin.sin_addr.s_addr = iph->daddr;
168 sin.sin_port = ports[1];
169 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
171 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
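ip_cmsg_recv_dstaddr() is what delivers the IP_ORIGDSTADDR ancillary message when IP_RECVORIGDSTADDR is enabled, typically on UDP sockets sitting behind a TPROXY/redirect rule. The following is a minimal userspace sketch (not part of this file) of a receiver that reads the original destination; the bound port is an arbitrary placeholder, error handling is trimmed, and the fallback defines are only there for older libc headers.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_RECVORIGDSTADDR
#define IP_ORIGDSTADDR     20
#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	struct sockaddr_in bindaddr = {
		.sin_family = AF_INET,
		.sin_port = htons(5353),		/* placeholder port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char data[1500], cbuf[CMSG_SPACE(sizeof(struct sockaddr_in))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	setsockopt(fd, IPPROTO_IP, IP_RECVORIGDSTADDR, &on, sizeof(on));
	bind(fd, (struct sockaddr *)&bindaddr, sizeof(bindaddr));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP &&
		    cmsg->cmsg_type == IP_ORIGDSTADDR) {
			struct sockaddr_in orig;

			/* The kernel fills daddr and the second transport port. */
			memcpy(&orig, CMSG_DATA(cmsg), sizeof(orig));
			printf("original dst %s:%u\n",
			       inet_ntoa(orig.sin_addr), ntohs(orig.sin_port));
		}
	}
	close(fd);
	return 0;
}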
174 void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
175 struct sk_buff *skb, int tlen, int offset)
177 struct inet_sock *inet = inet_sk(sk);
178 unsigned int flags = inet->cmsg_flags;
180 /* Ordered by supposed usage frequency */
181 if (flags & IP_CMSG_PKTINFO) {
182 ip_cmsg_recv_pktinfo(msg, skb);
184 flags &= ~IP_CMSG_PKTINFO;
185 if (!flags)
186 return;
189 if (flags & IP_CMSG_TTL) {
190 ip_cmsg_recv_ttl(msg, skb);
192 flags &= ~IP_CMSG_TTL;
193 if (!flags)
194 return;
197 if (flags & IP_CMSG_TOS) {
198 ip_cmsg_recv_tos(msg, skb);
200 flags &= ~IP_CMSG_TOS;
201 if (!flags)
202 return;
205 if (flags & IP_CMSG_RECVOPTS) {
206 ip_cmsg_recv_opts(msg, skb);
208 flags &= ~IP_CMSG_RECVOPTS;
209 if (!flags)
210 return;
213 if (flags & IP_CMSG_RETOPTS) {
214 ip_cmsg_recv_retopts(sock_net(sk), msg, skb);
216 flags &= ~IP_CMSG_RETOPTS;
217 if (!flags)
218 return;
221 if (flags & IP_CMSG_PASSSEC) {
222 ip_cmsg_recv_security(msg, skb);
224 flags &= ~IP_CMSG_PASSSEC;
225 if (!flags)
226 return;
229 if (flags & IP_CMSG_ORIGDSTADDR) {
230 ip_cmsg_recv_dstaddr(msg, skb);
232 flags &= ~IP_CMSG_ORIGDSTADDR;
233 if (!flags)
234 return;
237 if (flags & IP_CMSG_CHECKSUM)
238 ip_cmsg_recv_checksum(msg, skb, tlen, offset);
240 if (flags & IP_CMSG_RECVFRAGSIZE)
241 ip_cmsg_recv_fragsize(msg, skb);
243 EXPORT_SYMBOL(ip_cmsg_recv_offset);
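The receive-side helpers above are driven by the IP_CMSG_* bits that setsockopt() sets further down in this file. As an illustration only, a userspace datagram receiver that asks for IP_PKTINFO, IP_RECVTTL and IP_RECVTOS and walks the resulting control messages might look like the sketch below; the bound port is a placeholder. Note that IP_TOS is delivered as a single byte, exactly as ip_cmsg_recv_tos() above passes it to put_cmsg().

#define _GNU_SOURCE			/* struct in_pktinfo on some libcs */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),	/* placeholder port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	char data[1500];
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo)) + 2 * CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	/* Each of these sets one IP_CMSG_* bit in inet->cmsg_flags. */
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));
	setsockopt(fd, IPPROTO_IP, IP_RECVTOS, &on, sizeof(on));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level != IPPROTO_IP)
			continue;
		if (cmsg->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo info;

			memcpy(&info, CMSG_DATA(cmsg), sizeof(info));
			printf("arrived on ifindex %d\n", info.ipi_ifindex);
		} else if (cmsg->cmsg_type == IP_TTL) {
			int ttl;

			memcpy(&ttl, CMSG_DATA(cmsg), sizeof(ttl));
			printf("ttl %d\n", ttl);
		} else if (cmsg->cmsg_type == IP_TOS) {
			unsigned char tos = *CMSG_DATA(cmsg);	/* one byte */

			printf("tos 0x%02x\n", tos);
		}
	}
	close(fd);
	return 0;
}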
245 int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
246 bool allow_ipv6)
248 int err, val;
249 struct cmsghdr *cmsg;
250 struct net *net = sock_net(sk);
252 for_each_cmsghdr(cmsg, msg) {
253 if (!CMSG_OK(msg, cmsg))
254 return -EINVAL;
255 #if IS_ENABLED(CONFIG_IPV6)
256 if (allow_ipv6 &&
257 cmsg->cmsg_level == SOL_IPV6 &&
258 cmsg->cmsg_type == IPV6_PKTINFO) {
259 struct in6_pktinfo *src_info;
261 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
262 return -EINVAL;
263 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
264 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
265 return -EINVAL;
266 if (src_info->ipi6_ifindex)
267 ipc->oif = src_info->ipi6_ifindex;
268 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
269 continue;
271 #endif
272 if (cmsg->cmsg_level == SOL_SOCKET) {
273 err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
274 if (err)
275 return err;
276 continue;
279 if (cmsg->cmsg_level != SOL_IP)
280 continue;
281 switch (cmsg->cmsg_type) {
282 case IP_RETOPTS:
283 err = cmsg->cmsg_len - sizeof(struct cmsghdr);
285 /* Our caller is responsible for freeing ipc->opt */
286 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
287 err < 40 ? err : 40);
288 if (err)
289 return err;
290 break;
291 case IP_PKTINFO:
293 struct in_pktinfo *info;
294 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
295 return -EINVAL;
296 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
297 if (info->ipi_ifindex)
298 ipc->oif = info->ipi_ifindex;
299 ipc->addr = info->ipi_spec_dst.s_addr;
300 break;
302 case IP_TTL:
303 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
304 return -EINVAL;
305 val = *(int *)CMSG_DATA(cmsg);
306 if (val < 1 || val > 255)
307 return -EINVAL;
308 ipc->ttl = val;
309 break;
310 case IP_TOS:
311 if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
312 val = *(int *)CMSG_DATA(cmsg);
313 else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
314 val = *(u8 *)CMSG_DATA(cmsg);
315 else
316 return -EINVAL;
317 if (val < 0 || val > 255)
318 return -EINVAL;
319 ipc->tos = val;
320 ipc->priority = rt_tos2priority(ipc->tos);
321 break;
323 default:
324 return -EINVAL;
327 return 0;
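ip_cmsg_send() is the sending-side counterpart: it parses SOL_IP (and, when allowed, SOL_IPV6) control messages supplied with sendmsg() into the ipcm_cookie. A hedged userspace sketch that exercises the IP_PKTINFO and IP_TOS branches follows; the destination and preferred source addresses are placeholders from the 192.0.2.0/24 documentation range, and error handling is omitted.

#define _GNU_SOURCE			/* struct in_pktinfo on some libcs */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),	/* placeholder port */
	};
	char payload[] = "hello";
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo)) + CMSG_SPACE(sizeof(int))];
	struct msghdr msg = {
		.msg_name = &dst, .msg_namelen = sizeof(dst),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	struct in_pktinfo info = { 0 };
	int tos = 0x10;				/* IPTOS_LOWDELAY */

	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);		/* placeholder dest */
	inet_pton(AF_INET, "192.0.2.100", &info.ipi_spec_dst);	/* preferred source */

	memset(cbuf, 0, sizeof(cbuf));

	/* IP_PKTINFO: ipi_spec_dst selects the source address, ipi_ifindex
	 * (left zero here) would select the output interface. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type = IP_PKTINFO;
	cmsg->cmsg_len = CMSG_LEN(sizeof(info));
	memcpy(CMSG_DATA(cmsg), &info, sizeof(info));

	/* IP_TOS: the kernel accepts either an int or a single byte here. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = IPPROTO_IP;
	cmsg->cmsg_type = IP_TOS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(tos));
	memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos));

	msg.msg_controllen = CMSG_SPACE(sizeof(info)) + CMSG_SPACE(sizeof(tos));

	sendmsg(fd, &msg, 0);
	close(fd);
	return 0;
}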
330 static void ip_ra_destroy_rcu(struct rcu_head *head)
332 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
334 sock_put(ra->saved_sk);
335 kfree(ra);
338 int ip_ra_control(struct sock *sk, unsigned char on,
339 void (*destructor)(struct sock *))
341 struct ip_ra_chain *ra, *new_ra;
342 struct ip_ra_chain __rcu **rap;
343 struct net *net = sock_net(sk);
345 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
346 return -EINVAL;
348 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
350 mutex_lock(&net->ipv4.ra_mutex);
351 for (rap = &net->ipv4.ra_chain;
352 (ra = rcu_dereference_protected(*rap,
353 lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
354 rap = &ra->next) {
355 if (ra->sk == sk) {
356 if (on) {
357 mutex_unlock(&net->ipv4.ra_mutex);
358 kfree(new_ra);
359 return -EADDRINUSE;
361 /* don't let ip_call_ra_chain() use sk again */
362 ra->sk = NULL;
363 RCU_INIT_POINTER(*rap, ra->next);
364 mutex_unlock(&net->ipv4.ra_mutex);
366 if (ra->destructor)
367 ra->destructor(sk);
368 /*
369 * Delay sock_put(sk) and kfree(ra) after one rcu grace
370 * period. This guarantees ip_call_ra_chain() does not need
371 * to mess with socket refcounts.
372 */
373 ra->saved_sk = sk;
374 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
375 return 0;
378 if (!new_ra) {
379 mutex_unlock(&net->ipv4.ra_mutex);
380 return -ENOBUFS;
382 new_ra->sk = sk;
383 new_ra->destructor = destructor;
385 RCU_INIT_POINTER(new_ra->next, ra);
386 rcu_assign_pointer(*rap, new_ra);
387 sock_hold(sk);
388 mutex_unlock(&net->ipv4.ra_mutex);
390 return 0;
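ip_ra_control() maintains the per-netns router-alert chain and, as the check at the top of the function shows, only accepts raw sockets whose protocol is not IPPROTO_RAW. Below is a minimal, illustrative sketch of how a routing daemon might register for Router Alert packets; it assumes an RSVP raw socket, requires CAP_NET_RAW, and does no real protocol processing.

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPPROTO_RSVP
#define IPPROTO_RSVP 46			/* fallback for older headers */
#endif

int main(void)
{
	/* Raw sockets need CAP_NET_RAW; protocol IPPROTO_RAW is rejected above. */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
	int on = 1;
	char buf[2048];
	ssize_t n;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0) {
		perror("IP_ROUTER_ALERT");
		return 1;
	}
	/* The socket is now on net->ipv4.ra_chain and receives IP packets
	 * carrying the Router Alert option for this protocol. */
	n = recv(fd, buf, sizeof(buf), 0);
	printf("got %zd bytes (IP header included on raw sockets)\n", n);
	close(fd);
	return 0;
}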
393 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
394 __be16 port, u32 info, u8 *payload)
396 struct sock_exterr_skb *serr;
398 skb = skb_clone(skb, GFP_ATOMIC);
399 if (!skb)
400 return;
402 serr = SKB_EXT_ERR(skb);
403 serr->ee.ee_errno = err;
404 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
405 serr->ee.ee_type = icmp_hdr(skb)->type;
406 serr->ee.ee_code = icmp_hdr(skb)->code;
407 serr->ee.ee_pad = 0;
408 serr->ee.ee_info = info;
409 serr->ee.ee_data = 0;
410 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
411 skb_network_header(skb);
412 serr->port = port;
414 if (skb_pull(skb, payload - skb->data)) {
415 skb_reset_transport_header(skb);
416 if (sock_queue_err_skb(sk, skb) == 0)
417 return;
419 kfree_skb(skb);
422 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
424 struct inet_sock *inet = inet_sk(sk);
425 struct sock_exterr_skb *serr;
426 struct iphdr *iph;
427 struct sk_buff *skb;
429 if (!inet->recverr)
430 return;
432 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
433 if (!skb)
434 return;
436 skb_put(skb, sizeof(struct iphdr));
437 skb_reset_network_header(skb);
438 iph = ip_hdr(skb);
439 iph->daddr = daddr;
441 serr = SKB_EXT_ERR(skb);
442 serr->ee.ee_errno = err;
443 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
444 serr->ee.ee_type = 0;
445 serr->ee.ee_code = 0;
446 serr->ee.ee_pad = 0;
447 serr->ee.ee_info = info;
448 serr->ee.ee_data = 0;
449 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
450 serr->port = port;
452 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
453 skb_reset_transport_header(skb);
455 if (sock_queue_err_skb(sk, skb))
456 kfree_skb(skb);
459 /* For some errors we have valid addr_offset even with zero payload and
460 * zero port. Also, addr_offset should be supported if port is set.
461 */
462 static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
464 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
465 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
468 /* IPv4 supports cmsg on all icmp errors and some timestamps
469 *
470 * Timestamp code paths do not initialize the fields expected by cmsg:
471 * the PKTINFO fields in skb->cb[]. Fill those in here.
472 */
473 static bool ipv4_datagram_support_cmsg(const struct sock *sk,
474 struct sk_buff *skb,
475 int ee_origin)
477 struct in_pktinfo *info;
479 if (ee_origin == SO_EE_ORIGIN_ICMP)
480 return true;
482 if (ee_origin == SO_EE_ORIGIN_LOCAL)
483 return false;
485 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
486 * timestamp with egress dev. Not possible for packets without iif
487 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
488 */
489 info = PKTINFO_SKB_CB(skb);
490 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
491 !info->ipi_ifindex)
492 return false;
494 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
495 return true;
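The branch above is what lets an IP_PKTINFO cmsg accompany transmit timestamps read from the error queue when SOF_TIMESTAMPING_OPT_CMSG is requested. A sketch of the userspace setup follows, assuming SO_TIMESTAMPING and the SOF_* flags from <linux/net_tstamp.h> are available on the build host; the send and error-queue receive loop are elided.

#include <linux/net_tstamp.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37		/* value on most architectures; guard only */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	unsigned int ts = SOF_TIMESTAMPING_TX_SOFTWARE |
			  SOF_TIMESTAMPING_SOFTWARE |
			  SOF_TIMESTAMPING_OPT_CMSG |
			  SOF_TIMESTAMPING_OPT_TSONLY;

	/* IP_PKTINFO must be requested for the correlation to happen. */
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));

	/* After sendto(), read the timestamp with
	 * recvmsg(fd, &msg, MSG_ERRQUEUE); because of OPT_CMSG the same
	 * message also carries an IP_PKTINFO cmsg identifying the egress
	 * device, as described in the comment above. */
	close(fd);
	return 0;
}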
498 /*
499 * Handle MSG_ERRQUEUE
500 */
501 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
503 struct sock_exterr_skb *serr;
504 struct sk_buff *skb;
505 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
506 struct {
507 struct sock_extended_err ee;
508 struct sockaddr_in offender;
509 } errhdr;
510 int err;
511 int copied;
513 err = -EAGAIN;
514 skb = sock_dequeue_err_skb(sk);
515 if (!skb)
516 goto out;
518 copied = skb->len;
519 if (copied > len) {
520 msg->msg_flags |= MSG_TRUNC;
521 copied = len;
523 err = skb_copy_datagram_msg(skb, 0, msg, copied);
524 if (unlikely(err)) {
525 kfree_skb(skb);
526 return err;
528 sock_recv_timestamp(msg, sk, skb);
530 serr = SKB_EXT_ERR(skb);
532 if (sin && ipv4_datagram_support_addr(serr)) {
533 sin->sin_family = AF_INET;
534 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
535 serr->addr_offset);
536 sin->sin_port = serr->port;
537 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
538 *addr_len = sizeof(*sin);
541 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
542 sin = &errhdr.offender;
543 memset(sin, 0, sizeof(*sin));
545 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
546 sin->sin_family = AF_INET;
547 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
548 if (inet_sk(sk)->cmsg_flags)
549 ip_cmsg_recv(msg, skb);
552 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
554 /* Now we could try to dump offended packet options */
556 msg->msg_flags |= MSG_ERRQUEUE;
557 err = copied;
559 consume_skb(skb);
560 out:
561 return err;
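ip_recv_error() is reached from userspace by enabling IP_RECVERR and then calling recvmsg() with MSG_ERRQUEUE. A self-contained sketch is below; the destination address is a documentation-range placeholder intended to provoke an ICMP error, and the one-second sleep only keeps the example linear.

#include <arpa/inet.h>
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void drain_errqueue(int fd)
{
	char data[1500], cbuf[512];
	struct sockaddr_in addr;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_name = &addr, .msg_namelen = sizeof(addr),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;				/* EAGAIN: queue empty */

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP &&
		    cmsg->cmsg_type == IP_RECVERR) {
			struct sock_extended_err ee;

			/* The cmsg payload starts with sock_extended_err,
			 * followed by the offender's sockaddr_in. */
			memcpy(&ee, CMSG_DATA(cmsg), sizeof(ee));
			fprintf(stderr, "err %d origin %u type %u code %u\n",
				ee.ee_errno, ee.ee_origin, ee.ee_type, ee.ee_code);
		}
	}
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(33434) };

	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* placeholder address */
	sendto(fd, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst));
	sleep(1);			/* give any ICMP error time to arrive */
	drain_errqueue(fd);
	close(fd);
	return 0;
}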
565 /*
566 * Socket option code for IP. This is the end of the line after any
567 * TCP,UDP etc options on an IP socket.
568 */
569 static bool setsockopt_needs_rtnl(int optname)
571 switch (optname) {
572 case IP_ADD_MEMBERSHIP:
573 case IP_ADD_SOURCE_MEMBERSHIP:
574 case IP_BLOCK_SOURCE:
575 case IP_DROP_MEMBERSHIP:
576 case IP_DROP_SOURCE_MEMBERSHIP:
577 case IP_MSFILTER:
578 case IP_UNBLOCK_SOURCE:
579 case MCAST_BLOCK_SOURCE:
580 case MCAST_MSFILTER:
581 case MCAST_JOIN_GROUP:
582 case MCAST_JOIN_SOURCE_GROUP:
583 case MCAST_LEAVE_GROUP:
584 case MCAST_LEAVE_SOURCE_GROUP:
585 case MCAST_UNBLOCK_SOURCE:
586 return true;
588 return false;
591 static int do_ip_setsockopt(struct sock *sk, int level,
592 int optname, char __user *optval, unsigned int optlen)
594 struct inet_sock *inet = inet_sk(sk);
595 struct net *net = sock_net(sk);
596 int val = 0, err;
597 bool needs_rtnl = setsockopt_needs_rtnl(optname);
599 switch (optname) {
600 case IP_PKTINFO:
601 case IP_RECVTTL:
602 case IP_RECVOPTS:
603 case IP_RECVTOS:
604 case IP_RETOPTS:
605 case IP_TOS:
606 case IP_TTL:
607 case IP_HDRINCL:
608 case IP_MTU_DISCOVER:
609 case IP_RECVERR:
610 case IP_ROUTER_ALERT:
611 case IP_FREEBIND:
612 case IP_PASSSEC:
613 case IP_TRANSPARENT:
614 case IP_MINTTL:
615 case IP_NODEFRAG:
616 case IP_BIND_ADDRESS_NO_PORT:
617 case IP_UNICAST_IF:
618 case IP_MULTICAST_TTL:
619 case IP_MULTICAST_ALL:
620 case IP_MULTICAST_LOOP:
621 case IP_RECVORIGDSTADDR:
622 case IP_CHECKSUM:
623 case IP_RECVFRAGSIZE:
624 if (optlen >= sizeof(int)) {
625 if (get_user(val, (int __user *) optval))
626 return -EFAULT;
627 } else if (optlen >= sizeof(char)) {
628 unsigned char ucval;
630 if (get_user(ucval, (unsigned char __user *) optval))
631 return -EFAULT;
632 val = (int) ucval;
636 /* If optlen==0, it is equivalent to val == 0 */
638 if (optname == IP_ROUTER_ALERT)
639 return ip_ra_control(sk, val ? 1 : 0, NULL);
640 if (ip_mroute_opt(optname))
641 return ip_mroute_setsockopt(sk, optname, optval, optlen);
643 err = 0;
644 if (needs_rtnl)
645 rtnl_lock();
646 lock_sock(sk);
648 switch (optname) {
649 case IP_OPTIONS:
651 struct ip_options_rcu *old, *opt = NULL;
653 if (optlen > 40)
654 goto e_inval;
655 err = ip_options_get_from_user(sock_net(sk), &opt,
656 optval, optlen);
657 if (err)
658 break;
659 old = rcu_dereference_protected(inet->inet_opt,
660 lockdep_sock_is_held(sk));
661 if (inet->is_icsk) {
662 struct inet_connection_sock *icsk = inet_csk(sk);
663 #if IS_ENABLED(CONFIG_IPV6)
664 if (sk->sk_family == PF_INET ||
665 (!((1 << sk->sk_state) &
666 (TCPF_LISTEN | TCPF_CLOSE)) &&
667 inet->inet_daddr != LOOPBACK4_IPV6)) {
668 #endif
669 if (old)
670 icsk->icsk_ext_hdr_len -= old->opt.optlen;
671 if (opt)
672 icsk->icsk_ext_hdr_len += opt->opt.optlen;
673 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
674 #if IS_ENABLED(CONFIG_IPV6)
676 #endif
678 rcu_assign_pointer(inet->inet_opt, opt);
679 if (old)
680 kfree_rcu(old, rcu);
681 break;
683 case IP_PKTINFO:
684 if (val)
685 inet->cmsg_flags |= IP_CMSG_PKTINFO;
686 else
687 inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
688 break;
689 case IP_RECVTTL:
690 if (val)
691 inet->cmsg_flags |= IP_CMSG_TTL;
692 else
693 inet->cmsg_flags &= ~IP_CMSG_TTL;
694 break;
695 case IP_RECVTOS:
696 if (val)
697 inet->cmsg_flags |= IP_CMSG_TOS;
698 else
699 inet->cmsg_flags &= ~IP_CMSG_TOS;
700 break;
701 case IP_RECVOPTS:
702 if (val)
703 inet->cmsg_flags |= IP_CMSG_RECVOPTS;
704 else
705 inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
706 break;
707 case IP_RETOPTS:
708 if (val)
709 inet->cmsg_flags |= IP_CMSG_RETOPTS;
710 else
711 inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
712 break;
713 case IP_PASSSEC:
714 if (val)
715 inet->cmsg_flags |= IP_CMSG_PASSSEC;
716 else
717 inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
718 break;
719 case IP_RECVORIGDSTADDR:
720 if (val)
721 inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
722 else
723 inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
724 break;
725 case IP_CHECKSUM:
726 if (val) {
727 if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
728 inet_inc_convert_csum(sk);
729 inet->cmsg_flags |= IP_CMSG_CHECKSUM;
731 } else {
732 if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
733 inet_dec_convert_csum(sk);
734 inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
737 break;
738 case IP_RECVFRAGSIZE:
739 if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
740 goto e_inval;
741 if (val)
742 inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE;
743 else
744 inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE;
745 break;
746 case IP_TOS: /* This sets both TOS and Precedence */
747 if (sk->sk_type == SOCK_STREAM) {
748 val &= ~INET_ECN_MASK;
749 val |= inet->tos & INET_ECN_MASK;
751 if (inet->tos != val) {
752 inet->tos = val;
753 sk->sk_priority = rt_tos2priority(val);
754 sk_dst_reset(sk);
756 break;
757 case IP_TTL:
758 if (optlen < 1)
759 goto e_inval;
760 if (val != -1 && (val < 1 || val > 255))
761 goto e_inval;
762 inet->uc_ttl = val;
763 break;
764 case IP_HDRINCL:
765 if (sk->sk_type != SOCK_RAW) {
766 err = -ENOPROTOOPT;
767 break;
769 inet->hdrincl = val ? 1 : 0;
770 break;
771 case IP_NODEFRAG:
772 if (sk->sk_type != SOCK_RAW) {
773 err = -ENOPROTOOPT;
774 break;
776 inet->nodefrag = val ? 1 : 0;
777 break;
778 case IP_BIND_ADDRESS_NO_PORT:
779 inet->bind_address_no_port = val ? 1 : 0;
780 break;
781 case IP_MTU_DISCOVER:
782 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
783 goto e_inval;
784 inet->pmtudisc = val;
785 break;
786 case IP_RECVERR:
787 inet->recverr = !!val;
788 if (!val)
789 skb_queue_purge(&sk->sk_error_queue);
790 break;
791 case IP_MULTICAST_TTL:
792 if (sk->sk_type == SOCK_STREAM)
793 goto e_inval;
794 if (optlen < 1)
795 goto e_inval;
796 if (val == -1)
797 val = 1;
798 if (val < 0 || val > 255)
799 goto e_inval;
800 inet->mc_ttl = val;
801 break;
802 case IP_MULTICAST_LOOP:
803 if (optlen < 1)
804 goto e_inval;
805 inet->mc_loop = !!val;
806 break;
807 case IP_UNICAST_IF:
809 struct net_device *dev = NULL;
810 int ifindex;
811 int midx;
813 if (optlen != sizeof(int))
814 goto e_inval;
816 ifindex = (__force int)ntohl((__force __be32)val);
817 if (ifindex == 0) {
818 inet->uc_index = 0;
819 err = 0;
820 break;
823 dev = dev_get_by_index(sock_net(sk), ifindex);
824 err = -EADDRNOTAVAIL;
825 if (!dev)
826 break;
828 midx = l3mdev_master_ifindex(dev);
829 dev_put(dev);
831 err = -EINVAL;
832 if (sk->sk_bound_dev_if &&
833 (!midx || midx != sk->sk_bound_dev_if))
834 break;
836 inet->uc_index = ifindex;
837 err = 0;
838 break;
840 case IP_MULTICAST_IF:
842 struct ip_mreqn mreq;
843 struct net_device *dev = NULL;
844 int midx;
846 if (sk->sk_type == SOCK_STREAM)
847 goto e_inval;
848 /*
849 * Check the arguments are allowable
850 */
852 if (optlen < sizeof(struct in_addr))
853 goto e_inval;
855 err = -EFAULT;
856 if (optlen >= sizeof(struct ip_mreqn)) {
857 if (copy_from_user(&mreq, optval, sizeof(mreq)))
858 break;
859 } else {
860 memset(&mreq, 0, sizeof(mreq));
861 if (optlen >= sizeof(struct ip_mreq)) {
862 if (copy_from_user(&mreq, optval,
863 sizeof(struct ip_mreq)))
864 break;
865 } else if (optlen >= sizeof(struct in_addr)) {
866 if (copy_from_user(&mreq.imr_address, optval,
867 sizeof(struct in_addr)))
868 break;
872 if (!mreq.imr_ifindex) {
873 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
874 inet->mc_index = 0;
875 inet->mc_addr = 0;
876 err = 0;
877 break;
879 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
880 if (dev)
881 mreq.imr_ifindex = dev->ifindex;
882 } else
883 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
886 err = -EADDRNOTAVAIL;
887 if (!dev)
888 break;
890 midx = l3mdev_master_ifindex(dev);
892 dev_put(dev);
894 err = -EINVAL;
895 if (sk->sk_bound_dev_if &&
896 mreq.imr_ifindex != sk->sk_bound_dev_if &&
897 (!midx || midx != sk->sk_bound_dev_if))
898 break;
900 inet->mc_index = mreq.imr_ifindex;
901 inet->mc_addr = mreq.imr_address.s_addr;
902 err = 0;
903 break;
906 case IP_ADD_MEMBERSHIP:
907 case IP_DROP_MEMBERSHIP:
909 struct ip_mreqn mreq;
911 err = -EPROTO;
912 if (inet_sk(sk)->is_icsk)
913 break;
915 if (optlen < sizeof(struct ip_mreq))
916 goto e_inval;
917 err = -EFAULT;
918 if (optlen >= sizeof(struct ip_mreqn)) {
919 if (copy_from_user(&mreq, optval, sizeof(mreq)))
920 break;
921 } else {
922 memset(&mreq, 0, sizeof(mreq));
923 if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
924 break;
927 if (optname == IP_ADD_MEMBERSHIP)
928 err = ip_mc_join_group(sk, &mreq);
929 else
930 err = ip_mc_leave_group(sk, &mreq);
931 break;
933 case IP_MSFILTER:
935 struct ip_msfilter *msf;
937 if (optlen < IP_MSFILTER_SIZE(0))
938 goto e_inval;
939 if (optlen > sysctl_optmem_max) {
940 err = -ENOBUFS;
941 break;
943 msf = memdup_user(optval, optlen);
944 if (IS_ERR(msf)) {
945 err = PTR_ERR(msf);
946 break;
948 /* numsrc >= (1G-4) overflow in 32 bits */
949 if (msf->imsf_numsrc >= 0x3ffffffcU ||
950 msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
951 kfree(msf);
952 err = -ENOBUFS;
953 break;
955 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
956 kfree(msf);
957 err = -EINVAL;
958 break;
960 err = ip_mc_msfilter(sk, msf, 0);
961 kfree(msf);
962 break;
964 case IP_BLOCK_SOURCE:
965 case IP_UNBLOCK_SOURCE:
966 case IP_ADD_SOURCE_MEMBERSHIP:
967 case IP_DROP_SOURCE_MEMBERSHIP:
969 struct ip_mreq_source mreqs;
970 int omode, add;
972 if (optlen != sizeof(struct ip_mreq_source))
973 goto e_inval;
974 if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
975 err = -EFAULT;
976 break;
978 if (optname == IP_BLOCK_SOURCE) {
979 omode = MCAST_EXCLUDE;
980 add = 1;
981 } else if (optname == IP_UNBLOCK_SOURCE) {
982 omode = MCAST_EXCLUDE;
983 add = 0;
984 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
985 struct ip_mreqn mreq;
987 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
988 mreq.imr_address.s_addr = mreqs.imr_interface;
989 mreq.imr_ifindex = 0;
990 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
991 if (err && err != -EADDRINUSE)
992 break;
993 omode = MCAST_INCLUDE;
994 add = 1;
995 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
996 omode = MCAST_INCLUDE;
997 add = 0;
999 err = ip_mc_source(add, omode, sk, &mreqs, 0);
1000 break;
1002 case MCAST_JOIN_GROUP:
1003 case MCAST_LEAVE_GROUP:
1005 struct group_req greq;
1006 struct sockaddr_in *psin;
1007 struct ip_mreqn mreq;
1009 if (optlen < sizeof(struct group_req))
1010 goto e_inval;
1011 err = -EFAULT;
1012 if (copy_from_user(&greq, optval, sizeof(greq)))
1013 break;
1014 psin = (struct sockaddr_in *)&greq.gr_group;
1015 if (psin->sin_family != AF_INET)
1016 goto e_inval;
1017 memset(&mreq, 0, sizeof(mreq));
1018 mreq.imr_multiaddr = psin->sin_addr;
1019 mreq.imr_ifindex = greq.gr_interface;
1021 if (optname == MCAST_JOIN_GROUP)
1022 err = ip_mc_join_group(sk, &mreq);
1023 else
1024 err = ip_mc_leave_group(sk, &mreq);
1025 break;
1027 case MCAST_JOIN_SOURCE_GROUP:
1028 case MCAST_LEAVE_SOURCE_GROUP:
1029 case MCAST_BLOCK_SOURCE:
1030 case MCAST_UNBLOCK_SOURCE:
1032 struct group_source_req greqs;
1033 struct ip_mreq_source mreqs;
1034 struct sockaddr_in *psin;
1035 int omode, add;
1037 if (optlen != sizeof(struct group_source_req))
1038 goto e_inval;
1039 if (copy_from_user(&greqs, optval, sizeof(greqs))) {
1040 err = -EFAULT;
1041 break;
1043 if (greqs.gsr_group.ss_family != AF_INET ||
1044 greqs.gsr_source.ss_family != AF_INET) {
1045 err = -EADDRNOTAVAIL;
1046 break;
1048 psin = (struct sockaddr_in *)&greqs.gsr_group;
1049 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
1050 psin = (struct sockaddr_in *)&greqs.gsr_source;
1051 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
1052 mreqs.imr_interface = 0; /* use index for mc_source */
1054 if (optname == MCAST_BLOCK_SOURCE) {
1055 omode = MCAST_EXCLUDE;
1056 add = 1;
1057 } else if (optname == MCAST_UNBLOCK_SOURCE) {
1058 omode = MCAST_EXCLUDE;
1059 add = 0;
1060 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
1061 struct ip_mreqn mreq;
1063 psin = (struct sockaddr_in *)&greqs.gsr_group;
1064 mreq.imr_multiaddr = psin->sin_addr;
1065 mreq.imr_address.s_addr = 0;
1066 mreq.imr_ifindex = greqs.gsr_interface;
1067 err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
1068 if (err && err != -EADDRINUSE)
1069 break;
1070 greqs.gsr_interface = mreq.imr_ifindex;
1071 omode = MCAST_INCLUDE;
1072 add = 1;
1073 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
1074 omode = MCAST_INCLUDE;
1075 add = 0;
1077 err = ip_mc_source(add, omode, sk, &mreqs,
1078 greqs.gsr_interface);
1079 break;
1081 case MCAST_MSFILTER:
1083 struct sockaddr_in *psin;
1084 struct ip_msfilter *msf = NULL;
1085 struct group_filter *gsf = NULL;
1086 int msize, i, ifindex;
1088 if (optlen < GROUP_FILTER_SIZE(0))
1089 goto e_inval;
1090 if (optlen > sysctl_optmem_max) {
1091 err = -ENOBUFS;
1092 break;
1094 gsf = memdup_user(optval, optlen);
1095 if (IS_ERR(gsf)) {
1096 err = PTR_ERR(gsf);
1097 break;
1100 /* numsrc >= (4G-140)/128 overflow in 32 bits */
1101 if (gsf->gf_numsrc >= 0x1ffffff ||
1102 gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
1103 err = -ENOBUFS;
1104 goto mc_msf_out;
1106 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
1107 err = -EINVAL;
1108 goto mc_msf_out;
1110 msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
1111 msf = kmalloc(msize, GFP_KERNEL);
1112 if (!msf) {
1113 err = -ENOBUFS;
1114 goto mc_msf_out;
1116 ifindex = gsf->gf_interface;
1117 psin = (struct sockaddr_in *)&gsf->gf_group;
1118 if (psin->sin_family != AF_INET) {
1119 err = -EADDRNOTAVAIL;
1120 goto mc_msf_out;
1122 msf->imsf_multiaddr = psin->sin_addr.s_addr;
1123 msf->imsf_interface = 0;
1124 msf->imsf_fmode = gsf->gf_fmode;
1125 msf->imsf_numsrc = gsf->gf_numsrc;
1126 err = -EADDRNOTAVAIL;
1127 for (i = 0; i < gsf->gf_numsrc; ++i) {
1128 psin = (struct sockaddr_in *)&gsf->gf_slist[i];
1130 if (psin->sin_family != AF_INET)
1131 goto mc_msf_out;
1132 msf->imsf_slist[i] = psin->sin_addr.s_addr;
1134 kfree(gsf);
1135 gsf = NULL;
1137 err = ip_mc_msfilter(sk, msf, ifindex);
1138 mc_msf_out:
1139 kfree(msf);
1140 kfree(gsf);
1141 break;
1143 case IP_MULTICAST_ALL:
1144 if (optlen < 1)
1145 goto e_inval;
1146 if (val != 0 && val != 1)
1147 goto e_inval;
1148 inet->mc_all = val;
1149 break;
1151 case IP_FREEBIND:
1152 if (optlen < 1)
1153 goto e_inval;
1154 inet->freebind = !!val;
1155 break;
1157 case IP_IPSEC_POLICY:
1158 case IP_XFRM_POLICY:
1159 err = -EPERM;
1160 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1161 break;
1162 err = xfrm_user_policy(sk, optname, optval, optlen);
1163 break;
1165 case IP_TRANSPARENT:
1166 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1167 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1168 err = -EPERM;
1169 break;
1171 if (optlen < 1)
1172 goto e_inval;
1173 inet->transparent = !!val;
1174 break;
1176 case IP_MINTTL:
1177 if (optlen < 1)
1178 goto e_inval;
1179 if (val < 0 || val > 255)
1180 goto e_inval;
1181 inet->min_ttl = val;
1182 break;
1184 default:
1185 err = -ENOPROTOOPT;
1186 break;
1188 release_sock(sk);
1189 if (needs_rtnl)
1190 rtnl_unlock();
1191 return err;
1193 e_inval:
1194 release_sock(sk);
1195 if (needs_rtnl)
1196 rtnl_unlock();
1197 return -EINVAL;
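The switch above is reached through plain setsockopt() calls at the SOL_IP level. The following hedged sketch sets a few of the simple integer options handled there (IP_TOS, IP_TTL, IP_MTU_DISCOVER, IP_FREEBIND); as the IP_TOS case shows, on SOCK_STREAM sockets the ECN bits are preserved by the kernel. Error handling is omitted.

#include <netinet/in.h>
#include <netinet/ip.h>		/* IPTOS_* */
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int tos = IPTOS_LOWDELAY;	/* also updates sk->sk_priority */
	int ttl = 64;			/* -1 would restore the sysctl default */
	int pmtu = IP_PMTUDISC_DO;	/* always set DF, never fragment locally */
	int one = 1;

	setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
	setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu));
	setsockopt(fd, IPPROTO_IP, IP_FREEBIND, &one, sizeof(one));

	close(fd);
	return 0;
}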
1200 /**
1201 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1202 * @sk: socket
1203 * @skb: buffer
1204 *
1205 * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
1206 * destination in skb->cb[] before dst drop.
1207 * This way, receiver doesn't make cache line misses to read rtable.
1208 */
1209 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1211 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1212 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
1213 ipv6_sk_rxinfo(sk);
1215 if (prepare && skb_rtable(skb)) {
1216 /* skb->cb is overloaded: prior to this point it is IP{6}CB
1217 * which has interface index (iif) as the first member of the
1218 * underlying inet{6}_skb_parm struct. This code then overlays
1219 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
1220 * element so the iif is picked up from the prior IPCB. If iif
1221 * is the loopback interface, then return the sending interface
1222 * (e.g., process binds socket to eth0 for Tx which is
1223 * redirected to loopback in the rtable/dst).
1224 */
1225 struct rtable *rt = skb_rtable(skb);
1226 bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags);
1228 if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
1229 pktinfo->ipi_ifindex = inet_iif(skb);
1230 else if (l3slave && rt && rt->rt_iif)
1231 pktinfo->ipi_ifindex = rt->rt_iif;
1233 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1234 } else {
1235 pktinfo->ipi_ifindex = 0;
1236 pktinfo->ipi_spec_dst.s_addr = 0;
1238 skb_dst_drop(skb);
1241 int ip_setsockopt(struct sock *sk, int level,
1242 int optname, char __user *optval, unsigned int optlen)
1244 int err;
1246 if (level != SOL_IP)
1247 return -ENOPROTOOPT;
1249 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1250 #ifdef CONFIG_BPFILTER
1251 if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
1252 optname < BPFILTER_IPT_SET_MAX)
1253 err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
1254 #endif
1255 #ifdef CONFIG_NETFILTER
1256 /* we need to exclude all possible ENOPROTOOPTs except default case */
1257 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1258 optname != IP_IPSEC_POLICY &&
1259 optname != IP_XFRM_POLICY &&
1260 !ip_mroute_opt(optname))
1261 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1262 #endif
1263 return err;
1265 EXPORT_SYMBOL(ip_setsockopt);
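The IP_ADD_MEMBERSHIP/IP_DROP_MEMBERSHIP cases handled above accept either a struct ip_mreq or the newer struct ip_mreqn with an explicit interface index. A minimal userspace sketch of joining and leaving a group is shown here; the group address and interface name are placeholders and error handling is trimmed.

#define _GNU_SOURCE			/* struct ip_mreqn on some libcs */
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ip_mreqn mreq;

	memset(&mreq, 0, sizeof(mreq));
	inet_pton(AF_INET, "239.1.2.3", &mreq.imr_multiaddr);	/* placeholder group */
	mreq.imr_ifindex = if_nametoindex("eth0");		/* 0 lets routing pick */

	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
		return 1;

	/* ... bind() to the group's port and recvfrom() datagrams ... */

	setsockopt(fd, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
	close(fd);
	return 0;
}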
1267 #ifdef CONFIG_COMPAT
1268 int compat_ip_setsockopt(struct sock *sk, int level, int optname,
1269 char __user *optval, unsigned int optlen)
1271 int err;
1273 if (level != SOL_IP)
1274 return -ENOPROTOOPT;
1276 if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
1277 return compat_mc_setsockopt(sk, level, optname, optval, optlen,
1278 ip_setsockopt);
1280 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1281 #ifdef CONFIG_NETFILTER
1282 /* we need to exclude all possible ENOPROTOOPTs except default case */
1283 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1284 optname != IP_IPSEC_POLICY &&
1285 optname != IP_XFRM_POLICY &&
1286 !ip_mroute_opt(optname))
1287 err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
1288 optlen);
1289 #endif
1290 return err;
1292 EXPORT_SYMBOL(compat_ip_setsockopt);
1293 #endif
1295 /*
1296 * Get the options. Note for future reference. The GET of IP options gets
1297 * the _received_ ones. The set sets the _sent_ ones.
1298 */
1300 static bool getsockopt_needs_rtnl(int optname)
1302 switch (optname) {
1303 case IP_MSFILTER:
1304 case MCAST_MSFILTER:
1305 return true;
1307 return false;
1310 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1311 char __user *optval, int __user *optlen, unsigned int flags)
1313 struct inet_sock *inet = inet_sk(sk);
1314 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1315 int val, err = 0;
1316 int len;
1318 if (level != SOL_IP)
1319 return -EOPNOTSUPP;
1321 if (ip_mroute_opt(optname))
1322 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1324 if (get_user(len, optlen))
1325 return -EFAULT;
1326 if (len < 0)
1327 return -EINVAL;
1329 if (needs_rtnl)
1330 rtnl_lock();
1331 lock_sock(sk);
1333 switch (optname) {
1334 case IP_OPTIONS:
1336 unsigned char optbuf[sizeof(struct ip_options)+40];
1337 struct ip_options *opt = (struct ip_options *)optbuf;
1338 struct ip_options_rcu *inet_opt;
1340 inet_opt = rcu_dereference_protected(inet->inet_opt,
1341 lockdep_sock_is_held(sk));
1342 opt->optlen = 0;
1343 if (inet_opt)
1344 memcpy(optbuf, &inet_opt->opt,
1345 sizeof(struct ip_options) +
1346 inet_opt->opt.optlen);
1347 release_sock(sk);
1349 if (opt->optlen == 0)
1350 return put_user(0, optlen);
1352 ip_options_undo(opt);
1354 len = min_t(unsigned int, len, opt->optlen);
1355 if (put_user(len, optlen))
1356 return -EFAULT;
1357 if (copy_to_user(optval, opt->__data, len))
1358 return -EFAULT;
1359 return 0;
1361 case IP_PKTINFO:
1362 val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
1363 break;
1364 case IP_RECVTTL:
1365 val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
1366 break;
1367 case IP_RECVTOS:
1368 val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
1369 break;
1370 case IP_RECVOPTS:
1371 val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
1372 break;
1373 case IP_RETOPTS:
1374 val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
1375 break;
1376 case IP_PASSSEC:
1377 val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
1378 break;
1379 case IP_RECVORIGDSTADDR:
1380 val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
1381 break;
1382 case IP_CHECKSUM:
1383 val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
1384 break;
1385 case IP_RECVFRAGSIZE:
1386 val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0;
1387 break;
1388 case IP_TOS:
1389 val = inet->tos;
1390 break;
1391 case IP_TTL:
1393 struct net *net = sock_net(sk);
1394 val = (inet->uc_ttl == -1 ?
1395 net->ipv4.sysctl_ip_default_ttl :
1396 inet->uc_ttl);
1397 break;
1399 case IP_HDRINCL:
1400 val = inet->hdrincl;
1401 break;
1402 case IP_NODEFRAG:
1403 val = inet->nodefrag;
1404 break;
1405 case IP_BIND_ADDRESS_NO_PORT:
1406 val = inet->bind_address_no_port;
1407 break;
1408 case IP_MTU_DISCOVER:
1409 val = inet->pmtudisc;
1410 break;
1411 case IP_MTU:
1413 struct dst_entry *dst;
1414 val = 0;
1415 dst = sk_dst_get(sk);
1416 if (dst) {
1417 val = dst_mtu(dst);
1418 dst_release(dst);
1420 if (!val) {
1421 release_sock(sk);
1422 return -ENOTCONN;
1424 break;
1426 case IP_RECVERR:
1427 val = inet->recverr;
1428 break;
1429 case IP_MULTICAST_TTL:
1430 val = inet->mc_ttl;
1431 break;
1432 case IP_MULTICAST_LOOP:
1433 val = inet->mc_loop;
1434 break;
1435 case IP_UNICAST_IF:
1436 val = (__force int)htonl((__u32) inet->uc_index);
1437 break;
1438 case IP_MULTICAST_IF:
1440 struct in_addr addr;
1441 len = min_t(unsigned int, len, sizeof(struct in_addr));
1442 addr.s_addr = inet->mc_addr;
1443 release_sock(sk);
1445 if (put_user(len, optlen))
1446 return -EFAULT;
1447 if (copy_to_user(optval, &addr, len))
1448 return -EFAULT;
1449 return 0;
1451 case IP_MSFILTER:
1453 struct ip_msfilter msf;
1455 if (len < IP_MSFILTER_SIZE(0)) {
1456 err = -EINVAL;
1457 goto out;
1459 if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
1460 err = -EFAULT;
1461 goto out;
1463 err = ip_mc_msfget(sk, &msf,
1464 (struct ip_msfilter __user *)optval, optlen);
1465 goto out;
1467 case MCAST_MSFILTER:
1469 struct group_filter gsf;
1471 if (len < GROUP_FILTER_SIZE(0)) {
1472 err = -EINVAL;
1473 goto out;
1475 if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
1476 err = -EFAULT;
1477 goto out;
1479 err = ip_mc_gsfget(sk, &gsf,
1480 (struct group_filter __user *)optval,
1481 optlen);
1482 goto out;
1484 case IP_MULTICAST_ALL:
1485 val = inet->mc_all;
1486 break;
1487 case IP_PKTOPTIONS:
1489 struct msghdr msg;
1491 release_sock(sk);
1493 if (sk->sk_type != SOCK_STREAM)
1494 return -ENOPROTOOPT;
1496 msg.msg_control = (__force void *) optval;
1497 msg.msg_controllen = len;
1498 msg.msg_flags = flags;
1500 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1501 struct in_pktinfo info;
1503 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1504 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1505 info.ipi_ifindex = inet->mc_index;
1506 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1508 if (inet->cmsg_flags & IP_CMSG_TTL) {
1509 int hlim = inet->mc_ttl;
1510 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1512 if (inet->cmsg_flags & IP_CMSG_TOS) {
1513 int tos = inet->rcv_tos;
1514 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1516 len -= msg.msg_controllen;
1517 return put_user(len, optlen);
1519 case IP_FREEBIND:
1520 val = inet->freebind;
1521 break;
1522 case IP_TRANSPARENT:
1523 val = inet->transparent;
1524 break;
1525 case IP_MINTTL:
1526 val = inet->min_ttl;
1527 break;
1528 default:
1529 release_sock(sk);
1530 return -ENOPROTOOPT;
1532 release_sock(sk);
1534 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1535 unsigned char ucval = (unsigned char)val;
1536 len = 1;
1537 if (put_user(len, optlen))
1538 return -EFAULT;
1539 if (copy_to_user(optval, &ucval, 1))
1540 return -EFAULT;
1541 } else {
1542 len = min_t(unsigned int, sizeof(int), len);
1543 if (put_user(len, optlen))
1544 return -EFAULT;
1545 if (copy_to_user(optval, &val, len))
1546 return -EFAULT;
1548 return 0;
1550 out:
1551 release_sock(sk);
1552 if (needs_rtnl)
1553 rtnl_unlock();
1554 return err;
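do_ip_getsockopt() is the read side. Two details worth noting: the IP_MTU case only works on a socket with a cached route and otherwise returns -ENOTCONN, and the tail of the function shrinks the result to a single byte when the caller passed optlen < sizeof(int). A sketch of reading IP_TTL and IP_MTU from userspace follows; the destination address is a placeholder, and connecting a UDP socket is enough to populate the route without sending any traffic.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(9000) };
	int ttl, mtu;
	socklen_t len;

	len = sizeof(ttl);
	getsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, &len);
	printf("unicast ttl: %d\n", ttl);	/* sysctl default until IP_TTL is set */

	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* placeholder address */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == 0) {
		/* IP_MTU needs a cached route, hence the connect() above. */
		len = sizeof(mtu);
		if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
			printf("path mtu: %d\n", mtu);
	}
	close(fd);
	return 0;
}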
1557 int ip_getsockopt(struct sock *sk, int level,
1558 int optname, char __user *optval, int __user *optlen)
1560 int err;
1562 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
1563 #ifdef CONFIG_BPFILTER
1564 if (optname >= BPFILTER_IPT_SO_GET_INFO &&
1565 optname < BPFILTER_IPT_GET_MAX)
1566 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
1567 #endif
1568 #ifdef CONFIG_NETFILTER
1569 /* we need to exclude all possible ENOPROTOOPTs except default case */
1570 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1571 !ip_mroute_opt(optname)) {
1572 int len;
1574 if (get_user(len, optlen))
1575 return -EFAULT;
1577 err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
1578 if (err >= 0)
1579 err = put_user(len, optlen);
1580 return err;
1582 #endif
1583 return err;
1585 EXPORT_SYMBOL(ip_getsockopt);
1587 #ifdef CONFIG_COMPAT
1588 int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1589 char __user *optval, int __user *optlen)
1591 int err;
1593 if (optname == MCAST_MSFILTER)
1594 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1595 ip_getsockopt);
1597 err = do_ip_getsockopt(sk, level, optname, optval, optlen,
1598 MSG_CMSG_COMPAT);
1600 #ifdef CONFIG_BPFILTER
1601 if (optname >= BPFILTER_IPT_SO_GET_INFO &&
1602 optname < BPFILTER_IPT_GET_MAX)
1603 err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
1604 #endif
1605 #ifdef CONFIG_NETFILTER
1606 /* we need to exclude all possible ENOPROTOOPTs except default case */
1607 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1608 !ip_mroute_opt(optname)) {
1609 int len;
1611 if (get_user(len, optlen))
1612 return -EFAULT;
1614 err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
1615 if (err >= 0)
1616 err = put_user(len, optlen);
1617 return err;
1619 #endif
1620 return err;
1622 EXPORT_SYMBOL(compat_ip_getsockopt);
1623 #endif