net/ipv4/ip_sockglue.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * The IP to API glue.
8 * Authors: see ip.c
10 * Fixes:
11 * Many : Split from ip.c, see ip.c for history.
12 * Martin Mares : TOS setting fixed.
13 * Alan Cox : Fixed a couple of oopses in Martin's
14 * TOS tweaks.
15 * Mike McLagan : Routing by source
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/mm.h>
21 #include <linux/skbuff.h>
22 #include <linux/ip.h>
23 #include <linux/icmp.h>
24 #include <linux/inetdevice.h>
25 #include <linux/netdevice.h>
26 #include <linux/slab.h>
27 #include <net/sock.h>
28 #include <net/ip.h>
29 #include <net/icmp.h>
30 #include <net/tcp_states.h>
31 #include <linux/udp.h>
32 #include <linux/igmp.h>
33 #include <linux/netfilter.h>
34 #include <linux/route.h>
35 #include <linux/mroute.h>
36 #include <net/inet_ecn.h>
37 #include <net/route.h>
38 #include <net/xfrm.h>
39 #include <net/compat.h>
40 #include <net/checksum.h>
41 #if IS_ENABLED(CONFIG_IPV6)
42 #include <net/transp_v6.h>
43 #endif
44 #include <net/ip_fib.h>
46 #include <linux/errqueue.h>
47 #include <asm/uaccess.h>
50 * SOL_IP control messages.
53 static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
55 struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
57 info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
59 put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
62 static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
64 int ttl = ip_hdr(skb)->ttl;
65 put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
68 static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
70 put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
73 static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
75 if (IPCB(skb)->opt.optlen == 0)
76 return;
78 put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
79 ip_hdr(skb) + 1);
83 static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
85 unsigned char optbuf[sizeof(struct ip_options) + 40];
86 struct ip_options *opt = (struct ip_options *)optbuf;
88 if (IPCB(skb)->opt.optlen == 0)
89 return;
91 if (ip_options_echo(opt, skb)) {
92 msg->msg_flags |= MSG_CTRUNC;
93 return;
95 ip_options_undo(opt);
97 put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
100 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
101 int offset)
103 __wsum csum = skb->csum;
105 if (skb->ip_summed != CHECKSUM_COMPLETE)
106 return;
108 if (offset != 0)
109 csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
111 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
114 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
116 char *secdata;
117 u32 seclen, secid;
118 int err;
120 err = security_socket_getpeersec_dgram(NULL, skb, &secid);
121 if (err)
122 return;
124 err = security_secid_to_secctx(secid, &secdata, &seclen);
125 if (err)
126 return;
128 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
129 security_release_secctx(secdata, seclen);
132 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
134 struct sockaddr_in sin;
135 const struct iphdr *iph = ip_hdr(skb);
136 __be16 *ports = (__be16 *)skb_transport_header(skb);
138 if (skb_transport_offset(skb) + 4 > skb->len)
139 return;
141 /* All current transport protocols have the port numbers in the
142 * first four bytes of the transport header and this function is
143 * written with this assumption in mind.
146 sin.sin_family = AF_INET;
147 sin.sin_addr.s_addr = iph->daddr;
148 sin.sin_port = ports[1];
149 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
151 put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
154 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
155 int offset)
157 struct inet_sock *inet = inet_sk(skb->sk);
158 unsigned int flags = inet->cmsg_flags;
160 /* Ordered by supposed usage frequency */
161 if (flags & IP_CMSG_PKTINFO) {
162 ip_cmsg_recv_pktinfo(msg, skb);
164 flags &= ~IP_CMSG_PKTINFO;
165 if (!flags)
166 return;
169 if (flags & IP_CMSG_TTL) {
170 ip_cmsg_recv_ttl(msg, skb);
172 flags &= ~IP_CMSG_TTL;
173 if (!flags)
174 return;
177 if (flags & IP_CMSG_TOS) {
178 ip_cmsg_recv_tos(msg, skb);
180 flags &= ~IP_CMSG_TOS;
181 if (!flags)
182 return;
185 if (flags & IP_CMSG_RECVOPTS) {
186 ip_cmsg_recv_opts(msg, skb);
188 flags &= ~IP_CMSG_RECVOPTS;
189 if (!flags)
190 return;
193 if (flags & IP_CMSG_RETOPTS) {
194 ip_cmsg_recv_retopts(msg, skb);
196 flags &= ~IP_CMSG_RETOPTS;
197 if (!flags)
198 return;
201 if (flags & IP_CMSG_PASSSEC) {
202 ip_cmsg_recv_security(msg, skb);
204 flags &= ~IP_CMSG_PASSSEC;
205 if (!flags)
206 return;
209 if (flags & IP_CMSG_ORIGDSTADDR) {
210 ip_cmsg_recv_dstaddr(msg, skb);
212 flags &= ~IP_CMSG_ORIGDSTADDR;
213 if (!flags)
214 return;
217 if (flags & IP_CMSG_CHECKSUM)
218 ip_cmsg_recv_checksum(msg, skb, offset);
220 EXPORT_SYMBOL(ip_cmsg_recv_offset);
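
For reference, the ancillary data built by the helpers above is consumed from user space roughly as follows. This is a minimal sketch, not part of this file; the port number is an arbitrary example and most error handling is omitted.

#define _GNU_SOURCE		/* struct in_pktinfo on glibc */
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(5555),	/* example port */
				    .sin_addr.s_addr = htonl(INADDR_ANY) };
	char data[2048], cbuf[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *cm;

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));	/* -> IP_CMSG_PKTINFO */
	setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &one, sizeof(one));	/* -> IP_CMSG_TTL */

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	/* Walk the control messages filled in by ip_cmsg_recv_offset(). */
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level != IPPROTO_IP)	/* SOL_IP */
			continue;
		if (cm->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo pi;

			memcpy(&pi, CMSG_DATA(cm), sizeof(pi));
			printf("ifindex %d\n", pi.ipi_ifindex);
		} else if (cm->cmsg_type == IP_TTL) {
			int ttl;

			memcpy(&ttl, CMSG_DATA(cm), sizeof(ttl));
			printf("ttl %d\n", ttl);
		}
	}
	return 0;
}
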
222 int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
223 bool allow_ipv6)
225 int err, val;
226 struct cmsghdr *cmsg;
228 for_each_cmsghdr(cmsg, msg) {
229 if (!CMSG_OK(msg, cmsg))
230 return -EINVAL;
231 #if IS_ENABLED(CONFIG_IPV6)
232 if (allow_ipv6 &&
233 cmsg->cmsg_level == SOL_IPV6 &&
234 cmsg->cmsg_type == IPV6_PKTINFO) {
235 struct in6_pktinfo *src_info;
237 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
238 return -EINVAL;
239 src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
240 if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
241 return -EINVAL;
242 ipc->oif = src_info->ipi6_ifindex;
243 ipc->addr = src_info->ipi6_addr.s6_addr32[3];
244 continue;
246 #endif
247 if (cmsg->cmsg_level != SOL_IP)
248 continue;
249 switch (cmsg->cmsg_type) {
250 case IP_RETOPTS:
251 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
253 /* Our caller is responsible for freeing ipc->opt */
254 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
255 err < 40 ? err : 40);
256 if (err)
257 return err;
258 break;
259 case IP_PKTINFO:
261 struct in_pktinfo *info;
262 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
263 return -EINVAL;
264 info = (struct in_pktinfo *)CMSG_DATA(cmsg);
265 ipc->oif = info->ipi_ifindex;
266 ipc->addr = info->ipi_spec_dst.s_addr;
267 break;
269 case IP_TTL:
270 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
271 return -EINVAL;
272 val = *(int *)CMSG_DATA(cmsg);
273 if (val < 1 || val > 255)
274 return -EINVAL;
275 ipc->ttl = val;
276 break;
277 case IP_TOS:
278 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
279 return -EINVAL;
280 val = *(int *)CMSG_DATA(cmsg);
281 if (val < 0 || val > 255)
282 return -EINVAL;
283 ipc->tos = val;
284 ipc->priority = rt_tos2priority(ipc->tos);
285 break;
287 default:
288 return -EINVAL;
291 return 0;
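
Conversely, this is roughly how a sender hands ancillary data to ip_cmsg_send(). A hedged userspace sketch: the helper name send_with_ttl and the destination 192.0.2.1:9999 (a documentation address and arbitrary port) are made up for illustration, and the TTL value is range-checked (1..255) by the IP_TTL case above.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <string.h>

/* Send one datagram with a per-packet TTL carried as a SOL_IP cmsg. */
static ssize_t send_with_ttl(int fd, const void *buf, size_t len, int ttl)
{
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(9999) };
	union {					/* aligned cmsg buffer */
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = { .msg_name = &dst, .msg_namelen = sizeof(dst),
			      .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = u.buf, .msg_controllen = sizeof(u.buf) };
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	cm->cmsg_level = IPPROTO_IP;			/* SOL_IP, parsed above */
	cm->cmsg_type = IP_TTL;				/* must be CMSG_LEN(sizeof(int)) */
	cm->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cm), &ttl, sizeof(ttl));

	return sendmsg(fd, &msg, 0);
}
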
295 /* Special input handler for packets caught by the router alert option.
296 They are selected only by the protocol field and then processed like
297 local ones, but only if someone wants them! Otherwise, a router that is
298 not running rsvpd will kill RSVP.
300 What user level does with them is its own problem.
301 I have no idea how it would masquerade or NAT them (it is a joke, joke :-)),
302 but the receiver should be clever enough, e.g., to forward mtrace requests
303 sent to a multicast group so that they reach the destination's designated router.
305 struct ip_ra_chain __rcu *ip_ra_chain;
306 static DEFINE_SPINLOCK(ip_ra_lock);
309 static void ip_ra_destroy_rcu(struct rcu_head *head)
311 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
313 sock_put(ra->saved_sk);
314 kfree(ra);
317 int ip_ra_control(struct sock *sk, unsigned char on,
318 void (*destructor)(struct sock *))
320 struct ip_ra_chain *ra, *new_ra;
321 struct ip_ra_chain __rcu **rap;
323 if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
324 return -EINVAL;
326 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
328 spin_lock_bh(&ip_ra_lock);
329 for (rap = &ip_ra_chain;
330 (ra = rcu_dereference_protected(*rap,
331 lockdep_is_held(&ip_ra_lock))) != NULL;
332 rap = &ra->next) {
333 if (ra->sk == sk) {
334 if (on) {
335 spin_unlock_bh(&ip_ra_lock);
336 kfree(new_ra);
337 return -EADDRINUSE;
339 /* don't let ip_call_ra_chain() use sk again */
340 ra->sk = NULL;
341 RCU_INIT_POINTER(*rap, ra->next);
342 spin_unlock_bh(&ip_ra_lock);
344 if (ra->destructor)
345 ra->destructor(sk);
347 * Delay sock_put(sk) and kfree(ra) by one RCU grace
348 * period. This guarantees that ip_call_ra_chain() does not need
349 * to mess with socket refcounts.
351 ra->saved_sk = sk;
352 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
353 return 0;
356 if (!new_ra) {
357 spin_unlock_bh(&ip_ra_lock);
358 return -ENOBUFS;
360 new_ra->sk = sk;
361 new_ra->destructor = destructor;
363 RCU_INIT_POINTER(new_ra->next, ra);
364 rcu_assign_pointer(*rap, new_ra);
365 sock_hold(sk);
366 spin_unlock_bh(&ip_ra_lock);
368 return 0;
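
As the check at the top of ip_ra_control() shows, only a SOCK_RAW socket whose protocol is not IPPROTO_RAW can register here. A minimal userspace sketch of the classic user, an RSVP daemon (helper name made up; requires CAP_NET_RAW):

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_rsvp_socket(void)
{
	int on = 1;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);	/* proto 46, not IPPROTO_RAW */

	if (fd < 0)
		return -1;
	/* Ends up in ip_ra_control(sk, 1, NULL) via the IP_ROUTER_ALERT case below. */
	if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
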
371 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
372 __be16 port, u32 info, u8 *payload)
374 struct sock_exterr_skb *serr;
376 skb = skb_clone(skb, GFP_ATOMIC);
377 if (!skb)
378 return;
380 serr = SKB_EXT_ERR(skb);
381 serr->ee.ee_errno = err;
382 serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
383 serr->ee.ee_type = icmp_hdr(skb)->type;
384 serr->ee.ee_code = icmp_hdr(skb)->code;
385 serr->ee.ee_pad = 0;
386 serr->ee.ee_info = info;
387 serr->ee.ee_data = 0;
388 serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
389 skb_network_header(skb);
390 serr->port = port;
392 if (skb_pull(skb, payload - skb->data)) {
393 skb_reset_transport_header(skb);
394 if (sock_queue_err_skb(sk, skb) == 0)
395 return;
397 kfree_skb(skb);
400 void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
402 struct inet_sock *inet = inet_sk(sk);
403 struct sock_exterr_skb *serr;
404 struct iphdr *iph;
405 struct sk_buff *skb;
407 if (!inet->recverr)
408 return;
410 skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
411 if (!skb)
412 return;
414 skb_put(skb, sizeof(struct iphdr));
415 skb_reset_network_header(skb);
416 iph = ip_hdr(skb);
417 iph->daddr = daddr;
419 serr = SKB_EXT_ERR(skb);
420 serr->ee.ee_errno = err;
421 serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
422 serr->ee.ee_type = 0;
423 serr->ee.ee_code = 0;
424 serr->ee.ee_pad = 0;
425 serr->ee.ee_info = info;
426 serr->ee.ee_data = 0;
427 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
428 serr->port = port;
430 __skb_pull(skb, skb_tail_pointer(skb) - skb->data);
431 skb_reset_transport_header(skb);
433 if (sock_queue_err_skb(sk, skb))
434 kfree_skb(skb);
437 /* For some errors we have valid addr_offset even with zero payload and
438 * zero port. Also, addr_offset should be supported if port is set.
440 static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
442 return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
443 serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
446 /* IPv4 supports cmsg on all ICMP errors and some timestamps.
448 * Timestamp code paths do not initialize the fields expected by cmsg:
449 * the PKTINFO fields in skb->cb[]. Fill those in here.
451 static bool ipv4_datagram_support_cmsg(const struct sock *sk,
452 struct sk_buff *skb,
453 int ee_origin)
455 struct in_pktinfo *info;
457 if (ee_origin == SO_EE_ORIGIN_ICMP)
458 return true;
460 if (ee_origin == SO_EE_ORIGIN_LOCAL)
461 return false;
463 /* Support IP_PKTINFO on tstamp packets if requested, to correlate
464 * timestamp with egress dev. Not possible for packets without dev
465 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
467 if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
468 (!skb->dev))
469 return false;
471 info = PKTINFO_SKB_CB(skb);
472 info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
473 info->ipi_ifindex = skb->dev->ifindex;
474 return true;
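
The SOF_TIMESTAMPING_OPT_CMSG branch above is what makes the following userspace combination useful: requesting TX timestamps together with IP_PKTINFO so each timestamp on the error queue identifies the egress device. A sketch only, assuming fd is an already-created UDP socket and that SO_TIMESTAMPING is exposed by the libc headers; the helper name is made up.

#include <linux/net_tstamp.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int enable_tx_tstamp_pktinfo(int fd)
{
	int one = 1;
	unsigned int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
			     SOF_TIMESTAMPING_SOFTWARE |
			     SOF_TIMESTAMPING_OPT_CMSG;	/* ask for IP_PKTINFO with the tstamp */

	if (setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one)) < 0)
		return -1;
	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
}
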
478 * Handle MSG_ERRQUEUE
480 int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
482 struct sock_exterr_skb *serr;
483 struct sk_buff *skb;
484 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
485 struct {
486 struct sock_extended_err ee;
487 struct sockaddr_in offender;
488 } errhdr;
489 int err;
490 int copied;
492 WARN_ON_ONCE(sk->sk_family == AF_INET6);
494 err = -EAGAIN;
495 skb = sock_dequeue_err_skb(sk);
496 if (!skb)
497 goto out;
499 copied = skb->len;
500 if (copied > len) {
501 msg->msg_flags |= MSG_TRUNC;
502 copied = len;
504 err = skb_copy_datagram_msg(skb, 0, msg, copied);
505 if (err)
506 goto out_free_skb;
508 sock_recv_timestamp(msg, sk, skb);
510 serr = SKB_EXT_ERR(skb);
512 if (sin && ipv4_datagram_support_addr(serr)) {
513 sin->sin_family = AF_INET;
514 sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
515 serr->addr_offset);
516 sin->sin_port = serr->port;
517 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
518 *addr_len = sizeof(*sin);
521 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
522 sin = &errhdr.offender;
523 memset(sin, 0, sizeof(*sin));
525 if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
526 sin->sin_family = AF_INET;
527 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
528 if (inet_sk(sk)->cmsg_flags)
529 ip_cmsg_recv(msg, skb);
532 put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);
534 /* Now we could try to dump the offending packet's options */
536 msg->msg_flags |= MSG_ERRQUEUE;
537 err = copied;
539 out_free_skb:
540 kfree_skb(skb);
541 out:
542 return err;
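
From user space, the queue drained by ip_recv_error() is read with recvmsg(MSG_ERRQUEUE) after enabling IP_RECVERR. A minimal sketch (helper name made up, error handling omitted); note that msg_name receives the original destination address recovered via serr->addr_offset and serr->port.

#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/errqueue.h>
#include <stdio.h>

static void read_errqueue(int fd)
{
	char data[1024], cbuf[512];
	struct sockaddr_in from;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_name = &from, .msg_namelen = sizeof(from),
			      .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);

			/* For SO_EE_ORIGIN_ICMP, ee_type/ee_code carry the ICMP
			 * header fields and SO_EE_OFFENDER(ee) is the address of
			 * the host that generated the error (errhdr.offender).
			 */
			printf("errno %u origin %u type %u code %u\n",
			       ee->ee_errno, ee->ee_origin, ee->ee_type, ee->ee_code);
		}
	}
}
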
547 * Socket option code for IP. This is the end of the line after any
548 * TCP, UDP, etc. options on an IP socket.
550 static bool setsockopt_needs_rtnl(int optname)
552 switch (optname) {
553 case IP_ADD_MEMBERSHIP:
554 case IP_ADD_SOURCE_MEMBERSHIP:
555 case IP_BLOCK_SOURCE:
556 case IP_DROP_MEMBERSHIP:
557 case IP_DROP_SOURCE_MEMBERSHIP:
558 case IP_MSFILTER:
559 case IP_UNBLOCK_SOURCE:
560 case MCAST_BLOCK_SOURCE:
561 case MCAST_MSFILTER:
562 case MCAST_JOIN_GROUP:
563 case MCAST_JOIN_SOURCE_GROUP:
564 case MCAST_LEAVE_GROUP:
565 case MCAST_LEAVE_SOURCE_GROUP:
566 case MCAST_UNBLOCK_SOURCE:
567 return true;
569 return false;
572 static int do_ip_setsockopt(struct sock *sk, int level,
573 int optname, char __user *optval, unsigned int optlen)
575 struct inet_sock *inet = inet_sk(sk);
576 struct net *net = sock_net(sk);
577 int val = 0, err;
578 bool needs_rtnl = setsockopt_needs_rtnl(optname);
580 switch (optname) {
581 case IP_PKTINFO:
582 case IP_RECVTTL:
583 case IP_RECVOPTS:
584 case IP_RECVTOS:
585 case IP_RETOPTS:
586 case IP_TOS:
587 case IP_TTL:
588 case IP_HDRINCL:
589 case IP_MTU_DISCOVER:
590 case IP_RECVERR:
591 case IP_ROUTER_ALERT:
592 case IP_FREEBIND:
593 case IP_PASSSEC:
594 case IP_TRANSPARENT:
595 case IP_MINTTL:
596 case IP_NODEFRAG:
597 case IP_BIND_ADDRESS_NO_PORT:
598 case IP_UNICAST_IF:
599 case IP_MULTICAST_TTL:
600 case IP_MULTICAST_ALL:
601 case IP_MULTICAST_LOOP:
602 case IP_RECVORIGDSTADDR:
603 case IP_CHECKSUM:
604 if (optlen >= sizeof(int)) {
605 if (get_user(val, (int __user *) optval))
606 return -EFAULT;
607 } else if (optlen >= sizeof(char)) {
608 unsigned char ucval;
610 if (get_user(ucval, (unsigned char __user *) optval))
611 return -EFAULT;
612 val = (int) ucval;
616 /* If optlen==0, it is equivalent to val == 0 */
618 if (ip_mroute_opt(optname))
619 return ip_mroute_setsockopt(sk, optname, optval, optlen);
621 err = 0;
622 if (needs_rtnl)
623 rtnl_lock();
624 lock_sock(sk);
626 switch (optname) {
627 case IP_OPTIONS:
629 struct ip_options_rcu *old, *opt = NULL;
631 if (optlen > 40)
632 goto e_inval;
633 err = ip_options_get_from_user(sock_net(sk), &opt,
634 optval, optlen);
635 if (err)
636 break;
637 old = rcu_dereference_protected(inet->inet_opt,
638 sock_owned_by_user(sk));
639 if (inet->is_icsk) {
640 struct inet_connection_sock *icsk = inet_csk(sk);
641 #if IS_ENABLED(CONFIG_IPV6)
642 if (sk->sk_family == PF_INET ||
643 (!((1 << sk->sk_state) &
644 (TCPF_LISTEN | TCPF_CLOSE)) &&
645 inet->inet_daddr != LOOPBACK4_IPV6)) {
646 #endif
647 if (old)
648 icsk->icsk_ext_hdr_len -= old->opt.optlen;
649 if (opt)
650 icsk->icsk_ext_hdr_len += opt->opt.optlen;
651 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
652 #if IS_ENABLED(CONFIG_IPV6)
654 #endif
656 rcu_assign_pointer(inet->inet_opt, opt);
657 if (old)
658 kfree_rcu(old, rcu);
659 break;
661 case IP_PKTINFO:
662 if (val)
663 inet->cmsg_flags |= IP_CMSG_PKTINFO;
664 else
665 inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
666 break;
667 case IP_RECVTTL:
668 if (val)
669 inet->cmsg_flags |= IP_CMSG_TTL;
670 else
671 inet->cmsg_flags &= ~IP_CMSG_TTL;
672 break;
673 case IP_RECVTOS:
674 if (val)
675 inet->cmsg_flags |= IP_CMSG_TOS;
676 else
677 inet->cmsg_flags &= ~IP_CMSG_TOS;
678 break;
679 case IP_RECVOPTS:
680 if (val)
681 inet->cmsg_flags |= IP_CMSG_RECVOPTS;
682 else
683 inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
684 break;
685 case IP_RETOPTS:
686 if (val)
687 inet->cmsg_flags |= IP_CMSG_RETOPTS;
688 else
689 inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
690 break;
691 case IP_PASSSEC:
692 if (val)
693 inet->cmsg_flags |= IP_CMSG_PASSSEC;
694 else
695 inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
696 break;
697 case IP_RECVORIGDSTADDR:
698 if (val)
699 inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
700 else
701 inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
702 break;
703 case IP_CHECKSUM:
704 if (val) {
705 if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
706 inet_inc_convert_csum(sk);
707 inet->cmsg_flags |= IP_CMSG_CHECKSUM;
709 } else {
710 if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
711 inet_dec_convert_csum(sk);
712 inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
715 break;
716 case IP_TOS: /* This sets both TOS and Precedence */
717 if (sk->sk_type == SOCK_STREAM) {
718 val &= ~INET_ECN_MASK;
719 val |= inet->tos & INET_ECN_MASK;
721 if (inet->tos != val) {
722 inet->tos = val;
723 sk->sk_priority = rt_tos2priority(val);
724 sk_dst_reset(sk);
726 break;
727 case IP_TTL:
728 if (optlen < 1)
729 goto e_inval;
730 if (val != -1 && (val < 1 || val > 255))
731 goto e_inval;
732 inet->uc_ttl = val;
733 break;
734 case IP_HDRINCL:
735 if (sk->sk_type != SOCK_RAW) {
736 err = -ENOPROTOOPT;
737 break;
739 inet->hdrincl = val ? 1 : 0;
740 break;
741 case IP_NODEFRAG:
742 if (sk->sk_type != SOCK_RAW) {
743 err = -ENOPROTOOPT;
744 break;
746 inet->nodefrag = val ? 1 : 0;
747 break;
748 case IP_BIND_ADDRESS_NO_PORT:
749 inet->bind_address_no_port = val ? 1 : 0;
750 break;
751 case IP_MTU_DISCOVER:
752 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
753 goto e_inval;
754 inet->pmtudisc = val;
755 break;
756 case IP_RECVERR:
757 inet->recverr = !!val;
758 if (!val)
759 skb_queue_purge(&sk->sk_error_queue);
760 break;
761 case IP_MULTICAST_TTL:
762 if (sk->sk_type == SOCK_STREAM)
763 goto e_inval;
764 if (optlen < 1)
765 goto e_inval;
766 if (val == -1)
767 val = 1;
768 if (val < 0 || val > 255)
769 goto e_inval;
770 inet->mc_ttl = val;
771 break;
772 case IP_MULTICAST_LOOP:
773 if (optlen < 1)
774 goto e_inval;
775 inet->mc_loop = !!val;
776 break;
777 case IP_UNICAST_IF:
779 struct net_device *dev = NULL;
780 int ifindex;
782 if (optlen != sizeof(int))
783 goto e_inval;
785 ifindex = (__force int)ntohl((__force __be32)val);
786 if (ifindex == 0) {
787 inet->uc_index = 0;
788 err = 0;
789 break;
792 dev = dev_get_by_index(sock_net(sk), ifindex);
793 err = -EADDRNOTAVAIL;
794 if (!dev)
795 break;
796 dev_put(dev);
798 err = -EINVAL;
799 if (sk->sk_bound_dev_if)
800 break;
802 inet->uc_index = ifindex;
803 err = 0;
804 break;
806 case IP_MULTICAST_IF:
808 struct ip_mreqn mreq;
809 struct net_device *dev = NULL;
811 if (sk->sk_type == SOCK_STREAM)
812 goto e_inval;
814 * Check the arguments are allowable
817 if (optlen < sizeof(struct in_addr))
818 goto e_inval;
820 err = -EFAULT;
821 if (optlen >= sizeof(struct ip_mreqn)) {
822 if (copy_from_user(&mreq, optval, sizeof(mreq)))
823 break;
824 } else {
825 memset(&mreq, 0, sizeof(mreq));
826 if (optlen >= sizeof(struct ip_mreq)) {
827 if (copy_from_user(&mreq, optval,
828 sizeof(struct ip_mreq)))
829 break;
830 } else if (optlen >= sizeof(struct in_addr)) {
831 if (copy_from_user(&mreq.imr_address, optval,
832 sizeof(struct in_addr)))
833 break;
837 if (!mreq.imr_ifindex) {
838 if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
839 inet->mc_index = 0;
840 inet->mc_addr = 0;
841 err = 0;
842 break;
844 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
845 if (dev)
846 mreq.imr_ifindex = dev->ifindex;
847 } else
848 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
851 err = -EADDRNOTAVAIL;
852 if (!dev)
853 break;
854 dev_put(dev);
856 err = -EINVAL;
857 if (sk->sk_bound_dev_if &&
858 mreq.imr_ifindex != sk->sk_bound_dev_if)
859 break;
861 inet->mc_index = mreq.imr_ifindex;
862 inet->mc_addr = mreq.imr_address.s_addr;
863 err = 0;
864 break;
867 case IP_ADD_MEMBERSHIP:
868 case IP_DROP_MEMBERSHIP:
870 struct ip_mreqn mreq;
872 err = -EPROTO;
873 if (inet_sk(sk)->is_icsk)
874 break;
876 if (optlen < sizeof(struct ip_mreq))
877 goto e_inval;
878 err = -EFAULT;
879 if (optlen >= sizeof(struct ip_mreqn)) {
880 if (copy_from_user(&mreq, optval, sizeof(mreq)))
881 break;
882 } else {
883 memset(&mreq, 0, sizeof(mreq));
884 if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
885 break;
888 if (optname == IP_ADD_MEMBERSHIP)
889 err = ip_mc_join_group(sk, &mreq);
890 else
891 err = ip_mc_leave_group(sk, &mreq);
892 break;
894 case IP_MSFILTER:
896 struct ip_msfilter *msf;
898 if (optlen < IP_MSFILTER_SIZE(0))
899 goto e_inval;
900 if (optlen > sysctl_optmem_max) {
901 err = -ENOBUFS;
902 break;
904 msf = kmalloc(optlen, GFP_KERNEL);
905 if (!msf) {
906 err = -ENOBUFS;
907 break;
909 err = -EFAULT;
910 if (copy_from_user(msf, optval, optlen)) {
911 kfree(msf);
912 break;
914 /* numsrc >= (1G-4) overflow in 32 bits */
915 if (msf->imsf_numsrc >= 0x3ffffffcU ||
916 msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
917 kfree(msf);
918 err = -ENOBUFS;
919 break;
921 if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
922 kfree(msf);
923 err = -EINVAL;
924 break;
926 err = ip_mc_msfilter(sk, msf, 0);
927 kfree(msf);
928 break;
930 case IP_BLOCK_SOURCE:
931 case IP_UNBLOCK_SOURCE:
932 case IP_ADD_SOURCE_MEMBERSHIP:
933 case IP_DROP_SOURCE_MEMBERSHIP:
935 struct ip_mreq_source mreqs;
936 int omode, add;
938 if (optlen != sizeof(struct ip_mreq_source))
939 goto e_inval;
940 if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
941 err = -EFAULT;
942 break;
944 if (optname == IP_BLOCK_SOURCE) {
945 omode = MCAST_EXCLUDE;
946 add = 1;
947 } else if (optname == IP_UNBLOCK_SOURCE) {
948 omode = MCAST_EXCLUDE;
949 add = 0;
950 } else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
951 struct ip_mreqn mreq;
953 mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
954 mreq.imr_address.s_addr = mreqs.imr_interface;
955 mreq.imr_ifindex = 0;
956 err = ip_mc_join_group(sk, &mreq);
957 if (err && err != -EADDRINUSE)
958 break;
959 omode = MCAST_INCLUDE;
960 add = 1;
961 } else /* IP_DROP_SOURCE_MEMBERSHIP */ {
962 omode = MCAST_INCLUDE;
963 add = 0;
965 err = ip_mc_source(add, omode, sk, &mreqs, 0);
966 break;
968 case MCAST_JOIN_GROUP:
969 case MCAST_LEAVE_GROUP:
971 struct group_req greq;
972 struct sockaddr_in *psin;
973 struct ip_mreqn mreq;
975 if (optlen < sizeof(struct group_req))
976 goto e_inval;
977 err = -EFAULT;
978 if (copy_from_user(&greq, optval, sizeof(greq)))
979 break;
980 psin = (struct sockaddr_in *)&greq.gr_group;
981 if (psin->sin_family != AF_INET)
982 goto e_inval;
983 memset(&mreq, 0, sizeof(mreq));
984 mreq.imr_multiaddr = psin->sin_addr;
985 mreq.imr_ifindex = greq.gr_interface;
987 if (optname == MCAST_JOIN_GROUP)
988 err = ip_mc_join_group(sk, &mreq);
989 else
990 err = ip_mc_leave_group(sk, &mreq);
991 break;
993 case MCAST_JOIN_SOURCE_GROUP:
994 case MCAST_LEAVE_SOURCE_GROUP:
995 case MCAST_BLOCK_SOURCE:
996 case MCAST_UNBLOCK_SOURCE:
998 struct group_source_req greqs;
999 struct ip_mreq_source mreqs;
1000 struct sockaddr_in *psin;
1001 int omode, add;
1003 if (optlen != sizeof(struct group_source_req))
1004 goto e_inval;
1005 if (copy_from_user(&greqs, optval, sizeof(greqs))) {
1006 err = -EFAULT;
1007 break;
1009 if (greqs.gsr_group.ss_family != AF_INET ||
1010 greqs.gsr_source.ss_family != AF_INET) {
1011 err = -EADDRNOTAVAIL;
1012 break;
1014 psin = (struct sockaddr_in *)&greqs.gsr_group;
1015 mreqs.imr_multiaddr = psin->sin_addr.s_addr;
1016 psin = (struct sockaddr_in *)&greqs.gsr_source;
1017 mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
1018 mreqs.imr_interface = 0; /* use index for mc_source */
1020 if (optname == MCAST_BLOCK_SOURCE) {
1021 omode = MCAST_EXCLUDE;
1022 add = 1;
1023 } else if (optname == MCAST_UNBLOCK_SOURCE) {
1024 omode = MCAST_EXCLUDE;
1025 add = 0;
1026 } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
1027 struct ip_mreqn mreq;
1029 psin = (struct sockaddr_in *)&greqs.gsr_group;
1030 mreq.imr_multiaddr = psin->sin_addr;
1031 mreq.imr_address.s_addr = 0;
1032 mreq.imr_ifindex = greqs.gsr_interface;
1033 err = ip_mc_join_group(sk, &mreq);
1034 if (err && err != -EADDRINUSE)
1035 break;
1036 greqs.gsr_interface = mreq.imr_ifindex;
1037 omode = MCAST_INCLUDE;
1038 add = 1;
1039 } else /* MCAST_LEAVE_SOURCE_GROUP */ {
1040 omode = MCAST_INCLUDE;
1041 add = 0;
1043 err = ip_mc_source(add, omode, sk, &mreqs,
1044 greqs.gsr_interface);
1045 break;
1047 case MCAST_MSFILTER:
1049 struct sockaddr_in *psin;
1050 struct ip_msfilter *msf = NULL;
1051 struct group_filter *gsf = NULL;
1052 int msize, i, ifindex;
1054 if (optlen < GROUP_FILTER_SIZE(0))
1055 goto e_inval;
1056 if (optlen > sysctl_optmem_max) {
1057 err = -ENOBUFS;
1058 break;
1060 gsf = kmalloc(optlen, GFP_KERNEL);
1061 if (!gsf) {
1062 err = -ENOBUFS;
1063 break;
1065 err = -EFAULT;
1066 if (copy_from_user(gsf, optval, optlen))
1067 goto mc_msf_out;
1069 /* numsrc >= (4G-140)/128 overflow in 32 bits */
1070 if (gsf->gf_numsrc >= 0x1ffffff ||
1071 gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
1072 err = -ENOBUFS;
1073 goto mc_msf_out;
1075 if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
1076 err = -EINVAL;
1077 goto mc_msf_out;
1079 msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
1080 msf = kmalloc(msize, GFP_KERNEL);
1081 if (!msf) {
1082 err = -ENOBUFS;
1083 goto mc_msf_out;
1085 ifindex = gsf->gf_interface;
1086 psin = (struct sockaddr_in *)&gsf->gf_group;
1087 if (psin->sin_family != AF_INET) {
1088 err = -EADDRNOTAVAIL;
1089 goto mc_msf_out;
1091 msf->imsf_multiaddr = psin->sin_addr.s_addr;
1092 msf->imsf_interface = 0;
1093 msf->imsf_fmode = gsf->gf_fmode;
1094 msf->imsf_numsrc = gsf->gf_numsrc;
1095 err = -EADDRNOTAVAIL;
1096 for (i = 0; i < gsf->gf_numsrc; ++i) {
1097 psin = (struct sockaddr_in *)&gsf->gf_slist[i];
1099 if (psin->sin_family != AF_INET)
1100 goto mc_msf_out;
1101 msf->imsf_slist[i] = psin->sin_addr.s_addr;
1103 kfree(gsf);
1104 gsf = NULL;
1106 err = ip_mc_msfilter(sk, msf, ifindex);
1107 mc_msf_out:
1108 kfree(msf);
1109 kfree(gsf);
1110 break;
1112 case IP_MULTICAST_ALL:
1113 if (optlen < 1)
1114 goto e_inval;
1115 if (val != 0 && val != 1)
1116 goto e_inval;
1117 inet->mc_all = val;
1118 break;
1119 case IP_ROUTER_ALERT:
1120 err = ip_ra_control(sk, val ? 1 : 0, NULL);
1121 break;
1123 case IP_FREEBIND:
1124 if (optlen < 1)
1125 goto e_inval;
1126 inet->freebind = !!val;
1127 break;
1129 case IP_IPSEC_POLICY:
1130 case IP_XFRM_POLICY:
1131 err = -EPERM;
1132 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1133 break;
1134 err = xfrm_user_policy(sk, optname, optval, optlen);
1135 break;
1137 case IP_TRANSPARENT:
1138 if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
1139 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1140 err = -EPERM;
1141 break;
1143 if (optlen < 1)
1144 goto e_inval;
1145 inet->transparent = !!val;
1146 break;
1148 case IP_MINTTL:
1149 if (optlen < 1)
1150 goto e_inval;
1151 if (val < 0 || val > 255)
1152 goto e_inval;
1153 inet->min_ttl = val;
1154 break;
1156 default:
1157 err = -ENOPROTOOPT;
1158 break;
1160 release_sock(sk);
1161 if (needs_rtnl)
1162 rtnl_unlock();
1163 return err;
1165 e_inval:
1166 release_sock(sk);
1167 if (needs_rtnl)
1168 rtnl_unlock();
1169 return -EINVAL;
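
For illustration, here is what a typical caller of the IP_ADD_MEMBERSHIP branch above looks like from user space (and why that option appears in setsockopt_needs_rtnl()). A sketch only: the helper name and the group address 239.1.1.1 are arbitrary examples.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <string.h>

static int join_group(int fd, int ifindex)
{
	struct ip_mreqn mreq;

	memset(&mreq, 0, sizeof(mreq));
	inet_pton(AF_INET, "239.1.1.1", &mreq.imr_multiaddr);
	mreq.imr_ifindex = ifindex;	/* 0: let the kernel pick by imr_address/route */

	return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
}
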
1173 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
1174 * @sk: socket
1175 * @skb: buffer
1177 * To support the IP_CMSG_PKTINFO option, we store rt_iif and the specific
1178 * destination in skb->cb[] before the dst is dropped.
1179 * This way, the receiver does not take cache-line misses to read the rtable.
1181 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1183 struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
1184 bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
1185 ipv6_sk_rxinfo(sk);
1187 if (prepare && skb_rtable(skb)) {
1188 pktinfo->ipi_ifindex = inet_iif(skb);
1189 pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
1190 } else {
1191 pktinfo->ipi_ifindex = 0;
1192 pktinfo->ipi_spec_dst.s_addr = 0;
1194 skb_dst_drop(skb);
1197 int ip_setsockopt(struct sock *sk, int level,
1198 int optname, char __user *optval, unsigned int optlen)
1200 int err;
1202 if (level != SOL_IP)
1203 return -ENOPROTOOPT;
1205 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1206 #ifdef CONFIG_NETFILTER
1207 /* we need to exclude all possible ENOPROTOOPTs except default case */
1208 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1209 optname != IP_IPSEC_POLICY &&
1210 optname != IP_XFRM_POLICY &&
1211 !ip_mroute_opt(optname)) {
1212 lock_sock(sk);
1213 err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
1214 release_sock(sk);
1216 #endif
1217 return err;
1219 EXPORT_SYMBOL(ip_setsockopt);
1221 #ifdef CONFIG_COMPAT
1222 int compat_ip_setsockopt(struct sock *sk, int level, int optname,
1223 char __user *optval, unsigned int optlen)
1225 int err;
1227 if (level != SOL_IP)
1228 return -ENOPROTOOPT;
1230 if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
1231 return compat_mc_setsockopt(sk, level, optname, optval, optlen,
1232 ip_setsockopt);
1234 err = do_ip_setsockopt(sk, level, optname, optval, optlen);
1235 #ifdef CONFIG_NETFILTER
1236 /* we need to exclude all possible ENOPROTOOPTs except default case */
1237 if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
1238 optname != IP_IPSEC_POLICY &&
1239 optname != IP_XFRM_POLICY &&
1240 !ip_mroute_opt(optname)) {
1241 lock_sock(sk);
1242 err = compat_nf_setsockopt(sk, PF_INET, optname,
1243 optval, optlen);
1244 release_sock(sk);
1246 #endif
1247 return err;
1249 EXPORT_SYMBOL(compat_ip_setsockopt);
1250 #endif
1253 * Get the options. Note for future reference: the GET of IP options gets
1254 * the _received_ ones; the SET sets the _sent_ ones.
1257 static bool getsockopt_needs_rtnl(int optname)
1259 switch (optname) {
1260 case IP_MSFILTER:
1261 case MCAST_MSFILTER:
1262 return true;
1264 return false;
1267 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1268 char __user *optval, int __user *optlen, unsigned int flags)
1270 struct inet_sock *inet = inet_sk(sk);
1271 bool needs_rtnl = getsockopt_needs_rtnl(optname);
1272 int val, err = 0;
1273 int len;
1275 if (level != SOL_IP)
1276 return -EOPNOTSUPP;
1278 if (ip_mroute_opt(optname))
1279 return ip_mroute_getsockopt(sk, optname, optval, optlen);
1281 if (get_user(len, optlen))
1282 return -EFAULT;
1283 if (len < 0)
1284 return -EINVAL;
1286 if (needs_rtnl)
1287 rtnl_lock();
1288 lock_sock(sk);
1290 switch (optname) {
1291 case IP_OPTIONS:
1293 unsigned char optbuf[sizeof(struct ip_options)+40];
1294 struct ip_options *opt = (struct ip_options *)optbuf;
1295 struct ip_options_rcu *inet_opt;
1297 inet_opt = rcu_dereference_protected(inet->inet_opt,
1298 sock_owned_by_user(sk));
1299 opt->optlen = 0;
1300 if (inet_opt)
1301 memcpy(optbuf, &inet_opt->opt,
1302 sizeof(struct ip_options) +
1303 inet_opt->opt.optlen);
1304 release_sock(sk);
1306 if (opt->optlen == 0)
1307 return put_user(0, optlen);
1309 ip_options_undo(opt);
1311 len = min_t(unsigned int, len, opt->optlen);
1312 if (put_user(len, optlen))
1313 return -EFAULT;
1314 if (copy_to_user(optval, opt->__data, len))
1315 return -EFAULT;
1316 return 0;
1318 case IP_PKTINFO:
1319 val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
1320 break;
1321 case IP_RECVTTL:
1322 val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
1323 break;
1324 case IP_RECVTOS:
1325 val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
1326 break;
1327 case IP_RECVOPTS:
1328 val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
1329 break;
1330 case IP_RETOPTS:
1331 val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
1332 break;
1333 case IP_PASSSEC:
1334 val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
1335 break;
1336 case IP_RECVORIGDSTADDR:
1337 val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
1338 break;
1339 case IP_CHECKSUM:
1340 val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
1341 break;
1342 case IP_TOS:
1343 val = inet->tos;
1344 break;
1345 case IP_TTL:
1347 struct net *net = sock_net(sk);
1348 val = (inet->uc_ttl == -1 ?
1349 net->ipv4.sysctl_ip_default_ttl :
1350 inet->uc_ttl);
1351 break;
1353 case IP_HDRINCL:
1354 val = inet->hdrincl;
1355 break;
1356 case IP_NODEFRAG:
1357 val = inet->nodefrag;
1358 break;
1359 case IP_BIND_ADDRESS_NO_PORT:
1360 val = inet->bind_address_no_port;
1361 break;
1362 case IP_MTU_DISCOVER:
1363 val = inet->pmtudisc;
1364 break;
1365 case IP_MTU:
1367 struct dst_entry *dst;
1368 val = 0;
1369 dst = sk_dst_get(sk);
1370 if (dst) {
1371 val = dst_mtu(dst);
1372 dst_release(dst);
1374 if (!val) {
1375 release_sock(sk);
1376 return -ENOTCONN;
1378 break;
1380 case IP_RECVERR:
1381 val = inet->recverr;
1382 break;
1383 case IP_MULTICAST_TTL:
1384 val = inet->mc_ttl;
1385 break;
1386 case IP_MULTICAST_LOOP:
1387 val = inet->mc_loop;
1388 break;
1389 case IP_UNICAST_IF:
1390 val = (__force int)htonl((__u32) inet->uc_index);
1391 break;
1392 case IP_MULTICAST_IF:
1394 struct in_addr addr;
1395 len = min_t(unsigned int, len, sizeof(struct in_addr));
1396 addr.s_addr = inet->mc_addr;
1397 release_sock(sk);
1399 if (put_user(len, optlen))
1400 return -EFAULT;
1401 if (copy_to_user(optval, &addr, len))
1402 return -EFAULT;
1403 return 0;
1405 case IP_MSFILTER:
1407 struct ip_msfilter msf;
1409 if (len < IP_MSFILTER_SIZE(0)) {
1410 err = -EINVAL;
1411 goto out;
1413 if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
1414 err = -EFAULT;
1415 goto out;
1417 err = ip_mc_msfget(sk, &msf,
1418 (struct ip_msfilter __user *)optval, optlen);
1419 goto out;
1421 case MCAST_MSFILTER:
1423 struct group_filter gsf;
1425 if (len < GROUP_FILTER_SIZE(0)) {
1426 err = -EINVAL;
1427 goto out;
1429 if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
1430 err = -EFAULT;
1431 goto out;
1433 err = ip_mc_gsfget(sk, &gsf,
1434 (struct group_filter __user *)optval,
1435 optlen);
1436 goto out;
1438 case IP_MULTICAST_ALL:
1439 val = inet->mc_all;
1440 break;
1441 case IP_PKTOPTIONS:
1443 struct msghdr msg;
1445 release_sock(sk);
1447 if (sk->sk_type != SOCK_STREAM)
1448 return -ENOPROTOOPT;
1450 msg.msg_control = (__force void *) optval;
1451 msg.msg_controllen = len;
1452 msg.msg_flags = flags;
1454 if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
1455 struct in_pktinfo info;
1457 info.ipi_addr.s_addr = inet->inet_rcv_saddr;
1458 info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
1459 info.ipi_ifindex = inet->mc_index;
1460 put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
1462 if (inet->cmsg_flags & IP_CMSG_TTL) {
1463 int hlim = inet->mc_ttl;
1464 put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
1466 if (inet->cmsg_flags & IP_CMSG_TOS) {
1467 int tos = inet->rcv_tos;
1468 put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
1470 len -= msg.msg_controllen;
1471 return put_user(len, optlen);
1473 case IP_FREEBIND:
1474 val = inet->freebind;
1475 break;
1476 case IP_TRANSPARENT:
1477 val = inet->transparent;
1478 break;
1479 case IP_MINTTL:
1480 val = inet->min_ttl;
1481 break;
1482 default:
1483 release_sock(sk);
1484 return -ENOPROTOOPT;
1486 release_sock(sk);
1488 if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
1489 unsigned char ucval = (unsigned char)val;
1490 len = 1;
1491 if (put_user(len, optlen))
1492 return -EFAULT;
1493 if (copy_to_user(optval, &ucval, 1))
1494 return -EFAULT;
1495 } else {
1496 len = min_t(unsigned int, sizeof(int), len);
1497 if (put_user(len, optlen))
1498 return -EFAULT;
1499 if (copy_to_user(optval, &val, len))
1500 return -EFAULT;
1502 return 0;
1504 out:
1505 release_sock(sk);
1506 if (needs_rtnl)
1507 rtnl_unlock();
1508 return err;
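
A short userspace counterpart to the IP_MTU branch above: it only works on a socket with a cached route (e.g. a connected UDP or TCP socket) and otherwise fails with ENOTCONN. Sketch only; the helper name is made up.

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>

static int print_path_mtu(int fd)
{
	int mtu;
	socklen_t len = sizeof(mtu);

	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
		return -1;
	printf("path mtu: %d\n", mtu);
	return 0;
}
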
1511 int ip_getsockopt(struct sock *sk, int level,
1512 int optname, char __user *optval, int __user *optlen)
1514 int err;
1516 err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
1517 #ifdef CONFIG_NETFILTER
1518 /* we need to exclude all possible ENOPROTOOPTs except default case */
1519 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1520 !ip_mroute_opt(optname)) {
1521 int len;
1523 if (get_user(len, optlen))
1524 return -EFAULT;
1526 lock_sock(sk);
1527 err = nf_getsockopt(sk, PF_INET, optname, optval,
1528 &len);
1529 release_sock(sk);
1530 if (err >= 0)
1531 err = put_user(len, optlen);
1532 return err;
1534 #endif
1535 return err;
1537 EXPORT_SYMBOL(ip_getsockopt);
1539 #ifdef CONFIG_COMPAT
1540 int compat_ip_getsockopt(struct sock *sk, int level, int optname,
1541 char __user *optval, int __user *optlen)
1543 int err;
1545 if (optname == MCAST_MSFILTER)
1546 return compat_mc_getsockopt(sk, level, optname, optval, optlen,
1547 ip_getsockopt);
1549 err = do_ip_getsockopt(sk, level, optname, optval, optlen,
1550 MSG_CMSG_COMPAT);
1552 #ifdef CONFIG_NETFILTER
1553 /* we need to exclude all possible ENOPROTOOPTs except default case */
1554 if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
1555 !ip_mroute_opt(optname)) {
1556 int len;
1558 if (get_user(len, optlen))
1559 return -EFAULT;
1561 lock_sock(sk);
1562 err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
1563 release_sock(sk);
1564 if (err >= 0)
1565 err = put_user(len, optlen);
1566 return err;
1568 #endif
1569 return err;
1571 EXPORT_SYMBOL(compat_ip_getsockopt);
1572 #endif