netem: fix skb_orphan_partial()
net/core/sock.c (linux/fpc-iii.git)
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
15 * Fixes:
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
35 * code. The ACK stuff can wait and needs major
36 * TCP layer surgery.
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
64 * (compatibility fix)
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
83 * To Fix:
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
92 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/errqueue.h>
97 #include <linux/types.h>
98 #include <linux/socket.h>
99 #include <linux/in.h>
100 #include <linux/kernel.h>
101 #include <linux/module.h>
102 #include <linux/proc_fs.h>
103 #include <linux/seq_file.h>
104 #include <linux/sched.h>
105 #include <linux/timer.h>
106 #include <linux/string.h>
107 #include <linux/sockios.h>
108 #include <linux/net.h>
109 #include <linux/mm.h>
110 #include <linux/slab.h>
111 #include <linux/interrupt.h>
112 #include <linux/poll.h>
113 #include <linux/tcp.h>
114 #include <linux/init.h>
115 #include <linux/highmem.h>
116 #include <linux/user_namespace.h>
117 #include <linux/static_key.h>
118 #include <linux/memcontrol.h>
119 #include <linux/prefetch.h>
121 #include <asm/uaccess.h>
123 #include <linux/netdevice.h>
124 #include <net/protocol.h>
125 #include <linux/skbuff.h>
126 #include <net/net_namespace.h>
127 #include <net/request_sock.h>
128 #include <net/sock.h>
129 #include <linux/net_tstamp.h>
130 #include <net/xfrm.h>
131 #include <linux/ipsec.h>
132 #include <net/cls_cgroup.h>
133 #include <net/netprio_cgroup.h>
134 #include <linux/sock_diag.h>
136 #include <linux/filter.h>
137 #include <net/sock_reuseport.h>
139 #include <trace/events/sock.h>
141 #ifdef CONFIG_INET
142 #include <net/tcp.h>
143 #endif
145 #include <net/busy_poll.h>
147 static DEFINE_MUTEX(proto_list_mutex);
148 static LIST_HEAD(proto_list);
151 * sk_ns_capable - General socket capability test
152 * @sk: Socket to use a capability on or through
153 * @user_ns: The user namespace of the capability to use
154 * @cap: The capability to use
156 * Test to see if the opener of the socket had the capability @cap when
157 * the socket was created and if the current process has the capability
158 * @cap in the user namespace @user_ns.
160 bool sk_ns_capable(const struct sock *sk,
161 struct user_namespace *user_ns, int cap)
163 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
164 ns_capable(user_ns, cap);
166 EXPORT_SYMBOL(sk_ns_capable);
169 * sk_capable - Socket global capability test
170 * @sk: Socket to use a capability on or through
171 * @cap: The global capability to use
173 * Test to see if the opener of the socket had the capability @cap when
174 * the socket was created and if the current process has the capability
175 * @cap in all user namespaces.
177 bool sk_capable(const struct sock *sk, int cap)
179 return sk_ns_capable(sk, &init_user_ns, cap);
181 EXPORT_SYMBOL(sk_capable);
184 * sk_net_capable - Network namespace socket capability test
185 * @sk: Socket to use a capability on or through
186 * @cap: The capability to use
188 * Test to see if the opener of the socket had the capability @cap when the
189 * socket was created and if the current process has the capability @cap
190 * over the network namespace the socket is a member of.
192 bool sk_net_capable(const struct sock *sk, int cap)
194 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
196 EXPORT_SYMBOL(sk_net_capable);
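/* Illustrative sketch, not part of the original file: the three helpers
 * above are thin wrappers around the same check with different user
 * namespaces.  For a user-created socket sk, the following equivalences
 * hold by construction:
 *
 *	sk_capable(sk, CAP_NET_RAW)
 *		== sk_ns_capable(sk, &init_user_ns, CAP_NET_RAW);
 *	sk_net_capable(sk, CAP_NET_ADMIN)
 *		== sk_ns_capable(sk, sock_net(sk)->user_ns, CAP_NET_ADMIN);
 */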
199 * Each address family might have different locking rules, so we have
200 * one slock key per address family:
202 static struct lock_class_key af_family_keys[AF_MAX];
203 static struct lock_class_key af_family_slock_keys[AF_MAX];
206 * Make lock validator output more readable. (we pre-construct these
207 * strings at build time, so that runtime initialization of socket
208 * locks is fast):
210 static const char *const af_family_key_strings[AF_MAX+1] = {
211 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
212 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
213 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
214 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
215 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
216 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
217 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
218 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
219 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
220 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
221 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
222 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
223 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
224 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
225 "sk_lock-AF_QIPCRTR", "sk_lock-AF_MAX"
227 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
228 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
229 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
230 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
231 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
232 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
233 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
234 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
235 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
236 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
237 "slock-27" , "slock-28" , "slock-AF_CAN" ,
238 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
239 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
240 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
241 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
242 "slock-AF_QIPCRTR", "slock-AF_MAX"
244 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
245 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
246 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
247 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
248 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
249 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
250 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
251 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
252 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
253 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
254 "clock-27" , "clock-28" , "clock-AF_CAN" ,
255 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
256 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
257 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
258 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
259 "clock-AF_QIPCRTR", "clock-AF_MAX"
263 * sk_callback_lock locking rules are per-address-family,
264 * so split the lock classes by using a per-AF key:
266 static struct lock_class_key af_callback_keys[AF_MAX];
268 /* Take into consideration the size of the struct sk_buff overhead in the
269 * determination of these values, since that is non-constant across
270 * platforms. This makes socket queueing behavior and performance
271 * not depend upon such differences.
273 #define _SK_MEM_PACKETS 256
274 #define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
275 #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
276 #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
278 /* Run time adjustable parameters. */
279 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
280 EXPORT_SYMBOL(sysctl_wmem_max);
281 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
282 EXPORT_SYMBOL(sysctl_rmem_max);
283 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
284 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
286 /* Maximal space eaten by iovec or ancillary data plus some space */
287 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
288 EXPORT_SYMBOL(sysctl_optmem_max);
290 int sysctl_tstamp_allow_data __read_mostly = 1;
292 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
293 EXPORT_SYMBOL_GPL(memalloc_socks);
296 * sk_set_memalloc - sets %SOCK_MEMALLOC
297 * @sk: socket to set it on
299 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
300 * It's the responsibility of the admin to adjust min_free_kbytes
301 * to meet the requirements
303 void sk_set_memalloc(struct sock *sk)
305 sock_set_flag(sk, SOCK_MEMALLOC);
306 sk->sk_allocation |= __GFP_MEMALLOC;
307 static_key_slow_inc(&memalloc_socks);
309 EXPORT_SYMBOL_GPL(sk_set_memalloc);
311 void sk_clear_memalloc(struct sock *sk)
313 sock_reset_flag(sk, SOCK_MEMALLOC);
314 sk->sk_allocation &= ~__GFP_MEMALLOC;
315 static_key_slow_dec(&memalloc_socks);
318 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
319 * progress of swapping. SOCK_MEMALLOC may be cleared while
320 * it has rmem allocations due to the last swapfile being deactivated
321 * but there is a risk that the socket is unusable due to exceeding
322 * the rmem limits. Reclaim the reserves and obey rmem limits again.
324 sk_mem_reclaim(sk);
326 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
328 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
330 int ret;
331 unsigned long pflags = current->flags;
333 /* these should have been dropped before queueing */
334 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
336 current->flags |= PF_MEMALLOC;
337 ret = sk->sk_backlog_rcv(sk, skb);
338 tsk_restore_flags(current, pflags, PF_MEMALLOC);
340 return ret;
342 EXPORT_SYMBOL(__sk_backlog_rcv);
344 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
346 struct timeval tv;
348 if (optlen < sizeof(tv))
349 return -EINVAL;
350 if (copy_from_user(&tv, optval, sizeof(tv)))
351 return -EFAULT;
352 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
353 return -EDOM;
355 if (tv.tv_sec < 0) {
356 static int warned __read_mostly;
358 *timeo_p = 0;
359 if (warned < 10 && net_ratelimit()) {
360 warned++;
361 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
362 __func__, current->comm, task_pid_nr(current));
364 return 0;
366 *timeo_p = MAX_SCHEDULE_TIMEOUT;
367 if (tv.tv_sec == 0 && tv.tv_usec == 0)
368 return 0;
369 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
370 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
371 return 0;
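/* Hedged userspace sketch (illustrative, not from this file): the timeouts
 * parsed by sock_set_timeout() are supplied as a struct timeval.  A zero
 * timeval selects MAX_SCHEDULE_TIMEOUT (block indefinitely), a tv_usec
 * outside [0, USEC_PER_SEC) is rejected with -EDOM, and a negative tv_sec
 * is clamped to an immediate timeout with a rate-limited warning.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("setsockopt(SO_RCVTIMEO)");
 */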
374 static void sock_warn_obsolete_bsdism(const char *name)
376 static int warned;
377 static char warncomm[TASK_COMM_LEN];
378 if (strcmp(warncomm, current->comm) && warned < 5) {
379 strcpy(warncomm, current->comm);
380 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
381 warncomm, name);
382 warned++;
386 static bool sock_needs_netstamp(const struct sock *sk)
388 switch (sk->sk_family) {
389 case AF_UNSPEC:
390 case AF_UNIX:
391 return false;
392 default:
393 return true;
397 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
399 if (sk->sk_flags & flags) {
400 sk->sk_flags &= ~flags;
401 if (sock_needs_netstamp(sk) &&
402 !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
403 net_disable_timestamp();
408 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
410 unsigned long flags;
411 struct sk_buff_head *list = &sk->sk_receive_queue;
413 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
414 atomic_inc(&sk->sk_drops);
415 trace_sock_rcvqueue_full(sk, skb);
416 return -ENOMEM;
419 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
420 atomic_inc(&sk->sk_drops);
421 return -ENOBUFS;
424 skb->dev = NULL;
425 skb_set_owner_r(skb, sk);
427 /* we escape from the RCU-protected region, make sure we don't leak
428 * a non-refcounted dst
430 skb_dst_force(skb);
432 spin_lock_irqsave(&list->lock, flags);
433 sock_skb_set_dropcount(sk, skb);
434 __skb_queue_tail(list, skb);
435 spin_unlock_irqrestore(&list->lock, flags);
437 if (!sock_flag(sk, SOCK_DEAD))
438 sk->sk_data_ready(sk);
439 return 0;
441 EXPORT_SYMBOL(__sock_queue_rcv_skb);
443 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
445 int err;
447 err = sk_filter(sk, skb);
448 if (err)
449 return err;
451 return __sock_queue_rcv_skb(sk, skb);
453 EXPORT_SYMBOL(sock_queue_rcv_skb);
455 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
456 const int nested, unsigned int trim_cap, bool refcounted)
458 int rc = NET_RX_SUCCESS;
460 if (sk_filter_trim_cap(sk, skb, trim_cap))
461 goto discard_and_relse;
463 skb->dev = NULL;
465 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
466 atomic_inc(&sk->sk_drops);
467 goto discard_and_relse;
469 if (nested)
470 bh_lock_sock_nested(sk);
471 else
472 bh_lock_sock(sk);
473 if (!sock_owned_by_user(sk)) {
475 * trylock + unlock semantics:
477 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
479 rc = sk_backlog_rcv(sk, skb);
481 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
482 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
483 bh_unlock_sock(sk);
484 atomic_inc(&sk->sk_drops);
485 goto discard_and_relse;
488 bh_unlock_sock(sk);
489 out:
490 if (refcounted)
491 sock_put(sk);
492 return rc;
493 discard_and_relse:
494 kfree_skb(skb);
495 goto out;
497 EXPORT_SYMBOL(__sk_receive_skb);
499 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
501 struct dst_entry *dst = __sk_dst_get(sk);
503 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
504 sk_tx_queue_clear(sk);
505 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
506 dst_release(dst);
507 return NULL;
510 return dst;
512 EXPORT_SYMBOL(__sk_dst_check);
514 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
516 struct dst_entry *dst = sk_dst_get(sk);
518 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
519 sk_dst_reset(sk);
520 dst_release(dst);
521 return NULL;
524 return dst;
526 EXPORT_SYMBOL(sk_dst_check);
528 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
529 int optlen)
531 int ret = -ENOPROTOOPT;
532 #ifdef CONFIG_NETDEVICES
533 struct net *net = sock_net(sk);
534 char devname[IFNAMSIZ];
535 int index;
537 /* Sorry... */
538 ret = -EPERM;
539 if (!ns_capable(net->user_ns, CAP_NET_RAW))
540 goto out;
542 ret = -EINVAL;
543 if (optlen < 0)
544 goto out;
546 /* Bind this socket to a particular device like "eth0",
547 * as specified in the passed interface name. If the
548 * name is "" or the option length is zero the socket
549 * is not bound.
551 if (optlen > IFNAMSIZ - 1)
552 optlen = IFNAMSIZ - 1;
553 memset(devname, 0, sizeof(devname));
555 ret = -EFAULT;
556 if (copy_from_user(devname, optval, optlen))
557 goto out;
559 index = 0;
560 if (devname[0] != '\0') {
561 struct net_device *dev;
563 rcu_read_lock();
564 dev = dev_get_by_name_rcu(net, devname);
565 if (dev)
566 index = dev->ifindex;
567 rcu_read_unlock();
568 ret = -ENODEV;
569 if (!dev)
570 goto out;
573 lock_sock(sk);
574 sk->sk_bound_dev_if = index;
575 sk_dst_reset(sk);
576 release_sock(sk);
578 ret = 0;
580 out:
581 #endif
583 return ret;
586 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
587 int __user *optlen, int len)
589 int ret = -ENOPROTOOPT;
590 #ifdef CONFIG_NETDEVICES
591 struct net *net = sock_net(sk);
592 char devname[IFNAMSIZ];
594 if (sk->sk_bound_dev_if == 0) {
595 len = 0;
596 goto zero;
599 ret = -EINVAL;
600 if (len < IFNAMSIZ)
601 goto out;
603 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
604 if (ret)
605 goto out;
607 len = strlen(devname) + 1;
609 ret = -EFAULT;
610 if (copy_to_user(optval, devname, len))
611 goto out;
613 zero:
614 ret = -EFAULT;
615 if (put_user(len, optlen))
616 goto out;
618 ret = 0;
620 out:
621 #endif
623 return ret;
626 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
628 if (valbool)
629 sock_set_flag(sk, bit);
630 else
631 sock_reset_flag(sk, bit);
634 bool sk_mc_loop(struct sock *sk)
636 if (dev_recursion_level())
637 return false;
638 if (!sk)
639 return true;
640 switch (sk->sk_family) {
641 case AF_INET:
642 return inet_sk(sk)->mc_loop;
643 #if IS_ENABLED(CONFIG_IPV6)
644 case AF_INET6:
645 return inet6_sk(sk)->mc_loop;
646 #endif
648 WARN_ON(1);
649 return true;
651 EXPORT_SYMBOL(sk_mc_loop);
654 * This is meant for all protocols to use and covers goings on
655 * at the socket level. Everything here is generic.
658 int sock_setsockopt(struct socket *sock, int level, int optname,
659 char __user *optval, unsigned int optlen)
661 struct sock *sk = sock->sk;
662 int val;
663 int valbool;
664 struct linger ling;
665 int ret = 0;
668 * Options without arguments
671 if (optname == SO_BINDTODEVICE)
672 return sock_setbindtodevice(sk, optval, optlen);
674 if (optlen < sizeof(int))
675 return -EINVAL;
677 if (get_user(val, (int __user *)optval))
678 return -EFAULT;
680 valbool = val ? 1 : 0;
682 lock_sock(sk);
684 switch (optname) {
685 case SO_DEBUG:
686 if (val && !capable(CAP_NET_ADMIN))
687 ret = -EACCES;
688 else
689 sock_valbool_flag(sk, SOCK_DBG, valbool);
690 break;
691 case SO_REUSEADDR:
692 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
693 break;
694 case SO_REUSEPORT:
695 sk->sk_reuseport = valbool;
696 break;
697 case SO_TYPE:
698 case SO_PROTOCOL:
699 case SO_DOMAIN:
700 case SO_ERROR:
701 ret = -ENOPROTOOPT;
702 break;
703 case SO_DONTROUTE:
704 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
705 break;
706 case SO_BROADCAST:
707 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
708 break;
709 case SO_SNDBUF:
710 /* Don't error on this; BSD doesn't, and if you think
711 * about it this is right. Otherwise apps have to
712 * play 'guess the biggest size' games. RCVBUF/SNDBUF
713 * are treated in BSD as hints.
715 val = min_t(u32, val, sysctl_wmem_max);
716 set_sndbuf:
717 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
718 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
719 /* Wake up sending tasks if we upped the value. */
720 sk->sk_write_space(sk);
721 break;
723 case SO_SNDBUFFORCE:
724 if (!capable(CAP_NET_ADMIN)) {
725 ret = -EPERM;
726 break;
728 goto set_sndbuf;
730 case SO_RCVBUF:
731 /* Don't error on this; BSD doesn't, and if you think
732 * about it this is right. Otherwise apps have to
733 * play 'guess the biggest size' games. RCVBUF/SNDBUF
734 * are treated in BSD as hints.
736 val = min_t(u32, val, sysctl_rmem_max);
737 set_rcvbuf:
738 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
740 * We double it on the way in to account for
741 * "struct sk_buff" etc. overhead. Applications
742 * assume that the SO_RCVBUF setting they make will
743 * allow that much actual data to be received on that
744 * socket.
746 * Applications are unaware that "struct sk_buff" and
747 * other overheads allocate from the receive buffer
748 * during socket buffer allocation.
750 * And after considering the possible alternatives,
751 * returning the value we actually used in getsockopt
752 * is the most desirable behavior.
754 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
755 break;
757 case SO_RCVBUFFORCE:
758 if (!capable(CAP_NET_ADMIN)) {
759 ret = -EPERM;
760 break;
762 goto set_rcvbuf;
764 case SO_KEEPALIVE:
765 #ifdef CONFIG_INET
766 if (sk->sk_protocol == IPPROTO_TCP &&
767 sk->sk_type == SOCK_STREAM)
768 tcp_set_keepalive(sk, valbool);
769 #endif
770 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
771 break;
773 case SO_OOBINLINE:
774 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
775 break;
777 case SO_NO_CHECK:
778 sk->sk_no_check_tx = valbool;
779 break;
781 case SO_PRIORITY:
782 if ((val >= 0 && val <= 6) ||
783 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
784 sk->sk_priority = val;
785 else
786 ret = -EPERM;
787 break;
789 case SO_LINGER:
790 if (optlen < sizeof(ling)) {
791 ret = -EINVAL; /* 1003.1g */
792 break;
794 if (copy_from_user(&ling, optval, sizeof(ling))) {
795 ret = -EFAULT;
796 break;
798 if (!ling.l_onoff)
799 sock_reset_flag(sk, SOCK_LINGER);
800 else {
801 #if (BITS_PER_LONG == 32)
802 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
803 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
804 else
805 #endif
806 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
807 sock_set_flag(sk, SOCK_LINGER);
809 break;
811 case SO_BSDCOMPAT:
812 sock_warn_obsolete_bsdism("setsockopt");
813 break;
815 case SO_PASSCRED:
816 if (valbool)
817 set_bit(SOCK_PASSCRED, &sock->flags);
818 else
819 clear_bit(SOCK_PASSCRED, &sock->flags);
820 break;
822 case SO_TIMESTAMP:
823 case SO_TIMESTAMPNS:
824 if (valbool) {
825 if (optname == SO_TIMESTAMP)
826 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
827 else
828 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
829 sock_set_flag(sk, SOCK_RCVTSTAMP);
830 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
831 } else {
832 sock_reset_flag(sk, SOCK_RCVTSTAMP);
833 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
835 break;
837 case SO_TIMESTAMPING:
838 if (val & ~SOF_TIMESTAMPING_MASK) {
839 ret = -EINVAL;
840 break;
843 if (val & SOF_TIMESTAMPING_OPT_ID &&
844 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
845 if (sk->sk_protocol == IPPROTO_TCP &&
846 sk->sk_type == SOCK_STREAM) {
847 if ((1 << sk->sk_state) &
848 (TCPF_CLOSE | TCPF_LISTEN)) {
849 ret = -EINVAL;
850 break;
852 sk->sk_tskey = tcp_sk(sk)->snd_una;
853 } else {
854 sk->sk_tskey = 0;
857 sk->sk_tsflags = val;
858 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
859 sock_enable_timestamp(sk,
860 SOCK_TIMESTAMPING_RX_SOFTWARE);
861 else
862 sock_disable_timestamp(sk,
863 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
864 break;
866 case SO_RCVLOWAT:
867 if (val < 0)
868 val = INT_MAX;
869 sk->sk_rcvlowat = val ? : 1;
870 break;
872 case SO_RCVTIMEO:
873 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
874 break;
876 case SO_SNDTIMEO:
877 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
878 break;
880 case SO_ATTACH_FILTER:
881 ret = -EINVAL;
882 if (optlen == sizeof(struct sock_fprog)) {
883 struct sock_fprog fprog;
885 ret = -EFAULT;
886 if (copy_from_user(&fprog, optval, sizeof(fprog)))
887 break;
889 ret = sk_attach_filter(&fprog, sk);
891 break;
893 case SO_ATTACH_BPF:
894 ret = -EINVAL;
895 if (optlen == sizeof(u32)) {
896 u32 ufd;
898 ret = -EFAULT;
899 if (copy_from_user(&ufd, optval, sizeof(ufd)))
900 break;
902 ret = sk_attach_bpf(ufd, sk);
904 break;
906 case SO_ATTACH_REUSEPORT_CBPF:
907 ret = -EINVAL;
908 if (optlen == sizeof(struct sock_fprog)) {
909 struct sock_fprog fprog;
911 ret = -EFAULT;
912 if (copy_from_user(&fprog, optval, sizeof(fprog)))
913 break;
915 ret = sk_reuseport_attach_filter(&fprog, sk);
917 break;
919 case SO_ATTACH_REUSEPORT_EBPF:
920 ret = -EINVAL;
921 if (optlen == sizeof(u32)) {
922 u32 ufd;
924 ret = -EFAULT;
925 if (copy_from_user(&ufd, optval, sizeof(ufd)))
926 break;
928 ret = sk_reuseport_attach_bpf(ufd, sk);
930 break;
932 case SO_DETACH_FILTER:
933 ret = sk_detach_filter(sk);
934 break;
936 case SO_LOCK_FILTER:
937 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
938 ret = -EPERM;
939 else
940 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
941 break;
943 case SO_PASSSEC:
944 if (valbool)
945 set_bit(SOCK_PASSSEC, &sock->flags);
946 else
947 clear_bit(SOCK_PASSSEC, &sock->flags);
948 break;
949 case SO_MARK:
950 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
951 ret = -EPERM;
952 else
953 sk->sk_mark = val;
954 break;
956 case SO_RXQ_OVFL:
957 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
958 break;
960 case SO_WIFI_STATUS:
961 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
962 break;
964 case SO_PEEK_OFF:
965 if (sock->ops->set_peek_off)
966 ret = sock->ops->set_peek_off(sk, val);
967 else
968 ret = -EOPNOTSUPP;
969 break;
971 case SO_NOFCS:
972 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
973 break;
975 case SO_SELECT_ERR_QUEUE:
976 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
977 break;
979 #ifdef CONFIG_NET_RX_BUSY_POLL
980 case SO_BUSY_POLL:
981 /* allow unprivileged users to decrease the value */
982 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
983 ret = -EPERM;
984 else {
985 if (val < 0)
986 ret = -EINVAL;
987 else
988 sk->sk_ll_usec = val;
990 break;
991 #endif
993 case SO_MAX_PACING_RATE:
994 sk->sk_max_pacing_rate = val;
995 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
996 sk->sk_max_pacing_rate);
997 break;
999 case SO_INCOMING_CPU:
1000 sk->sk_incoming_cpu = val;
1001 break;
1003 case SO_CNX_ADVICE:
1004 if (val == 1)
1005 dst_negative_advice(sk);
1006 break;
1007 default:
1008 ret = -ENOPROTOOPT;
1009 break;
1011 release_sock(sk);
1012 return ret;
1014 EXPORT_SYMBOL(sock_setsockopt);
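/* Hedged userspace sketch (illustrative, not part of this file): SO_SNDBUF
 * and SO_RCVBUF requests are clamped to sysctl_wmem_max/sysctl_rmem_max and
 * then doubled to account for struct sk_buff overhead, so getsockopt()
 * reports roughly twice the value that was requested:
 *
 *	int val = 64 * 1024, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *
 * out is then about 2 * val, subject to the sysctl cap and SOCK_MIN_RCVBUF.
 */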
1017 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1018 struct ucred *ucred)
1020 ucred->pid = pid_vnr(pid);
1021 ucred->uid = ucred->gid = -1;
1022 if (cred) {
1023 struct user_namespace *current_ns = current_user_ns();
1025 ucred->uid = from_kuid_munged(current_ns, cred->euid);
1026 ucred->gid = from_kgid_munged(current_ns, cred->egid);
1030 int sock_getsockopt(struct socket *sock, int level, int optname,
1031 char __user *optval, int __user *optlen)
1033 struct sock *sk = sock->sk;
1035 union {
1036 int val;
1037 struct linger ling;
1038 struct timeval tm;
1039 } v;
1041 int lv = sizeof(int);
1042 int len;
1044 if (get_user(len, optlen))
1045 return -EFAULT;
1046 if (len < 0)
1047 return -EINVAL;
1049 memset(&v, 0, sizeof(v));
1051 switch (optname) {
1052 case SO_DEBUG:
1053 v.val = sock_flag(sk, SOCK_DBG);
1054 break;
1056 case SO_DONTROUTE:
1057 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1058 break;
1060 case SO_BROADCAST:
1061 v.val = sock_flag(sk, SOCK_BROADCAST);
1062 break;
1064 case SO_SNDBUF:
1065 v.val = sk->sk_sndbuf;
1066 break;
1068 case SO_RCVBUF:
1069 v.val = sk->sk_rcvbuf;
1070 break;
1072 case SO_REUSEADDR:
1073 v.val = sk->sk_reuse;
1074 break;
1076 case SO_REUSEPORT:
1077 v.val = sk->sk_reuseport;
1078 break;
1080 case SO_KEEPALIVE:
1081 v.val = sock_flag(sk, SOCK_KEEPOPEN);
1082 break;
1084 case SO_TYPE:
1085 v.val = sk->sk_type;
1086 break;
1088 case SO_PROTOCOL:
1089 v.val = sk->sk_protocol;
1090 break;
1092 case SO_DOMAIN:
1093 v.val = sk->sk_family;
1094 break;
1096 case SO_ERROR:
1097 v.val = -sock_error(sk);
1098 if (v.val == 0)
1099 v.val = xchg(&sk->sk_err_soft, 0);
1100 break;
1102 case SO_OOBINLINE:
1103 v.val = sock_flag(sk, SOCK_URGINLINE);
1104 break;
1106 case SO_NO_CHECK:
1107 v.val = sk->sk_no_check_tx;
1108 break;
1110 case SO_PRIORITY:
1111 v.val = sk->sk_priority;
1112 break;
1114 case SO_LINGER:
1115 lv = sizeof(v.ling);
1116 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
1117 v.ling.l_linger = sk->sk_lingertime / HZ;
1118 break;
1120 case SO_BSDCOMPAT:
1121 sock_warn_obsolete_bsdism("getsockopt");
1122 break;
1124 case SO_TIMESTAMP:
1125 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1126 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1127 break;
1129 case SO_TIMESTAMPNS:
1130 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1131 break;
1133 case SO_TIMESTAMPING:
1134 v.val = sk->sk_tsflags;
1135 break;
1137 case SO_RCVTIMEO:
1138 lv = sizeof(struct timeval);
1139 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1140 v.tm.tv_sec = 0;
1141 v.tm.tv_usec = 0;
1142 } else {
1143 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1144 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1146 break;
1148 case SO_SNDTIMEO:
1149 lv = sizeof(struct timeval);
1150 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1151 v.tm.tv_sec = 0;
1152 v.tm.tv_usec = 0;
1153 } else {
1154 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1155 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1157 break;
1159 case SO_RCVLOWAT:
1160 v.val = sk->sk_rcvlowat;
1161 break;
1163 case SO_SNDLOWAT:
1164 v.val = 1;
1165 break;
1167 case SO_PASSCRED:
1168 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1169 break;
1171 case SO_PEERCRED:
1173 struct ucred peercred;
1174 if (len > sizeof(peercred))
1175 len = sizeof(peercred);
1176 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1177 if (copy_to_user(optval, &peercred, len))
1178 return -EFAULT;
1179 goto lenout;
1182 case SO_PEERNAME:
1184 char address[128];
1186 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1187 return -ENOTCONN;
1188 if (lv < len)
1189 return -EINVAL;
1190 if (copy_to_user(optval, address, len))
1191 return -EFAULT;
1192 goto lenout;
1195 /* Dubious BSD thing... Probably nobody even uses it, but
1196 * the UNIX standard wants it for whatever reason... -DaveM
1198 case SO_ACCEPTCONN:
1199 v.val = sk->sk_state == TCP_LISTEN;
1200 break;
1202 case SO_PASSSEC:
1203 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1204 break;
1206 case SO_PEERSEC:
1207 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1209 case SO_MARK:
1210 v.val = sk->sk_mark;
1211 break;
1213 case SO_RXQ_OVFL:
1214 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1215 break;
1217 case SO_WIFI_STATUS:
1218 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1219 break;
1221 case SO_PEEK_OFF:
1222 if (!sock->ops->set_peek_off)
1223 return -EOPNOTSUPP;
1225 v.val = sk->sk_peek_off;
1226 break;
1227 case SO_NOFCS:
1228 v.val = sock_flag(sk, SOCK_NOFCS);
1229 break;
1231 case SO_BINDTODEVICE:
1232 return sock_getbindtodevice(sk, optval, optlen, len);
1234 case SO_GET_FILTER:
1235 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1236 if (len < 0)
1237 return len;
1239 goto lenout;
1241 case SO_LOCK_FILTER:
1242 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1243 break;
1245 case SO_BPF_EXTENSIONS:
1246 v.val = bpf_tell_extensions();
1247 break;
1249 case SO_SELECT_ERR_QUEUE:
1250 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1251 break;
1253 #ifdef CONFIG_NET_RX_BUSY_POLL
1254 case SO_BUSY_POLL:
1255 v.val = sk->sk_ll_usec;
1256 break;
1257 #endif
1259 case SO_MAX_PACING_RATE:
1260 v.val = sk->sk_max_pacing_rate;
1261 break;
1263 case SO_INCOMING_CPU:
1264 v.val = sk->sk_incoming_cpu;
1265 break;
1267 default:
1268 /* We implement the SO_SNDLOWAT etc to not be settable
1269 * (1003.1g 7).
1271 return -ENOPROTOOPT;
1274 if (len > lv)
1275 len = lv;
1276 if (copy_to_user(optval, &v, len))
1277 return -EFAULT;
1278 lenout:
1279 if (put_user(len, optlen))
1280 return -EFAULT;
1281 return 0;
1285 * Initialize an sk_lock.
1287 * (We also register the sk_lock with the lock validator.)
1289 static inline void sock_lock_init(struct sock *sk)
1291 sock_lock_init_class_and_name(sk,
1292 af_family_slock_key_strings[sk->sk_family],
1293 af_family_slock_keys + sk->sk_family,
1294 af_family_key_strings[sk->sk_family],
1295 af_family_keys + sk->sk_family);
1299 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1300 * even temporarily, because of RCU lookups. sk_node should also be left as is.
1301 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1303 static void sock_copy(struct sock *nsk, const struct sock *osk)
1305 #ifdef CONFIG_SECURITY_NETWORK
1306 void *sptr = nsk->sk_security;
1307 #endif
1308 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1310 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1311 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1313 #ifdef CONFIG_SECURITY_NETWORK
1314 nsk->sk_security = sptr;
1315 security_sk_clone(osk, nsk);
1316 #endif
1319 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1320 int family)
1322 struct sock *sk;
1323 struct kmem_cache *slab;
1325 slab = prot->slab;
1326 if (slab != NULL) {
1327 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1328 if (!sk)
1329 return sk;
1330 if (priority & __GFP_ZERO)
1331 sk_prot_clear_nulls(sk, prot->obj_size);
1332 } else
1333 sk = kmalloc(prot->obj_size, priority);
1335 if (sk != NULL) {
1336 kmemcheck_annotate_bitfield(sk, flags);
1338 if (security_sk_alloc(sk, family, priority))
1339 goto out_free;
1341 if (!try_module_get(prot->owner))
1342 goto out_free_sec;
1343 sk_tx_queue_clear(sk);
1346 return sk;
1348 out_free_sec:
1349 security_sk_free(sk);
1350 out_free:
1351 if (slab != NULL)
1352 kmem_cache_free(slab, sk);
1353 else
1354 kfree(sk);
1355 return NULL;
1358 static void sk_prot_free(struct proto *prot, struct sock *sk)
1360 struct kmem_cache *slab;
1361 struct module *owner;
1363 owner = prot->owner;
1364 slab = prot->slab;
1366 cgroup_sk_free(&sk->sk_cgrp_data);
1367 mem_cgroup_sk_free(sk);
1368 security_sk_free(sk);
1369 if (slab != NULL)
1370 kmem_cache_free(slab, sk);
1371 else
1372 kfree(sk);
1373 module_put(owner);
1377 * sk_alloc - All socket objects are allocated here
1378 * @net: the applicable net namespace
1379 * @family: protocol family
1380 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1381 * @prot: struct proto associated with this new sock instance
1382 * @kern: is this to be a kernel socket?
1384 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1385 struct proto *prot, int kern)
1387 struct sock *sk;
1389 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1390 if (sk) {
1391 sk->sk_family = family;
1393 * See comment in struct sock definition to understand
1394 * why we need sk_prot_creator -acme
1396 sk->sk_prot = sk->sk_prot_creator = prot;
1397 sock_lock_init(sk);
1398 sk->sk_net_refcnt = kern ? 0 : 1;
1399 if (likely(sk->sk_net_refcnt))
1400 get_net(net);
1401 sock_net_set(sk, net);
1402 atomic_set(&sk->sk_wmem_alloc, 1);
1404 mem_cgroup_sk_alloc(sk);
1405 cgroup_sk_alloc(&sk->sk_cgrp_data);
1406 sock_update_classid(&sk->sk_cgrp_data);
1407 sock_update_netprioidx(&sk->sk_cgrp_data);
1410 return sk;
1412 EXPORT_SYMBOL(sk_alloc);
1414 /* Sockets having SOCK_RCU_FREE will call this function after one RCU
1415 * grace period. This is the case for UDP sockets and TCP listeners.
1417 static void __sk_destruct(struct rcu_head *head)
1419 struct sock *sk = container_of(head, struct sock, sk_rcu);
1420 struct sk_filter *filter;
1422 if (sk->sk_destruct)
1423 sk->sk_destruct(sk);
1425 filter = rcu_dereference_check(sk->sk_filter,
1426 atomic_read(&sk->sk_wmem_alloc) == 0);
1427 if (filter) {
1428 sk_filter_uncharge(sk, filter);
1429 RCU_INIT_POINTER(sk->sk_filter, NULL);
1431 if (rcu_access_pointer(sk->sk_reuseport_cb))
1432 reuseport_detach_sock(sk);
1434 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1436 if (atomic_read(&sk->sk_omem_alloc))
1437 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1438 __func__, atomic_read(&sk->sk_omem_alloc));
1440 if (sk->sk_frag.page) {
1441 put_page(sk->sk_frag.page);
1442 sk->sk_frag.page = NULL;
1445 if (sk->sk_peer_cred)
1446 put_cred(sk->sk_peer_cred);
1447 put_pid(sk->sk_peer_pid);
1448 if (likely(sk->sk_net_refcnt))
1449 put_net(sock_net(sk));
1450 sk_prot_free(sk->sk_prot_creator, sk);
1453 void sk_destruct(struct sock *sk)
1455 if (sock_flag(sk, SOCK_RCU_FREE))
1456 call_rcu(&sk->sk_rcu, __sk_destruct);
1457 else
1458 __sk_destruct(&sk->sk_rcu);
1461 static void __sk_free(struct sock *sk)
1463 if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
1464 sock_diag_broadcast_destroy(sk);
1465 else
1466 sk_destruct(sk);
1469 void sk_free(struct sock *sk)
1472 * We subtract one from sk_wmem_alloc so we can tell whether
1473 * some packets are still in some tx queue.
1474 * If not zero, sock_wfree() will call __sk_free(sk) later
1476 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1477 __sk_free(sk);
1479 EXPORT_SYMBOL(sk_free);
1482 * sk_clone_lock - clone a socket, and lock its clone
1483 * @sk: the socket to clone
1484 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1486 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1488 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1490 struct sock *newsk;
1491 bool is_charged = true;
1493 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1494 if (newsk != NULL) {
1495 struct sk_filter *filter;
1497 sock_copy(newsk, sk);
1499 /* SANITY */
1500 if (likely(newsk->sk_net_refcnt))
1501 get_net(sock_net(newsk));
1502 sk_node_init(&newsk->sk_node);
1503 sock_lock_init(newsk);
1504 bh_lock_sock(newsk);
1505 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1506 newsk->sk_backlog.len = 0;
1508 atomic_set(&newsk->sk_rmem_alloc, 0);
1510 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1512 atomic_set(&newsk->sk_wmem_alloc, 1);
1513 atomic_set(&newsk->sk_omem_alloc, 0);
1514 skb_queue_head_init(&newsk->sk_receive_queue);
1515 skb_queue_head_init(&newsk->sk_write_queue);
1517 rwlock_init(&newsk->sk_callback_lock);
1518 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1519 af_callback_keys + newsk->sk_family,
1520 af_family_clock_key_strings[newsk->sk_family]);
1522 newsk->sk_dst_cache = NULL;
1523 newsk->sk_wmem_queued = 0;
1524 newsk->sk_forward_alloc = 0;
1525 atomic_set(&newsk->sk_drops, 0);
1526 newsk->sk_send_head = NULL;
1527 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1529 sock_reset_flag(newsk, SOCK_DONE);
1530 skb_queue_head_init(&newsk->sk_error_queue);
1532 filter = rcu_dereference_protected(newsk->sk_filter, 1);
1533 if (filter != NULL)
1534 /* though it's an empty new sock, the charging may fail
1535 * if sysctl_optmem_max was changed between creation of
1536 * original socket and cloning
1538 is_charged = sk_filter_charge(newsk, filter);
1540 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1541 /* We need to make sure that we don't uncharge the new
1542 * socket if we couldn't charge it in the first place
1543 * as otherwise we uncharge the parent's filter.
1545 if (!is_charged)
1546 RCU_INIT_POINTER(newsk->sk_filter, NULL);
1547 /* It is still a raw copy of the parent, so invalidate
1548 * its destructor and do a plain sk_free() */
1549 newsk->sk_destruct = NULL;
1550 bh_unlock_sock(newsk);
1551 sk_free(newsk);
1552 newsk = NULL;
1553 goto out;
1555 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
1557 newsk->sk_err = 0;
1558 newsk->sk_err_soft = 0;
1559 newsk->sk_priority = 0;
1560 newsk->sk_incoming_cpu = raw_smp_processor_id();
1561 atomic64_set(&newsk->sk_cookie, 0);
1563 mem_cgroup_sk_alloc(newsk);
1564 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1567 * Before updating sk_refcnt, we must commit prior changes to memory
1568 * (Documentation/RCU/rculist_nulls.txt for details)
1570 smp_wmb();
1571 atomic_set(&newsk->sk_refcnt, 2);
1574 * Increment the counter in the same struct proto as the master
1575 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1576 * is the same as sk->sk_prot->socks, as this field was copied
1577 * with memcpy).
1579 * This _changes_ the previous behaviour, where
1580 * tcp_create_openreq_child always was incrementing the
1581 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
1582 * to be taken into account in all callers. -acme
1584 sk_refcnt_debug_inc(newsk);
1585 sk_set_socket(newsk, NULL);
1586 newsk->sk_wq = NULL;
1588 if (newsk->sk_prot->sockets_allocated)
1589 sk_sockets_allocated_inc(newsk);
1591 if (sock_needs_netstamp(sk) &&
1592 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1593 net_enable_timestamp();
1595 out:
1596 return newsk;
1598 EXPORT_SYMBOL_GPL(sk_clone_lock);
1600 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1602 u32 max_segs = 1;
1604 sk_dst_set(sk, dst);
1605 sk->sk_route_caps = dst->dev->features;
1606 if (sk->sk_route_caps & NETIF_F_GSO)
1607 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1608 sk->sk_route_caps &= ~sk->sk_route_nocaps;
1609 if (sk_can_gso(sk)) {
1610 if (dst->header_len) {
1611 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1612 } else {
1613 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1614 sk->sk_gso_max_size = dst->dev->gso_max_size;
1615 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
1618 sk->sk_gso_max_segs = max_segs;
1620 EXPORT_SYMBOL_GPL(sk_setup_caps);
1623 * Simple resource managers for sockets.
1628 * Write buffer destructor automatically called from kfree_skb.
1630 void sock_wfree(struct sk_buff *skb)
1632 struct sock *sk = skb->sk;
1633 unsigned int len = skb->truesize;
1635 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1637 * Keep a reference on sk_wmem_alloc, this will be released
1638 * after sk_write_space() call
1640 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1641 sk->sk_write_space(sk);
1642 len = 1;
1645 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1646 * could not do because of in-flight packets
1648 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1649 __sk_free(sk);
1651 EXPORT_SYMBOL(sock_wfree);
1653 /* This variant of sock_wfree() is used by TCP,
1654 * since it sets SOCK_USE_WRITE_QUEUE.
1656 void __sock_wfree(struct sk_buff *skb)
1658 struct sock *sk = skb->sk;
1660 if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
1661 __sk_free(sk);
1664 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1666 skb_orphan(skb);
1667 skb->sk = sk;
1668 #ifdef CONFIG_INET
1669 if (unlikely(!sk_fullsock(sk))) {
1670 skb->destructor = sock_edemux;
1671 sock_hold(sk);
1672 return;
1674 #endif
1675 skb->destructor = sock_wfree;
1676 skb_set_hash_from_sk(skb, sk);
1678 * We used to take a refcount on sk, but the following operation
1679 * is enough to guarantee sk_free() won't free this sock until
1680 * all in-flight packets are completed
1682 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1684 EXPORT_SYMBOL(skb_set_owner_w);
1686 /* This helper is used by netem, as it can hold packets in its
1687 * delay queue. We want to allow the owner socket to send more
1688 * packets, as if they were already TX completed by a typical driver.
1689 * But we also want to keep skb->sk set because some packet schedulers
1690 * rely on it (sch_fq for example).
1692 void skb_orphan_partial(struct sk_buff *skb)
1694 if (skb_is_tcp_pure_ack(skb))
1695 return;
1697 if (skb->destructor == sock_wfree
1698 #ifdef CONFIG_INET
1699 || skb->destructor == tcp_wfree
1700 #endif
1702 struct sock *sk = skb->sk;
1704 if (atomic_inc_not_zero(&sk->sk_refcnt)) {
1705 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
1706 skb->destructor = sock_efree;
1708 } else {
1709 skb_orphan(skb);
1712 EXPORT_SYMBOL(skb_orphan_partial);
1715 * Read buffer destructor automatically called from kfree_skb.
1717 void sock_rfree(struct sk_buff *skb)
1719 struct sock *sk = skb->sk;
1720 unsigned int len = skb->truesize;
1722 atomic_sub(len, &sk->sk_rmem_alloc);
1723 sk_mem_uncharge(sk, len);
1725 EXPORT_SYMBOL(sock_rfree);
1728 * Buffer destructor for skbs that are not used directly in read or write
1729 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1731 void sock_efree(struct sk_buff *skb)
1733 sock_put(skb->sk);
1735 EXPORT_SYMBOL(sock_efree);
1737 kuid_t sock_i_uid(struct sock *sk)
1739 kuid_t uid;
1741 read_lock_bh(&sk->sk_callback_lock);
1742 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1743 read_unlock_bh(&sk->sk_callback_lock);
1744 return uid;
1746 EXPORT_SYMBOL(sock_i_uid);
1748 unsigned long sock_i_ino(struct sock *sk)
1750 unsigned long ino;
1752 read_lock_bh(&sk->sk_callback_lock);
1753 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1754 read_unlock_bh(&sk->sk_callback_lock);
1755 return ino;
1757 EXPORT_SYMBOL(sock_i_ino);
1760 * Allocate a skb from the socket's send buffer.
1762 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1763 gfp_t priority)
1765 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1766 struct sk_buff *skb = alloc_skb(size, priority);
1767 if (skb) {
1768 skb_set_owner_w(skb, sk);
1769 return skb;
1772 return NULL;
1774 EXPORT_SYMBOL(sock_wmalloc);
1777 * Allocate a memory block from the socket's option memory buffer.
1779 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1781 if ((unsigned int)size <= sysctl_optmem_max &&
1782 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1783 void *mem;
1784 /* First do the add, to avoid the race if kmalloc
1785 * might sleep.
1787 atomic_add(size, &sk->sk_omem_alloc);
1788 mem = kmalloc(size, priority);
1789 if (mem)
1790 return mem;
1791 atomic_sub(size, &sk->sk_omem_alloc);
1793 return NULL;
1795 EXPORT_SYMBOL(sock_kmalloc);
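/* Minimal in-kernel sketch (illustrative; the 128-byte size is a made-up
 * example): sock_kmalloc() charges sk_omem_alloc up front and fails once
 * sysctl_optmem_max would be exceeded, so every allocation must be paired
 * with sock_kfree_s()/sock_kzfree_s() using the same size:
 *
 *	void *buf = sock_kmalloc(sk, 128, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, buf, 128);
 */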
1797 /* Free an option memory block. Note, we actually want the inline
1798 * here as this allows gcc to detect the nullify and fold away the
1799 * condition entirely.
1801 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1802 const bool nullify)
1804 if (WARN_ON_ONCE(!mem))
1805 return;
1806 if (nullify)
1807 kzfree(mem);
1808 else
1809 kfree(mem);
1810 atomic_sub(size, &sk->sk_omem_alloc);
1813 void sock_kfree_s(struct sock *sk, void *mem, int size)
1815 __sock_kfree_s(sk, mem, size, false);
1817 EXPORT_SYMBOL(sock_kfree_s);
1819 void sock_kzfree_s(struct sock *sk, void *mem, int size)
1821 __sock_kfree_s(sk, mem, size, true);
1823 EXPORT_SYMBOL(sock_kzfree_s);
1825 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1826 I think these locks should be removed for datagram sockets.
1828 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1830 DEFINE_WAIT(wait);
1832 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1833 for (;;) {
1834 if (!timeo)
1835 break;
1836 if (signal_pending(current))
1837 break;
1838 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1839 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1840 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1841 break;
1842 if (sk->sk_shutdown & SEND_SHUTDOWN)
1843 break;
1844 if (sk->sk_err)
1845 break;
1846 timeo = schedule_timeout(timeo);
1848 finish_wait(sk_sleep(sk), &wait);
1849 return timeo;
1854 * Generic send/receive buffer handlers
1857 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1858 unsigned long data_len, int noblock,
1859 int *errcode, int max_page_order)
1861 struct sk_buff *skb;
1862 long timeo;
1863 int err;
1865 timeo = sock_sndtimeo(sk, noblock);
1866 for (;;) {
1867 err = sock_error(sk);
1868 if (err != 0)
1869 goto failure;
1871 err = -EPIPE;
1872 if (sk->sk_shutdown & SEND_SHUTDOWN)
1873 goto failure;
1875 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1876 break;
1878 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1879 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1880 err = -EAGAIN;
1881 if (!timeo)
1882 goto failure;
1883 if (signal_pending(current))
1884 goto interrupted;
1885 timeo = sock_wait_for_wmem(sk, timeo);
1887 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1888 errcode, sk->sk_allocation);
1889 if (skb)
1890 skb_set_owner_w(skb, sk);
1891 return skb;
1893 interrupted:
1894 err = sock_intr_errno(timeo);
1895 failure:
1896 *errcode = err;
1897 return NULL;
1899 EXPORT_SYMBOL(sock_alloc_send_pskb);
1901 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1902 int noblock, int *errcode)
1904 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1906 EXPORT_SYMBOL(sock_alloc_send_skb);
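/* Hedged in-kernel sketch (illustrative; "reserve" is a hypothetical
 * headroom value): a datagram-style sendmsg path typically allocates its
 * skb through sock_alloc_send_skb(), which blocks on sndbuf space unless
 * noblock is set and reports the error through *errcode:
 *
 *	int err;
 *	struct sk_buff *skb;
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, reserve);
 *	... build and transmit the packet ...
 */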
1908 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1909 struct sockcm_cookie *sockc)
1911 u32 tsflags;
1913 switch (cmsg->cmsg_type) {
1914 case SO_MARK:
1915 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1916 return -EPERM;
1917 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1918 return -EINVAL;
1919 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
1920 break;
1921 case SO_TIMESTAMPING:
1922 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1923 return -EINVAL;
1925 tsflags = *(u32 *)CMSG_DATA(cmsg);
1926 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
1927 return -EINVAL;
1929 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
1930 sockc->tsflags |= tsflags;
1931 break;
1932 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
1933 case SCM_RIGHTS:
1934 case SCM_CREDENTIALS:
1935 break;
1936 default:
1937 return -EINVAL;
1939 return 0;
1941 EXPORT_SYMBOL(__sock_cmsg_send);
1943 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1944 struct sockcm_cookie *sockc)
1946 struct cmsghdr *cmsg;
1947 int ret;
1949 for_each_cmsghdr(cmsg, msg) {
1950 if (!CMSG_OK(msg, cmsg))
1951 return -EINVAL;
1952 if (cmsg->cmsg_level != SOL_SOCKET)
1953 continue;
1954 ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
1955 if (ret)
1956 return ret;
1958 return 0;
1960 EXPORT_SYMBOL(sock_cmsg_send);
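/* Hedged userspace sketch (illustrative only): __sock_cmsg_send() accepts
 * SOL_SOCKET control messages of type SO_MARK (CAP_NET_ADMIN required) and
 * SO_TIMESTAMPING, each carrying a single u32.  Attaching a mark to one
 * sendmsg() call could look like this (iov/msg_name set up elsewhere):
 *
 *	char cbuf[CMSG_SPACE(sizeof(uint32_t))] = { 0 };
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	uint32_t mark = 42;
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SO_MARK;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(mark));
 *	memcpy(CMSG_DATA(cmsg), &mark, sizeof(mark));
 *	sendmsg(fd, &msg, 0);
 */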
1962 /* On 32bit arches, an skb frag is limited to 2^15 */
1963 #define SKB_FRAG_PAGE_ORDER get_order(32768)
1966 * skb_page_frag_refill - check that a page_frag contains enough room
1967 * @sz: minimum size of the fragment we want to get
1968 * @pfrag: pointer to page_frag
1969 * @gfp: priority for memory allocation
1971 * Note: While this allocator tries to use high order pages, there is
1972 * no guarantee that allocations succeed. Therefore, @sz MUST be
1973 * less than or equal to PAGE_SIZE.
1975 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
1977 if (pfrag->page) {
1978 if (page_ref_count(pfrag->page) == 1) {
1979 pfrag->offset = 0;
1980 return true;
1982 if (pfrag->offset + sz <= pfrag->size)
1983 return true;
1984 put_page(pfrag->page);
1987 pfrag->offset = 0;
1988 if (SKB_FRAG_PAGE_ORDER) {
1989 /* Avoid direct reclaim but allow kswapd to wake */
1990 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
1991 __GFP_COMP | __GFP_NOWARN |
1992 __GFP_NORETRY,
1993 SKB_FRAG_PAGE_ORDER);
1994 if (likely(pfrag->page)) {
1995 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
1996 return true;
1999 pfrag->page = alloc_page(gfp);
2000 if (likely(pfrag->page)) {
2001 pfrag->size = PAGE_SIZE;
2002 return true;
2004 return false;
2006 EXPORT_SYMBOL(skb_page_frag_refill);
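/* Illustrative sketch (not from this file): a typical caller asks for
 * @copy bytes (at most PAGE_SIZE), writes into the current page at
 * pfrag->offset on success, and advances the offset so later callers can
 * reuse the remainder of the page:
 *
 *	if (!skb_page_frag_refill(copy, pfrag, sk->sk_allocation))
 *		return -ENOMEM;
 *	memcpy(page_address(pfrag->page) + pfrag->offset, data, copy);
 *	pfrag->offset += copy;
 */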
2008 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2010 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2011 return true;
2013 sk_enter_memory_pressure(sk);
2014 sk_stream_moderate_sndbuf(sk);
2015 return false;
2017 EXPORT_SYMBOL(sk_page_frag_refill);
2019 static void __lock_sock(struct sock *sk)
2020 __releases(&sk->sk_lock.slock)
2021 __acquires(&sk->sk_lock.slock)
2023 DEFINE_WAIT(wait);
2025 for (;;) {
2026 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2027 TASK_UNINTERRUPTIBLE);
2028 spin_unlock_bh(&sk->sk_lock.slock);
2029 schedule();
2030 spin_lock_bh(&sk->sk_lock.slock);
2031 if (!sock_owned_by_user(sk))
2032 break;
2034 finish_wait(&sk->sk_lock.wq, &wait);
2037 static void __release_sock(struct sock *sk)
2038 __releases(&sk->sk_lock.slock)
2039 __acquires(&sk->sk_lock.slock)
2041 struct sk_buff *skb, *next;
2043 while ((skb = sk->sk_backlog.head) != NULL) {
2044 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2046 spin_unlock_bh(&sk->sk_lock.slock);
2048 do {
2049 next = skb->next;
2050 prefetch(next);
2051 WARN_ON_ONCE(skb_dst_is_noref(skb));
2052 skb->next = NULL;
2053 sk_backlog_rcv(sk, skb);
2055 cond_resched();
2057 skb = next;
2058 } while (skb != NULL);
2060 spin_lock_bh(&sk->sk_lock.slock);
2064 * Doing the zeroing here guarantees we cannot loop forever
2065 * while a wild producer attempts to flood us.
2067 sk->sk_backlog.len = 0;
2070 void __sk_flush_backlog(struct sock *sk)
2072 spin_lock_bh(&sk->sk_lock.slock);
2073 __release_sock(sk);
2074 spin_unlock_bh(&sk->sk_lock.slock);
2078 * sk_wait_data - wait for data to arrive at sk_receive_queue
2079 * @sk: sock to wait on
2080 * @timeo: for how long
2081 * @skb: last skb seen on sk_receive_queue
2083 * Now socket state including sk->sk_err is changed only under lock,
2084 * hence we may omit checks after joining wait queue.
2085 * We check receive queue before schedule() only as optimization;
2086 * it is very likely that release_sock() added new data.
2088 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2090 int rc;
2091 DEFINE_WAIT(wait);
2093 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2094 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2095 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
2096 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2097 finish_wait(sk_sleep(sk), &wait);
2098 return rc;
2100 EXPORT_SYMBOL(sk_wait_data);
2103 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2104 * @sk: socket
2105 * @size: memory size to allocate
2106 * @kind: allocation type
2108 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2109 * rmem allocation. This function assumes that protocols which have
2110 * memory_pressure use sk_wmem_queued as write buffer accounting.
2112 int __sk_mem_schedule(struct sock *sk, int size, int kind)
2114 struct proto *prot = sk->sk_prot;
2115 int amt = sk_mem_pages(size);
2116 long allocated;
2118 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2120 allocated = sk_memory_allocated_add(sk, amt);
2122 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2123 !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
2124 goto suppress_allocation;
2126 /* Under limit. */
2127 if (allocated <= sk_prot_mem_limits(sk, 0)) {
2128 sk_leave_memory_pressure(sk);
2129 return 1;
2132 /* Under pressure. */
2133 if (allocated > sk_prot_mem_limits(sk, 1))
2134 sk_enter_memory_pressure(sk);
2136 /* Over hard limit. */
2137 if (allocated > sk_prot_mem_limits(sk, 2))
2138 goto suppress_allocation;
2140 /* guarantee minimum buffer size under pressure */
2141 if (kind == SK_MEM_RECV) {
2142 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2143 return 1;
2145 } else { /* SK_MEM_SEND */
2146 if (sk->sk_type == SOCK_STREAM) {
2147 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2148 return 1;
2149 } else if (atomic_read(&sk->sk_wmem_alloc) <
2150 prot->sysctl_wmem[0])
2151 return 1;
2154 if (sk_has_memory_pressure(sk)) {
2155 int alloc;
2157 if (!sk_under_memory_pressure(sk))
2158 return 1;
2159 alloc = sk_sockets_allocated_read_positive(sk);
2160 if (sk_prot_mem_limits(sk, 2) > alloc *
2161 sk_mem_pages(sk->sk_wmem_queued +
2162 atomic_read(&sk->sk_rmem_alloc) +
2163 sk->sk_forward_alloc))
2164 return 1;
2167 suppress_allocation:
2169 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2170 sk_stream_moderate_sndbuf(sk);
2172 /* Fail only if socket is _under_ its sndbuf.
2173 * In this case we cannot block, so we have to fail.
2175 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2176 return 1;
2179 trace_sock_exceed_buf_limit(sk, prot, allocated);
2181 /* Alas. Undo changes. */
2182 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2184 sk_memory_allocated_sub(sk, amt);
2186 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2187 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2189 return 0;
2191 EXPORT_SYMBOL(__sk_mem_schedule);
2194 * __sk_mem_reclaim - reclaim memory_allocated
2195 * @sk: socket
2196 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2198 void __sk_mem_reclaim(struct sock *sk, int amount)
2200 amount >>= SK_MEM_QUANTUM_SHIFT;
2201 sk_memory_allocated_sub(sk, amount);
2202 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2204 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2205 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2207 if (sk_under_memory_pressure(sk) &&
2208 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2209 sk_leave_memory_pressure(sk);
2211 EXPORT_SYMBOL(__sk_mem_reclaim);
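/*
 * Example (illustrative, not part of this file): the accounting above is
 * normally reached through the inline wrappers in include/net/sock.h.
 * sk_rmem_schedule() falls back to __sk_mem_schedule(sk, size, SK_MEM_RECV)
 * when sk_forward_alloc is too small, and sk_mem_reclaim() returns unused
 * quanta via __sk_mem_reclaim(). example_queue_rcv() is a hypothetical,
 * simplified enqueue path (compare sock_queue_rcv_skb()).
 */
static int example_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	/* Charges sk_rmem_alloc/sk_forward_alloc and sets skb->destructor. */
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return 0;
}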
2213 int sk_set_peek_off(struct sock *sk, int val)
2215 if (val < 0)
2216 return -EINVAL;
2218 sk->sk_peek_off = val;
2219 return 0;
2221 EXPORT_SYMBOL_GPL(sk_set_peek_off);
2224 * Set of default routines for initialising struct proto_ops when
2225 * the protocol does not support a particular function. In certain
2226 * cases where it makes no sense for a protocol to have a "do nothing"
2227 * function, some default processing is provided.
2230 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2232 return -EOPNOTSUPP;
2234 EXPORT_SYMBOL(sock_no_bind);
2236 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2237 int len, int flags)
2239 return -EOPNOTSUPP;
2241 EXPORT_SYMBOL(sock_no_connect);
2243 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2245 return -EOPNOTSUPP;
2247 EXPORT_SYMBOL(sock_no_socketpair);
2249 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2251 return -EOPNOTSUPP;
2253 EXPORT_SYMBOL(sock_no_accept);
2255 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2256 int *len, int peer)
2258 return -EOPNOTSUPP;
2260 EXPORT_SYMBOL(sock_no_getname);
2262 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2264 return 0;
2266 EXPORT_SYMBOL(sock_no_poll);
2268 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2270 return -EOPNOTSUPP;
2272 EXPORT_SYMBOL(sock_no_ioctl);
2274 int sock_no_listen(struct socket *sock, int backlog)
2276 return -EOPNOTSUPP;
2278 EXPORT_SYMBOL(sock_no_listen);
2280 int sock_no_shutdown(struct socket *sock, int how)
2282 return -EOPNOTSUPP;
2284 EXPORT_SYMBOL(sock_no_shutdown);
2286 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2287 char __user *optval, unsigned int optlen)
2289 return -EOPNOTSUPP;
2291 EXPORT_SYMBOL(sock_no_setsockopt);
2293 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2294 char __user *optval, int __user *optlen)
2296 return -EOPNOTSUPP;
2298 EXPORT_SYMBOL(sock_no_getsockopt);
2300 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2302 return -EOPNOTSUPP;
2304 EXPORT_SYMBOL(sock_no_sendmsg);
2306 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2307 int flags)
2309 return -EOPNOTSUPP;
2311 EXPORT_SYMBOL(sock_no_recvmsg);
2313 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2315 /* Mirror missing mmap method error code */
2316 return -ENODEV;
2318 EXPORT_SYMBOL(sock_no_mmap);
2320 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2322 ssize_t res;
2323 struct msghdr msg = {.msg_flags = flags};
2324 struct kvec iov;
2325 char *kaddr = kmap(page);
2326 iov.iov_base = kaddr + offset;
2327 iov.iov_len = size;
2328 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2329 kunmap(page);
2330 return res;
2332 EXPORT_SYMBOL(sock_no_sendpage);
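/*
 * Example (illustrative, not part of this file): a protocol family that
 * implements only a few operations can plug the sock_no_*() stubs above
 * into the remaining struct proto_ops slots. example_dgram_ops,
 * example_release(), example_sendmsg() and example_recvmsg() are
 * hypothetical; every other symbol is defined earlier in this file.
 */
static int example_release(struct socket *sock);
static int example_sendmsg(struct socket *sock, struct msghdr *m, size_t len);
static int example_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
			   int flags);

static const struct proto_ops example_dgram_ops = {
	.family		= PF_PACKET,	/* assumed family, for illustration */
	.owner		= THIS_MODULE,
	.release	= example_release,
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= sock_no_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= example_sendmsg,
	.recvmsg	= example_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};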
2335 * Default Socket Callbacks
2338 static void sock_def_wakeup(struct sock *sk)
2340 struct socket_wq *wq;
2342 rcu_read_lock();
2343 wq = rcu_dereference(sk->sk_wq);
2344 if (skwq_has_sleeper(wq))
2345 wake_up_interruptible_all(&wq->wait);
2346 rcu_read_unlock();
2349 static void sock_def_error_report(struct sock *sk)
2351 struct socket_wq *wq;
2353 rcu_read_lock();
2354 wq = rcu_dereference(sk->sk_wq);
2355 if (skwq_has_sleeper(wq))
2356 wake_up_interruptible_poll(&wq->wait, POLLERR);
2357 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2358 rcu_read_unlock();
2361 static void sock_def_readable(struct sock *sk)
2363 struct socket_wq *wq;
2365 rcu_read_lock();
2366 wq = rcu_dereference(sk->sk_wq);
2367 if (skwq_has_sleeper(wq))
2368 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2369 POLLRDNORM | POLLRDBAND);
2370 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2371 rcu_read_unlock();
2374 static void sock_def_write_space(struct sock *sk)
2376 struct socket_wq *wq;
2378 rcu_read_lock();
2380 /* Do not wake up a writer until he can make "significant"
2381 * progress. --DaveM
2383 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2384 wq = rcu_dereference(sk->sk_wq);
2385 if (skwq_has_sleeper(wq))
2386 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2387 POLLWRNORM | POLLWRBAND);
2389 /* Should agree with poll, otherwise some programs break */
2390 if (sock_writeable(sk))
2391 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2394 rcu_read_unlock();
2397 static void sock_def_destruct(struct sock *sk)
2401 void sk_send_sigurg(struct sock *sk)
2403 if (sk->sk_socket && sk->sk_socket->file)
2404 if (send_sigurg(&sk->sk_socket->file->f_owner))
2405 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2407 EXPORT_SYMBOL(sk_send_sigurg);
2409 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2410 unsigned long expires)
2412 if (!mod_timer(timer, expires))
2413 sock_hold(sk);
2415 EXPORT_SYMBOL(sk_reset_timer);
2417 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2419 if (del_timer(timer))
2420 __sock_put(sk);
2422 EXPORT_SYMBOL(sk_stop_timer);
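/*
 * Example (illustrative, not part of this file): sk_reset_timer() takes a
 * reference on the socket when it arms the timer and sk_stop_timer() drops
 * it when a pending timer is cancelled, so the handler itself must call
 * sock_put() when it fires. A sketch using the setup_timer()/unsigned long
 * callback API of this kernel; the example_* names are hypothetical.
 */
static void example_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... per-protocol timeout processing ... */
	bh_unlock_sock(sk);
	sock_put(sk);	/* pairs with the hold taken by sk_reset_timer() */
}

static void example_arm_timer(struct sock *sk)
{
	setup_timer(&sk->sk_timer, example_timer_handler, (unsigned long)sk);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ);
}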
2424 void sock_init_data(struct socket *sock, struct sock *sk)
2426 skb_queue_head_init(&sk->sk_receive_queue);
2427 skb_queue_head_init(&sk->sk_write_queue);
2428 skb_queue_head_init(&sk->sk_error_queue);
2430 sk->sk_send_head = NULL;
2432 init_timer(&sk->sk_timer);
2434 sk->sk_allocation = GFP_KERNEL;
2435 sk->sk_rcvbuf = sysctl_rmem_default;
2436 sk->sk_sndbuf = sysctl_wmem_default;
2437 sk->sk_state = TCP_CLOSE;
2438 sk_set_socket(sk, sock);
2440 sock_set_flag(sk, SOCK_ZAPPED);
2442 if (sock) {
2443 sk->sk_type = sock->type;
2444 sk->sk_wq = sock->wq;
2445 sock->sk = sk;
2446 } else
2447 sk->sk_wq = NULL;
2449 rwlock_init(&sk->sk_callback_lock);
2450 lockdep_set_class_and_name(&sk->sk_callback_lock,
2451 af_callback_keys + sk->sk_family,
2452 af_family_clock_key_strings[sk->sk_family]);
2454 sk->sk_state_change = sock_def_wakeup;
2455 sk->sk_data_ready = sock_def_readable;
2456 sk->sk_write_space = sock_def_write_space;
2457 sk->sk_error_report = sock_def_error_report;
2458 sk->sk_destruct = sock_def_destruct;
2460 sk->sk_frag.page = NULL;
2461 sk->sk_frag.offset = 0;
2462 sk->sk_peek_off = -1;
2464 sk->sk_peer_pid = NULL;
2465 sk->sk_peer_cred = NULL;
2466 sk->sk_write_pending = 0;
2467 sk->sk_rcvlowat = 1;
2468 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2469 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2471 sk->sk_stamp = ktime_set(-1L, 0);
2473 #ifdef CONFIG_NET_RX_BUSY_POLL
2474 sk->sk_napi_id = 0;
2475 sk->sk_ll_usec = sysctl_net_busy_read;
2476 #endif
2478 sk->sk_max_pacing_rate = ~0U;
2479 sk->sk_pacing_rate = ~0U;
2480 sk->sk_incoming_cpu = -1;
2482 * Before updating sk_refcnt, we must commit prior changes to memory
2483 * (see Documentation/RCU/rculist_nulls.txt for details)
2485 smp_wmb();
2486 atomic_set(&sk->sk_refcnt, 1);
2487 atomic_set(&sk->sk_drops, 0);
2489 EXPORT_SYMBOL(sock_init_data);
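/*
 * Example (illustrative, not part of this file): the usual create path.
 * A protocol allocates the sock with sk_alloc(), lets sock_init_data()
 * install the defaults above, and then overrides the callbacks it cares
 * about. example_proto, example_create(), example_data_ready() and
 * example_destruct() are hypothetical and assumed defined elsewhere.
 */
static struct proto example_proto;		/* assumed fully defined elsewhere */
static void example_data_ready(struct sock *sk);
static void example_destruct(struct sock *sk);

static int example_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &example_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	/* Replace the sock_def_*() defaults installed by sock_init_data(). */
	sk->sk_data_ready = example_data_ready;
	sk->sk_destruct   = example_destruct;
	sk->sk_protocol   = protocol;

	return 0;
}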
2491 void lock_sock_nested(struct sock *sk, int subclass)
2493 might_sleep();
2494 spin_lock_bh(&sk->sk_lock.slock);
2495 if (sk->sk_lock.owned)
2496 __lock_sock(sk);
2497 sk->sk_lock.owned = 1;
2498 spin_unlock(&sk->sk_lock.slock);
2500 * The sk_lock has mutex_lock() semantics here:
2502 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2503 local_bh_enable();
2505 EXPORT_SYMBOL(lock_sock_nested);
2507 void release_sock(struct sock *sk)
2509 spin_lock_bh(&sk->sk_lock.slock);
2510 if (sk->sk_backlog.tail)
2511 __release_sock(sk);
2513 /* Warning : release_cb() might need to release sk ownership,
2514 * i.e. call sock_release_ownership(sk) before us.
2516 if (sk->sk_prot->release_cb)
2517 sk->sk_prot->release_cb(sk);
2519 sock_release_ownership(sk);
2520 if (waitqueue_active(&sk->sk_lock.wq))
2521 wake_up(&sk->sk_lock.wq);
2522 spin_unlock_bh(&sk->sk_lock.slock);
2524 EXPORT_SYMBOL(release_sock);
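/*
 * Example (illustrative, not part of this file): the process-context
 * locking pattern that lock_sock()/release_sock() implement. While the
 * socket is owned, softirq input is diverted to the backlog, and
 * release_sock() runs __release_sock() to flush it. example_setsockopt()
 * and its option handling are hypothetical.
 */
static int example_setsockopt(struct sock *sk, int val)
{
	int err = 0;

	lock_sock(sk);			/* may sleep; marks the socket owned */
	if (val < 0)
		err = -EINVAL;
	else
		sk->sk_rcvlowat = val ? : 1;
	release_sock(sk);		/* also processes any queued backlog */
	return err;
}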
2527 * lock_sock_fast - fast version of lock_sock
2528 * @sk: socket
2530 * This version should be used for very small sections, where the process won't block.
2531 * Return false if the fast path is taken:
2532 * sk_lock.slock locked, owned = 0, BH disabled
2533 * Return true if the slow path is taken:
2534 * sk_lock.slock unlocked, owned = 1, BH enabled
2536 bool lock_sock_fast(struct sock *sk)
2538 might_sleep();
2539 spin_lock_bh(&sk->sk_lock.slock);
2541 if (!sk->sk_lock.owned)
2543 * Note : We must disable BH
2545 return false;
2547 __lock_sock(sk);
2548 sk->sk_lock.owned = 1;
2549 spin_unlock(&sk->sk_lock.slock);
2551 * The sk_lock has mutex_lock() semantics here:
2553 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2554 local_bh_enable();
2555 return true;
2557 EXPORT_SYMBOL(lock_sock_fast);
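/*
 * Example (illustrative, not part of this file): lock_sock_fast() must be
 * paired with unlock_sock_fast(), which releases whichever form of the
 * lock was actually taken. example_read_drops() is hypothetical; reading
 * sk_drops is just a stand-in for a short critical section.
 */
static int example_read_drops(struct sock *sk)
{
	bool slow;
	int val;

	slow = lock_sock_fast(sk);
	val = atomic_read(&sk->sk_drops);
	unlock_sock_fast(sk, slow);

	return val;
}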
2559 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2561 struct timeval tv;
2562 if (!sock_flag(sk, SOCK_TIMESTAMP))
2563 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2564 tv = ktime_to_timeval(sk->sk_stamp);
2565 if (tv.tv_sec == -1)
2566 return -ENOENT;
2567 if (tv.tv_sec == 0) {
2568 sk->sk_stamp = ktime_get_real();
2569 tv = ktime_to_timeval(sk->sk_stamp);
2571 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2573 EXPORT_SYMBOL(sock_get_timestamp);
2575 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2577 struct timespec ts;
2578 if (!sock_flag(sk, SOCK_TIMESTAMP))
2579 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2580 ts = ktime_to_timespec(sk->sk_stamp);
2581 if (ts.tv_sec == -1)
2582 return -ENOENT;
2583 if (ts.tv_sec == 0) {
2584 sk->sk_stamp = ktime_get_real();
2585 ts = ktime_to_timespec(sk->sk_stamp);
2587 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2589 EXPORT_SYMBOL(sock_get_timestampns);
2591 void sock_enable_timestamp(struct sock *sk, int flag)
2593 if (!sock_flag(sk, flag)) {
2594 unsigned long previous_flags = sk->sk_flags;
2596 sock_set_flag(sk, flag);
2598 * we just set one of the two flags which require net
2599 * time stamping, but time stamping might have been on
2600 * already because of the other one
2602 if (sock_needs_netstamp(sk) &&
2603 !(previous_flags & SK_FLAGS_TIMESTAMP))
2604 net_enable_timestamp();
2608 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2609 int level, int type)
2611 struct sock_exterr_skb *serr;
2612 struct sk_buff *skb;
2613 int copied, err;
2615 err = -EAGAIN;
2616 skb = sock_dequeue_err_skb(sk);
2617 if (skb == NULL)
2618 goto out;
2620 copied = skb->len;
2621 if (copied > len) {
2622 msg->msg_flags |= MSG_TRUNC;
2623 copied = len;
2625 err = skb_copy_datagram_msg(skb, 0, msg, copied);
2626 if (err)
2627 goto out_free_skb;
2629 sock_recv_timestamp(msg, sk, skb);
2631 serr = SKB_EXT_ERR(skb);
2632 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2634 msg->msg_flags |= MSG_ERRQUEUE;
2635 err = copied;
2637 out_free_skb:
2638 kfree_skb(skb);
2639 out:
2640 return err;
2642 EXPORT_SYMBOL(sock_recv_errqueue);
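/*
 * Example (illustrative, not part of this file): protocols that expose an
 * error queue usually divert MSG_ERRQUEUE reads to sock_recv_errqueue() at
 * the top of their recvmsg handler. example_dgram_recvmsg() is
 * hypothetical, and SOL_IP/IP_RECVERR are shown as one plausible choice of
 * cmsg level/type; the values become the put_cmsg() arguments above.
 */
static int example_dgram_recvmsg(struct sock *sk, struct msghdr *msg,
				 size_t len, int noblock, int flags,
				 int *addr_len)
{
	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	/* ... normal datagram receive path ... */
	return -EOPNOTSUPP;
}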
2645 * Get a socket option on a socket.
2647 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2648 * asynchronous errors should be reported by getsockopt. We assume
2649 * this means if you specify SO_ERROR (otherwise what's the point of it?).
2651 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2652 char __user *optval, int __user *optlen)
2654 struct sock *sk = sock->sk;
2656 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2658 EXPORT_SYMBOL(sock_common_getsockopt);
2660 #ifdef CONFIG_COMPAT
2661 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2662 char __user *optval, int __user *optlen)
2664 struct sock *sk = sock->sk;
2666 if (sk->sk_prot->compat_getsockopt != NULL)
2667 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2668 optval, optlen);
2669 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2671 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2672 #endif
2674 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2675 int flags)
2677 struct sock *sk = sock->sk;
2678 int addr_len = 0;
2679 int err;
2681 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
2682 flags & ~MSG_DONTWAIT, &addr_len);
2683 if (err >= 0)
2684 msg->msg_namelen = addr_len;
2685 return err;
2687 EXPORT_SYMBOL(sock_common_recvmsg);
2690 * Set socket options on an inet socket.
2692 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2693 char __user *optval, unsigned int optlen)
2695 struct sock *sk = sock->sk;
2697 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2699 EXPORT_SYMBOL(sock_common_setsockopt);
2701 #ifdef CONFIG_COMPAT
2702 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2703 char __user *optval, unsigned int optlen)
2705 struct sock *sk = sock->sk;
2707 if (sk->sk_prot->compat_setsockopt != NULL)
2708 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2709 optval, optlen);
2710 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2712 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2713 #endif
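/*
 * Example (illustrative, not part of this file): the sock_common_*()
 * helpers above let a struct proto_ops forward directly to the underlying
 * struct proto. A partial, hypothetical ops table (example_stream_ops);
 * the compat entries exist only under CONFIG_COMPAT, mirroring the
 * definitions above. Remaining fields are left NULL for brevity.
 */
static const struct proto_ops example_stream_ops = {
	.family		= PF_INET,	/* illustrative */
	.owner		= THIS_MODULE,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.recvmsg	= sock_common_recvmsg,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};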
2715 void sk_common_release(struct sock *sk)
2717 if (sk->sk_prot->destroy)
2718 sk->sk_prot->destroy(sk);
2721 * Observation: when sk_common_release() is called, processes have
2722 * no access to the socket, but the network stack still does.
2723 * Step one, detach it from networking:
2725 * A. Remove from hash tables.
2728 sk->sk_prot->unhash(sk);
2731 * At this point the socket cannot receive new packets, but it is possible
2732 * that some packets are still in flight because another CPU may have done
2733 * the hash table lookup before we unhashed the socket. They will reach the
2734 * receive queue and be purged by the socket destructor.
2736 * We may also still have packets pending on the receive queue and, probably,
2737 * our own packets waiting in device queues. sock_destroy will drain the
2738 * receive queue, but transmitted packets will delay socket destruction
2739 * until the last reference is released.
2742 sock_orphan(sk);
2744 xfrm_sk_free_policy(sk);
2746 sk_refcnt_debug_release(sk);
2748 sock_put(sk);
2750 EXPORT_SYMBOL(sk_common_release);
2752 #ifdef CONFIG_PROC_FS
2753 #define PROTO_INUSE_NR 64 /* should be enough for the first time */
2754 struct prot_inuse {
2755 int val[PROTO_INUSE_NR];
2758 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2760 #ifdef CONFIG_NET_NS
2761 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2763 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2765 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2767 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2769 int cpu, idx = prot->inuse_idx;
2770 int res = 0;
2772 for_each_possible_cpu(cpu)
2773 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2775 return res >= 0 ? res : 0;
2777 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2779 static int __net_init sock_inuse_init_net(struct net *net)
2781 net->core.inuse = alloc_percpu(struct prot_inuse);
2782 return net->core.inuse ? 0 : -ENOMEM;
2785 static void __net_exit sock_inuse_exit_net(struct net *net)
2787 free_percpu(net->core.inuse);
2790 static struct pernet_operations net_inuse_ops = {
2791 .init = sock_inuse_init_net,
2792 .exit = sock_inuse_exit_net,
2795 static __init int net_inuse_init(void)
2797 if (register_pernet_subsys(&net_inuse_ops))
2798 panic("Cannot initialize net inuse counters");
2800 return 0;
2803 core_initcall(net_inuse_init);
2804 #else
2805 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2807 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2809 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2811 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2813 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2815 int cpu, idx = prot->inuse_idx;
2816 int res = 0;
2818 for_each_possible_cpu(cpu)
2819 res += per_cpu(prot_inuse, cpu).val[idx];
2821 return res >= 0 ? res : 0;
2823 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2824 #endif
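/*
 * Example (illustrative, not part of this file): protocols update these
 * counters when they add a socket to or remove it from their lookup
 * tables, so that /proc/net/protocols (below) can report per-protocol
 * socket counts. example_insert()/example_remove() are hypothetical
 * helpers; callers typically run with BH disabled, e.g. under the hash
 * bucket spinlock.
 */
static void example_insert(struct sock *sk)
{
	/* ... link sk into the protocol's hash table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_remove(struct sock *sk)
{
	/* ... unlink sk from the protocol's hash table ... */
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}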
2826 static void assign_proto_idx(struct proto *prot)
2828 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2830 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2831 pr_err("PROTO_INUSE_NR exhausted\n");
2832 return;
2835 set_bit(prot->inuse_idx, proto_inuse_idx);
2838 static void release_proto_idx(struct proto *prot)
2840 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2841 clear_bit(prot->inuse_idx, proto_inuse_idx);
2843 #else
2844 static inline void assign_proto_idx(struct proto *prot)
2848 static inline void release_proto_idx(struct proto *prot)
2851 #endif
2853 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2855 if (!rsk_prot)
2856 return;
2857 kfree(rsk_prot->slab_name);
2858 rsk_prot->slab_name = NULL;
2859 kmem_cache_destroy(rsk_prot->slab);
2860 rsk_prot->slab = NULL;
2863 static int req_prot_init(const struct proto *prot)
2865 struct request_sock_ops *rsk_prot = prot->rsk_prot;
2867 if (!rsk_prot)
2868 return 0;
2870 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
2871 prot->name);
2872 if (!rsk_prot->slab_name)
2873 return -ENOMEM;
2875 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
2876 rsk_prot->obj_size, 0,
2877 prot->slab_flags, NULL);
2879 if (!rsk_prot->slab) {
2880 pr_crit("%s: Can't create request sock SLAB cache!\n",
2881 prot->name);
2882 return -ENOMEM;
2884 return 0;
2887 int proto_register(struct proto *prot, int alloc_slab)
2889 if (alloc_slab) {
2890 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2891 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2892 NULL);
2894 if (prot->slab == NULL) {
2895 pr_crit("%s: Can't create sock SLAB cache!\n",
2896 prot->name);
2897 goto out;
2900 if (req_prot_init(prot))
2901 goto out_free_request_sock_slab;
2903 if (prot->twsk_prot != NULL) {
2904 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2906 if (prot->twsk_prot->twsk_slab_name == NULL)
2907 goto out_free_request_sock_slab;
2909 prot->twsk_prot->twsk_slab =
2910 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2911 prot->twsk_prot->twsk_obj_size,
2913 prot->slab_flags,
2914 NULL);
2915 if (prot->twsk_prot->twsk_slab == NULL)
2916 goto out_free_timewait_sock_slab_name;
2920 mutex_lock(&proto_list_mutex);
2921 list_add(&prot->node, &proto_list);
2922 assign_proto_idx(prot);
2923 mutex_unlock(&proto_list_mutex);
2924 return 0;
2926 out_free_timewait_sock_slab_name:
2927 kfree(prot->twsk_prot->twsk_slab_name);
2928 out_free_request_sock_slab:
2929 req_prot_cleanup(prot->rsk_prot);
2931 kmem_cache_destroy(prot->slab);
2932 prot->slab = NULL;
2933 out:
2934 return -ENOBUFS;
2936 EXPORT_SYMBOL(proto_register);
2938 void proto_unregister(struct proto *prot)
2940 mutex_lock(&proto_list_mutex);
2941 release_proto_idx(prot);
2942 list_del(&prot->node);
2943 mutex_unlock(&proto_list_mutex);
2945 kmem_cache_destroy(prot->slab);
2946 prot->slab = NULL;
2948 req_prot_cleanup(prot->rsk_prot);
2950 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2951 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2952 kfree(prot->twsk_prot->twsk_slab_name);
2953 prot->twsk_prot->twsk_slab = NULL;
2956 EXPORT_SYMBOL(proto_unregister);
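/*
 * Example (illustrative, not part of this file): a protocol pairs
 * proto_register() in its init path with proto_unregister() on exit.
 * Passing alloc_slab = 1 requests a dedicated kmem cache of obj_size bytes
 * per socket. example_sock, example_prot and the init/exit functions are
 * hypothetical.
 */
struct example_sock {
	struct sock	sk;	/* must come first */
	/* protocol-private fields follow */
};

static struct proto example_prot = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_init(void)
{
	return proto_register(&example_prot, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_prot);
}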
2958 #ifdef CONFIG_PROC_FS
2959 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2960 __acquires(proto_list_mutex)
2962 mutex_lock(&proto_list_mutex);
2963 return seq_list_start_head(&proto_list, *pos);
2966 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2968 return seq_list_next(v, &proto_list, pos);
2971 static void proto_seq_stop(struct seq_file *seq, void *v)
2972 __releases(proto_list_mutex)
2974 mutex_unlock(&proto_list_mutex);
2977 static char proto_method_implemented(const void *method)
2979 return method == NULL ? 'n' : 'y';
2981 static long sock_prot_memory_allocated(struct proto *proto)
2983 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2986 static char *sock_prot_memory_pressure(struct proto *proto)
2988 return proto->memory_pressure != NULL ?
2989 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2992 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2995 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2996 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2997 proto->name,
2998 proto->obj_size,
2999 sock_prot_inuse_get(seq_file_net(seq), proto),
3000 sock_prot_memory_allocated(proto),
3001 sock_prot_memory_pressure(proto),
3002 proto->max_header,
3003 proto->slab == NULL ? "no" : "yes",
3004 module_name(proto->owner),
3005 proto_method_implemented(proto->close),
3006 proto_method_implemented(proto->connect),
3007 proto_method_implemented(proto->disconnect),
3008 proto_method_implemented(proto->accept),
3009 proto_method_implemented(proto->ioctl),
3010 proto_method_implemented(proto->init),
3011 proto_method_implemented(proto->destroy),
3012 proto_method_implemented(proto->shutdown),
3013 proto_method_implemented(proto->setsockopt),
3014 proto_method_implemented(proto->getsockopt),
3015 proto_method_implemented(proto->sendmsg),
3016 proto_method_implemented(proto->recvmsg),
3017 proto_method_implemented(proto->sendpage),
3018 proto_method_implemented(proto->bind),
3019 proto_method_implemented(proto->backlog_rcv),
3020 proto_method_implemented(proto->hash),
3021 proto_method_implemented(proto->unhash),
3022 proto_method_implemented(proto->get_port),
3023 proto_method_implemented(proto->enter_memory_pressure));
3026 static int proto_seq_show(struct seq_file *seq, void *v)
3028 if (v == &proto_list)
3029 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3030 "protocol",
3031 "size",
3032 "sockets",
3033 "memory",
3034 "press",
3035 "maxhdr",
3036 "slab",
3037 "module",
3038 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3039 else
3040 proto_seq_printf(seq, list_entry(v, struct proto, node));
3041 return 0;
3044 static const struct seq_operations proto_seq_ops = {
3045 .start = proto_seq_start,
3046 .next = proto_seq_next,
3047 .stop = proto_seq_stop,
3048 .show = proto_seq_show,
3051 static int proto_seq_open(struct inode *inode, struct file *file)
3053 return seq_open_net(inode, file, &proto_seq_ops,
3054 sizeof(struct seq_net_private));
3057 static const struct file_operations proto_seq_fops = {
3058 .owner = THIS_MODULE,
3059 .open = proto_seq_open,
3060 .read = seq_read,
3061 .llseek = seq_lseek,
3062 .release = seq_release_net,
3065 static __net_init int proto_init_net(struct net *net)
3067 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
3068 return -ENOMEM;
3070 return 0;
3073 static __net_exit void proto_exit_net(struct net *net)
3075 remove_proc_entry("protocols", net->proc_net);
3079 static __net_initdata struct pernet_operations proto_net_ops = {
3080 .init = proto_init_net,
3081 .exit = proto_exit_net,
3084 static int __init proto_init(void)
3086 return register_pernet_subsys(&proto_net_ops);
3089 subsys_initcall(proto_init);
3091 #endif /* PROC_FS */