[linux/fpc-iii.git] / net / core / sock.c
blob 9fb1c073d0c428a39b2812c7fe35fa72e0b4feb5
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Generic socket support routines. Memory allocators, socket lock/release
7 * handler for protocols to use and generic option handler.
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Alan Cox, <A.Cox@swansea.ac.uk>
15 * Fixes:
16 * Alan Cox : Numerous verify_area() problems
17 * Alan Cox : Connecting on a connecting socket
18 * now returns an error for tcp.
19 * Alan Cox : sock->protocol is set correctly.
20 * and is not sometimes left as 0.
21 * Alan Cox : connect handles icmp errors on a
22 * connect properly. Unfortunately there
23 * is a restart syscall nasty there. I
24 * can't match BSD without hacking the C
25 * library. Ideas urgently sought!
26 * Alan Cox : Disallow bind() to addresses that are
27 * not ours - especially broadcast ones!!
28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
30 * instead they leave that for the DESTROY timer.
31 * Alan Cox : Clean up error flag in accept
32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer
33 * was buggy. Put a remove_sock() in the handler
34 * for memory when we hit 0. Also altered the timer
35 * code. The ACK stuff can wait and needs major
36 * TCP layer surgery.
37 * Alan Cox : Fixed TCP ack bug, removed remove sock
38 * and fixed timer/inet_bh race.
39 * Alan Cox : Added zapped flag for TCP
40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
45 * Rick Sladkey : Relaxed UDP rules for matching packets.
46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
47 * Pauline Middelink : identd support
48 * Alan Cox : Fixed connect() taking signals I think.
49 * Alan Cox : SO_LINGER supported
50 * Alan Cox : Error reporting fixes
51 * Anonymous : inet_create tidied up (sk->reuse setting)
52 * Alan Cox : inet sockets don't set sk->type!
53 * Alan Cox : Split socket option code
54 * Alan Cox : Callbacks
55 * Alan Cox : Nagle flag for Charles & Johannes stuff
56 * Alex : Removed restriction on inet fioctl
57 * Alan Cox : Splitting INET from NET core
58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
60 * Alan Cox : Split IP from generic code
61 * Alan Cox : New kfree_skbmem()
62 * Alan Cox : Make SO_DEBUG superuser only.
63 * Alan Cox : Allow anyone to clear SO_DEBUG
64 * (compatibility fix)
65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
66 * Alan Cox : Allocator for a socket is settable.
67 * Alan Cox : SO_ERROR includes soft errors.
68 * Alan Cox : Allow NULL arguments on some SO_ opts
69 * Alan Cox : Generic socket allocation to make hooks
70 * easier (suggested by Craig Metz).
71 * Michael Pall : SO_ERROR returns positive errno again
72 * Steve Whitehouse: Added default destructor to free
73 * protocol private data.
74 * Steve Whitehouse: Added various other default routines
75 * common to several socket families.
76 * Chris Evans : Call suser() check last on F_SETOWN
77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
79 * Andi Kleen : Fix write_space callback
80 * Chris Evans : Security fixes - signedness again
81 * Arnaldo C. Melo : cleanups, use skb_queue_purge
83 * To Fix:
86 * This program is free software; you can redistribute it and/or
87 * modify it under the terms of the GNU General Public License
88 * as published by the Free Software Foundation; either version
89 * 2 of the License, or (at your option) any later version.
92 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
94 #include <linux/capability.h>
95 #include <linux/errno.h>
96 #include <linux/errqueue.h>
97 #include <linux/types.h>
98 #include <linux/socket.h>
99 #include <linux/in.h>
100 #include <linux/kernel.h>
101 #include <linux/module.h>
102 #include <linux/proc_fs.h>
103 #include <linux/seq_file.h>
104 #include <linux/sched.h>
105 #include <linux/timer.h>
106 #include <linux/string.h>
107 #include <linux/sockios.h>
108 #include <linux/net.h>
109 #include <linux/mm.h>
110 #include <linux/slab.h>
111 #include <linux/interrupt.h>
112 #include <linux/poll.h>
113 #include <linux/tcp.h>
114 #include <linux/init.h>
115 #include <linux/highmem.h>
116 #include <linux/user_namespace.h>
117 #include <linux/static_key.h>
118 #include <linux/memcontrol.h>
119 #include <linux/prefetch.h>
121 #include <asm/uaccess.h>
123 #include <linux/netdevice.h>
124 #include <net/protocol.h>
125 #include <linux/skbuff.h>
126 #include <net/net_namespace.h>
127 #include <net/request_sock.h>
128 #include <net/sock.h>
129 #include <linux/net_tstamp.h>
130 #include <net/xfrm.h>
131 #include <linux/ipsec.h>
132 #include <net/cls_cgroup.h>
133 #include <net/netprio_cgroup.h>
134 #include <linux/sock_diag.h>
136 #include <linux/filter.h>
138 #include <trace/events/sock.h>
140 #ifdef CONFIG_INET
141 #include <net/tcp.h>
142 #endif
144 #include <net/busy_poll.h>
146 static DEFINE_MUTEX(proto_list_mutex);
147 static LIST_HEAD(proto_list);
150 * sk_ns_capable - General socket capability test
151 * @sk: Socket to use a capability on or through
152 * @user_ns: The user namespace of the capability to use
153 * @cap: The capability to use
155  * Test to see if the opener of the socket had the capability @cap when
156  * the socket was created and if the current process has the capability
157  * @cap in the user namespace @user_ns.
159 bool sk_ns_capable(const struct sock *sk,
160 struct user_namespace *user_ns, int cap)
162 return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
163 ns_capable(user_ns, cap);
165 EXPORT_SYMBOL(sk_ns_capable);
168 * sk_capable - Socket global capability test
169 * @sk: Socket to use a capability on or through
170 * @cap: The global capability to use
172  * Test to see if the opener of the socket had the capability @cap when
173  * the socket was created and if the current process has the capability
174  * @cap in all user namespaces.
176 bool sk_capable(const struct sock *sk, int cap)
178 return sk_ns_capable(sk, &init_user_ns, cap);
180 EXPORT_SYMBOL(sk_capable);
183 * sk_net_capable - Network namespace socket capability test
184 * @sk: Socket to use a capability on or through
185 * @cap: The capability to use
187  * Test to see if the opener of the socket had the capability @cap when the
188  * socket was created and if the current process has the capability @cap
189  * over the network namespace the socket is a member of.
191 bool sk_net_capable(const struct sock *sk, int cap)
193 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
195 EXPORT_SYMBOL(sk_net_capable);
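/* Editor's note: an illustrative, hypothetical caller (not part of sock.c)
 * sketching how a protocol would typically gate a privileged per-socket
 * operation on the helpers above; example_set_privileged_option() is not a
 * real kernel function.
 */
static int example_set_privileged_option(struct sock *sk, int val)
{
	/* Both the socket opener and the current task must hold CAP_NET_ADMIN
	 * over the socket's network namespace.
	 */
	if (!sk_net_capable(sk, CAP_NET_ADMIN))
		return -EPERM;

	sk->sk_priority = val;
	return 0;
}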
198 #ifdef CONFIG_MEMCG_KMEM
199 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
201 struct proto *proto;
202 int ret = 0;
204 mutex_lock(&proto_list_mutex);
205 list_for_each_entry(proto, &proto_list, node) {
206 if (proto->init_cgroup) {
207 ret = proto->init_cgroup(memcg, ss);
208 if (ret)
209 goto out;
213 mutex_unlock(&proto_list_mutex);
214 return ret;
215 out:
216 list_for_each_entry_continue_reverse(proto, &proto_list, node)
217 if (proto->destroy_cgroup)
218 proto->destroy_cgroup(memcg);
219 mutex_unlock(&proto_list_mutex);
220 return ret;
223 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
225 struct proto *proto;
227 mutex_lock(&proto_list_mutex);
228 list_for_each_entry_reverse(proto, &proto_list, node)
229 if (proto->destroy_cgroup)
230 proto->destroy_cgroup(memcg);
231 mutex_unlock(&proto_list_mutex);
233 #endif
236 * Each address family might have different locking rules, so we have
237 * one slock key per address family:
239 static struct lock_class_key af_family_keys[AF_MAX];
240 static struct lock_class_key af_family_slock_keys[AF_MAX];
242 #if defined(CONFIG_MEMCG_KMEM)
243 struct static_key memcg_socket_limit_enabled;
244 EXPORT_SYMBOL(memcg_socket_limit_enabled);
245 #endif
248 * Make lock validator output more readable. (we pre-construct these
249 * strings build-time, so that runtime initialization of socket
250 * locks is fast):
252 static const char *const af_family_key_strings[AF_MAX+1] = {
253 "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
254 "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
255 "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
256 "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
257 "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
258 "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
259 "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
260 "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
261 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
262 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
263 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
264 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
265 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
266 "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX"
268 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
269 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
270 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
271 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
272 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
273 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
274 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
275 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
276 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
277 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
278 "slock-27" , "slock-28" , "slock-AF_CAN" ,
279 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
280 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
281 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
282 "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
284 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
285 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
286 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
287 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
288 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
289 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
290 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
291 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
292 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
293 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
294 "clock-27" , "clock-28" , "clock-AF_CAN" ,
295 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
296 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
297 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
298 "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX"
302 * sk_callback_lock locking rules are per-address-family,
303 * so split the lock classes by using a per-AF key:
305 static struct lock_class_key af_callback_keys[AF_MAX];
307 /* Take into consideration the size of the struct sk_buff overhead in the
308 * determination of these values, since that is non-constant across
309 * platforms. This makes socket queueing behavior and performance
310 * not depend upon such differences.
312 #define _SK_MEM_PACKETS 256
313 #define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
314 #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
315 #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
317 /* Run time adjustable parameters. */
318 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
319 EXPORT_SYMBOL(sysctl_wmem_max);
320 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
321 EXPORT_SYMBOL(sysctl_rmem_max);
322 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
323 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
325 /* Maximal space eaten by iovec or ancillary data plus some space */
326 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
327 EXPORT_SYMBOL(sysctl_optmem_max);
329 int sysctl_tstamp_allow_data __read_mostly = 1;
331 struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
332 EXPORT_SYMBOL_GPL(memalloc_socks);
335 * sk_set_memalloc - sets %SOCK_MEMALLOC
336 * @sk: socket to set it on
338 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
339 * It's the responsibility of the admin to adjust min_free_kbytes
340 * to meet the requirements
342 void sk_set_memalloc(struct sock *sk)
344 sock_set_flag(sk, SOCK_MEMALLOC);
345 sk->sk_allocation |= __GFP_MEMALLOC;
346 static_key_slow_inc(&memalloc_socks);
348 EXPORT_SYMBOL_GPL(sk_set_memalloc);
350 void sk_clear_memalloc(struct sock *sk)
352 sock_reset_flag(sk, SOCK_MEMALLOC);
353 sk->sk_allocation &= ~__GFP_MEMALLOC;
354 static_key_slow_dec(&memalloc_socks);
357 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
358 * progress of swapping. SOCK_MEMALLOC may be cleared while
359 * it has rmem allocations due to the last swapfile being deactivated
360 * but there is a risk that the socket is unusable due to exceeding
361 * the rmem limits. Reclaim the reserves and obey rmem limits again.
363 sk_mem_reclaim(sk);
365 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
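/* Editor's note: a hedged, illustrative sketch (not part of sock.c) of how a
 * network block driver that supports swap over the network might use the two
 * helpers above; the example_* function names are hypothetical.
 */
static void example_enable_swap_socket(struct socket *sock)
{
	/* Let this socket dip into PF_MEMALLOC reserves so writeback of
	 * swapped-out pages can make forward progress under memory pressure.
	 */
	sk_set_memalloc(sock->sk);
}

static void example_disable_swap_socket(struct socket *sock)
{
	/* Back to normal accounting; this also reclaims the rmem reserves. */
	sk_clear_memalloc(sock->sk);
}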
367 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
369 int ret;
370 unsigned long pflags = current->flags;
372 /* these should have been dropped before queueing */
373 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
375 current->flags |= PF_MEMALLOC;
376 ret = sk->sk_backlog_rcv(sk, skb);
377 tsk_restore_flags(current, pflags, PF_MEMALLOC);
379 return ret;
381 EXPORT_SYMBOL(__sk_backlog_rcv);
383 static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
385 struct timeval tv;
387 if (optlen < sizeof(tv))
388 return -EINVAL;
389 if (copy_from_user(&tv, optval, sizeof(tv)))
390 return -EFAULT;
391 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
392 return -EDOM;
394 if (tv.tv_sec < 0) {
395 static int warned __read_mostly;
397 *timeo_p = 0;
398 if (warned < 10 && net_ratelimit()) {
399 warned++;
400 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
401 __func__, current->comm, task_pid_nr(current));
403 return 0;
405 *timeo_p = MAX_SCHEDULE_TIMEOUT;
406 if (tv.tv_sec == 0 && tv.tv_usec == 0)
407 return 0;
408 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
409 *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
410 return 0;
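/* Editor's note: a worked example of the conversion above, assuming HZ == 1000
 * (so 1000000/HZ == 1000 usec per jiffy): a timeval of { .tv_sec = 2,
 * .tv_usec = 500000 } gives *timeo_p = 2 * 1000 + (500000 + 999) / 1000 =
 * 2500 jiffies. The rounding term makes any nonzero usec remainder count as a
 * full extra jiffy, so the timeout is never shortened by truncation.
 */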
413 static void sock_warn_obsolete_bsdism(const char *name)
415 static int warned;
416 static char warncomm[TASK_COMM_LEN];
417 if (strcmp(warncomm, current->comm) && warned < 5) {
418 strcpy(warncomm, current->comm);
419 pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
420 warncomm, name);
421 warned++;
425 static bool sock_needs_netstamp(const struct sock *sk)
427 switch (sk->sk_family) {
428 case AF_UNSPEC:
429 case AF_UNIX:
430 return false;
431 default:
432 return true;
436 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
438 if (sk->sk_flags & flags) {
439 sk->sk_flags &= ~flags;
440 if (sock_needs_netstamp(sk) &&
441 !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
442 net_disable_timestamp();
447 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
449 int err;
450 unsigned long flags;
451 struct sk_buff_head *list = &sk->sk_receive_queue;
453 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
454 atomic_inc(&sk->sk_drops);
455 trace_sock_rcvqueue_full(sk, skb);
456 return -ENOMEM;
459 err = sk_filter(sk, skb);
460 if (err)
461 return err;
463 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
464 atomic_inc(&sk->sk_drops);
465 return -ENOBUFS;
468 skb->dev = NULL;
469 skb_set_owner_r(skb, sk);
471 	/* we escape from the rcu protected region, make sure we don't leak
472 	 * a non-refcounted dst
474 skb_dst_force(skb);
476 spin_lock_irqsave(&list->lock, flags);
477 sock_skb_set_dropcount(sk, skb);
478 __skb_queue_tail(list, skb);
479 spin_unlock_irqrestore(&list->lock, flags);
481 if (!sock_flag(sk, SOCK_DEAD))
482 sk->sk_data_ready(sk);
483 return 0;
485 EXPORT_SYMBOL(sock_queue_rcv_skb);
487 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
489 int rc = NET_RX_SUCCESS;
491 if (sk_filter(sk, skb))
492 goto discard_and_relse;
494 skb->dev = NULL;
496 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
497 atomic_inc(&sk->sk_drops);
498 goto discard_and_relse;
500 if (nested)
501 bh_lock_sock_nested(sk);
502 else
503 bh_lock_sock(sk);
504 if (!sock_owned_by_user(sk)) {
506 * trylock + unlock semantics:
508 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
510 rc = sk_backlog_rcv(sk, skb);
512 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
513 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
514 bh_unlock_sock(sk);
515 atomic_inc(&sk->sk_drops);
516 goto discard_and_relse;
519 bh_unlock_sock(sk);
520 out:
521 sock_put(sk);
522 return rc;
523 discard_and_relse:
524 kfree_skb(skb);
525 goto out;
527 EXPORT_SYMBOL(sk_receive_skb);
529 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
531 struct dst_entry *dst = __sk_dst_get(sk);
533 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
534 sk_tx_queue_clear(sk);
535 RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
536 dst_release(dst);
537 return NULL;
540 return dst;
542 EXPORT_SYMBOL(__sk_dst_check);
544 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
546 struct dst_entry *dst = sk_dst_get(sk);
548 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
549 sk_dst_reset(sk);
550 dst_release(dst);
551 return NULL;
554 return dst;
556 EXPORT_SYMBOL(sk_dst_check);
558 static int sock_setbindtodevice(struct sock *sk, char __user *optval,
559 int optlen)
561 int ret = -ENOPROTOOPT;
562 #ifdef CONFIG_NETDEVICES
563 struct net *net = sock_net(sk);
564 char devname[IFNAMSIZ];
565 int index;
567 /* Sorry... */
568 ret = -EPERM;
569 if (!ns_capable(net->user_ns, CAP_NET_RAW))
570 goto out;
572 ret = -EINVAL;
573 if (optlen < 0)
574 goto out;
576 /* Bind this socket to a particular device like "eth0",
577 * as specified in the passed interface name. If the
578 * name is "" or the option length is zero the socket
579 * is not bound.
581 if (optlen > IFNAMSIZ - 1)
582 optlen = IFNAMSIZ - 1;
583 memset(devname, 0, sizeof(devname));
585 ret = -EFAULT;
586 if (copy_from_user(devname, optval, optlen))
587 goto out;
589 index = 0;
590 if (devname[0] != '\0') {
591 struct net_device *dev;
593 rcu_read_lock();
594 dev = dev_get_by_name_rcu(net, devname);
595 if (dev)
596 index = dev->ifindex;
597 rcu_read_unlock();
598 ret = -ENODEV;
599 if (!dev)
600 goto out;
603 lock_sock(sk);
604 sk->sk_bound_dev_if = index;
605 sk_dst_reset(sk);
606 release_sock(sk);
608 ret = 0;
610 out:
611 #endif
613 return ret;
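/* Editor's note: an illustrative user-space counterpart (not part of sock.c)
 * showing the setsockopt() call that lands in sock_setbindtodevice() above.
 * The interface name "eth0" is only an example; CAP_NET_RAW is required.
 */
#include <string.h>
#include <sys/socket.h>
#include <net/if.h>

static int example_bind_to_device(int fd)
{
	char ifname[IFNAMSIZ] = "eth0";

	/* Passing an empty name (or optlen 0) un-binds the socket again. */
	return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
			  ifname, strlen(ifname) + 1);
}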
616 static int sock_getbindtodevice(struct sock *sk, char __user *optval,
617 int __user *optlen, int len)
619 int ret = -ENOPROTOOPT;
620 #ifdef CONFIG_NETDEVICES
621 struct net *net = sock_net(sk);
622 char devname[IFNAMSIZ];
624 if (sk->sk_bound_dev_if == 0) {
625 len = 0;
626 goto zero;
629 ret = -EINVAL;
630 if (len < IFNAMSIZ)
631 goto out;
633 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
634 if (ret)
635 goto out;
637 len = strlen(devname) + 1;
639 ret = -EFAULT;
640 if (copy_to_user(optval, devname, len))
641 goto out;
643 zero:
644 ret = -EFAULT;
645 if (put_user(len, optlen))
646 goto out;
648 ret = 0;
650 out:
651 #endif
653 return ret;
656 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
658 if (valbool)
659 sock_set_flag(sk, bit);
660 else
661 sock_reset_flag(sk, bit);
664 bool sk_mc_loop(struct sock *sk)
666 if (dev_recursion_level())
667 return false;
668 if (!sk)
669 return true;
670 switch (sk->sk_family) {
671 case AF_INET:
672 return inet_sk(sk)->mc_loop;
673 #if IS_ENABLED(CONFIG_IPV6)
674 case AF_INET6:
675 return inet6_sk(sk)->mc_loop;
676 #endif
678 WARN_ON(1);
679 return true;
681 EXPORT_SYMBOL(sk_mc_loop);
684 * This is meant for all protocols to use and covers goings on
685 * at the socket level. Everything here is generic.
688 int sock_setsockopt(struct socket *sock, int level, int optname,
689 char __user *optval, unsigned int optlen)
691 struct sock *sk = sock->sk;
692 int val;
693 int valbool;
694 struct linger ling;
695 int ret = 0;
698 * Options without arguments
701 if (optname == SO_BINDTODEVICE)
702 return sock_setbindtodevice(sk, optval, optlen);
704 if (optlen < sizeof(int))
705 return -EINVAL;
707 if (get_user(val, (int __user *)optval))
708 return -EFAULT;
710 valbool = val ? 1 : 0;
712 lock_sock(sk);
714 switch (optname) {
715 case SO_DEBUG:
716 if (val && !capable(CAP_NET_ADMIN))
717 ret = -EACCES;
718 else
719 sock_valbool_flag(sk, SOCK_DBG, valbool);
720 break;
721 case SO_REUSEADDR:
722 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
723 break;
724 case SO_REUSEPORT:
725 sk->sk_reuseport = valbool;
726 break;
727 case SO_TYPE:
728 case SO_PROTOCOL:
729 case SO_DOMAIN:
730 case SO_ERROR:
731 ret = -ENOPROTOOPT;
732 break;
733 case SO_DONTROUTE:
734 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
735 break;
736 case SO_BROADCAST:
737 sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
738 break;
739 case SO_SNDBUF:
740 		/* Don't error on this; BSD doesn't, and if you think
741 		 * about it, this is right. Otherwise apps have to
742 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
743 		 * are treated in BSD as hints.
745 val = min_t(u32, val, sysctl_wmem_max);
746 set_sndbuf:
747 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
748 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
749 /* Wake up sending tasks if we upped the value. */
750 sk->sk_write_space(sk);
751 break;
753 case SO_SNDBUFFORCE:
754 if (!capable(CAP_NET_ADMIN)) {
755 ret = -EPERM;
756 break;
758 goto set_sndbuf;
760 case SO_RCVBUF:
761 		/* Don't error on this; BSD doesn't, and if you think
762 		 * about it, this is right. Otherwise apps have to
763 		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
764 		 * are treated in BSD as hints.
766 val = min_t(u32, val, sysctl_rmem_max);
767 set_rcvbuf:
768 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
770 * We double it on the way in to account for
771 * "struct sk_buff" etc. overhead. Applications
772 * assume that the SO_RCVBUF setting they make will
773 * allow that much actual data to be received on that
774 * socket.
776 * Applications are unaware that "struct sk_buff" and
777 * other overheads allocate from the receive buffer
778 * during socket buffer allocation.
780 * And after considering the possible alternatives,
781 * returning the value we actually used in getsockopt
782 * is the most desirable behavior.
784 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
785 break;
787 case SO_RCVBUFFORCE:
788 if (!capable(CAP_NET_ADMIN)) {
789 ret = -EPERM;
790 break;
792 goto set_rcvbuf;
794 case SO_KEEPALIVE:
795 #ifdef CONFIG_INET
796 if (sk->sk_protocol == IPPROTO_TCP &&
797 sk->sk_type == SOCK_STREAM)
798 tcp_set_keepalive(sk, valbool);
799 #endif
800 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
801 break;
803 case SO_OOBINLINE:
804 sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
805 break;
807 case SO_NO_CHECK:
808 sk->sk_no_check_tx = valbool;
809 break;
811 case SO_PRIORITY:
812 if ((val >= 0 && val <= 6) ||
813 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
814 sk->sk_priority = val;
815 else
816 ret = -EPERM;
817 break;
819 case SO_LINGER:
820 if (optlen < sizeof(ling)) {
821 ret = -EINVAL; /* 1003.1g */
822 break;
824 if (copy_from_user(&ling, optval, sizeof(ling))) {
825 ret = -EFAULT;
826 break;
828 if (!ling.l_onoff)
829 sock_reset_flag(sk, SOCK_LINGER);
830 else {
831 #if (BITS_PER_LONG == 32)
832 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
833 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
834 else
835 #endif
836 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
837 sock_set_flag(sk, SOCK_LINGER);
839 break;
841 case SO_BSDCOMPAT:
842 sock_warn_obsolete_bsdism("setsockopt");
843 break;
845 case SO_PASSCRED:
846 if (valbool)
847 set_bit(SOCK_PASSCRED, &sock->flags);
848 else
849 clear_bit(SOCK_PASSCRED, &sock->flags);
850 break;
852 case SO_TIMESTAMP:
853 case SO_TIMESTAMPNS:
854 if (valbool) {
855 if (optname == SO_TIMESTAMP)
856 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
857 else
858 sock_set_flag(sk, SOCK_RCVTSTAMPNS);
859 sock_set_flag(sk, SOCK_RCVTSTAMP);
860 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
861 } else {
862 sock_reset_flag(sk, SOCK_RCVTSTAMP);
863 sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
865 break;
867 case SO_TIMESTAMPING:
868 if (val & ~SOF_TIMESTAMPING_MASK) {
869 ret = -EINVAL;
870 break;
873 if (val & SOF_TIMESTAMPING_OPT_ID &&
874 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
875 if (sk->sk_protocol == IPPROTO_TCP &&
876 sk->sk_type == SOCK_STREAM) {
877 if (sk->sk_state != TCP_ESTABLISHED) {
878 ret = -EINVAL;
879 break;
881 sk->sk_tskey = tcp_sk(sk)->snd_una;
882 } else {
883 sk->sk_tskey = 0;
886 sk->sk_tsflags = val;
887 if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
888 sock_enable_timestamp(sk,
889 SOCK_TIMESTAMPING_RX_SOFTWARE);
890 else
891 sock_disable_timestamp(sk,
892 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
893 break;
895 case SO_RCVLOWAT:
896 if (val < 0)
897 val = INT_MAX;
898 sk->sk_rcvlowat = val ? : 1;
899 break;
901 case SO_RCVTIMEO:
902 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
903 break;
905 case SO_SNDTIMEO:
906 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
907 break;
909 case SO_ATTACH_FILTER:
910 ret = -EINVAL;
911 if (optlen == sizeof(struct sock_fprog)) {
912 struct sock_fprog fprog;
914 ret = -EFAULT;
915 if (copy_from_user(&fprog, optval, sizeof(fprog)))
916 break;
918 ret = sk_attach_filter(&fprog, sk);
920 break;
922 case SO_ATTACH_BPF:
923 ret = -EINVAL;
924 if (optlen == sizeof(u32)) {
925 u32 ufd;
927 ret = -EFAULT;
928 if (copy_from_user(&ufd, optval, sizeof(ufd)))
929 break;
931 ret = sk_attach_bpf(ufd, sk);
933 break;
935 case SO_DETACH_FILTER:
936 ret = sk_detach_filter(sk);
937 break;
939 case SO_LOCK_FILTER:
940 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
941 ret = -EPERM;
942 else
943 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
944 break;
946 case SO_PASSSEC:
947 if (valbool)
948 set_bit(SOCK_PASSSEC, &sock->flags);
949 else
950 clear_bit(SOCK_PASSSEC, &sock->flags);
951 break;
952 case SO_MARK:
953 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
954 ret = -EPERM;
955 else
956 sk->sk_mark = val;
957 break;
959 case SO_RXQ_OVFL:
960 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
961 break;
963 case SO_WIFI_STATUS:
964 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
965 break;
967 case SO_PEEK_OFF:
968 if (sock->ops->set_peek_off)
969 ret = sock->ops->set_peek_off(sk, val);
970 else
971 ret = -EOPNOTSUPP;
972 break;
974 case SO_NOFCS:
975 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
976 break;
978 case SO_SELECT_ERR_QUEUE:
979 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
980 break;
982 #ifdef CONFIG_NET_RX_BUSY_POLL
983 case SO_BUSY_POLL:
984 /* allow unprivileged users to decrease the value */
985 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
986 ret = -EPERM;
987 else {
988 if (val < 0)
989 ret = -EINVAL;
990 else
991 sk->sk_ll_usec = val;
993 break;
994 #endif
996 case SO_MAX_PACING_RATE:
997 sk->sk_max_pacing_rate = val;
998 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
999 sk->sk_max_pacing_rate);
1000 break;
1002 case SO_INCOMING_CPU:
1003 sk->sk_incoming_cpu = val;
1004 break;
1006 default:
1007 ret = -ENOPROTOOPT;
1008 break;
1010 release_sock(sk);
1011 return ret;
1013 EXPORT_SYMBOL(sock_setsockopt);
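/* Editor's note: an illustrative user-space sketch (not part of sock.c)
 * exercising sock_setsockopt()/sock_getsockopt(). It demonstrates the
 * SO_RCVBUF doubling documented above: the kernel stores val * 2 (after
 * capping val at sysctl_rmem_max) to cover struct sk_buff overhead, and that
 * doubled value is what getsockopt() reports back. The 64 KiB request is
 * arbitrary.
 */
#include <stdio.h>
#include <sys/socket.h>

static void example_show_rcvbuf_doubling(int fd)
{
	int requested = 65536;
	int effective = 0;
	socklen_t len = sizeof(effective);

	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
		       &requested, sizeof(requested)) == 0 &&
	    getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &effective, &len) == 0)
		/* Typically prints 131072 when net.core.rmem_max allows it. */
		printf("asked for %d, kernel uses %d\n", requested, effective);
}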
1016 static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1017 struct ucred *ucred)
1019 ucred->pid = pid_vnr(pid);
1020 ucred->uid = ucred->gid = -1;
1021 if (cred) {
1022 struct user_namespace *current_ns = current_user_ns();
1024 ucred->uid = from_kuid_munged(current_ns, cred->euid);
1025 ucred->gid = from_kgid_munged(current_ns, cred->egid);
1029 int sock_getsockopt(struct socket *sock, int level, int optname,
1030 char __user *optval, int __user *optlen)
1032 struct sock *sk = sock->sk;
1034 union {
1035 int val;
1036 struct linger ling;
1037 struct timeval tm;
1038 } v;
1040 int lv = sizeof(int);
1041 int len;
1043 if (get_user(len, optlen))
1044 return -EFAULT;
1045 if (len < 0)
1046 return -EINVAL;
1048 memset(&v, 0, sizeof(v));
1050 switch (optname) {
1051 case SO_DEBUG:
1052 v.val = sock_flag(sk, SOCK_DBG);
1053 break;
1055 case SO_DONTROUTE:
1056 v.val = sock_flag(sk, SOCK_LOCALROUTE);
1057 break;
1059 case SO_BROADCAST:
1060 v.val = sock_flag(sk, SOCK_BROADCAST);
1061 break;
1063 case SO_SNDBUF:
1064 v.val = sk->sk_sndbuf;
1065 break;
1067 case SO_RCVBUF:
1068 v.val = sk->sk_rcvbuf;
1069 break;
1071 case SO_REUSEADDR:
1072 v.val = sk->sk_reuse;
1073 break;
1075 case SO_REUSEPORT:
1076 v.val = sk->sk_reuseport;
1077 break;
1079 case SO_KEEPALIVE:
1080 v.val = sock_flag(sk, SOCK_KEEPOPEN);
1081 break;
1083 case SO_TYPE:
1084 v.val = sk->sk_type;
1085 break;
1087 case SO_PROTOCOL:
1088 v.val = sk->sk_protocol;
1089 break;
1091 case SO_DOMAIN:
1092 v.val = sk->sk_family;
1093 break;
1095 case SO_ERROR:
1096 v.val = -sock_error(sk);
1097 if (v.val == 0)
1098 v.val = xchg(&sk->sk_err_soft, 0);
1099 break;
1101 case SO_OOBINLINE:
1102 v.val = sock_flag(sk, SOCK_URGINLINE);
1103 break;
1105 case SO_NO_CHECK:
1106 v.val = sk->sk_no_check_tx;
1107 break;
1109 case SO_PRIORITY:
1110 v.val = sk->sk_priority;
1111 break;
1113 case SO_LINGER:
1114 lv = sizeof(v.ling);
1115 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
1116 v.ling.l_linger = sk->sk_lingertime / HZ;
1117 break;
1119 case SO_BSDCOMPAT:
1120 sock_warn_obsolete_bsdism("getsockopt");
1121 break;
1123 case SO_TIMESTAMP:
1124 v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1125 !sock_flag(sk, SOCK_RCVTSTAMPNS);
1126 break;
1128 case SO_TIMESTAMPNS:
1129 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1130 break;
1132 case SO_TIMESTAMPING:
1133 v.val = sk->sk_tsflags;
1134 break;
1136 case SO_RCVTIMEO:
1137 lv = sizeof(struct timeval);
1138 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1139 v.tm.tv_sec = 0;
1140 v.tm.tv_usec = 0;
1141 } else {
1142 v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1143 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1145 break;
1147 case SO_SNDTIMEO:
1148 lv = sizeof(struct timeval);
1149 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1150 v.tm.tv_sec = 0;
1151 v.tm.tv_usec = 0;
1152 } else {
1153 v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1154 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1156 break;
1158 case SO_RCVLOWAT:
1159 v.val = sk->sk_rcvlowat;
1160 break;
1162 case SO_SNDLOWAT:
1163 v.val = 1;
1164 break;
1166 case SO_PASSCRED:
1167 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1168 break;
1170 case SO_PEERCRED:
1172 struct ucred peercred;
1173 if (len > sizeof(peercred))
1174 len = sizeof(peercred);
1175 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1176 if (copy_to_user(optval, &peercred, len))
1177 return -EFAULT;
1178 goto lenout;
1181 case SO_PEERNAME:
1183 char address[128];
1185 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1186 return -ENOTCONN;
1187 if (lv < len)
1188 return -EINVAL;
1189 if (copy_to_user(optval, address, len))
1190 return -EFAULT;
1191 goto lenout;
1194 /* Dubious BSD thing... Probably nobody even uses it, but
1195 * the UNIX standard wants it for whatever reason... -DaveM
1197 case SO_ACCEPTCONN:
1198 v.val = sk->sk_state == TCP_LISTEN;
1199 break;
1201 case SO_PASSSEC:
1202 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1203 break;
1205 case SO_PEERSEC:
1206 return security_socket_getpeersec_stream(sock, optval, optlen, len);
1208 case SO_MARK:
1209 v.val = sk->sk_mark;
1210 break;
1212 case SO_RXQ_OVFL:
1213 v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1214 break;
1216 case SO_WIFI_STATUS:
1217 v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1218 break;
1220 case SO_PEEK_OFF:
1221 if (!sock->ops->set_peek_off)
1222 return -EOPNOTSUPP;
1224 v.val = sk->sk_peek_off;
1225 break;
1226 case SO_NOFCS:
1227 v.val = sock_flag(sk, SOCK_NOFCS);
1228 break;
1230 case SO_BINDTODEVICE:
1231 return sock_getbindtodevice(sk, optval, optlen, len);
1233 case SO_GET_FILTER:
1234 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1235 if (len < 0)
1236 return len;
1238 goto lenout;
1240 case SO_LOCK_FILTER:
1241 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1242 break;
1244 case SO_BPF_EXTENSIONS:
1245 v.val = bpf_tell_extensions();
1246 break;
1248 case SO_SELECT_ERR_QUEUE:
1249 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1250 break;
1252 #ifdef CONFIG_NET_RX_BUSY_POLL
1253 case SO_BUSY_POLL:
1254 v.val = sk->sk_ll_usec;
1255 break;
1256 #endif
1258 case SO_MAX_PACING_RATE:
1259 v.val = sk->sk_max_pacing_rate;
1260 break;
1262 case SO_INCOMING_CPU:
1263 v.val = sk->sk_incoming_cpu;
1264 break;
1266 default:
1267 /* We implement the SO_SNDLOWAT etc to not be settable
1268 * (1003.1g 7).
1270 return -ENOPROTOOPT;
1273 if (len > lv)
1274 len = lv;
1275 if (copy_to_user(optval, &v, len))
1276 return -EFAULT;
1277 lenout:
1278 if (put_user(len, optlen))
1279 return -EFAULT;
1280 return 0;
1284 * Initialize an sk_lock.
1286 * (We also register the sk_lock with the lock validator.)
1288 static inline void sock_lock_init(struct sock *sk)
1290 sock_lock_init_class_and_name(sk,
1291 af_family_slock_key_strings[sk->sk_family],
1292 af_family_slock_keys + sk->sk_family,
1293 af_family_key_strings[sk->sk_family],
1294 af_family_keys + sk->sk_family);
1298 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
1299 * even temporarly, because of RCU lookups. sk_node should also be left as is.
1300 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
1302 static void sock_copy(struct sock *nsk, const struct sock *osk)
1304 #ifdef CONFIG_SECURITY_NETWORK
1305 void *sptr = nsk->sk_security;
1306 #endif
1307 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1309 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1310 osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1312 #ifdef CONFIG_SECURITY_NETWORK
1313 nsk->sk_security = sptr;
1314 security_sk_clone(osk, nsk);
1315 #endif
1318 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1320 unsigned long nulls1, nulls2;
1322 nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1323 nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1324 if (nulls1 > nulls2)
1325 swap(nulls1, nulls2);
1327 if (nulls1 != 0)
1328 memset((char *)sk, 0, nulls1);
1329 memset((char *)sk + nulls1 + sizeof(void *), 0,
1330 nulls2 - nulls1 - sizeof(void *));
1331 memset((char *)sk + nulls2 + sizeof(void *), 0,
1332 size - nulls2 - sizeof(void *));
1334 EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1336 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1337 int family)
1339 struct sock *sk;
1340 struct kmem_cache *slab;
1342 slab = prot->slab;
1343 if (slab != NULL) {
1344 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1345 if (!sk)
1346 return sk;
1347 if (priority & __GFP_ZERO) {
1348 if (prot->clear_sk)
1349 prot->clear_sk(sk, prot->obj_size);
1350 else
1351 sk_prot_clear_nulls(sk, prot->obj_size);
1353 } else
1354 sk = kmalloc(prot->obj_size, priority);
1356 if (sk != NULL) {
1357 kmemcheck_annotate_bitfield(sk, flags);
1359 if (security_sk_alloc(sk, family, priority))
1360 goto out_free;
1362 if (!try_module_get(prot->owner))
1363 goto out_free_sec;
1364 sk_tx_queue_clear(sk);
1367 return sk;
1369 out_free_sec:
1370 security_sk_free(sk);
1371 out_free:
1372 if (slab != NULL)
1373 kmem_cache_free(slab, sk);
1374 else
1375 kfree(sk);
1376 return NULL;
1379 static void sk_prot_free(struct proto *prot, struct sock *sk)
1381 struct kmem_cache *slab;
1382 struct module *owner;
1384 owner = prot->owner;
1385 slab = prot->slab;
1387 security_sk_free(sk);
1388 if (slab != NULL)
1389 kmem_cache_free(slab, sk);
1390 else
1391 kfree(sk);
1392 module_put(owner);
1395 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
1396 void sock_update_netprioidx(struct sock *sk)
1398 if (in_interrupt())
1399 return;
1401 sk->sk_cgrp_prioidx = task_netprioidx(current);
1403 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1404 #endif
1407 * sk_alloc - All socket objects are allocated here
1408 * @net: the applicable net namespace
1409 * @family: protocol family
1410 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1411 * @prot: struct proto associated with this new sock instance
1412 * @kern: is this to be a kernel socket?
1414 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1415 struct proto *prot, int kern)
1417 struct sock *sk;
1419 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1420 if (sk) {
1421 sk->sk_family = family;
1423 * See comment in struct sock definition to understand
1424 * why we need sk_prot_creator -acme
1426 sk->sk_prot = sk->sk_prot_creator = prot;
1427 sock_lock_init(sk);
1428 sk->sk_net_refcnt = kern ? 0 : 1;
1429 if (likely(sk->sk_net_refcnt))
1430 get_net(net);
1431 sock_net_set(sk, net);
1432 atomic_set(&sk->sk_wmem_alloc, 1);
1434 sock_update_classid(sk);
1435 sock_update_netprioidx(sk);
1438 return sk;
1440 EXPORT_SYMBOL(sk_alloc);
1442 void sk_destruct(struct sock *sk)
1444 struct sk_filter *filter;
1446 if (sk->sk_destruct)
1447 sk->sk_destruct(sk);
1449 filter = rcu_dereference_check(sk->sk_filter,
1450 atomic_read(&sk->sk_wmem_alloc) == 0);
1451 if (filter) {
1452 sk_filter_uncharge(sk, filter);
1453 RCU_INIT_POINTER(sk->sk_filter, NULL);
1456 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1458 if (atomic_read(&sk->sk_omem_alloc))
1459 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1460 __func__, atomic_read(&sk->sk_omem_alloc));
1462 if (sk->sk_frag.page) {
1463 put_page(sk->sk_frag.page);
1464 sk->sk_frag.page = NULL;
1467 if (sk->sk_peer_cred)
1468 put_cred(sk->sk_peer_cred);
1469 put_pid(sk->sk_peer_pid);
1470 if (likely(sk->sk_net_refcnt))
1471 put_net(sock_net(sk));
1472 sk_prot_free(sk->sk_prot_creator, sk);
1475 static void __sk_free(struct sock *sk)
1477 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
1478 sock_diag_broadcast_destroy(sk);
1479 else
1480 sk_destruct(sk);
1483 void sk_free(struct sock *sk)
1486 	 * We subtract one from sk_wmem_alloc so we can tell whether
1487 	 * some packets are still in some tx queue.
1488 	 * If it is not zero, sock_wfree() will call __sk_free(sk) later
1490 if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1491 __sk_free(sk);
1493 EXPORT_SYMBOL(sk_free);
1495 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
1497 if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
1498 sock_update_memcg(newsk);
1502 * sk_clone_lock - clone a socket, and lock its clone
1503 * @sk: the socket to clone
1504 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1506 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1508 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1510 struct sock *newsk;
1511 bool is_charged = true;
1513 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1514 if (newsk != NULL) {
1515 struct sk_filter *filter;
1517 sock_copy(newsk, sk);
1519 newsk->sk_prot_creator = sk->sk_prot;
1521 /* SANITY */
1522 if (likely(newsk->sk_net_refcnt))
1523 get_net(sock_net(newsk));
1524 sk_node_init(&newsk->sk_node);
1525 sock_lock_init(newsk);
1526 bh_lock_sock(newsk);
1527 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
1528 newsk->sk_backlog.len = 0;
1530 atomic_set(&newsk->sk_rmem_alloc, 0);
1532 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1534 atomic_set(&newsk->sk_wmem_alloc, 1);
1535 atomic_set(&newsk->sk_omem_alloc, 0);
1536 skb_queue_head_init(&newsk->sk_receive_queue);
1537 skb_queue_head_init(&newsk->sk_write_queue);
1539 rwlock_init(&newsk->sk_callback_lock);
1540 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1541 af_callback_keys + newsk->sk_family,
1542 af_family_clock_key_strings[newsk->sk_family]);
1544 newsk->sk_dst_cache = NULL;
1545 newsk->sk_wmem_queued = 0;
1546 newsk->sk_forward_alloc = 0;
1547 newsk->sk_send_head = NULL;
1548 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1550 sock_reset_flag(newsk, SOCK_DONE);
1551 skb_queue_head_init(&newsk->sk_error_queue);
1553 filter = rcu_dereference_protected(newsk->sk_filter, 1);
1554 if (filter != NULL)
1555 			/* though it's an empty new sock, the charging may fail
1556 			 * if sysctl_optmem_max was changed between the creation
1557 			 * of the original socket and the cloning
1559 is_charged = sk_filter_charge(newsk, filter);
1561 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1562 /* We need to make sure that we don't uncharge the new
1563 * socket if we couldn't charge it in the first place
1564 * as otherwise we uncharge the parent's filter.
1566 if (!is_charged)
1567 RCU_INIT_POINTER(newsk->sk_filter, NULL);
1568 			/* It is still a raw copy of the parent, so invalidate
1569 			 * its destructor and do a plain sk_free() */
1570 newsk->sk_destruct = NULL;
1571 bh_unlock_sock(newsk);
1572 sk_free(newsk);
1573 newsk = NULL;
1574 goto out;
1577 newsk->sk_err = 0;
1578 newsk->sk_err_soft = 0;
1579 newsk->sk_priority = 0;
1580 newsk->sk_incoming_cpu = raw_smp_processor_id();
1581 atomic64_set(&newsk->sk_cookie, 0);
1583 * Before updating sk_refcnt, we must commit prior changes to memory
1584 * (Documentation/RCU/rculist_nulls.txt for details)
1586 smp_wmb();
1587 atomic_set(&newsk->sk_refcnt, 2);
1590 * Increment the counter in the same struct proto as the master
1591 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1592 * is the same as sk->sk_prot->socks, as this field was copied
1593 * with memcpy).
1595 * This _changes_ the previous behaviour, where
1596 * tcp_create_openreq_child always was incrementing the
1597 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1598 * to be taken into account in all callers. -acme
1600 sk_refcnt_debug_inc(newsk);
1601 sk_set_socket(newsk, NULL);
1602 newsk->sk_wq = NULL;
1604 sk_update_clone(sk, newsk);
1606 if (newsk->sk_prot->sockets_allocated)
1607 sk_sockets_allocated_inc(newsk);
1609 if (sock_needs_netstamp(sk) &&
1610 newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1611 net_enable_timestamp();
1613 out:
1614 return newsk;
1616 EXPORT_SYMBOL_GPL(sk_clone_lock);
1618 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1620 u32 max_segs = 1;
1622 sk_dst_set(sk, dst);
1623 sk->sk_route_caps = dst->dev->features;
1624 if (sk->sk_route_caps & NETIF_F_GSO)
1625 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1626 sk->sk_route_caps &= ~sk->sk_route_nocaps;
1627 if (sk_can_gso(sk)) {
1628 if (dst->header_len) {
1629 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1630 } else {
1631 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1632 sk->sk_gso_max_size = dst->dev->gso_max_size;
1633 max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
1636 sk->sk_gso_max_segs = max_segs;
1638 EXPORT_SYMBOL_GPL(sk_setup_caps);
1641 * Simple resource managers for sockets.
1646 * Write buffer destructor automatically called from kfree_skb.
1648 void sock_wfree(struct sk_buff *skb)
1650 struct sock *sk = skb->sk;
1651 unsigned int len = skb->truesize;
1653 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1655 * Keep a reference on sk_wmem_alloc, this will be released
1656 * after sk_write_space() call
1658 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1659 sk->sk_write_space(sk);
1660 len = 1;
1663 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1664 * could not do because of in-flight packets
1666 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1667 __sk_free(sk);
1669 EXPORT_SYMBOL(sock_wfree);
1671 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1673 skb_orphan(skb);
1674 skb->sk = sk;
1675 #ifdef CONFIG_INET
1676 if (unlikely(!sk_fullsock(sk))) {
1677 skb->destructor = sock_edemux;
1678 sock_hold(sk);
1679 return;
1681 #endif
1682 skb->destructor = sock_wfree;
1683 skb_set_hash_from_sk(skb, sk);
1685 	 * We used to take a refcount on sk, but the following operation
1686 	 * is enough to guarantee sk_free() won't free this sock until
1687 	 * all in-flight packets are completed
1689 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1691 EXPORT_SYMBOL(skb_set_owner_w);
1693 void skb_orphan_partial(struct sk_buff *skb)
1695 if (skb->destructor == sock_wfree
1696 #ifdef CONFIG_INET
1697 || skb->destructor == tcp_wfree
1698 #endif
1700 struct sock *sk = skb->sk;
1702 if (atomic_inc_not_zero(&sk->sk_refcnt)) {
1703 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
1704 skb->destructor = sock_efree;
1706 } else {
1707 skb_orphan(skb);
1710 EXPORT_SYMBOL(skb_orphan_partial);
1713 * Read buffer destructor automatically called from kfree_skb.
1715 void sock_rfree(struct sk_buff *skb)
1717 struct sock *sk = skb->sk;
1718 unsigned int len = skb->truesize;
1720 atomic_sub(len, &sk->sk_rmem_alloc);
1721 sk_mem_uncharge(sk, len);
1723 EXPORT_SYMBOL(sock_rfree);
1726 * Buffer destructor for skbs that are not used directly in read or write
1727 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1729 void sock_efree(struct sk_buff *skb)
1731 sock_put(skb->sk);
1733 EXPORT_SYMBOL(sock_efree);
1735 kuid_t sock_i_uid(struct sock *sk)
1737 kuid_t uid;
1739 read_lock_bh(&sk->sk_callback_lock);
1740 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1741 read_unlock_bh(&sk->sk_callback_lock);
1742 return uid;
1744 EXPORT_SYMBOL(sock_i_uid);
1746 unsigned long sock_i_ino(struct sock *sk)
1748 unsigned long ino;
1750 read_lock_bh(&sk->sk_callback_lock);
1751 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1752 read_unlock_bh(&sk->sk_callback_lock);
1753 return ino;
1755 EXPORT_SYMBOL(sock_i_ino);
1758 * Allocate a skb from the socket's send buffer.
1760 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1761 gfp_t priority)
1763 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1764 struct sk_buff *skb = alloc_skb(size, priority);
1765 if (skb) {
1766 skb_set_owner_w(skb, sk);
1767 return skb;
1770 return NULL;
1772 EXPORT_SYMBOL(sock_wmalloc);
1775 * Allocate a memory block from the socket's option memory buffer.
1777 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1779 if ((unsigned int)size <= sysctl_optmem_max &&
1780 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1781 void *mem;
1782 /* First do the add, to avoid the race if kmalloc
1783 * might sleep.
1785 atomic_add(size, &sk->sk_omem_alloc);
1786 mem = kmalloc(size, priority);
1787 if (mem)
1788 return mem;
1789 atomic_sub(size, &sk->sk_omem_alloc);
1791 return NULL;
1793 EXPORT_SYMBOL(sock_kmalloc);
1795 /* Free an option memory block. Note, we actually want the inline
1796 * here as this allows gcc to detect the nullify and fold away the
1797 * condition entirely.
1799 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1800 const bool nullify)
1802 if (WARN_ON_ONCE(!mem))
1803 return;
1804 if (nullify)
1805 kzfree(mem);
1806 else
1807 kfree(mem);
1808 atomic_sub(size, &sk->sk_omem_alloc);
1811 void sock_kfree_s(struct sock *sk, void *mem, int size)
1813 __sock_kfree_s(sk, mem, size, false);
1815 EXPORT_SYMBOL(sock_kfree_s);
1817 void sock_kzfree_s(struct sock *sk, void *mem, int size)
1819 __sock_kfree_s(sk, mem, size, true);
1821 EXPORT_SYMBOL(sock_kzfree_s);
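/* Editor's note: a hedged, illustrative sketch (not part of sock.c) of the
 * usual pairing for per-socket option memory; struct example_opt and the
 * example_* helpers are hypothetical.
 */
struct example_opt {
	u32 flags;
};

static struct example_opt *example_opt_alloc(struct sock *sk)
{
	/* Charged to sk->sk_omem_alloc and bounded by sysctl_optmem_max. */
	return sock_kmalloc(sk, sizeof(struct example_opt), GFP_KERNEL);
}

static void example_opt_free(struct sock *sk, struct example_opt *opt)
{
	/* Pass the same size so the optmem charge is returned in full. */
	sock_kfree_s(sk, opt, sizeof(*opt));
}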
1823 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1824    I think these locks should be removed for datagram sockets.
1826 static long sock_wait_for_wmem(struct sock *sk, long timeo)
1828 DEFINE_WAIT(wait);
1830 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1831 for (;;) {
1832 if (!timeo)
1833 break;
1834 if (signal_pending(current))
1835 break;
1836 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1837 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1838 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1839 break;
1840 if (sk->sk_shutdown & SEND_SHUTDOWN)
1841 break;
1842 if (sk->sk_err)
1843 break;
1844 timeo = schedule_timeout(timeo);
1846 finish_wait(sk_sleep(sk), &wait);
1847 return timeo;
1852 * Generic send/receive buffer handlers
1855 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1856 unsigned long data_len, int noblock,
1857 int *errcode, int max_page_order)
1859 struct sk_buff *skb;
1860 long timeo;
1861 int err;
1863 timeo = sock_sndtimeo(sk, noblock);
1864 for (;;) {
1865 err = sock_error(sk);
1866 if (err != 0)
1867 goto failure;
1869 err = -EPIPE;
1870 if (sk->sk_shutdown & SEND_SHUTDOWN)
1871 goto failure;
1873 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1874 break;
1876 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1877 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1878 err = -EAGAIN;
1879 if (!timeo)
1880 goto failure;
1881 if (signal_pending(current))
1882 goto interrupted;
1883 timeo = sock_wait_for_wmem(sk, timeo);
1885 skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1886 errcode, sk->sk_allocation);
1887 if (skb)
1888 skb_set_owner_w(skb, sk);
1889 return skb;
1891 interrupted:
1892 err = sock_intr_errno(timeo);
1893 failure:
1894 *errcode = err;
1895 return NULL;
1897 EXPORT_SYMBOL(sock_alloc_send_pskb);
1899 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1900 int noblock, int *errcode)
1902 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1904 EXPORT_SYMBOL(sock_alloc_send_skb);
1906 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1907 struct sockcm_cookie *sockc)
1909 struct cmsghdr *cmsg;
1911 for_each_cmsghdr(cmsg, msg) {
1912 if (!CMSG_OK(msg, cmsg))
1913 return -EINVAL;
1914 if (cmsg->cmsg_level != SOL_SOCKET)
1915 continue;
1916 switch (cmsg->cmsg_type) {
1917 case SO_MARK:
1918 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1919 return -EPERM;
1920 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1921 return -EINVAL;
1922 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
1923 break;
1924 default:
1925 return -EINVAL;
1928 return 0;
1930 EXPORT_SYMBOL(sock_cmsg_send);
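/* Editor's note: an illustrative user-space sketch (not part of sock.c)
 * attaching a per-call SO_MARK control message, which sock_cmsg_send() above
 * parses into sockc->mark for protocols that call it. CAP_NET_ADMIN is
 * required; the mark value 42 is arbitrary.
 */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SO_MARK
#define SO_MARK 36	/* Linux UAPI value, in case libc headers lack it */
#endif

static ssize_t example_send_with_mark(int fd, const void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	uint32_t mark = 42;

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SO_MARK;
	cmsg->cmsg_len = CMSG_LEN(sizeof(mark));
	memcpy(CMSG_DATA(cmsg), &mark, sizeof(mark));

	return sendmsg(fd, &msg, 0);
}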
1932 /* On 32bit arches, an skb frag is limited to 2^15 */
1933 #define SKB_FRAG_PAGE_ORDER get_order(32768)
1936 * skb_page_frag_refill - check that a page_frag contains enough room
1937 * @sz: minimum size of the fragment we want to get
1938 * @pfrag: pointer to page_frag
1939 * @gfp: priority for memory allocation
1941  * Note: While this allocator tries to use high order pages, there is
1942  * no guarantee that allocations succeed. Therefore, @sz MUST be
1943  * less than or equal to PAGE_SIZE.
1945 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
1947 if (pfrag->page) {
1948 if (atomic_read(&pfrag->page->_count) == 1) {
1949 pfrag->offset = 0;
1950 return true;
1952 if (pfrag->offset + sz <= pfrag->size)
1953 return true;
1954 put_page(pfrag->page);
1957 pfrag->offset = 0;
1958 if (SKB_FRAG_PAGE_ORDER) {
1959 /* Avoid direct reclaim but allow kswapd to wake */
1960 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
1961 __GFP_COMP | __GFP_NOWARN |
1962 __GFP_NORETRY,
1963 SKB_FRAG_PAGE_ORDER);
1964 if (likely(pfrag->page)) {
1965 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
1966 return true;
1969 pfrag->page = alloc_page(gfp);
1970 if (likely(pfrag->page)) {
1971 pfrag->size = PAGE_SIZE;
1972 return true;
1974 return false;
1976 EXPORT_SYMBOL(skb_page_frag_refill);
1978 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
1980 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
1981 return true;
1983 sk_enter_memory_pressure(sk);
1984 sk_stream_moderate_sndbuf(sk);
1985 return false;
1987 EXPORT_SYMBOL(sk_page_frag_refill);
1989 static void __lock_sock(struct sock *sk)
1990 __releases(&sk->sk_lock.slock)
1991 __acquires(&sk->sk_lock.slock)
1993 DEFINE_WAIT(wait);
1995 for (;;) {
1996 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1997 TASK_UNINTERRUPTIBLE);
1998 spin_unlock_bh(&sk->sk_lock.slock);
1999 schedule();
2000 spin_lock_bh(&sk->sk_lock.slock);
2001 if (!sock_owned_by_user(sk))
2002 break;
2004 finish_wait(&sk->sk_lock.wq, &wait);
2007 static void __release_sock(struct sock *sk)
2008 __releases(&sk->sk_lock.slock)
2009 __acquires(&sk->sk_lock.slock)
2011 struct sk_buff *skb = sk->sk_backlog.head;
2013 do {
2014 sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2015 bh_unlock_sock(sk);
2017 do {
2018 struct sk_buff *next = skb->next;
2020 prefetch(next);
2021 WARN_ON_ONCE(skb_dst_is_noref(skb));
2022 skb->next = NULL;
2023 sk_backlog_rcv(sk, skb);
2026 * We are in process context here with softirqs
2027 * disabled, use cond_resched_softirq() to preempt.
2028 * This is safe to do because we've taken the backlog
2029 * queue private:
2031 cond_resched_softirq();
2033 skb = next;
2034 } while (skb != NULL);
2036 bh_lock_sock(sk);
2037 } while ((skb = sk->sk_backlog.head) != NULL);
2040 	 * Doing the zeroing here guarantees we cannot loop forever
2041 	 * while a wild producer attempts to flood us.
2043 sk->sk_backlog.len = 0;
2047 * sk_wait_data - wait for data to arrive at sk_receive_queue
2048 * @sk: sock to wait on
2049 * @timeo: for how long
2050 * @skb: last skb seen on sk_receive_queue
2052 * Now socket state including sk->sk_err is changed only under lock,
2053 * hence we may omit checks after joining wait queue.
2054  * We check the receive queue before schedule() only as an optimization;
2055 * it is very likely that release_sock() added new data.
2057 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2059 int rc;
2060 DEFINE_WAIT(wait);
2062 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2063 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2064 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
2065 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2066 finish_wait(sk_sleep(sk), &wait);
2067 return rc;
2069 EXPORT_SYMBOL(sk_wait_data);
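/* Editor's note: a hedged, kernel-side sketch (not part of sock.c) of the
 * typical receive-side loop built around sk_wait_data(). It is simplified:
 * real callers run under lock_sock() and handle MSG_* flags, and
 * example_wait_for_skb() is a hypothetical helper.
 */
static struct sk_buff *example_wait_for_skb(struct sock *sk, int noblock,
					    int *err)
{
	long timeo = sock_rcvtimeo(sk, noblock);
	struct sk_buff *skb;

	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
		*err = sock_error(sk);
		if (*err)
			return NULL;
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;
		if (!timeo) {
			*err = -EAGAIN;
			return NULL;
		}
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
		/* Sleep until the receive queue tail changes (NULL == empty). */
		sk_wait_data(sk, &timeo, NULL);
	}
	return skb;
}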
2072 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2073 * @sk: socket
2074 * @size: memory size to allocate
2075 * @kind: allocation type
2077 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2078 * rmem allocation. This function assumes that protocols which have
2079 * memory_pressure use sk_wmem_queued as write buffer accounting.
2081 int __sk_mem_schedule(struct sock *sk, int size, int kind)
2083 struct proto *prot = sk->sk_prot;
2084 int amt = sk_mem_pages(size);
2085 long allocated;
2086 int parent_status = UNDER_LIMIT;
2088 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2090 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
2092 /* Under limit. */
2093 if (parent_status == UNDER_LIMIT &&
2094 allocated <= sk_prot_mem_limits(sk, 0)) {
2095 sk_leave_memory_pressure(sk);
2096 return 1;
2099 /* Under pressure. (we or our parents) */
2100 if ((parent_status > SOFT_LIMIT) ||
2101 allocated > sk_prot_mem_limits(sk, 1))
2102 sk_enter_memory_pressure(sk);
2104 /* Over hard limit (we or our parents) */
2105 if ((parent_status == OVER_LIMIT) ||
2106 (allocated > sk_prot_mem_limits(sk, 2)))
2107 goto suppress_allocation;
2109 /* guarantee minimum buffer size under pressure */
2110 if (kind == SK_MEM_RECV) {
2111 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2112 return 1;
2114 } else { /* SK_MEM_SEND */
2115 if (sk->sk_type == SOCK_STREAM) {
2116 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2117 return 1;
2118 } else if (atomic_read(&sk->sk_wmem_alloc) <
2119 prot->sysctl_wmem[0])
2120 return 1;
2123 if (sk_has_memory_pressure(sk)) {
2124 int alloc;
2126 if (!sk_under_memory_pressure(sk))
2127 return 1;
2128 alloc = sk_sockets_allocated_read_positive(sk);
2129 if (sk_prot_mem_limits(sk, 2) > alloc *
2130 sk_mem_pages(sk->sk_wmem_queued +
2131 atomic_read(&sk->sk_rmem_alloc) +
2132 sk->sk_forward_alloc))
2133 return 1;
2136 suppress_allocation:
2138 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2139 sk_stream_moderate_sndbuf(sk);
2141 /* Fail only if socket is _under_ its sndbuf.
2142 * In this case we cannot block, so that we have to fail.
2144 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2145 return 1;
2148 trace_sock_exceed_buf_limit(sk, prot, allocated);
2150 /* Alas. Undo changes. */
2151 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2153 sk_memory_allocated_sub(sk, amt);
2155 return 0;
2157 EXPORT_SYMBOL(__sk_mem_schedule);
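/* Editor's note: a worked example of the accounting above, assuming
 * SK_MEM_QUANTUM == PAGE_SIZE == 4096: charging size == 1500 gives
 * amt = sk_mem_pages(1500) == 1, so sk_forward_alloc grows by 4096 bytes and
 * the protocol's memory_allocated by one quantum. Subsequent small skbs on
 * this socket are then charged against the remaining forward_alloc without
 * touching the shared counter until it is exhausted.
 */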

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
 */
void __sk_mem_reclaim(struct sock *sk, int amount)
{
        amount >>= SK_MEM_QUANTUM_SHIFT;
        sk_memory_allocated_sub(sk, amount);
        sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;

        if (sk_under_memory_pressure(sk) &&
            (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
                sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
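
/*
 * Illustrative sketch (editorial addition, not compiled): the uncharge side
 * that pairs with __sk_mem_schedule().  Socket destructors do the equivalent
 * of this; example_uncharge() is a hypothetical name.
 */
#if 0
static void example_uncharge(struct sock *sk, unsigned int size)
{
        /* Return the bytes to sk_forward_alloc ... */
        sk->sk_forward_alloc += size;

        /* ... and give whole quanta back to the global pool once at least
         * one SK_MEM_QUANTUM has accumulated.
         */
        if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
                __sk_mem_reclaim(sk, sk->sk_forward_alloc);
}
#endif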

/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
                    int len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
                    int *len, int peer)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
        return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, unsigned int optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, int __user *optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
                    int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
        /* Mirror missing mmap method error code */
        return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
        ssize_t res;
        struct msghdr msg = {.msg_flags = flags};
        struct kvec iov;
        char *kaddr = kmap(page);

        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
        res = kernel_sendmsg(sock, &msg, &iov, 1, size);
        kunmap(page);
        return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
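
/*
 * Illustrative sketch (editorial addition, not compiled): how the
 * sock_no_*() stubs above are typically wired into a struct proto_ops for a
 * protocol without connection-oriented semantics.  example_proto_ops and the
 * example_*() handlers are hypothetical; only the sock_no_*() entries and
 * datagram_poll() are real helpers.
 */
#if 0
static const struct proto_ops example_proto_ops = {
        .family         = PF_UNSPEC,            /* placeholder family */
        .owner          = THIS_MODULE,
        .release        = example_release,      /* protocol-specific */
        .bind           = example_bind,         /* protocol-specific */
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = example_getname,      /* protocol-specific */
        .poll           = datagram_poll,
        .ioctl          = example_ioctl,        /* protocol-specific */
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = example_setsockopt,   /* protocol-specific */
        .getsockopt     = example_getsockopt,   /* protocol-specific */
        .sendmsg        = example_sendmsg,      /* protocol-specific */
        .recvmsg        = example_recvmsg,      /* protocol-specific */
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};
#endif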

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_poll(&wq->wait, POLLERR);
        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        rcu_read_unlock();
}

static void sock_def_readable(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
                                                POLLRDNORM | POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();

        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);

                /* Should agree with poll, otherwise some programs break */
                if (sock_writeable(sk))
                        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }

        rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
}

void sk_send_sigurg(struct sock *sk)
{
        if (sk->sk_socket && sk->sk_socket->file)
                if (send_sigurg(&sk->sk_socket->file->f_owner))
                        sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires)
{
        if (!mod_timer(timer, expires))
                sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
        if (del_timer(timer))
                __sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
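
/*
 * Illustrative sketch (editorial addition, not compiled): the usual pattern
 * for a protocol timer built on sk_reset_timer()/sk_stop_timer().  The pair
 * keeps a reference on the sock while the timer is pending, and the handler
 * drops that reference when it finishes.  example_*() names are hypothetical.
 */
#if 0
static void example_timer_handler(unsigned long data)
{
        struct sock *sk = (struct sock *)data;

        bh_lock_sock(sk);
        /* ... protocol work, possibly rearming with sk_reset_timer() ... */
        bh_unlock_sock(sk);
        sock_put(sk);           /* release the reference the timer held */
}

static void example_arm_timer(struct sock *sk, unsigned long delay)
{
        /* Takes a reference via sock_hold() unless the timer was pending. */
        sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
}

static void example_disarm_timer(struct sock *sk)
{
        /* Drops the timer's reference via __sock_put() if it was pending. */
        sk_stop_timer(sk, &sk->sk_timer);
}
#endif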

void sock_init_data(struct socket *sock, struct sock *sk)
{
        skb_queue_head_init(&sk->sk_receive_queue);
        skb_queue_head_init(&sk->sk_write_queue);
        skb_queue_head_init(&sk->sk_error_queue);

        sk->sk_send_head = NULL;

        init_timer(&sk->sk_timer);

        sk->sk_allocation = GFP_KERNEL;
        sk->sk_rcvbuf = sysctl_rmem_default;
        sk->sk_sndbuf = sysctl_wmem_default;
        sk->sk_state = TCP_CLOSE;
        sk_set_socket(sk, sock);

        sock_set_flag(sk, SOCK_ZAPPED);

        if (sock) {
                sk->sk_type = sock->type;
                sk->sk_wq = sock->wq;
                sock->sk = sk;
        } else
                sk->sk_wq = NULL;

        rwlock_init(&sk->sk_callback_lock);
        lockdep_set_class_and_name(&sk->sk_callback_lock,
                        af_callback_keys + sk->sk_family,
                        af_family_clock_key_strings[sk->sk_family]);

        sk->sk_state_change = sock_def_wakeup;
        sk->sk_data_ready = sock_def_readable;
        sk->sk_write_space = sock_def_write_space;
        sk->sk_error_report = sock_def_error_report;
        sk->sk_destruct = sock_def_destruct;

        sk->sk_frag.page = NULL;
        sk->sk_frag.offset = 0;
        sk->sk_peek_off = -1;

        sk->sk_peer_pid = NULL;
        sk->sk_peer_cred = NULL;
        sk->sk_write_pending = 0;
        sk->sk_rcvlowat = 1;
        sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
        sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

        sk->sk_stamp = ktime_set(-1L, 0);
#if BITS_PER_LONG==32
        seqlock_init(&sk->sk_stamp_seq);
#endif

#ifdef CONFIG_NET_RX_BUSY_POLL
        sk->sk_napi_id = 0;
        sk->sk_ll_usec = sysctl_net_busy_read;
#endif

        sk->sk_max_pacing_rate = ~0U;
        sk->sk_pacing_rate = ~0U;
        sk->sk_incoming_cpu = -1;
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
         */
        smp_wmb();
        atomic_set(&sk->sk_refcnt, 1);
        atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
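
/*
 * Illustrative sketch (editorial addition, not compiled): where
 * sock_init_data() sits in a protocol's create() path, and how a protocol
 * may override the sock_def_*() defaults installed above.  example_create(),
 * example_data_ready(), example_destruct(), example_proto and
 * example_proto_ops are all hypothetical.
 */
#if 0
static int example_create(struct net *net, struct socket *sock,
                          int protocol, int kern)
{
        struct sock *sk;

        sk = sk_alloc(net, PF_UNSPEC /* placeholder family */, GFP_KERNEL,
                      &example_proto, kern);
        if (!sk)
                return -ENOBUFS;

        sock->ops = &example_proto_ops;
        sock_init_data(sock, sk);       /* queues, timer, default callbacks */

        /* Protocols may replace the defaults after sock_init_data(). */
        sk->sk_data_ready = example_data_ready;
        sk->sk_destruct = example_destruct;

        return 0;
}
#endif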

void lock_sock_nested(struct sock *sk, int subclass)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_lock.owned)
                __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
        local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
        /*
         * The sk_lock has mutex_unlock() semantics:
         */
        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_backlog.tail)
                __release_sock(sk);

        /* Warning : release_cb() might need to release sk ownership,
         * ie call sock_release_ownership(sk) before us.
         */
        if (sk->sk_prot->release_cb)
                sk->sk_prot->release_cb(sk);

        sock_release_ownership(sk);
        if (waitqueue_active(&sk->sk_lock.wq))
                wake_up(&sk->sk_lock.wq);
        spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
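
/*
 * Illustrative sketch (editorial addition, not compiled): the standard
 * ownership pattern around lock_sock()/release_sock().  While the lock is
 * owned, softirq receive processing queues packets on the backlog, and
 * release_sock() above flushes that backlog via __release_sock().
 * example_setsockopt_locked() is a hypothetical caller.
 */
#if 0
static int example_setsockopt_locked(struct sock *sk, int val)
{
        int err = 0;

        lock_sock(sk);          /* may sleep; marks sk_lock.owned = 1 */
        if (sk->sk_state != TCP_CLOSE)
                err = -EINVAL;
        else
                sk->sk_rcvlowat = val ? : 1;
        release_sock(sk);       /* processes the backlog, wakes waiters */

        return err;
}
#endif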

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block.
 *
 * Returns false if the fast path is taken:
 *   sk_lock.slock locked, owned = 0, BH disabled
 * Returns true if the slow path is taken:
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);

        if (!sk->sk_lock.owned)
                /*
                 * Note: we must keep BH disabled here; the fast path
                 * returns with the spinlock still held.
                 */
                return false;

        __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
        local_bh_enable();
        return true;
}
EXPORT_SYMBOL(lock_sock_fast);
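
/*
 * Illustrative sketch (editorial addition, not compiled): lock_sock_fast()
 * must always be paired with unlock_sock_fast(), passing back the "slow"
 * return value so the matching unlock path (spin_unlock_bh vs release_sock)
 * is taken.  example_read_counter() is a hypothetical caller.
 */
#if 0
static int example_read_counter(struct sock *sk)
{
        bool slow;
        int val;

        slow = lock_sock_fast(sk);
        val = atomic_read(&sk->sk_rmem_alloc);
        unlock_sock_fast(sk, slow);

        return val;
}
#endif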

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
        struct timeval tv;

        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        tv = ktime_to_timeval(sk->sk_stamp);
        if (tv.tv_sec == -1)
                return -ENOENT;
        if (tv.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                tv = ktime_to_timeval(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
        struct timespec ts;

        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        ts = ktime_to_timespec(sk->sk_stamp);
        if (ts.tv_sec == -1)
                return -ENOENT;
        if (ts.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                ts = ktime_to_timespec(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);
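
/*
 * Illustrative sketch (editorial addition, not compiled): sock_get_timestamp()
 * and sock_get_timestampns() are normally called from a protocol's ioctl
 * handler for SIOCGSTAMP/SIOCGSTAMPNS.  example_ioctl() is hypothetical; the
 * dispatch mirrors the usual inet-style ioctl handling.
 */
#if 0
static int example_ioctl(struct socket *sock, unsigned int cmd,
                         unsigned long arg)
{
        struct sock *sk = sock->sk;

        switch (cmd) {
        case SIOCGSTAMP:
                return sock_get_timestamp(sk, (struct timeval __user *)arg);
        case SIOCGSTAMPNS:
                return sock_get_timestampns(sk, (struct timespec __user *)arg);
        default:
                return -ENOIOCTLCMD;
        }
}
#endif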

void sock_enable_timestamp(struct sock *sk, int flag)
{
        if (!sock_flag(sk, flag)) {
                unsigned long previous_flags = sk->sk_flags;

                sock_set_flag(sk, flag);
                /*
                 * we just set one of the two flags which require net
                 * time stamping, but time stamping might have been on
                 * already because of the other one
                 */
                if (sock_needs_netstamp(sk) &&
                    !(previous_flags & SK_FLAGS_TIMESTAMP))
                        net_enable_timestamp();
        }
}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
                       int level, int type)
{
        struct sock_exterr_skb *serr;
        struct sk_buff *skb;
        int copied, err;

        err = -EAGAIN;
        skb = sock_dequeue_err_skb(sk);
        if (skb == NULL)
                goto out;

        copied = skb->len;
        if (copied > len) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }
        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto out_free_skb;

        sock_recv_timestamp(msg, sk, skb);

        serr = SKB_EXT_ERR(skb);
        put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

        msg->msg_flags |= MSG_ERRQUEUE;
        err = copied;

out_free_skb:
        kfree_skb(skb);
out:
        return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);
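
/*
 * Illustrative sketch (editorial addition, not compiled): a protocol
 * recvmsg() handing MSG_ERRQUEUE requests off to sock_recv_errqueue().  The
 * level/type values determine the cmsg the caller sees; SOL_IP/IP_RECVERR is
 * only a placeholder here, and example_recvmsg() is hypothetical.
 */
#if 0
static int example_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                           int noblock, int flags, int *addr_len)
{
        if (flags & MSG_ERRQUEUE)
                return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

        /* ... normal receive path ... */
        return -EAGAIN;
}
#endif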

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_getsockopt != NULL)
                return sk->sk_prot->compat_getsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                        int flags)
{
        struct sock *sk = sock->sk;
        int addr_len = 0;
        int err;

        err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
                                   flags & ~MSG_DONTWAIT, &addr_len);
        if (err >= 0)
                msg->msg_namelen = addr_len;
        return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_setsockopt != NULL)
                return sk->sk_prot->compat_setsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
        if (sk->sk_prot->destroy)
                sk->sk_prot->destroy(sk);

        /*
         * Observation: when sk_common_release is called, processes have
         * no access to the socket, but the network still does.
         *
         * Step one, detach it from networking:
         *
         * A. Remove from hash tables.
         */

        sk->sk_prot->unhash(sk);

        /*
         * At this point the socket cannot receive new packets, but it is
         * possible that some packets are still in flight because another
         * CPU runs the receiver and did the hash table lookup before we
         * unhashed the socket. They will reach the receive queue and will
         * be purged by the socket destructor.
         *
         * Also we still have packets pending on the receive queue and
         * probably our own packets waiting in device queues. sock_destroy
         * will drain the receive queue, but transmitted packets will delay
         * socket destruction until the last reference is released.
         */

        sock_orphan(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);

        sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
        int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
        net->core.inuse = alloc_percpu(struct prot_inuse);
        return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
        free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
        .init = sock_inuse_init_net,
        .exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
        if (register_pernet_subsys(&net_inuse_ops))
                panic("Cannot initialize net inuse counters");

        return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu(prot_inuse, cpu).val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif
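
/*
 * Illustrative sketch (editorial addition, not compiled): protocols bump
 * these per-cpu counters from their hash()/unhash() callbacks, which is what
 * feeds the "sockets" column of /proc/net/protocols below.  example_hash()
 * and example_unhash() are hypothetical.
 */
#if 0
static void example_hash(struct sock *sk)
{
        /* ... insert sk into the protocol's lookup table ... */
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
        /* ... remove sk from the lookup table ... */
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}
#endif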

static void assign_proto_idx(struct proto *prot)
{
        prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

        if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
                pr_err("PROTO_INUSE_NR exhausted\n");
                return;
        }

        set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
        if (prot->inuse_idx != PROTO_INUSE_NR - 1)
                clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
        if (!rsk_prot)
                return;
        kfree(rsk_prot->slab_name);
        rsk_prot->slab_name = NULL;
        kmem_cache_destroy(rsk_prot->slab);
        rsk_prot->slab = NULL;
}

static int req_prot_init(const struct proto *prot)
{
        struct request_sock_ops *rsk_prot = prot->rsk_prot;

        if (!rsk_prot)
                return 0;

        rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
                                        prot->name);
        if (!rsk_prot->slab_name)
                return -ENOMEM;

        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
                                           rsk_prot->obj_size, 0,
                                           prot->slab_flags, NULL);

        if (!rsk_prot->slab) {
                pr_crit("%s: Can't create request sock SLAB cache!\n",
                        prot->name);
                return -ENOMEM;
        }
        return 0;
}

int proto_register(struct proto *prot, int alloc_slab)
{
        if (alloc_slab) {
                prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
                                        SLAB_HWCACHE_ALIGN | prot->slab_flags,
                                        NULL);

                if (prot->slab == NULL) {
                        pr_crit("%s: Can't create sock SLAB cache!\n",
                                prot->name);
                        goto out;
                }

                if (req_prot_init(prot))
                        goto out_free_request_sock_slab;

                if (prot->twsk_prot != NULL) {
                        prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

                        if (prot->twsk_prot->twsk_slab_name == NULL)
                                goto out_free_request_sock_slab;

                        prot->twsk_prot->twsk_slab =
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
                                goto out_free_timewait_sock_slab_name;
                }
        }

        mutex_lock(&proto_list_mutex);
        list_add(&prot->node, &proto_list);
        assign_proto_idx(prot);
        mutex_unlock(&proto_list_mutex);
        return 0;

out_free_timewait_sock_slab_name:
        kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
        req_prot_cleanup(prot->rsk_prot);

        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;
out:
        return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
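
/*
 * Illustrative sketch (editorial addition, not compiled): minimal
 * registration and teardown of a struct proto against proto_register() and
 * proto_unregister().  example_proto, struct example_sock and the init/exit
 * hooks are hypothetical; passing alloc_slab=1 asks for a kmem cache of
 * obj_size bytes per socket.
 */
#if 0
struct example_sock {
        struct sock sk;         /* must be first */
        u32 cookie;
};

static struct proto example_proto = {
        .name           = "EXAMPLE",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct example_sock),
};

static int __init example_init(void)
{
        return proto_register(&example_proto, 1);
}

static void __exit example_exit(void)
{
        proto_unregister(&example_proto);
}
#endif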

void proto_unregister(struct proto *prot)
{
        mutex_lock(&proto_list_mutex);
        release_proto_idx(prot);
        list_del(&prot->node);
        mutex_unlock(&proto_list_mutex);

        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;

        req_prot_cleanup(prot->rsk_prot);

        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
                kfree(prot->twsk_prot->twsk_slab_name);
                prot->twsk_prot->twsk_slab = NULL;
        }
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(proto_list_mutex)
{
        mutex_lock(&proto_list_mutex);
        return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
        __releases(proto_list_mutex)
{
        mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
        return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
        return proto->memory_pressure != NULL ?
        proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
        seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
                   sock_prot_memory_allocated(proto),
                   sock_prot_memory_pressure(proto),
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
                   proto_method_implemented(proto->close),
                   proto_method_implemented(proto->connect),
                   proto_method_implemented(proto->disconnect),
                   proto_method_implemented(proto->accept),
                   proto_method_implemented(proto->ioctl),
                   proto_method_implemented(proto->init),
                   proto_method_implemented(proto->destroy),
                   proto_method_implemented(proto->shutdown),
                   proto_method_implemented(proto->setsockopt),
                   proto_method_implemented(proto->getsockopt),
                   proto_method_implemented(proto->sendmsg),
                   proto_method_implemented(proto->recvmsg),
                   proto_method_implemented(proto->sendpage),
                   proto_method_implemented(proto->bind),
                   proto_method_implemented(proto->backlog_rcv),
                   proto_method_implemented(proto->hash),
                   proto_method_implemented(proto->unhash),
                   proto_method_implemented(proto->get_port),
                   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
        if (v == &proto_list)
                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
                           "protocol",
                           "size",
                           "sockets",
                           "memory",
                           "press",
                           "maxhdr",
                           "slab",
                           "module",
                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
        else
                proto_seq_printf(seq, list_entry(v, struct proto, node));
        return 0;
}

static const struct seq_operations proto_seq_ops = {
        .start  = proto_seq_start,
        .next   = proto_seq_next,
        .stop   = proto_seq_stop,
        .show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &proto_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = proto_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
        if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
                return -ENOMEM;

        return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
        remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
        .init = proto_init_net,
        .exit = proto_exit_net,
};

static int __init proto_init(void)
{
        return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */