/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's definition.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko Eißfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect
 *					algorithm. Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it avoids huge amounts
 *					of sockets being hashed (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations
 *	     Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from the reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first-socket-fstat-twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with 0, so that this name space does not intersect
 *		  with BSD names.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;

static struct hlist_head *unix_sockets_unbound(void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */
/*
 *  SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */

static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)csum_fold(n);

	hash ^= hash >> 8;
	return hash & (UNIX_HASH_SIZE - 1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);
static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}
/*
 *	Check unix socket name:
 *		- it should not be zero length.
 *		- if it starts with a non-zero byte, it must be NUL terminated
 *		  (an FS object)
 *		- if it starts with zero, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path) + 1 + sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
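/*
 * Example: the two address flavors unix_mkname() distinguishes, as seen
 * from userspace. This is a minimal illustrative sketch, not part of this
 * module (compiled out via #if 0); the path and name are hypothetical,
 * and each bind() is assumed to run on a fresh, unbound socket.
 */
#if 0
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static int bind_pathname(int fd)
{
	struct sockaddr_un a;

	memset(&a, 0, sizeof(a));
	a.sun_family = AF_UNIX;
	/* filesystem binding: sun_path is NUL terminated */
	strcpy(a.sun_path, "/tmp/example.sock");
	return bind(fd, (struct sockaddr *)&a, sizeof(a));
}

static int bind_abstract(int fd)
{
	struct sockaddr_un a;

	memset(&a, 0, sizeof(a));
	a.sun_family = AF_UNIX;
	/* abstract binding: sun_path[0] == 0; the passed address length,
	 * not a NUL byte, delimits the name */
	memcpy(a.sun_path + 1, "example", 7);
	return bind(fd, (struct sockaddr *)&a,
		    offsetof(struct sockaddr_un, sun_path) + 1 + 7);
}
#endif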
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}
static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	sk_for_each(s,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}
/* When dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer. First, it allows us to do
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is
		 * disconnected, we signal an error. Messages are lost.
		 * Do not do this when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}
static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What is the above comment talking about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();	/* Garbage collect fds */
}
static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
out:
	return err;
}
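/*
 * Example: a minimal userspace server setup exercising unix_bind(),
 * unix_listen() and unix_accept() above. Illustrative sketch only, not
 * part of this module (compiled out via #if 0); the path is hypothetical.
 * The listen() backlog is what bounds unix_recvq_full() on the listener,
 * so connect(2) blocks (or fails with EAGAIN) once it is exceeded.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

static int run_server(void)
{
	int srv, conn;
	struct sockaddr_un a;

	srv = socket(AF_UNIX, SOCK_STREAM, 0);
	if (srv < 0)
		return -1;

	memset(&a, 0, sizeof(a));
	a.sun_family = AF_UNIX;
	strcpy(a.sun_path, "/run/example.sock");
	unlink(a.sun_path);		/* stale node makes bind() fail */

	if (bind(srv, (struct sockaddr *)&a, sizeof(a)) < 0 ||
	    listen(srv, 64) < 0)
		return -1;

	conn = accept(srv, NULL, NULL);	/* one queued SYN-like skb per conn */
	return conn;
}
#endif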
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
				    size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *,  loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

static int unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->readlock))
		return -EINTR;

	sk->sk_peek_off = val;
	mutex_unlock(&u->readlock);

	return 0;
}
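/*
 * Example: how unix_set_peek_off() is reached from userspace via the
 * SO_PEEK_OFF socket option. A hedged sketch, not part of this module
 * (compiled out via #if 0); fd is assumed to be a connected AF_UNIX socket.
 */
#if 0
#include <sys/socket.h>

static void enable_peek_offset(int fd)
{
	int off = 0;

	/* With a non-negative peek offset, each recv(..., MSG_PEEK)
	 * starts at the offset and advances it, instead of re-reading
	 * from the head of the receive queue every time. */
	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
}
#endif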
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	unix_stream_sendpage,
	.splice_read =	unix_stream_splice_read,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};
/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
			  &af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}
static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		return err;

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
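/*
 * Example: triggering unix_autobind() from userspace. Passing only the
 * address family to bind(2) (addr_len == sizeof(sa_family_t)) asks the
 * kernel to pick an abstract name of the form "\0XXXXX" (five hex digits,
 * matching the "%05x" format above). Illustrative sketch only, compiled
 * out via #if 0.
 */
#if 0
#include <sys/socket.h>
#include <sys/un.h>

static int autobind(int fd)
{
	struct sockaddr_un a = { .sun_family = AF_UNIX };

	/* length covers only sun_family: the kernel autobinds */
	return bind(fd, (struct sockaddr *)&a, sizeof(sa_family_t));
}
#endif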
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = d_backing_inode(path.dentry);
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}
static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
	struct dentry *dentry;
	struct path path;
	int err = 0;
	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		return err;

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
	if (!err) {
		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
		if (!err) {
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
		}
	}
	done_path_create(&path, dentry);
	return err;
}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		goto out;

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		struct path path;
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			unix_release_addr(addr);
			goto out_up;
		}
		addr->hash = UNIX_HASH_SIZE;
		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}
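/*
 * Example: the userspace view of unix_dgram_connect(), including the
 * POSIX 1003.1g AF_UNSPEC path that dissolves the association. A hedged
 * sketch, compiled out via #if 0; the peer address is hypothetical.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static void dgram_connect_disconnect(int fd)
{
	struct sockaddr_un peer;
	struct sockaddr sa;

	memset(&peer, 0, sizeof(peer));
	peer.sun_family = AF_UNIX;
	strcpy(peer.sun_path, "/run/example-dgram.sock");

	/* set a default destination; send(2) now needs no address */
	connect(fd, (struct sockaddr *)&peer, sizeof(peer));

	/* break the connected state again */
	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	connect(fd, &sa, sizeof(sa));
}
#endif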
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we do it after the state is locked,
	   we will have to recheck everything again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL, 0);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. That is dangerous because a deadlock is
	   possible. The connect-to-self case and simultaneous
	   connect attempts are eliminated by checking socket
	   state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
	   check this before attempting to grab the lock.

	   Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock*/
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
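/*
 * Example: the client side of unix_stream_connect(), and the errno values
 * it surfaces. A hedged userspace sketch, compiled out via #if 0; the path
 * is hypothetical.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static int stream_connect(void)
{
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
	struct sockaddr_un a;

	memset(&a, 0, sizeof(a));
	a.sun_family = AF_UNIX;
	strcpy(a.sun_path, "/run/example.sock");

	/* ECONNREFUSED: no listener behind the node;
	 * EAGAIN: listener backlog full on a non-blocking socket
	 * (a blocking socket waits in unix_wait_for_peer() instead). */
	if (connect(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		return -1;
	return fd;
}
#endif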
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
static void unix_sock_inherit_flags(const struct socket *old,
				    struct socket *new)
{
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);
}
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}
#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	unix_get_secdata(scm, skb);
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}
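/*
 * Example: the userspace side of fd passing that unix_attach_fds() /
 * unix_detach_fds() implement in the kernel: an SCM_RIGHTS control
 * message carried by sendmsg(2). A minimal sketch, compiled out via
 * #if 0; error handling is elided.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static int send_fd(int sock, int fd)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	char byte = 0;
	char buf[CMSG_SPACE(sizeof(int))];

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &byte;		/* at least one byte of data */
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = buf;
	msg.msg_controllen = sizeof(buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	return sendmsg(sock, &msg, 0);
}
#endif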
/*
 * Some apps rely on write() giving SCM_CREDENTIALS
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
{
	if (UNIXCB(skb).pid)
		return;
	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
	    !other->sk_socket ||
	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
		UNIXCB(skb).pid = get_pid(task_tgid(current));
		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
	}
}
/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			      size_t len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned int hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie scm;
	int max_level;
	int data_len = 0;

	wait_for_unix_gc();
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	if (len > SKB_MAX_ALLOC) {
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);
		data_len = PAGE_ALIGN(data_len);

		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
	}

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err,
				   PAGE_ALLOC_COSTLY_ORDER);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(&scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should a
		 *	datagram error look like here?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	scm_destroy(&scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(&scm);
	return err;
}
/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie scm;
	bool fds_sent = false;
	int max_level;
	int data_len;

	wait_for_unix_gc();
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

		/* allow fallback to order-0 allocations */
		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));

		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
					   msg->msg_flags & MSG_DONTWAIT, &err,
					   get_order(UNIX_SKB_FRAGS_SZ));
		if (!skb)
			goto out_err;

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(&scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		skb_put(skb, size - data_len);
		skb->data_len = data_len;
		skb->len = size;
		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other);
		sent += size;
	}

	scm_destroy(&scm);

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(&scm);
	return sent ? : err;
}
static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
				    int offset, size_t size, int flags)
{
	int err = 0;
	bool send_sigpipe = true;
	struct sock *other, *sk = socket->sk;
	struct sk_buff *skb, *newskb = NULL, *tail = NULL;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	other = unix_peer(sk);
	if (!other || sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (false) {
alloc_skb:
		unix_state_unlock(other);
		mutex_unlock(&unix_sk(other)->readlock);
		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
					      &err, 0);
		if (!newskb)
			return err;
	}

	/* we must acquire readlock as we modify already present
	 * skbs in the sk_receive_queue and mess with skb->len
	 */
	err = mutex_lock_interruptible(&unix_sk(other)->readlock);
	if (err) {
		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
		send_sigpipe = false;
		goto err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto err_unlock;
	}

	unix_state_lock(other);

	if (sock_flag(other, SOCK_DEAD) ||
	    other->sk_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto err_state_unlock;
	}

	skb = skb_peek_tail(&other->sk_receive_queue);
	if (tail && tail == skb) {
		skb = newskb;
	} else if (!skb) {
		if (newskb)
			skb = newskb;
		else
			goto alloc_skb;
	} else if (newskb) {
		/* this is fast path, we don't necessarily need to
		 * call to kfree_skb even though with newskb == NULL
		 * this - does no harm
		 */
		consume_skb(newskb);
		newskb = NULL;
	}

	if (skb_append_pagefrags(skb, page, offset, size)) {
		tail = skb;
		goto alloc_skb;
	}

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	atomic_add(size, &sk->sk_wmem_alloc);

	if (newskb)
		__skb_queue_tail(&other->sk_receive_queue, newskb);

	unix_state_unlock(other);
	mutex_unlock(&unix_sk(other)->readlock);

	other->sk_data_ready(other);

	return size;

err_state_unlock:
	unix_state_unlock(other);
err_unlock:
	mutex_unlock(&unix_sk(other)->readlock);
err:
	kfree_skb(newskb);
	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}
static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
				  size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(sock, msg, len);
}
static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
				  size_t size, int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(sock, msg, size, flags);
}
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}
static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			      size_t size, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;
	int peeked, skip;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	err = mutex_lock_interruptible(&u->readlock);
	if (unlikely(err)) {
		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
		goto out;
	}

	skip = sk_peek_offset(sk, flags);

	skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, skip, msg, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	memset(&scm, 0, sizeof(scm));

	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(&scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(&scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, &scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
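/*
 * Example: the MSG_TRUNC return-value convention implemented above, from
 * userspace. A hedged sketch, compiled out via #if 0; fd is assumed to be
 * a datagram AF_UNIX socket with a queued message.
 */
#if 0
#include <sys/socket.h>

static long datagram_length(int fd)
{
	char buf[64];

	/* With MSG_TRUNC, the return value is the full datagram length
	 * (skb->len - skip above) even if it exceeds the buffer, so the
	 * caller can detect truncation. */
	return recv(fd, buf, sizeof(buf), MSG_TRUNC);
}
#endif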
/*
 *	Sleep until more data has arrived. But check for races..
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last, unsigned int last_len)
{
	struct sk_buff *tail;
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail != last ||
		    (tail && tail->len != last_len) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = freezable_schedule_timeout(timeo);
		unix_state_lock(sk);

		if (sock_flag(sk, SOCK_DEAD))
			break;

		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}
static unsigned int unix_skb_len(const struct sk_buff *skb)
{
	return skb->len - UNIXCB(skb).consumed;
}

struct unix_stream_read_state {
	int (*recv_actor)(struct sk_buff *, int, int,
			  struct unix_stream_read_state *);
	struct socket *socket;
	struct msghdr *msg;
	struct pipe_inode_info *pipe;
	size_t size;
	int flags;
	unsigned int splice_flags;
};
static int unix_stream_read_generic(struct unix_stream_read_state *state)
{
	struct scm_cookie scm;
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int copied = 0;
	int flags = state->flags;
	int noblock = flags & MSG_DONTWAIT;
	bool check_creds = false;
	int target;
	int err = 0;
	long timeo;
	int skip;
	size_t size = state->size;
	unsigned int last_len;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	memset(&scm, 0, sizeof(scm));

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_to_msg()
	 */
	err = mutex_lock_interruptible(&u->readlock);
	if (unlikely(err)) {
		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
		goto out;
	}

	do {
		int chunk;
		struct sk_buff *skb, *last;

		unix_state_lock(sk);
		if (sock_flag(sk, SOCK_DEAD)) {
			err = -ECONNRESET;
			goto unlock;
		}
		last = skb = skb_peek(&sk->sk_receive_queue);
		last_len = last ? last->len : 0;
again:
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo, last,
						      last_len);

			if (signal_pending(current) ||
			    mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			continue;
unlock:
			unix_state_unlock(sk);
			break;
		}

		skip = sk_peek_offset(sk, flags);
		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			last_len = skb->len;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid  != scm.pid) ||
			    !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
			    !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
			    !unix_secdata_eq(&scm, skb))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			unix_set_secdata(&scm, skb);
			check_creds = true;
		}

		/* Copy address just once */
		if (state->msg && state->msg->msg_name) {
			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
					 state->msg->msg_name);
			unix_copy_addr(state->msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		chunk = state->recv_actor(skb, skip, chunk, state);
		if (chunk < 0) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(&scm, skb);

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (scm.fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				scm.fp = scm_fp_dup(UNIXCB(skb).fp);

			sk_peek_offset_fwd(sk, chunk);

			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	if (state->msg)
		scm_recv(sock, state->msg, &scm, flags);
	else
		scm_destroy(&scm);
out:
	return copied ? : err;
}
static int unix_stream_read_actor(struct sk_buff *skb,
				  int skip, int chunk,
				  struct unix_stream_read_state *state)
{
	int ret;

	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
				    state->msg, chunk);
	return ret ?: chunk;
}

static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sock,
		.msg = msg,
		.size = size,
		.flags = flags
	};

	return unix_stream_read_generic(&state);
}
static ssize_t skb_unix_socket_splice(struct sock *sk,
				      struct pipe_inode_info *pipe,
				      struct splice_pipe_desc *spd)
{
	int ret;
	struct unix_sock *u = unix_sk(sk);

	mutex_unlock(&u->readlock);
	ret = splice_to_pipe(pipe, spd);
	mutex_lock(&u->readlock);

	return ret;
}

static int unix_stream_splice_actor(struct sk_buff *skb,
				    int skip, int chunk,
				    struct unix_stream_read_state *state)
{
	return skb_splice_bits(skb, state->socket->sk,
			       UNIXCB(skb).consumed + skip,
			       state->pipe, chunk, state->splice_flags,
			       skb_unix_socket_splice);
}

static ssize_t unix_stream_splice_read(struct socket *sock,  loff_t *ppos,
				       struct pipe_inode_info *pipe,
				       size_t size, unsigned int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_splice_actor,
		.socket = sock,
		.pipe = pipe,
		.size = size,
		.splice_flags = flags,
	};

	if (unlikely(*ppos))
		return -ESPIPE;

	if (sock->file->f_flags & O_NONBLOCK ||
	    flags & SPLICE_F_NONBLOCK)
		state.flags = MSG_DONTWAIT;

	return unix_stream_read_generic(&state);
}
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += unix_skb_len(skb);
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
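/*
 * Example: querying queue occupancy from userspace via the ioctls handled
 * above (SIOCINQ is FIONREAD, SIOCOUTQ is TIOCOUTQ). A hedged sketch,
 * compiled out via #if 0.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void queue_sizes(int fd)
{
	int unread = 0, unsent = 0;

	ioctl(fd, SIOCINQ, &unread);	/* bytes queued for reading */
	ioctl(fd, SIOCOUTQ, &unsent);	/* bytes not yet consumed by peer */
}
#endif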
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	struct sock *sk;
	unsigned long count = 0;

	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
		if (sock_net(sk) != seq_file_net(seq))
			continue;
		if (++count == offset)
			break;
	}

	return sk;
}

static struct sock *unix_next_socket(struct seq_file *seq,
				     struct sock *sk,
				     loff_t *pos)
{
	unsigned long bucket;

	while (sk > (struct sock *)SEQ_START_TOKEN) {
		sk = sk_next(sk);
		if (!sk)
			goto next_bucket;
		if (sock_net(sk) == seq_file_net(seq))
			return sk;
	}

	do {
		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

next_bucket:
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < ARRAY_SIZE(unix_socket_table));

	return NULL;
}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);

	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
		return NULL;

	return unix_next_socket(seq, NULL, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return unix_next_socket(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}
*seq
, void *v
)
2540 if (v
== SEQ_START_TOKEN
)
2541 seq_puts(seq
, "Num RefCount Protocol Flags Type St "
2545 struct unix_sock
*u
= unix_sk(s
);
2548 seq_printf(seq
, "%pK: %08X %08X %08X %04X %02X %5lu",
2550 atomic_read(&s
->sk_refcnt
),
2552 s
->sk_state
== TCP_LISTEN
? __SO_ACCEPTCON
: 0,
2555 (s
->sk_state
== TCP_ESTABLISHED
? SS_CONNECTED
: SS_UNCONNECTED
) :
2556 (s
->sk_state
== TCP_ESTABLISHED
? SS_CONNECTING
: SS_DISCONNECTING
),
2564 len
= u
->addr
->len
- sizeof(short);
2565 if (!UNIX_ABSTRACT(s
))
2571 for ( ; i
< len
; i
++)
2572 seq_putc(seq
, u
->addr
->name
->sun_path
[i
]);
2574 unix_state_unlock(s
);
2575 seq_putc(seq
, '\n');
2581 static const struct seq_operations unix_seq_ops
= {
2582 .start
= unix_seq_start
,
2583 .next
= unix_seq_next
,
2584 .stop
= unix_seq_stop
,
2585 .show
= unix_seq_show
,
2588 static int unix_seq_open(struct inode
*inode
, struct file
*file
)
2590 return seq_open_net(inode
, file
, &unix_seq_ops
,
2591 sizeof(struct seq_net_private
));
2594 static const struct file_operations unix_seq_fops
= {
2595 .owner
= THIS_MODULE
,
2596 .open
= unix_seq_open
,
2598 .llseek
= seq_lseek
,
2599 .release
= seq_release_net
,
static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};


static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
static int __init af_unix_init(void)
{
	int rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);