 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					BSD.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko EiBfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *		Christoph Rohland:	Elegant non-blocking accept/connect algorithm.
 *		Alexey Kuznetsov:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *		Andrea Arcangeli:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of socks hashed (this for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *		Alexey Kuznetsov:	Full scale SMP. Lots of bugs are introduced 8)
 *		Malcolm Beattie	:	Set peercred for socketpair.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+).
 *
 * Known differences from reference BSD that was tested:
 *
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
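 *
 * Added illustration (not from the original source): binding to an
 * abstract name from userspace, with example values only.
 *
 *	struct sockaddr_un a;
 *	memset(&a, 0, sizeof(a));
 *	a.sun_family = AF_UNIX;
 *	a.sun_path[0] = '\0';			   (first byte zero => abstract)
 *	memcpy(a.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 *
 * No filesystem entry is created and the name disappears with the
 * socket; a path-based bind() of "example" would not collide with it.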
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_long_t unix_nr_socks;

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
}
#endif /* CONFIG_SECURITY_NETWORK */
/*
 *  SMP locking strategy:
 *    the hash table is protected with the spinlock unix_table_lock
 *    each socket state is protected by a separate spin lock.
 */
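/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the helpers below follow this two-level pattern -- unix_table_lock
 * guards the global hash table, unix_state_lock(sk) guards a single
 * socket's own state.  A caller touching both would look roughly like:
 *
 *	spin_lock(&unix_table_lock);
 *	__unix_insert_socket(list, sk);
 *	spin_unlock(&unix_table_lock);
 *
 *	unix_state_lock(sk);
 *	sk->sk_state = TCP_LISTEN;
 *	unix_state_unlock(sk);
 */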
static inline unsigned unix_hash_fold(__wsum n)
{
	unsigned hash = (__force unsigned)n;

	hash ^= hash>>16;
	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}
#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}
/*
 *	Check unix socket name:
 *		- should not be zero length.
 *		- if it does not start with a zero byte, it must be NUL terminated (FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */
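/*
 * Added illustration (not from the original source): the three naming
 * cases unix_mkname() distinguishes, as seen from userspace; the struct
 * and values below are only a sketch.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	1) autobind: pass only sizeof(sa_family_t) as the address length to
 *	   bind(); the kernel picks a unique abstract name itself.
 *	2) filesystem name: strcpy(a.sun_path, "/tmp/sock"); the path is a
 *	   NUL-terminated string and a socket inode is created on the fs.
 *	3) abstract name: a.sun_path[0] = '\0' followed by arbitrary bytes;
 *	   the length passed to bind() decides how many bytes belong to the
 *	   name, and no filesystem object is created.
 */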
static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}
static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer.  First, this allows flow
 * control to be based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
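/*
 * Added illustration (not from the original source): from userspace this
 * purge is triggered by re-connecting, or disconnecting, a datagram socket.
 * The names below are only an example.
 *
 *	int s = socket(AF_UNIX, SOCK_DGRAM, 0);
 *	connect(s, (struct sockaddr *)&peer_a, sizeof(peer_a));
 *	...
 *	connect(s, (struct sockaddr *)&peer_b, sizeof(peer_b));
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(s, &sa, sizeof(sa));
 *
 * Either of the last two calls ends the association with peer_a, and any
 * of its datagrams still queued and unread are dropped by the code below.
 */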
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this when
		 * the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}
static int unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
	}

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;
	const struct cred *old_cred = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
	if (old_cred)
		put_cred(old_cred);
out:
	return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static const struct proto_ops unix_stream_ops = {
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct proto_ops unix_dgram_ops = {
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct proto_ops unix_seqpacket_ops = {
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct proto unix_proto = {
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;
static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
			  &af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u = unix_sk(sk);
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	return sk;
}
static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	return unix_release_sock(sk, 0);
}
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	mutex_lock(&u->readlock);

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(path.mnt, path.dentry);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->dentry;
			if (dentry)
				touch_atime(unix_sk(u)->mnt, dentry);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct dentry *dentry = NULL;
	struct nameidata nd;
	int err;
	unsigned hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sunaddr->sun_path[0]) {
		unsigned int mode;
		err = 0;
		/*
		 * Get the parent directory, calculate the hash for last
		 * component.
		 */
		err = kern_path_parent(sunaddr->sun_path, &nd);
		if (err)
			goto out_mknod_parent;

		dentry = lookup_create(&nd, 0);
		err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_mknod_unlock;

		/*
		 * All right, let's create it.
		 */
		mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = mnt_want_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		err = security_path_mknod(&nd.path, dentry, mode, 0);
		if (err)
			goto out_mknod_drop_write;
		err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
out_mknod_drop_write:
		mnt_drop_write(nd.path.mnt);
		if (err)
			goto out_mknod_dput;
		mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
		dput(nd.path.dentry);
		nd.path.dentry = dentry;

		addr->hash = UNIX_HASH_SIZE;
	}

	spin_lock(&unix_table_lock);

	if (!sunaddr->sun_path[0]) {
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	} else {
		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
		u->dentry = nd.path.dentry;
		u->mnt    = nd.path.mnt;
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;

out_mknod_dput:
	dput(dentry);
out_mknod_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
out_mknod_parent:
	if (err == -EEXIST)
		err = -EADDRINUSE;
	unix_release_addr(addr);
	goto out_up;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we will make it after state is locked,
	   we will have to recheck all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   It is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. It is dangerous because deadlock is
	   possible. Connect to self case and simultaneous
	   attempt to connect are eliminated by checking socket
	   state. other is TCP_LISTEN; if sk is TCP_LISTEN we
	   check this before the attempt to grab the lock.

	   Well, and we have to recheck the state after socket locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Fastly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock*/
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->dentry) {
		newu->dentry	= dget(otheru->dentry);
		newu->mnt	= mntget(otheru->mnt);
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	scm.cred = UNIXCB(skb).cred;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

#define MAX_RECURSION_LEVEL 4
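/*
 * Added illustration (not from the original source): the fd attach/detach
 * helpers below back the SCM_RIGHTS API.  A userspace sender, sketched with
 * hypothetical names, passes a descriptor roughly like this:
 *
 *	char dummy = '*';
 *	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *	char buf[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = buf, .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SCM_RIGHTS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *	sendmsg(s, &msg, 0);
 *
 * MAX_RECURSION_LEVEL above bounds how deeply such passed descriptors may
 * themselves be AF_UNIX sockets carrying yet more descriptors in flight.
 */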
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid  = get_pid(scm->pid);
	UNIXCB(skb).cred = get_cred(scm->cred);
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}
/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(siocb->scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;
	unix_get_secdata(siocb->scm, skb);

	skb_reset_transport_header(skb);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should
		 *	datagram error
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		/*
		 *	Optimisation for the fact that under 0.01% of X
		 *	messages typically need breaking up.
		 */

		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
					  &err);

		if (skb == NULL)
			goto out_err;

		/*
		 *	If you pass two values to the sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if it fails grab the
		 *	fallback size buffer which is under a page and will
		 *	succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}
static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
		goto out;
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable: on PEEK we could:
		   - do not return fds - good, but too simple 8)
		   - return fds, and do not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
/*
 *	Sleep until data has arrived. But check for races..
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while sleeps in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(timeo);
		goto out;
	}

	do {
		int chunk;
		struct sk_buff *skb;

		unix_state_lock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)
			    ||  mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			continue;
 unlock:
			unix_state_unlock(sk);
			break;
		}
		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
			    (UNIXCB(skb).cred != siocb->scm->cred)) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);

	if (!mode)
		return 0;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
	{
		struct sk_buff *skb;

		if (sk->sk_state == TCP_LISTEN) {
			err = -EINVAL;
			break;
		}

		spin_lock(&sk->sk_receive_queue.lock);
		if (sk->sk_type == SOCK_STREAM ||
		    sk->sk_type == SOCK_SEQPACKET) {
			skb_queue_walk(&sk->sk_receive_queue, skb)
				amount += skb->len;
		} else {
			skb = skb_peek(&sk->sk_receive_queue);
			if (skb)
				amount = skb->len;
		}
		spin_unlock(&sk->sk_receive_queue.lock);
		err = put_user(amount, (int __user *)arg);
		break;
	}
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

struct unix_iter_state {
	struct seq_net_private p;
	int i;
};
static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;
	++*pos;

	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);