/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko EiBfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it avoids a huge number
 *					of hashed socks (this is for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *	     Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair.
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT;
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+).
 *
 * Known differences from the reference BSD that was tested:
 *
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as the high water
 *		mark and a fake inode identifier (nor the BSD first-socket
 *		fstat-twice bug).
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns a 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS-based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  that start with a 0 byte, so this name space does not
 *		  intersect with BSD names.
 */
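/*
 * Illustrative userspace sketch (not part of this file): binding an AF_UNIX
 * socket in the abstract namespace described above.  The name
 * "example-abstract" is a made-up example; the only requirement is that
 * sun_path[0] is 0 and the passed address length covers exactly the bytes
 * that form the name.
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	int bind_abstract(void)
 *	{
 *		struct sockaddr_un addr;
 *		const char name[] = "example-abstract";
 *		socklen_t len;
 *		int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sun_family = AF_UNIX;
 *		addr.sun_path[0] = 0;		// leading 0 byte => abstract name
 *		memcpy(addr.sun_path + 1, name, sizeof(name) - 1);
 *		len = offsetof(struct sockaddr_un, sun_path) + 1 + sizeof(name) - 1;
 *		return bind(fd, (struct sockaddr *)&addr, len) ? -1 : fd;
 *	}
 */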
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_long_t unix_nr_socks;

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */
/*
 *  SMP locking strategy:
 *    the hash table is protected by the spinlock unix_table_lock;
 *    each socket's state is protected by a separate spin lock.
 */

static inline unsigned unix_hash_fold(__wsum n)
{
	unsigned hash = (__force unsigned)n;

	hash ^= hash>>16;
	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}
/*
 *	Check unix socket name:
 *		- it should not be zero length.
 *		- if it does not start with a zero byte, it should be NUL
 *		  terminated (a filesystem object).
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}
static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer.  First, this allows
 * flow control based only on wmem_alloc; second, a sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal the error.  Messages are lost.  Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}
static int unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	  = u->dentry;
	u->dentry = NULL;
	mnt	  = u->mnt;
	u->mnt	  = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What is the above comment talking about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

	return 0;
}
static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;
	const struct cred *old_cred = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
	if (old_cred)
		put_cred(old_cred);
out:
	return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t, int);
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;
static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->dentry = NULL;
	u->mnt	  = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	return sk;
}
static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock(sk, 0);
}
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(path.mnt, path.dentry);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->dentry;
			if (dentry)
				touch_atime(unix_sk(u)->mnt, dentry);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}
*sock
, struct sockaddr
*uaddr
, int addr_len
)
807 struct sock
*sk
= sock
->sk
;
808 struct net
*net
= sock_net(sk
);
809 struct unix_sock
*u
= unix_sk(sk
);
810 struct sockaddr_un
*sunaddr
= (struct sockaddr_un
*)uaddr
;
811 char *sun_path
= sunaddr
->sun_path
;
812 struct dentry
*dentry
= NULL
;
816 struct unix_address
*addr
;
817 struct hlist_head
*list
;
820 if (sunaddr
->sun_family
!= AF_UNIX
)
823 if (addr_len
== sizeof(short)) {
824 err
= unix_autobind(sock
);
828 err
= unix_mkname(sunaddr
, addr_len
, &hash
);
833 mutex_lock(&u
->readlock
);
840 addr
= kmalloc(sizeof(*addr
)+addr_len
, GFP_KERNEL
);
844 memcpy(addr
->name
, sunaddr
, addr_len
);
845 addr
->len
= addr_len
;
846 addr
->hash
= hash
^ sk
->sk_type
;
847 atomic_set(&addr
->refcnt
, 1);
853 * Get the parent directory, calculate the hash for last
856 dentry
= kern_path_create(AT_FDCWD
, sun_path
, &path
, 0);
857 err
= PTR_ERR(dentry
);
859 goto out_mknod_parent
;
862 * All right, let's create it.
865 (SOCK_INODE(sock
)->i_mode
& ~current_umask());
866 err
= mnt_want_write(path
.mnt
);
869 err
= security_path_mknod(&path
, dentry
, mode
, 0);
871 goto out_mknod_drop_write
;
872 err
= vfs_mknod(path
.dentry
->d_inode
, dentry
, mode
, 0);
873 out_mknod_drop_write
:
874 mnt_drop_write(path
.mnt
);
877 mutex_unlock(&path
.dentry
->d_inode
->i_mutex
);
879 path
.dentry
= dentry
;
881 addr
->hash
= UNIX_HASH_SIZE
;
884 spin_lock(&unix_table_lock
);
888 if (__unix_find_socket_byname(net
, sunaddr
, addr_len
,
889 sk
->sk_type
, hash
)) {
890 unix_release_addr(addr
);
894 list
= &unix_socket_table
[addr
->hash
];
896 list
= &unix_socket_table
[dentry
->d_inode
->i_ino
& (UNIX_HASH_SIZE
-1)];
897 u
->dentry
= path
.dentry
;
902 __unix_remove_socket(sk
);
904 __unix_insert_socket(list
, sk
);
907 spin_unlock(&unix_table_lock
);
909 mutex_unlock(&u
->readlock
);
915 mutex_unlock(&path
.dentry
->d_inode
->i_mutex
);
920 unix_release_addr(addr
);
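/*
 * Illustrative userspace sketch (not part of this file): binding to a
 * filesystem path, which goes through the vfs_mknod() branch above.  Note
 * that the socket inode is not removed when the socket is closed, so a
 * stale path must be unlink()ed before it can be bound again (otherwise
 * bind() fails with EADDRINUSE).
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <unistd.h>
 *
 *	int bind_path(const char *path)
 *	{
 *		struct sockaddr_un addr;
 *		int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sun_family = AF_UNIX;
 *		strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
 *		unlink(path);	// remove a stale socket inode, if any
 *		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */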
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we will make it after state is locked,
	   we will have to recheck all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. It is dangerous because deadlock is
	   possible. Connect to self and simultaneous attempts to connect are
	   eliminated by checking socket state. other is TCP_LISTEN; if sk is
	   TCP_LISTEN we check this before attempting to grab the lock.

	   Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->dentry) {
		newu->dentry	= dget(otheru->dentry);
		newu->mnt	= mntget(otheru->mnt);
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* queue it and send info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
*sock
, struct socket
*newsock
, int flags
)
1242 struct sock
*sk
= sock
->sk
;
1244 struct sk_buff
*skb
;
1248 if (sock
->type
!= SOCK_STREAM
&& sock
->type
!= SOCK_SEQPACKET
)
1252 if (sk
->sk_state
!= TCP_LISTEN
)
1255 /* If socket state is TCP_LISTEN it cannot change (for now...),
1256 * so that no locks are necessary.
1259 skb
= skb_recv_datagram(sk
, 0, flags
&O_NONBLOCK
, &err
);
1261 /* This means receive shutdown. */
1268 skb_free_datagram(sk
, skb
);
1269 wake_up_interruptible(&unix_sk(sk
)->peer_wait
);
1271 /* attach accepted sock to socket */
1272 unix_state_lock(tsk
);
1273 newsock
->state
= SS_CONNECTED
;
1274 sock_graft(tsk
, newsock
);
1275 unix_state_unlock(tsk
);
1283 static int unix_getname(struct socket
*sock
, struct sockaddr
*uaddr
, int *uaddr_len
, int peer
)
1285 struct sock
*sk
= sock
->sk
;
1286 struct unix_sock
*u
;
1287 DECLARE_SOCKADDR(struct sockaddr_un
*, sunaddr
, uaddr
);
1291 sk
= unix_peer_get(sk
);
1302 unix_state_lock(sk
);
1304 sunaddr
->sun_family
= AF_UNIX
;
1305 sunaddr
->sun_path
[0] = 0;
1306 *uaddr_len
= sizeof(short);
1308 struct unix_address
*addr
= u
->addr
;
1310 *uaddr_len
= addr
->len
;
1311 memcpy(sunaddr
, addr
->name
, *uaddr_len
);
1313 unix_state_unlock(sk
);
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	scm.cred = UNIXCB(skb).cred;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}
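/*
 * Illustrative userspace sketch (not part of this file): passing a file
 * descriptor over an AF_UNIX socket with SCM_RIGHTS, which is the path
 * that ends up in unix_attach_fds() above.  "sock_fd" and "fd_to_send"
 * are placeholder names; error handling is trimmed.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *
 *	int send_fd(int sock_fd, int fd_to_send)
 *	{
 *		char data = 'x';
 *		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *		union {
 *			char buf[CMSG_SPACE(sizeof(int))];
 *			struct cmsghdr align;
 *		} u;
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));
 *		return sendmsg(sock_fd, &msg, 0) == 1 ? 0 : -1;
 *	}
 */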
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid  = get_pid(scm->pid);
	if (scm->cred)
		UNIXCB(skb).cred = get_cred(scm->cred);
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
{
	if (UNIXCB(skb).cred)
		return;
	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
	    !other->sk_socket ||
	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
		UNIXCB(skb).pid  = get_pid(task_tgid(current));
		UNIXCB(skb).cred = get_current_cred();
	}
}
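/*
 * Illustrative userspace sketch (not part of this file): a receiver that
 * asks for the credentials described above by setting SO_PASSCRED and then
 * reading the SCM_CREDENTIALS control message.  "fd" is a connected AF_UNIX
 * socket; error handling is trimmed.
 *
 *	#define _GNU_SOURCE
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *
 *	int recv_creds(int fd, struct ucred *out)
 *	{
 *		int on = 1;
 *		char data;
 *		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *		union {
 *			char buf[CMSG_SPACE(sizeof(struct ucred))];
 *			struct cmsghdr align;
 *		} u;
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
 *		};
 *		struct cmsghdr *cmsg;
 *
 *		setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *		if (recvmsg(fd, &msg, 0) < 0)
 *			return -1;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *			if (cmsg->cmsg_level == SOL_SOCKET &&
 *			    cmsg->cmsg_type == SCM_CREDENTIALS)
 *				memcpy(out, CMSG_DATA(cmsg), sizeof(*out));
 *		return 0;
 *	}
 */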
/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(siocb->scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;
	unix_get_secdata(siocb->scm, skb);

	skb_reset_transport_header(skb);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should
		 *	a datagram error do here?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;
	int max_level;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		/*
		 *	Optimisation for the fact that under 0.01% of X
		 *	messages typically need breaking up.
		 */

		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 *	Grab a buffer
		 */

		skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
					  &err);

		if (skb == NULL)
			goto out_err;

		/*
		 *	If you pass two values to sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if that fails it grabs the
		 *	fallback size buffer, which is under a page and will
		 *	succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other, size);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}
static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
				  struct msghdr *msg, size_t size,
				  int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
}

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
		goto out;
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable: on PEEK we could:
		   - not return fds - good, but too simple 8)
		   - return fds, and not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly, however!
		*/
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
/*
 *	Sleep until data has arrived. But check for races.
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}

static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(timeo);
		goto out;
	}

	do {
		int chunk;
		struct sk_buff *skb;

		unix_state_lock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)
			    ||  mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			continue;
 unlock:
			unix_state_unlock(sk);
			break;
		}
		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
			    (UNIXCB(skb).cred != siocb->scm->cred)) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			consume_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);

	if (!mode)
		return 0;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		{
			struct sk_buff *skb;

			if (sk->sk_state == TCP_LISTEN) {
				err = -EINVAL;
				break;
			}

			spin_lock(&sk->sk_receive_queue.lock);
			if (sk->sk_type == SOCK_STREAM ||
			    sk->sk_type == SOCK_SEQPACKET) {
				skb_queue_walk(&sk->sk_receive_queue, skb)
					amount += skb->len;
			} else {
				skb = skb_peek(&sk->sk_receive_queue);
				if (skb)
					amount = skb->len;
			}
			spin_unlock(&sk->sk_receive_queue.lock);
			err = put_user(amount, (int __user *)arg);
			break;
		}
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s;
	     s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;
	++*pos;

	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there. */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);