/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan.cox@linux.org>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Version:	$Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					SVR4.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko Eißfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting of the socket.
 *		Kirk Petersen	:	Made this a module.
 *		Christoph Rohland :	Elegant non-blocking accept/connect algorithm.
 *		Alexey Kuznetsov :	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *		Andrea Arcangeli :	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid huge amounts
 *					of socks hashed (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *		Alexey Kuznetsov :	Full scale SMP. Lots of bugs are introduced 8)
 *		Malcolm Beattie	:	Set peercred for socketpair.
 *		Michal Ostrowski :	Module initialization cleanup.
 *		Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
 */
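/*
 * Illustrative sketch (userspace view, not part of this file): binding
 * a socket in the abstract namespace described above. The name is the
 * byte sequence after the leading 0, and the address length counts
 * only the bytes actually used; "example" is a made-up name.
 *
 *	struct sockaddr_un sun;
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_UNIX;
 *	memcpy(sun.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */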
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
int sysctl_unix_max_dgram_qlen __read_mostly = 10;

struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
DEFINE_SPINLOCK(unix_table_lock);
static atomic_t unix_nr_socks = ATOMIC_INIT(0);

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */
/*
 * SMP locking strategy:
 *    the hash table is protected with the spinlock unix_table_lock
 *    each socket state is protected by a separate rwlock.
 */
static inline unsigned unix_hash_fold(unsigned hash)
{
	hash ^= hash>>16;
	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}
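/*
 * Worked example (assuming UNIX_HASH_SIZE == 256, as defined in
 * af_unix.h): folding a checksum of 0x12345678,
 *
 *	0x12345678 ^ (0x12345678 >> 16) = 0x1234444c
 *	0x1234444c ^ (0x1234444c >>  8) = 0x12267008
 *	0x12267008 & (256 - 1)          = 0x08
 *
 * so every byte of the checksum influences the final bucket index.
 */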
#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
}
static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_rlock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_runlock(s);
	return peer;
}
static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}
/*
 *	Check unix socket name:
 *		- it should not be zero length.
 *		- if it starts with a non-zero byte, it must be NUL terminated (FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len]=0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
	return len;
}
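/*
 * Worked example (sketch): binding "/tmp/x" with len == 8 (a 2-byte
 * sun_family plus six path bytes). The code above stores a NUL at
 * byte 8 and recomputes len = strlen("/tmp/x") + 1 + sizeof(short)
 * = 6 + 1 + 2 = 9, so the returned length always covers the
 * terminating NUL for filesystem names.
 */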
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	BUG_TRAP(sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}
static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
					      int len, int type, unsigned hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
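/*
 * In other words, the socket counts as writable while at most a
 * quarter of sk_sndbuf is committed to in-flight skbs: for example,
 * with sk_sndbuf == 16384, wmem_alloc may grow to 4096 before poll()
 * stops reporting POLLOUT.
 */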
static void unix_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (unix_writable(sk)) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);
		sk_wake_async(sk, 2, POLL_OUT);
	}
	read_unlock(&sk->sk_callback_lock);
}
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows us
 * to do flow control based only on wmem_alloc; second, an sk connected to a
 * peer may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(sk_unhashed(sk));
	BUG_TRAP(!sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_dec(&unix_nr_socks);
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
#endif
}
static int unix_release_sock (struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_wlock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	     = u->dentry;
	u->dentry    = NULL;
	mnt	     = u->mnt;
	u->mnt	     = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_wunlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair=unix_peer(sk);

	if (skpair!=NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_wlock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_wunlock(skpair);
			skpair->sk_state_change(skpair);
			read_lock(&skpair->sk_callback_lock);
			sk_wake_async(skpair,1,POLL_HUP);
			read_unlock(&skpair->sk_callback_lock);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state==TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (atomic_read(&unix_tot_inflight))
		unix_gc();	/* Garbage collect fds */

	return 0;
}
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);

	err = -EOPNOTSUPP;
	if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
		goto out;		/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;		/* No listens on an unbound socket */
	unix_state_wlock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	sk->sk_peercred.pid	= current->tgid;
	sk->sk_peercred.uid	= current->euid;
	sk->sk_peercred.gid	= current->egid;
	err = 0;

out_unlock:
	unix_state_wunlock(sk);
out:
	return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		datagram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		datagram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct proto unix_proto = {
	.name	  = "UNIX",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct unix_sock),
};
/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;
static struct sock * unix_create1(struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
		goto out;

	sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
	if (!sk)
		goto out;

	atomic_inc(&unix_nr_socks);

	sock_init_data(sock,sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= sysctl_unix_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->dentry = NULL;
	u->mnt	  = NULL;
	spin_lock_init(&u->lock);
	atomic_set(&u->inflight, sock ? 0 : -1);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	return sk;
}
static int unix_create(struct socket *sock, int protocol)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type=SOCK_DGRAM;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(sock) ? 0 : -ENOMEM;
}
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock (sk, 0);
}
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address * addr;
	int err;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/* Sanity yield. It is an unusual case, but yet... */
		if (!(ordernum&0xFF))
			yield();
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
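/*
 * Example (sketch): with ordernum == 0x2a the retry loop above binds
 * the socket to the abstract name "\0" "0002a", giving addr->len =
 * 5 + 1 + sizeof(short) = 8; such a socket shows up as "@0002a" in
 * /proc/net/unix (see unix_seq_show() below).
 */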
static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct nameidata nd;
	int err = 0;

	if (sunname->sun_path[0]) {
		err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
		if (err)
			goto fail;
		err = vfs_permission(&nd, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
			goto put_fail;
		u=unix_find_socket_byinode(nd.dentry->d_inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(nd.mnt, nd.dentry);

		path_release(&nd);

		err=-EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u=unix_find_socket_byname(sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->dentry;
			if (dentry)
				touch_atime(unix_sk(u)->mnt, dentry);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_release(&nd);
fail:
	*error=err;
	return NULL;
}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
	struct dentry * dentry = NULL;
	struct nameidata nd;
	int err;
	unsigned hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len==sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sunaddr->sun_path[0]) {
		unsigned int mode;
		err = 0;
		/*
		 * Get the parent directory, calculate the hash for the last
		 * component.
		 */
		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
		if (err)
			goto out_mknod_parent;

		dentry = lookup_create(&nd, 0);
		err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_mknod_unlock;

		/*
		 * All right, let's create it.
		 */
		mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
		err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
		if (err)
			goto out_mknod_dput;
		mutex_unlock(&nd.dentry->d_inode->i_mutex);
		dput(nd.dentry);
		nd.dentry = dentry;

		addr->hash = UNIX_HASH_SIZE;
	}

	spin_lock(&unix_table_lock);

	if (!sunaddr->sun_path[0]) {
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	} else {
		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
		u->dentry = nd.dentry;
		u->mnt    = nd.mnt;
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;

out_mknod_dput:
	dput(dentry);
out_mknod_unlock:
	mutex_unlock(&nd.dentry->d_inode->i_mutex);
	path_release(&nd);
out_mknod_parent:
	if (err==-EEXIST)
		err=-EADDRINUSE;
	unix_release_addr(addr);
	goto out_up;
}
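/*
 * Userspace view of the above (sketch, with a made-up path): a
 * successful filesystem bind leaves an S_IFSOCK inode behind, and the
 * error path above maps EEXIST to EADDRINUSE, so the name is
 * typically unlinked before reuse:
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *
 *	strcpy(sun.sun_path, "/tmp/mysock");
 *	unlink("/tmp/mysock");
 *	bind(fd, (struct sockaddr *)&sun, sizeof(sun));
 */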
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
	struct sock *other;
	unsigned hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

		other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_wlock(sk);

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_wlock(sk);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk)=other;
		unix_state_wunlock(sk);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk)=other;
		unix_state_wunlock(sk);
	}
	return 0;

out_unlock:
	unix_state_wunlock(sk);
	sock_put(other);
out:
	return err;
}
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		(skb_queue_len(&other->sk_receive_queue) >
		 other->sk_max_ack_backlog);

	unix_state_runlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags)
		&& !u->addr && (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we do it after the state is locked,
	   we will have to recheck everything again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_rlock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_runlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;

	if (skb_queue_len(&other->sk_receive_queue) >
	    other->sk_max_ack_backlog) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our write lock and cannot
	   drop the lock on the peer. It is dangerous because a deadlock is
	   possible. The connect-to-self case and simultaneous
	   connection attempts are eliminated by checking the socket
	   state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
	   check this before attempting to grab the lock.

	   Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_wlock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_wunlock(sk);
		unix_state_runlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sock, other->sk_socket, newsk);
	if (err) {
		unix_state_wunlock(sk);
		goto out_unlock;
	}

	/* The way is open! Quickly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	newsk->sk_peercred.pid	= current->tgid;
	newsk->sk_peercred.uid	= current->euid;
	newsk->sk_peercred.gid	= current->egid;
	newu = unix_sk(newsk);
	newsk->sk_sleep		= &newu->peer_wait;
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->dentry) {
		newu->dentry	= dget(otheru->dentry);
		newu->mnt	= mntget(otheru->mnt);
	}

	/* Set credentials */
	sk->sk_peercred = other->sk_peercred;

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_wunlock(sk);

	/* send the info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	/* Undo the artificially decreased inflight after the embrion
	 * is installed to the listening socket. */
	atomic_inc(&newu->inflight);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_runlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_runlock(other);

out:
	if (skb)
		kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska=socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska)=skb;
	unix_peer(skb)=ska;
	ska->sk_peercred.pid = skb->sk_peercred.pid = current->tgid;
	ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
	ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
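/*
 * Userspace view (sketch): after socketpair() both ends report the
 * creating process via SO_PEERCRED, matching the assignments above:
 *
 *	int sv[2];
 *	struct ucred cred;
 *	socklen_t len = sizeof(cred);
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	getsockopt(sv[0], SOL_SOCKET, SO_PEERCRED, &cred, &len);
 *
 * cred.pid, cred.uid and cred.gid then describe the calling process.
 */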
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_wlock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_wunlock(tsk);
	return 0;

out:
	return err;
}
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_rlock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_runlock(sk);
	sock_put(sk);
out:
	return err;
}
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	skb->destructor = sock_wfree;
	UNIXCB(skb).fp = NULL;

	for (i=scm->fp->count-1; i>=0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_fds(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	for (i=scm->fp->count-1; i>=0; i--)
		unix_inflight(scm->fp->fp[i]);
	UNIXCB(skb).fp = scm->fp;
	skb->destructor = unix_destruct_fds;
	scm->fp = NULL;
}
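/*
 * Userspace view (sketch): descriptors enter this machinery via an
 * SCM_RIGHTS control message; fd_to_pass and sock_fd are placeholders:
 *
 *	char buf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type  = SCM_RIGHTS;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */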
/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr=msg->msg_name;
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags)
		&& !u->addr && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb==NULL)
		goto out;

	memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
	if (siocb->scm->fp)
		unix_attach_fds(siocb->scm, skb);
	unix_get_secdata(siocb->scm, skb);

	skb->h.raw = skb->data;
	err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other==NULL)
			goto out_free;
	}

	unix_state_rlock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should a
		 *	datagram error do?
		 */
		unix_state_runlock(other);
		sock_put(other);

		err = 0;
		unix_state_wlock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk)=NULL;
			unix_state_wunlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_wunlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk &&
	    (skb_queue_len(&other->sk_receive_queue) >
	     other->sk_max_ack_backlog)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	skb_queue_tail(&other->sk_receive_queue, skb);
	unix_state_runlock(other);
	other->sk_data_ready(other, len);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_runlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	struct sockaddr_un *sunaddr=msg->msg_name;
	int err,size;
	struct sk_buff *skb;
	int sent=0;
	struct scm_cookie tmp_scm;

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len)
	{
		/*
		 *	Optimisation for the fact that under 0.01% of X
		 *	messages typically need breaking up.
		 */

		size = len-sent;

		/* Keep two messages in the pipe so it schedules better */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		/*
		 *	Grab a buffer
		 */
		skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);

		if (skb==NULL)
			goto out_err;

		/*
		 *	If you pass two values to sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if it fails grabs the
		 *	fallback size buffer which is under a page and will
		 *	succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
		if (siocb->scm->fp)
			unix_attach_fds(siocb->scm, skb);

		if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_rlock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		skb_queue_tail(&other->sk_receive_queue, skb);
		unix_state_runlock(other);
		other->sk_data_ready(other, size);
		sent+=size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_runlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE,current,0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}
static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	msg->msg_namelen = 0;
	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	msg->msg_namelen = 0;

	mutex_lock(&u->readlock);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out_unlock;

	wake_up_interruptible(&u->peer_wait);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len)
		size = skb->len;
	else if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	if (err)
		goto out_free;

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	siocb->scm->creds = *UNIXCREDS(skb);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);
	} else {
		/* It is questionable: on PEEK we could:
		   - not return fds: good, but too simple 8)
		   - return fds, and not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/
		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk,skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
/*
 *	Sleep until data has arrived. But check for races..
 */

static long unix_stream_data_wait(struct sock * sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_rlock(sk);

	for (;;) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_runlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_rlock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk->sk_sleep, &wait);
	unix_state_runlock(sk);
	return timeo;
}
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr=msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while it sleeps in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	mutex_lock(&u->readlock);

	do {
		int chunk;
		struct sk_buff *skb;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb==NULL) {
			if (copied >= target)
				break;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */
			if ((err = sock_error(sk)) != 0)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			mutex_lock(&u->readlock);
			continue;
		}

		if (check_creds) {
			/* Never glue messages from different writers */
			if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
		} else {
			/* Copy credentials */
			siocb->scm->creds = *UNIXCREDS(skb);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			/* put the skb back if we didn't use it up.. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}

			kfree_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
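	/*
	 * Worked example: SHUT_RD (0) becomes RCV_SHUTDOWN (1), SHUT_WR (1)
	 * becomes SEND_SHUTDOWN (2), and SHUT_RDWR (2) becomes
	 * RCV_SHUTDOWN|SEND_SHUTDOWN (3) - the user's 0/1/2 encoding is
	 * remapped onto the kernel's shutdown bitmask in a single step.
	 */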
	if (mode) {
		unix_state_wlock(sk);
		sk->sk_shutdown |= mode;
		other=unix_peer(sk);
		if (other)
			sock_hold(other);
		unix_state_wunlock(sk);
		sk->sk_state_change(sk);

		if (other &&
			(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

			int peer_mode = 0;

			if (mode&RCV_SHUTDOWN)
				peer_mode |= SEND_SHUTDOWN;
			if (mode&SEND_SHUTDOWN)
				peer_mode |= RCV_SHUTDOWN;
			unix_state_wlock(other);
			other->sk_shutdown |= peer_mode;
			unix_state_wunlock(other);
			other->sk_state_change(other);
			read_lock(&other->sk_callback_lock);
			if (peer_mode == SHUTDOWN_MASK)
				sk_wake_async(other,1,POLL_HUP);
			else if (peer_mode & RCV_SHUTDOWN)
				sk_wake_async(other,1,POLL_IN);
			read_unlock(&other->sk_callback_lock);
		}
		if (other)
			sock_put(other);
	}
	return 0;
}
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount=0;
	int err;

	switch(cmd)
	{
		case SIOCOUTQ:
			amount = atomic_read(&sk->sk_wmem_alloc);
			err = put_user(amount, (int __user *)arg);
			break;
		case SIOCINQ:
		{
			struct sk_buff *skb;

			if (sk->sk_state == TCP_LISTEN) {
				err = -EINVAL;
				break;
			}

			spin_lock(&sk->sk_receive_queue.lock);
			if (sk->sk_type == SOCK_STREAM ||
			    sk->sk_type == SOCK_SEQPACKET) {
				skb_queue_walk(&sk->sk_receive_queue, skb)
					amount += skb->len;
			} else {
				skb = skb_peek(&sk->sk_receive_queue);
				if (skb)
					amount=skb->len;
			}
			spin_unlock(&sk->sk_receive_queue.lock);
			err = put_user(amount, (int __user *)arg);
			break;
		}

		default:
			err = -ENOIOCTLCMD;
			break;
	}
	return err;
}
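/*
 * Userspace view (sketch): SIOCINQ as implemented above returns the
 * total of all queued bytes for stream sockets but only the size of
 * the next datagram for SOCK_DGRAM:
 *
 *	int pending;
 *
 *	ioctl(fd, SIOCINQ, &pending);
 */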
static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
#ifdef CONFIG_PROC_FS
static struct sock *unix_seq_idx(int *iter, loff_t pos)
{
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}


static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	if (v == (void *)1)
		return first_unix_socket(seq->private);
	return next_unix_socket(seq->private, v);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock(&unix_table_lock);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == (void *)1)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_rlock(s);

		seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_runlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
static struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};


static int unix_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	int *iter = kmalloc(sizeof(int), GFP_KERNEL);

	if (!iter)
		goto out;

	rc = seq_open(file, &unix_seq_ops);
	if (rc)
		goto out_kfree;

	seq	     = file->private_data;
	seq->private = iter;
	*iter = 0;
out:
	return rc;
out_kfree:
	kfree(iter);
	goto out;
}

static struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif
static struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};

static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __FUNCTION__);
		goto out;
	}

	sock_register(&unix_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("unix", 0, &unix_seq_fops);
#endif
	unix_sysctl_register();
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	unix_sysctl_unregister();
	proc_net_remove("unix");
	proto_unregister(&unix_proto);
}

module_init(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);