2 * VMware vSockets Driver
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 /* Implementation notes:
18 * - There are two kinds of sockets: those created by user action (such as
19 * calling socket(2)) and those created by incoming connection request packets.
21 * - There are two "global" tables, one for bound sockets (sockets that have
22 * specified an address that they are responsible for) and one for connected
23 * sockets (sockets that have established a connection with another socket).
24 * These tables are "global" in that all sockets on the system are placed
25 * within them. - Note, though, that the bound table contains an extra entry
26 * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
27 * that list. The bound table is used solely for lookup of sockets when packets
28 * are received and that's not necessary for SOCK_DGRAM sockets since we create
29 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
30 * sockets out of the bound hash buckets will reduce the chance of collisions
31 * when looking for SOCK_STREAM sockets and prevents us from having to check the
32 * socket type in the hash table lookups.
34 * - Sockets created by user action will either be "client" sockets that
35 * initiate a connection or "server" sockets that listen for connections; we do
36 * not support simultaneous connects (two "client" sockets connecting).
38 * - "Server" sockets are referred to as listener sockets throughout this
39 * implementation because they are in the TCP_LISTEN state. When a
40 * connection request is received (the second kind of socket mentioned above),
41 * we create a new socket and refer to it as a pending socket. These pending
42 * sockets are placed on the pending connection list of the listener socket.
43 * When future packets are received for the address the listener socket is
44 * bound to, we check if the source of the packet is from one that has an
45 * existing pending connection. If it does, we process the packet for the
46 * pending socket. When that socket reaches the connected state, it is removed
47 * from the listener socket's pending list and enqueued in the listener
48 * socket's accept queue. Callers of accept(2) will accept connected sockets
49 * from the listener socket's accept queue. If the socket cannot be accepted
50 * for some reason then it is marked rejected. Once the connection is
51 * accepted, it is owned by the user process and the responsibility for cleanup
52 * falls with that user process.
54 * - It is possible that these pending sockets will never reach the connected
55 * state; in fact, we may never receive another packet after the connection
56 * request. Because of this, we must schedule a cleanup function to run in the
57 * future, after some amount of time passes where a connection should have been
58 * established. This function ensures that the socket is off all lists so it
59 * cannot be retrieved, then drops all references to the socket so it is cleaned
60 * up (sock_put() -> sk_free() -> our sk_destruct implementation). Note this
61 * function will also cleanup rejected sockets, those that reach the connected
62 * state but leave it before they have been accepted.
64 * - Lock ordering for pending or accept queue sockets is:
66 * lock_sock(listener);
67 * lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
69 * Using explicit nested locking keeps lockdep happy since normally only one
70 * lock of a given class may be taken at a time.
72 * - Sockets created by user action will be cleaned up when the user process
73 * calls close(2), causing our release implementation to be called. Our release
74 * implementation will perform some cleanup then drop the last reference so our
75 * sk_destruct implementation is invoked. Our sk_destruct implementation will
76 * perform additional cleanup that's common for both types of sockets.
78 * - A socket's reference count is what ensures that the structure won't be
79 * freed. Each entry in a list (such as the "global" bound and connected tables
80 * and the listener socket's pending list and connected queue) ensures a
81 * reference. When we defer work until process context and pass a socket as our
82 * argument, we must ensure the reference count is increased to ensure the
83 * socket isn't freed before the function is run; the deferred function will
84 * then drop the reference.
86 * - sk->sk_state uses the TCP state constants because they are widely used by
87 * other address families and exposed to userspace tools like ss(8):
89 * TCP_CLOSE - unconnected
90 * TCP_SYN_SENT - connecting
91 * TCP_ESTABLISHED - connected
92 * TCP_CLOSING - disconnecting
93 * TCP_LISTEN - listening
96 #include <linux/types.h>
97 #include <linux/bitops.h>
98 #include <linux/cred.h>
99 #include <linux/init.h>
100 #include <linux/io.h>
101 #include <linux/kernel.h>
102 #include <linux/sched/signal.h>
103 #include <linux/kmod.h>
104 #include <linux/list.h>
105 #include <linux/miscdevice.h>
106 #include <linux/module.h>
107 #include <linux/mutex.h>
108 #include <linux/net.h>
109 #include <linux/poll.h>
110 #include <linux/random.h>
111 #include <linux/skbuff.h>
112 #include <linux/smp.h>
113 #include <linux/socket.h>
114 #include <linux/stddef.h>
115 #include <linux/unistd.h>
116 #include <linux/wait.h>
117 #include <linux/workqueue.h>
118 #include <net/sock.h>
119 #include <net/af_vsock.h>
121 static int __vsock_bind(struct sock
*sk
, struct sockaddr_vm
*addr
);
122 static void vsock_sk_destruct(struct sock
*sk
);
123 static int vsock_queue_rcv_skb(struct sock
*sk
, struct sk_buff
*skb
);
125 /* Protocol family. */
126 static struct proto vsock_proto
= {
128 .owner
= THIS_MODULE
,
129 .obj_size
= sizeof(struct vsock_sock
),
132 /* The default peer timeout indicates how long we will wait for a peer response
133 * to a control message.
135 #define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)
137 static const struct vsock_transport
*transport
;
138 static DEFINE_MUTEX(vsock_register_mutex
);
142 /* Get the ID of the local context. This is transport dependent. */
144 int vm_sockets_get_local_cid(void)
146 return transport
->get_local_cid();
148 EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid
);
152 /* Each bound VSocket is stored in the bind hash table and each connected
153 * VSocket is stored in the connected hash table.
155 * Unbound sockets are all put on the same list attached to the end of the hash
156 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
157 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
158 * represents the list that addr hashes to).
160 * Specifically, we initialize the vsock_bind_table array to a size of
161 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
162 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
163 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
164 * mods with VSOCK_HASH_SIZE to ensure this.
166 #define MAX_PORT_RETRIES 24
168 #define VSOCK_HASH(addr) ((addr)->svm_port % VSOCK_HASH_SIZE)
169 #define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
170 #define vsock_unbound_sockets (&vsock_bind_table[VSOCK_HASH_SIZE])
172 /* XXX This can probably be implemented in a better way. */
173 #define VSOCK_CONN_HASH(src, dst) \
174 (((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
175 #define vsock_connected_sockets(src, dst) \
176 (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
177 #define vsock_connected_sockets_vsk(vsk) \
178 vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
180 struct list_head vsock_bind_table
[VSOCK_HASH_SIZE
+ 1];
181 EXPORT_SYMBOL_GPL(vsock_bind_table
);
182 struct list_head vsock_connected_table
[VSOCK_HASH_SIZE
];
183 EXPORT_SYMBOL_GPL(vsock_connected_table
);
184 DEFINE_SPINLOCK(vsock_table_lock
);
185 EXPORT_SYMBOL_GPL(vsock_table_lock
);
187 /* Autobind this socket to the local address if necessary. */
188 static int vsock_auto_bind(struct vsock_sock
*vsk
)
190 struct sock
*sk
= sk_vsock(vsk
);
191 struct sockaddr_vm local_addr
;
193 if (vsock_addr_bound(&vsk
->local_addr
))
195 vsock_addr_init(&local_addr
, VMADDR_CID_ANY
, VMADDR_PORT_ANY
);
196 return __vsock_bind(sk
, &local_addr
);
199 static int __init
vsock_init_tables(void)
203 for (i
= 0; i
< ARRAY_SIZE(vsock_bind_table
); i
++)
204 INIT_LIST_HEAD(&vsock_bind_table
[i
]);
206 for (i
= 0; i
< ARRAY_SIZE(vsock_connected_table
); i
++)
207 INIT_LIST_HEAD(&vsock_connected_table
[i
]);
211 static void __vsock_insert_bound(struct list_head
*list
,
212 struct vsock_sock
*vsk
)
215 list_add(&vsk
->bound_table
, list
);
218 static void __vsock_insert_connected(struct list_head
*list
,
219 struct vsock_sock
*vsk
)
222 list_add(&vsk
->connected_table
, list
);
225 static void __vsock_remove_bound(struct vsock_sock
*vsk
)
227 list_del_init(&vsk
->bound_table
);
231 static void __vsock_remove_connected(struct vsock_sock
*vsk
)
233 list_del_init(&vsk
->connected_table
);
237 static struct sock
*__vsock_find_bound_socket(struct sockaddr_vm
*addr
)
239 struct vsock_sock
*vsk
;
241 list_for_each_entry(vsk
, vsock_bound_sockets(addr
), bound_table
)
242 if (addr
->svm_port
== vsk
->local_addr
.svm_port
)
243 return sk_vsock(vsk
);
248 static struct sock
*__vsock_find_connected_socket(struct sockaddr_vm
*src
,
249 struct sockaddr_vm
*dst
)
251 struct vsock_sock
*vsk
;
253 list_for_each_entry(vsk
, vsock_connected_sockets(src
, dst
),
255 if (vsock_addr_equals_addr(src
, &vsk
->remote_addr
) &&
256 dst
->svm_port
== vsk
->local_addr
.svm_port
) {
257 return sk_vsock(vsk
);
264 static void vsock_insert_unbound(struct vsock_sock
*vsk
)
266 spin_lock_bh(&vsock_table_lock
);
267 __vsock_insert_bound(vsock_unbound_sockets
, vsk
);
268 spin_unlock_bh(&vsock_table_lock
);
271 void vsock_insert_connected(struct vsock_sock
*vsk
)
273 struct list_head
*list
= vsock_connected_sockets(
274 &vsk
->remote_addr
, &vsk
->local_addr
);
276 spin_lock_bh(&vsock_table_lock
);
277 __vsock_insert_connected(list
, vsk
);
278 spin_unlock_bh(&vsock_table_lock
);
280 EXPORT_SYMBOL_GPL(vsock_insert_connected
);
282 void vsock_remove_bound(struct vsock_sock
*vsk
)
284 spin_lock_bh(&vsock_table_lock
);
285 __vsock_remove_bound(vsk
);
286 spin_unlock_bh(&vsock_table_lock
);
288 EXPORT_SYMBOL_GPL(vsock_remove_bound
);
290 void vsock_remove_connected(struct vsock_sock
*vsk
)
292 spin_lock_bh(&vsock_table_lock
);
293 __vsock_remove_connected(vsk
);
294 spin_unlock_bh(&vsock_table_lock
);
296 EXPORT_SYMBOL_GPL(vsock_remove_connected
);
298 struct sock
*vsock_find_bound_socket(struct sockaddr_vm
*addr
)
302 spin_lock_bh(&vsock_table_lock
);
303 sk
= __vsock_find_bound_socket(addr
);
307 spin_unlock_bh(&vsock_table_lock
);
311 EXPORT_SYMBOL_GPL(vsock_find_bound_socket
);
313 struct sock
*vsock_find_connected_socket(struct sockaddr_vm
*src
,
314 struct sockaddr_vm
*dst
)
318 spin_lock_bh(&vsock_table_lock
);
319 sk
= __vsock_find_connected_socket(src
, dst
);
323 spin_unlock_bh(&vsock_table_lock
);
327 EXPORT_SYMBOL_GPL(vsock_find_connected_socket
);
329 static bool vsock_in_bound_table(struct vsock_sock
*vsk
)
333 spin_lock_bh(&vsock_table_lock
);
334 ret
= __vsock_in_bound_table(vsk
);
335 spin_unlock_bh(&vsock_table_lock
);
340 static bool vsock_in_connected_table(struct vsock_sock
*vsk
)
344 spin_lock_bh(&vsock_table_lock
);
345 ret
= __vsock_in_connected_table(vsk
);
346 spin_unlock_bh(&vsock_table_lock
);
351 void vsock_remove_sock(struct vsock_sock
*vsk
)
353 if (vsock_in_bound_table(vsk
))
354 vsock_remove_bound(vsk
);
356 if (vsock_in_connected_table(vsk
))
357 vsock_remove_connected(vsk
);
359 EXPORT_SYMBOL_GPL(vsock_remove_sock
);
361 void vsock_for_each_connected_socket(void (*fn
)(struct sock
*sk
))
365 spin_lock_bh(&vsock_table_lock
);
367 for (i
= 0; i
< ARRAY_SIZE(vsock_connected_table
); i
++) {
368 struct vsock_sock
*vsk
;
369 list_for_each_entry(vsk
, &vsock_connected_table
[i
],
374 spin_unlock_bh(&vsock_table_lock
);
376 EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket
);
378 void vsock_add_pending(struct sock
*listener
, struct sock
*pending
)
380 struct vsock_sock
*vlistener
;
381 struct vsock_sock
*vpending
;
383 vlistener
= vsock_sk(listener
);
384 vpending
= vsock_sk(pending
);
388 list_add_tail(&vpending
->pending_links
, &vlistener
->pending_links
);
390 EXPORT_SYMBOL_GPL(vsock_add_pending
);
392 void vsock_remove_pending(struct sock
*listener
, struct sock
*pending
)
394 struct vsock_sock
*vpending
= vsock_sk(pending
);
396 list_del_init(&vpending
->pending_links
);
400 EXPORT_SYMBOL_GPL(vsock_remove_pending
);
402 void vsock_enqueue_accept(struct sock
*listener
, struct sock
*connected
)
404 struct vsock_sock
*vlistener
;
405 struct vsock_sock
*vconnected
;
407 vlistener
= vsock_sk(listener
);
408 vconnected
= vsock_sk(connected
);
410 sock_hold(connected
);
412 list_add_tail(&vconnected
->accept_queue
, &vlistener
->accept_queue
);
414 EXPORT_SYMBOL_GPL(vsock_enqueue_accept
);
416 static struct sock
*vsock_dequeue_accept(struct sock
*listener
)
418 struct vsock_sock
*vlistener
;
419 struct vsock_sock
*vconnected
;
421 vlistener
= vsock_sk(listener
);
423 if (list_empty(&vlistener
->accept_queue
))
426 vconnected
= list_entry(vlistener
->accept_queue
.next
,
427 struct vsock_sock
, accept_queue
);
429 list_del_init(&vconnected
->accept_queue
);
431 /* The caller will need a reference on the connected socket so we let
432 * it call sock_put().
435 return sk_vsock(vconnected
);
438 static bool vsock_is_accept_queue_empty(struct sock
*sk
)
440 struct vsock_sock
*vsk
= vsock_sk(sk
);
441 return list_empty(&vsk
->accept_queue
);
444 static bool vsock_is_pending(struct sock
*sk
)
446 struct vsock_sock
*vsk
= vsock_sk(sk
);
447 return !list_empty(&vsk
->pending_links
);
450 static int vsock_send_shutdown(struct sock
*sk
, int mode
)
452 return transport
->shutdown(vsock_sk(sk
), mode
);
455 static void vsock_pending_work(struct work_struct
*work
)
458 struct sock
*listener
;
459 struct vsock_sock
*vsk
;
462 vsk
= container_of(work
, struct vsock_sock
, pending_work
.work
);
464 listener
= vsk
->listener
;
468 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
470 if (vsock_is_pending(sk
)) {
471 vsock_remove_pending(listener
, sk
);
473 listener
->sk_ack_backlog
--;
474 } else if (!vsk
->rejected
) {
475 /* We are not on the pending list and accept() did not reject
476 * us, so we must have been accepted by our user process. We
477 * just need to drop our references to the sockets and be on
484 /* We need to remove ourself from the global connected sockets list so
485 * incoming packets can't find this socket, and to reduce the reference
488 if (vsock_in_connected_table(vsk
))
489 vsock_remove_connected(vsk
);
491 sk
->sk_state
= TCP_CLOSE
;
495 release_sock(listener
);
503 /**** SOCKET OPERATIONS ****/
505 static int __vsock_bind_stream(struct vsock_sock
*vsk
,
506 struct sockaddr_vm
*addr
)
509 struct sockaddr_vm new_addr
;
512 port
= LAST_RESERVED_PORT
+ 1 +
513 prandom_u32_max(U32_MAX
- LAST_RESERVED_PORT
);
515 vsock_addr_init(&new_addr
, addr
->svm_cid
, addr
->svm_port
);
517 if (addr
->svm_port
== VMADDR_PORT_ANY
) {
521 for (i
= 0; i
< MAX_PORT_RETRIES
; i
++) {
522 if (port
<= LAST_RESERVED_PORT
)
523 port
= LAST_RESERVED_PORT
+ 1;
525 new_addr
.svm_port
= port
++;
527 if (!__vsock_find_bound_socket(&new_addr
)) {
534 return -EADDRNOTAVAIL
;
536 /* If port is in reserved range, ensure caller
537 * has necessary privileges.
539 if (addr
->svm_port
<= LAST_RESERVED_PORT
&&
540 !capable(CAP_NET_BIND_SERVICE
)) {
544 if (__vsock_find_bound_socket(&new_addr
))
548 vsock_addr_init(&vsk
->local_addr
, new_addr
.svm_cid
, new_addr
.svm_port
);
550 /* Remove stream sockets from the unbound list and add them to the hash
551 * table for easy lookup by its address. The unbound list is simply an
552 * extra entry at the end of the hash table, a trick used by AF_UNIX.
554 __vsock_remove_bound(vsk
);
555 __vsock_insert_bound(vsock_bound_sockets(&vsk
->local_addr
), vsk
);
560 static int __vsock_bind_dgram(struct vsock_sock
*vsk
,
561 struct sockaddr_vm
*addr
)
563 return transport
->dgram_bind(vsk
, addr
);
566 static int __vsock_bind(struct sock
*sk
, struct sockaddr_vm
*addr
)
568 struct vsock_sock
*vsk
= vsock_sk(sk
);
572 /* First ensure this socket isn't already bound. */
573 if (vsock_addr_bound(&vsk
->local_addr
))
576 /* Now bind to the provided address or select appropriate values if
577 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
578 * like AF_INET prevents binding to a non-local IP address (in most
579 * cases), we only allow binding to the local CID.
581 cid
= transport
->get_local_cid();
582 if (addr
->svm_cid
!= cid
&& addr
->svm_cid
!= VMADDR_CID_ANY
)
583 return -EADDRNOTAVAIL
;
585 switch (sk
->sk_socket
->type
) {
587 spin_lock_bh(&vsock_table_lock
);
588 retval
= __vsock_bind_stream(vsk
, addr
);
589 spin_unlock_bh(&vsock_table_lock
);
593 retval
= __vsock_bind_dgram(vsk
, addr
);
604 static void vsock_connect_timeout(struct work_struct
*work
);
606 struct sock
*__vsock_create(struct net
*net
,
614 struct vsock_sock
*psk
;
615 struct vsock_sock
*vsk
;
617 sk
= sk_alloc(net
, AF_VSOCK
, priority
, &vsock_proto
, kern
);
621 sock_init_data(sock
, sk
);
623 /* sk->sk_type is normally set in sock_init_data, but only if sock is
624 * non-NULL. We make sure that our sockets always have a type by
625 * setting it here if needed.
631 vsock_addr_init(&vsk
->local_addr
, VMADDR_CID_ANY
, VMADDR_PORT_ANY
);
632 vsock_addr_init(&vsk
->remote_addr
, VMADDR_CID_ANY
, VMADDR_PORT_ANY
);
634 sk
->sk_destruct
= vsock_sk_destruct
;
635 sk
->sk_backlog_rcv
= vsock_queue_rcv_skb
;
636 sock_reset_flag(sk
, SOCK_DONE
);
638 INIT_LIST_HEAD(&vsk
->bound_table
);
639 INIT_LIST_HEAD(&vsk
->connected_table
);
640 vsk
->listener
= NULL
;
641 INIT_LIST_HEAD(&vsk
->pending_links
);
642 INIT_LIST_HEAD(&vsk
->accept_queue
);
643 vsk
->rejected
= false;
644 vsk
->sent_request
= false;
645 vsk
->ignore_connecting_rst
= false;
646 vsk
->peer_shutdown
= 0;
647 INIT_DELAYED_WORK(&vsk
->connect_work
, vsock_connect_timeout
);
648 INIT_DELAYED_WORK(&vsk
->pending_work
, vsock_pending_work
);
650 psk
= parent
? vsock_sk(parent
) : NULL
;
652 vsk
->trusted
= psk
->trusted
;
653 vsk
->owner
= get_cred(psk
->owner
);
654 vsk
->connect_timeout
= psk
->connect_timeout
;
656 vsk
->trusted
= capable(CAP_NET_ADMIN
);
657 vsk
->owner
= get_current_cred();
658 vsk
->connect_timeout
= VSOCK_DEFAULT_CONNECT_TIMEOUT
;
661 if (transport
->init(vsk
, psk
) < 0) {
667 vsock_insert_unbound(vsk
);
671 EXPORT_SYMBOL_GPL(__vsock_create
);
673 static void __vsock_release(struct sock
*sk
)
677 struct sock
*pending
;
678 struct vsock_sock
*vsk
;
681 pending
= NULL
; /* Compiler warning. */
683 transport
->release(vsk
);
687 sk
->sk_shutdown
= SHUTDOWN_MASK
;
689 while ((skb
= skb_dequeue(&sk
->sk_receive_queue
)))
692 /* Clean up any sockets that never were accepted. */
693 while ((pending
= vsock_dequeue_accept(sk
)) != NULL
) {
694 __vsock_release(pending
);
703 static void vsock_sk_destruct(struct sock
*sk
)
705 struct vsock_sock
*vsk
= vsock_sk(sk
);
707 transport
->destruct(vsk
);
709 /* When clearing these addresses, there's no need to set the family and
710 * possibly register the address family with the kernel.
712 vsock_addr_init(&vsk
->local_addr
, VMADDR_CID_ANY
, VMADDR_PORT_ANY
);
713 vsock_addr_init(&vsk
->remote_addr
, VMADDR_CID_ANY
, VMADDR_PORT_ANY
);
715 put_cred(vsk
->owner
);
718 static int vsock_queue_rcv_skb(struct sock
*sk
, struct sk_buff
*skb
)
722 err
= sock_queue_rcv_skb(sk
, skb
);
729 s64
vsock_stream_has_data(struct vsock_sock
*vsk
)
731 return transport
->stream_has_data(vsk
);
733 EXPORT_SYMBOL_GPL(vsock_stream_has_data
);
735 s64
vsock_stream_has_space(struct vsock_sock
*vsk
)
737 return transport
->stream_has_space(vsk
);
739 EXPORT_SYMBOL_GPL(vsock_stream_has_space
);
741 static int vsock_release(struct socket
*sock
)
743 __vsock_release(sock
->sk
);
745 sock
->state
= SS_FREE
;
751 vsock_bind(struct socket
*sock
, struct sockaddr
*addr
, int addr_len
)
755 struct sockaddr_vm
*vm_addr
;
759 if (vsock_addr_cast(addr
, addr_len
, &vm_addr
) != 0)
763 err
= __vsock_bind(sk
, vm_addr
);
769 static int vsock_getname(struct socket
*sock
,
770 struct sockaddr
*addr
, int peer
)
774 struct vsock_sock
*vsk
;
775 struct sockaddr_vm
*vm_addr
;
784 if (sock
->state
!= SS_CONNECTED
) {
788 vm_addr
= &vsk
->remote_addr
;
790 vm_addr
= &vsk
->local_addr
;
798 /* sys_getsockname() and sys_getpeername() pass us a
799 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
800 * that macro is defined in socket.c instead of .h, so we hardcode its
803 BUILD_BUG_ON(sizeof(*vm_addr
) > 128);
804 memcpy(addr
, vm_addr
, sizeof(*vm_addr
));
805 err
= sizeof(*vm_addr
);
812 static int vsock_shutdown(struct socket
*sock
, int mode
)
817 /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
818 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
819 * here like the other address families do. Note also that the
820 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
821 * which is what we want.
825 if ((mode
& ~SHUTDOWN_MASK
) || !mode
)
828 /* If this is a STREAM socket and it is not connected then bail out
829 * immediately. If it is a DGRAM socket then we must first kick the
830 * socket so that it wakes up from any sleeping calls, for example
831 * recv(), and then afterwards return the error.
835 if (sock
->state
== SS_UNCONNECTED
) {
837 if (sk
->sk_type
== SOCK_STREAM
)
840 sock
->state
= SS_DISCONNECTING
;
844 /* Receive and send shutdowns are treated alike. */
845 mode
= mode
& (RCV_SHUTDOWN
| SEND_SHUTDOWN
);
848 sk
->sk_shutdown
|= mode
;
849 sk
->sk_state_change(sk
);
852 if (sk
->sk_type
== SOCK_STREAM
) {
853 sock_reset_flag(sk
, SOCK_DONE
);
854 vsock_send_shutdown(sk
, mode
);
861 static __poll_t
vsock_poll(struct file
*file
, struct socket
*sock
,
866 struct vsock_sock
*vsk
;
871 poll_wait(file
, sk_sleep(sk
), wait
);
875 /* Signify that there has been an error on this socket. */
878 /* INET sockets treat local write shutdown and peer write shutdown as a
879 * case of EPOLLHUP set.
881 if ((sk
->sk_shutdown
== SHUTDOWN_MASK
) ||
882 ((sk
->sk_shutdown
& SEND_SHUTDOWN
) &&
883 (vsk
->peer_shutdown
& SEND_SHUTDOWN
))) {
887 if (sk
->sk_shutdown
& RCV_SHUTDOWN
||
888 vsk
->peer_shutdown
& SEND_SHUTDOWN
) {
892 if (sock
->type
== SOCK_DGRAM
) {
893 /* For datagram sockets we can read if there is something in
894 * the queue and write as long as the socket isn't shutdown for
897 if (!skb_queue_empty(&sk
->sk_receive_queue
) ||
898 (sk
->sk_shutdown
& RCV_SHUTDOWN
)) {
899 mask
|= EPOLLIN
| EPOLLRDNORM
;
902 if (!(sk
->sk_shutdown
& SEND_SHUTDOWN
))
903 mask
|= EPOLLOUT
| EPOLLWRNORM
| EPOLLWRBAND
;
905 } else if (sock
->type
== SOCK_STREAM
) {
908 /* Listening sockets that have connections in their accept
911 if (sk
->sk_state
== TCP_LISTEN
912 && !vsock_is_accept_queue_empty(sk
))
913 mask
|= EPOLLIN
| EPOLLRDNORM
;
915 /* If there is something in the queue then we can read. */
916 if (transport
->stream_is_active(vsk
) &&
917 !(sk
->sk_shutdown
& RCV_SHUTDOWN
)) {
918 bool data_ready_now
= false;
919 int ret
= transport
->notify_poll_in(
920 vsk
, 1, &data_ready_now
);
925 mask
|= EPOLLIN
| EPOLLRDNORM
;
930 /* Sockets whose connections have been closed, reset, or
931 * terminated should also be considered read, and we check the
932 * shutdown flag for that.
934 if (sk
->sk_shutdown
& RCV_SHUTDOWN
||
935 vsk
->peer_shutdown
& SEND_SHUTDOWN
) {
936 mask
|= EPOLLIN
| EPOLLRDNORM
;
939 /* Connected sockets that can produce data can be written. */
940 if (sk
->sk_state
== TCP_ESTABLISHED
) {
941 if (!(sk
->sk_shutdown
& SEND_SHUTDOWN
)) {
942 bool space_avail_now
= false;
943 int ret
= transport
->notify_poll_out(
944 vsk
, 1, &space_avail_now
);
949 /* Remove EPOLLWRBAND since INET
950 * sockets are not setting it.
952 mask
|= EPOLLOUT
| EPOLLWRNORM
;
958 /* Simulate INET socket poll behaviors, which sets
959 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
960 * but local send is not shutdown.
962 if (sk
->sk_state
== TCP_CLOSE
|| sk
->sk_state
== TCP_CLOSING
) {
963 if (!(sk
->sk_shutdown
& SEND_SHUTDOWN
))
964 mask
|= EPOLLOUT
| EPOLLWRNORM
;
974 static int vsock_dgram_sendmsg(struct socket
*sock
, struct msghdr
*msg
,
979 struct vsock_sock
*vsk
;
980 struct sockaddr_vm
*remote_addr
;
982 if (msg
->msg_flags
& MSG_OOB
)
985 /* For now, MSG_DONTWAIT is always assumed... */
992 err
= vsock_auto_bind(vsk
);
997 /* If the provided message contains an address, use that. Otherwise
998 * fall back on the socket's remote handle (if it has been connected).
1000 if (msg
->msg_name
&&
1001 vsock_addr_cast(msg
->msg_name
, msg
->msg_namelen
,
1002 &remote_addr
) == 0) {
1003 /* Ensure this address is of the right type and is a valid
1007 if (remote_addr
->svm_cid
== VMADDR_CID_ANY
)
1008 remote_addr
->svm_cid
= transport
->get_local_cid();
1010 if (!vsock_addr_bound(remote_addr
)) {
1014 } else if (sock
->state
== SS_CONNECTED
) {
1015 remote_addr
= &vsk
->remote_addr
;
1017 if (remote_addr
->svm_cid
== VMADDR_CID_ANY
)
1018 remote_addr
->svm_cid
= transport
->get_local_cid();
1020 /* XXX Should connect() or this function ensure remote_addr is
1023 if (!vsock_addr_bound(&vsk
->remote_addr
)) {
1032 if (!transport
->dgram_allow(remote_addr
->svm_cid
,
1033 remote_addr
->svm_port
)) {
1038 err
= transport
->dgram_enqueue(vsk
, remote_addr
, msg
, len
);
1045 static int vsock_dgram_connect(struct socket
*sock
,
1046 struct sockaddr
*addr
, int addr_len
, int flags
)
1050 struct vsock_sock
*vsk
;
1051 struct sockaddr_vm
*remote_addr
;
1056 err
= vsock_addr_cast(addr
, addr_len
, &remote_addr
);
1057 if (err
== -EAFNOSUPPORT
&& remote_addr
->svm_family
== AF_UNSPEC
) {
1059 vsock_addr_init(&vsk
->remote_addr
, VMADDR_CID_ANY
,
1061 sock
->state
= SS_UNCONNECTED
;
1064 } else if (err
!= 0)
1069 err
= vsock_auto_bind(vsk
);
1073 if (!transport
->dgram_allow(remote_addr
->svm_cid
,
1074 remote_addr
->svm_port
)) {
1079 memcpy(&vsk
->remote_addr
, remote_addr
, sizeof(vsk
->remote_addr
));
1080 sock
->state
= SS_CONNECTED
;
1087 static int vsock_dgram_recvmsg(struct socket
*sock
, struct msghdr
*msg
,
1088 size_t len
, int flags
)
1090 return transport
->dgram_dequeue(vsock_sk(sock
->sk
), msg
, len
, flags
);
1093 static const struct proto_ops vsock_dgram_ops
= {
1095 .owner
= THIS_MODULE
,
1096 .release
= vsock_release
,
1098 .connect
= vsock_dgram_connect
,
1099 .socketpair
= sock_no_socketpair
,
1100 .accept
= sock_no_accept
,
1101 .getname
= vsock_getname
,
1103 .ioctl
= sock_no_ioctl
,
1104 .listen
= sock_no_listen
,
1105 .shutdown
= vsock_shutdown
,
1106 .setsockopt
= sock_no_setsockopt
,
1107 .getsockopt
= sock_no_getsockopt
,
1108 .sendmsg
= vsock_dgram_sendmsg
,
1109 .recvmsg
= vsock_dgram_recvmsg
,
1110 .mmap
= sock_no_mmap
,
1111 .sendpage
= sock_no_sendpage
,
1114 static int vsock_transport_cancel_pkt(struct vsock_sock
*vsk
)
1116 if (!transport
->cancel_pkt
)
1119 return transport
->cancel_pkt(vsk
);
1122 static void vsock_connect_timeout(struct work_struct
*work
)
1125 struct vsock_sock
*vsk
;
1128 vsk
= container_of(work
, struct vsock_sock
, connect_work
.work
);
1132 if (sk
->sk_state
== TCP_SYN_SENT
&&
1133 (sk
->sk_shutdown
!= SHUTDOWN_MASK
)) {
1134 sk
->sk_state
= TCP_CLOSE
;
1135 sk
->sk_err
= ETIMEDOUT
;
1136 sk
->sk_error_report(sk
);
1141 vsock_transport_cancel_pkt(vsk
);
1146 static int vsock_stream_connect(struct socket
*sock
, struct sockaddr
*addr
,
1147 int addr_len
, int flags
)
1151 struct vsock_sock
*vsk
;
1152 struct sockaddr_vm
*remote_addr
;
1162 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1163 switch (sock
->state
) {
1167 case SS_DISCONNECTING
:
1171 /* This continues on so we can move sock into the SS_CONNECTED
1172 * state once the connection has completed (at which point err
1173 * will be set to zero also). Otherwise, we will either wait
1174 * for the connection or return -EALREADY should this be a
1175 * non-blocking call.
1180 if ((sk
->sk_state
== TCP_LISTEN
) ||
1181 vsock_addr_cast(addr
, addr_len
, &remote_addr
) != 0) {
1186 /* The hypervisor and well-known contexts do not have socket
1189 if (!transport
->stream_allow(remote_addr
->svm_cid
,
1190 remote_addr
->svm_port
)) {
1195 /* Set the remote address that we are connecting to. */
1196 memcpy(&vsk
->remote_addr
, remote_addr
,
1197 sizeof(vsk
->remote_addr
));
1199 err
= vsock_auto_bind(vsk
);
1203 sk
->sk_state
= TCP_SYN_SENT
;
1205 err
= transport
->connect(vsk
);
1209 /* Mark sock as connecting and set the error code to in
1210 * progress in case this is a non-blocking connect.
1212 sock
->state
= SS_CONNECTING
;
1216 /* The receive path will handle all communication until we are able to
1217 * enter the connected state. Here we wait for the connection to be
1218 * completed or a notification of an error.
1220 timeout
= vsk
->connect_timeout
;
1221 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
1223 while (sk
->sk_state
!= TCP_ESTABLISHED
&& sk
->sk_err
== 0) {
1224 if (flags
& O_NONBLOCK
) {
1225 /* If we're not going to block, we schedule a timeout
1226 * function to generate a timeout on the connection
1227 * attempt, in case the peer doesn't respond in a
1228 * timely manner. We hold on to the socket until the
1232 schedule_delayed_work(&vsk
->connect_work
, timeout
);
1234 /* Skip ahead to preserve error code set above. */
1239 timeout
= schedule_timeout(timeout
);
1242 if (signal_pending(current
)) {
1243 err
= sock_intr_errno(timeout
);
1244 sk
->sk_state
= TCP_CLOSE
;
1245 sock
->state
= SS_UNCONNECTED
;
1246 vsock_transport_cancel_pkt(vsk
);
1248 } else if (timeout
== 0) {
1250 sk
->sk_state
= TCP_CLOSE
;
1251 sock
->state
= SS_UNCONNECTED
;
1252 vsock_transport_cancel_pkt(vsk
);
1256 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
1261 sk
->sk_state
= TCP_CLOSE
;
1262 sock
->state
= SS_UNCONNECTED
;
1268 finish_wait(sk_sleep(sk
), &wait
);
1274 static int vsock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
,
1277 struct sock
*listener
;
1279 struct sock
*connected
;
1280 struct vsock_sock
*vconnected
;
1285 listener
= sock
->sk
;
1287 lock_sock(listener
);
1289 if (sock
->type
!= SOCK_STREAM
) {
1294 if (listener
->sk_state
!= TCP_LISTEN
) {
1299 /* Wait for children sockets to appear; these are the new sockets
1300 * created upon connection establishment.
1302 timeout
= sock_sndtimeo(listener
, flags
& O_NONBLOCK
);
1303 prepare_to_wait(sk_sleep(listener
), &wait
, TASK_INTERRUPTIBLE
);
1305 while ((connected
= vsock_dequeue_accept(listener
)) == NULL
&&
1306 listener
->sk_err
== 0) {
1307 release_sock(listener
);
1308 timeout
= schedule_timeout(timeout
);
1309 finish_wait(sk_sleep(listener
), &wait
);
1310 lock_sock(listener
);
1312 if (signal_pending(current
)) {
1313 err
= sock_intr_errno(timeout
);
1315 } else if (timeout
== 0) {
1320 prepare_to_wait(sk_sleep(listener
), &wait
, TASK_INTERRUPTIBLE
);
1322 finish_wait(sk_sleep(listener
), &wait
);
1324 if (listener
->sk_err
)
1325 err
= -listener
->sk_err
;
1328 listener
->sk_ack_backlog
--;
1330 lock_sock_nested(connected
, SINGLE_DEPTH_NESTING
);
1331 vconnected
= vsock_sk(connected
);
1333 /* If the listener socket has received an error, then we should
1334 * reject this socket and return. Note that we simply mark the
1335 * socket rejected, drop our reference, and let the cleanup
1336 * function handle the cleanup; the fact that we found it in
1337 * the listener's accept queue guarantees that the cleanup
1338 * function hasn't run yet.
1341 vconnected
->rejected
= true;
1343 newsock
->state
= SS_CONNECTED
;
1344 sock_graft(connected
, newsock
);
1347 release_sock(connected
);
1348 sock_put(connected
);
1352 release_sock(listener
);
1356 static int vsock_listen(struct socket
*sock
, int backlog
)
1360 struct vsock_sock
*vsk
;
1366 if (sock
->type
!= SOCK_STREAM
) {
1371 if (sock
->state
!= SS_UNCONNECTED
) {
1378 if (!vsock_addr_bound(&vsk
->local_addr
)) {
1383 sk
->sk_max_ack_backlog
= backlog
;
1384 sk
->sk_state
= TCP_LISTEN
;
1393 static int vsock_stream_setsockopt(struct socket
*sock
,
1396 char __user
*optval
,
1397 unsigned int optlen
)
1401 struct vsock_sock
*vsk
;
1404 if (level
!= AF_VSOCK
)
1405 return -ENOPROTOOPT
;
1407 #define COPY_IN(_v) \
1409 if (optlen < sizeof(_v)) { \
1413 if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \
1426 case SO_VM_SOCKETS_BUFFER_SIZE
:
1428 transport
->set_buffer_size(vsk
, val
);
1431 case SO_VM_SOCKETS_BUFFER_MAX_SIZE
:
1433 transport
->set_max_buffer_size(vsk
, val
);
1436 case SO_VM_SOCKETS_BUFFER_MIN_SIZE
:
1438 transport
->set_min_buffer_size(vsk
, val
);
1441 case SO_VM_SOCKETS_CONNECT_TIMEOUT
: {
1442 struct __kernel_old_timeval tv
;
1444 if (tv
.tv_sec
>= 0 && tv
.tv_usec
< USEC_PER_SEC
&&
1445 tv
.tv_sec
< (MAX_SCHEDULE_TIMEOUT
/ HZ
- 1)) {
1446 vsk
->connect_timeout
= tv
.tv_sec
* HZ
+
1447 DIV_ROUND_UP(tv
.tv_usec
, (1000000 / HZ
));
1448 if (vsk
->connect_timeout
== 0)
1449 vsk
->connect_timeout
=
1450 VSOCK_DEFAULT_CONNECT_TIMEOUT
;
1470 static int vsock_stream_getsockopt(struct socket
*sock
,
1471 int level
, int optname
,
1472 char __user
*optval
,
1478 struct vsock_sock
*vsk
;
1481 if (level
!= AF_VSOCK
)
1482 return -ENOPROTOOPT
;
1484 err
= get_user(len
, optlen
);
1488 #define COPY_OUT(_v) \
1490 if (len < sizeof(_v)) \
1494 if (copy_to_user(optval, &_v, len) != 0) \
1504 case SO_VM_SOCKETS_BUFFER_SIZE
:
1505 val
= transport
->get_buffer_size(vsk
);
1509 case SO_VM_SOCKETS_BUFFER_MAX_SIZE
:
1510 val
= transport
->get_max_buffer_size(vsk
);
1514 case SO_VM_SOCKETS_BUFFER_MIN_SIZE
:
1515 val
= transport
->get_min_buffer_size(vsk
);
1519 case SO_VM_SOCKETS_CONNECT_TIMEOUT
: {
1520 struct __kernel_old_timeval tv
;
1521 tv
.tv_sec
= vsk
->connect_timeout
/ HZ
;
1523 (vsk
->connect_timeout
-
1524 tv
.tv_sec
* HZ
) * (1000000 / HZ
);
1529 return -ENOPROTOOPT
;
1532 err
= put_user(len
, optlen
);
1541 static int vsock_stream_sendmsg(struct socket
*sock
, struct msghdr
*msg
,
1545 struct vsock_sock
*vsk
;
1546 ssize_t total_written
;
1549 struct vsock_transport_send_notify_data send_data
;
1550 DEFINE_WAIT_FUNC(wait
, woken_wake_function
);
1557 if (msg
->msg_flags
& MSG_OOB
)
1562 /* Callers should not provide a destination with stream sockets. */
1563 if (msg
->msg_namelen
) {
1564 err
= sk
->sk_state
== TCP_ESTABLISHED
? -EISCONN
: -EOPNOTSUPP
;
1568 /* Send data only if both sides are not shutdown in the direction. */
1569 if (sk
->sk_shutdown
& SEND_SHUTDOWN
||
1570 vsk
->peer_shutdown
& RCV_SHUTDOWN
) {
1575 if (sk
->sk_state
!= TCP_ESTABLISHED
||
1576 !vsock_addr_bound(&vsk
->local_addr
)) {
1581 if (!vsock_addr_bound(&vsk
->remote_addr
)) {
1582 err
= -EDESTADDRREQ
;
1586 /* Wait for room in the produce queue to enqueue our user's data. */
1587 timeout
= sock_sndtimeo(sk
, msg
->msg_flags
& MSG_DONTWAIT
);
1589 err
= transport
->notify_send_init(vsk
, &send_data
);
1593 while (total_written
< len
) {
1596 add_wait_queue(sk_sleep(sk
), &wait
);
1597 while (vsock_stream_has_space(vsk
) == 0 &&
1599 !(sk
->sk_shutdown
& SEND_SHUTDOWN
) &&
1600 !(vsk
->peer_shutdown
& RCV_SHUTDOWN
)) {
1602 /* Don't wait for non-blocking sockets. */
1605 remove_wait_queue(sk_sleep(sk
), &wait
);
1609 err
= transport
->notify_send_pre_block(vsk
, &send_data
);
1611 remove_wait_queue(sk_sleep(sk
), &wait
);
1616 timeout
= wait_woken(&wait
, TASK_INTERRUPTIBLE
, timeout
);
1618 if (signal_pending(current
)) {
1619 err
= sock_intr_errno(timeout
);
1620 remove_wait_queue(sk_sleep(sk
), &wait
);
1622 } else if (timeout
== 0) {
1624 remove_wait_queue(sk_sleep(sk
), &wait
);
1628 remove_wait_queue(sk_sleep(sk
), &wait
);
1630 /* These checks occur both as part of and after the loop
1631 * conditional since we need to check before and after
1637 } else if ((sk
->sk_shutdown
& SEND_SHUTDOWN
) ||
1638 (vsk
->peer_shutdown
& RCV_SHUTDOWN
)) {
1643 err
= transport
->notify_send_pre_enqueue(vsk
, &send_data
);
1647 /* Note that enqueue will only write as many bytes as are free
1648 * in the produce queue, so we don't need to ensure len is
1649 * smaller than the queue size. It is the caller's
1650 * responsibility to check how many bytes we were able to send.
1653 written
= transport
->stream_enqueue(
1655 len
- total_written
);
1661 total_written
+= written
;
1663 err
= transport
->notify_send_post_enqueue(
1664 vsk
, written
, &send_data
);
1671 if (total_written
> 0)
1672 err
= total_written
;
1680 vsock_stream_recvmsg(struct socket
*sock
, struct msghdr
*msg
, size_t len
,
1684 struct vsock_sock
*vsk
;
1689 struct vsock_transport_recv_notify_data recv_data
;
1699 if (sk
->sk_state
!= TCP_ESTABLISHED
) {
1700 /* Recvmsg is supposed to return 0 if a peer performs an
1701 * orderly shutdown. Differentiate between that case and when a
1702 * peer has not connected or a local shutdown occured with the
1705 if (sock_flag(sk
, SOCK_DONE
))
1713 if (flags
& MSG_OOB
) {
1718 /* We don't check peer_shutdown flag here since peer may actually shut
1719 * down, but there can be data in the queue that a local socket can
1722 if (sk
->sk_shutdown
& RCV_SHUTDOWN
) {
1727 /* It is valid on Linux to pass in a zero-length receive buffer. This
1728 * is not an error. We may as well bail out now.
1735 /* We must not copy less than target bytes into the user's buffer
1736 * before returning successfully, so we wait for the consume queue to
1737 * have that much data to consume before dequeueing. Note that this
1738 * makes it impossible to handle cases where target is greater than the
1741 target
= sock_rcvlowat(sk
, flags
& MSG_WAITALL
, len
);
1742 if (target
>= transport
->stream_rcvhiwat(vsk
)) {
1746 timeout
= sock_rcvtimeo(sk
, flags
& MSG_DONTWAIT
);
1749 err
= transport
->notify_recv_init(vsk
, target
, &recv_data
);
1757 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
1758 ready
= vsock_stream_has_data(vsk
);
1761 if (sk
->sk_err
!= 0 ||
1762 (sk
->sk_shutdown
& RCV_SHUTDOWN
) ||
1763 (vsk
->peer_shutdown
& SEND_SHUTDOWN
)) {
1764 finish_wait(sk_sleep(sk
), &wait
);
1767 /* Don't wait for non-blocking sockets. */
1770 finish_wait(sk_sleep(sk
), &wait
);
1774 err
= transport
->notify_recv_pre_block(
1775 vsk
, target
, &recv_data
);
1777 finish_wait(sk_sleep(sk
), &wait
);
1781 timeout
= schedule_timeout(timeout
);
1784 if (signal_pending(current
)) {
1785 err
= sock_intr_errno(timeout
);
1786 finish_wait(sk_sleep(sk
), &wait
);
1788 } else if (timeout
== 0) {
1790 finish_wait(sk_sleep(sk
), &wait
);
1796 finish_wait(sk_sleep(sk
), &wait
);
1799 /* Invalid queue pair content. XXX This should
1800 * be changed to a connection reset in a later
1808 err
= transport
->notify_recv_pre_dequeue(
1809 vsk
, target
, &recv_data
);
1813 read
= transport
->stream_dequeue(
1815 len
- copied
, flags
);
1823 err
= transport
->notify_recv_post_dequeue(
1825 !(flags
& MSG_PEEK
), &recv_data
);
1829 if (read
>= target
|| flags
& MSG_PEEK
)
1838 else if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
1849 static const struct proto_ops vsock_stream_ops
= {
1851 .owner
= THIS_MODULE
,
1852 .release
= vsock_release
,
1854 .connect
= vsock_stream_connect
,
1855 .socketpair
= sock_no_socketpair
,
1856 .accept
= vsock_accept
,
1857 .getname
= vsock_getname
,
1859 .ioctl
= sock_no_ioctl
,
1860 .listen
= vsock_listen
,
1861 .shutdown
= vsock_shutdown
,
1862 .setsockopt
= vsock_stream_setsockopt
,
1863 .getsockopt
= vsock_stream_getsockopt
,
1864 .sendmsg
= vsock_stream_sendmsg
,
1865 .recvmsg
= vsock_stream_recvmsg
,
1866 .mmap
= sock_no_mmap
,
1867 .sendpage
= sock_no_sendpage
,
1870 static int vsock_create(struct net
*net
, struct socket
*sock
,
1871 int protocol
, int kern
)
1876 if (protocol
&& protocol
!= PF_VSOCK
)
1877 return -EPROTONOSUPPORT
;
1879 switch (sock
->type
) {
1881 sock
->ops
= &vsock_dgram_ops
;
1884 sock
->ops
= &vsock_stream_ops
;
1887 return -ESOCKTNOSUPPORT
;
1890 sock
->state
= SS_UNCONNECTED
;
1892 return __vsock_create(net
, sock
, NULL
, GFP_KERNEL
, 0, kern
) ? 0 : -ENOMEM
;
1895 static const struct net_proto_family vsock_family_ops
= {
1897 .create
= vsock_create
,
1898 .owner
= THIS_MODULE
,
1901 static long vsock_dev_do_ioctl(struct file
*filp
,
1902 unsigned int cmd
, void __user
*ptr
)
1904 u32 __user
*p
= ptr
;
1908 case IOCTL_VM_SOCKETS_GET_LOCAL_CID
:
1909 if (put_user(transport
->get_local_cid(), p
) != 0)
1914 pr_err("Unknown ioctl %d\n", cmd
);
1921 static long vsock_dev_ioctl(struct file
*filp
,
1922 unsigned int cmd
, unsigned long arg
)
1924 return vsock_dev_do_ioctl(filp
, cmd
, (void __user
*)arg
);
1927 #ifdef CONFIG_COMPAT
1928 static long vsock_dev_compat_ioctl(struct file
*filp
,
1929 unsigned int cmd
, unsigned long arg
)
1931 return vsock_dev_do_ioctl(filp
, cmd
, compat_ptr(arg
));
1935 static const struct file_operations vsock_device_ops
= {
1936 .owner
= THIS_MODULE
,
1937 .unlocked_ioctl
= vsock_dev_ioctl
,
1938 #ifdef CONFIG_COMPAT
1939 .compat_ioctl
= vsock_dev_compat_ioctl
,
1941 .open
= nonseekable_open
,
1944 static struct miscdevice vsock_device
= {
1946 .fops
= &vsock_device_ops
,
1949 int __vsock_core_init(const struct vsock_transport
*t
, struct module
*owner
)
1951 int err
= mutex_lock_interruptible(&vsock_register_mutex
);
1961 /* Transport must be the owner of the protocol so that it can't
1962 * unload while there are open sockets.
1964 vsock_proto
.owner
= owner
;
1967 vsock_device
.minor
= MISC_DYNAMIC_MINOR
;
1968 err
= misc_register(&vsock_device
);
1970 pr_err("Failed to register misc device\n");
1971 goto err_reset_transport
;
1974 err
= proto_register(&vsock_proto
, 1); /* we want our slab */
1976 pr_err("Cannot register vsock protocol\n");
1977 goto err_deregister_misc
;
1980 err
= sock_register(&vsock_family_ops
);
1982 pr_err("could not register af_vsock (%d) address family: %d\n",
1984 goto err_unregister_proto
;
1987 mutex_unlock(&vsock_register_mutex
);
1990 err_unregister_proto
:
1991 proto_unregister(&vsock_proto
);
1992 err_deregister_misc
:
1993 misc_deregister(&vsock_device
);
1994 err_reset_transport
:
1997 mutex_unlock(&vsock_register_mutex
);
2000 EXPORT_SYMBOL_GPL(__vsock_core_init
);
2002 void vsock_core_exit(void)
2004 mutex_lock(&vsock_register_mutex
);
2006 misc_deregister(&vsock_device
);
2007 sock_unregister(AF_VSOCK
);
2008 proto_unregister(&vsock_proto
);
2010 /* We do not want the assignment below re-ordered. */
2014 mutex_unlock(&vsock_register_mutex
);
2016 EXPORT_SYMBOL_GPL(vsock_core_exit
);
2018 const struct vsock_transport
*vsock_core_get_transport(void)
2020 /* vsock_register_mutex not taken since only the transport uses this
2021 * function and only while registered.
2025 EXPORT_SYMBOL_GPL(vsock_core_get_transport
);
2027 static void __exit
vsock_exit(void)
2029 /* Do nothing. This function makes this module removable. */
2032 module_init(vsock_init_tables
);
2033 module_exit(vsock_exit
);
2035 MODULE_AUTHOR("VMware, Inc.");
2036 MODULE_DESCRIPTION("VMware Virtual Socket Family");
2037 MODULE_VERSION("1.0.2.0-k");
2038 MODULE_LICENSE("GPL v2");