/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them.  Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets; SOCK_DGRAM sockets will always remain in
 * that list.  The bound table is used solely for lookup of sockets when
 * packets are received, and that's not necessary for SOCK_DGRAM sockets
 * since we create a datagram handle for each and need not perform a lookup.
 * Keeping SOCK_DGRAM sockets out of the bound hash buckets will reduce the
 * chance of collisions when looking for SOCK_STREAM sockets and prevent us
 * from having to check the socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we
 * do not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state.  When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket.  These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection.  If it is, we process the packet for the
 * pending socket.  When that socket reaches the connected state, it is
 * removed from the listener socket's pending list and enqueued in the
 * listener socket's accept queue.  Callers of accept(2) will accept connected
 * sockets from the listener socket's accept queue.  If the socket cannot be
 * accepted for some reason then it is marked rejected.  Once the connection
 * is accepted, it is owned by the user process and the responsibility for
 * cleanup falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request.  Because of this, we must schedule a cleanup function to run in
 * the future, after some amount of time passes where a connection should
 * have been established.  This function ensures that the socket is off all
 * lists so it cannot be retrieved, then drops all references to the socket
 * so it is cleaned up (sock_put() -> sk_free() -> our sk_destruct
 * implementation).  Note this function will also clean up rejected sockets,
 * those that reach the connected state but leave it before they have been
 * accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called.  Our
 * release implementation will perform some cleanup then drop the last
 * reference so our sk_destruct implementation is invoked.  Our sk_destruct
 * implementation will perform additional cleanup that's common for both
 * types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed.  Each entry in a list (such as the "global" bound and connected
 * tables and the listener socket's pending list and accept queue) ensures a
 * reference.  When we defer work until process context and pass a socket as
 * our argument, we must ensure the reference count is increased to ensure
 * the socket isn't freed before the function is run; the deferred function
 * will then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used
 * by other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */

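/* For illustration only (this sketch is not part of the original file): a
 * minimal user-space stream client of this family might look like:
 *
 *     int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *     struct sockaddr_vm addr = {
 *             .svm_family = AF_VSOCK,
 *             .svm_cid = VMADDR_CID_HOST,
 *             .svm_port = 1234,
 *     };
 *     connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * A "server" socket instead bind()s and listen()s (entering TCP_LISTEN as
 * described above), then accept()s connected children from its accept queue.
 */
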
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
        .name = "AF_VSOCK",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

static const struct vsock_transport *transport;
static DEFINE_MUTEX(vsock_register_mutex);

/**** EXPORTS ****/

/* Get the ID of the local context.  This is transport dependent. */

int vm_sockets_get_local_cid(void)
{
        return transport->get_local_cid();
}
EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);

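/* For illustration (not part of the original file): user space can obtain
 * the same value through the misc device registered by this module, e.g.:
 *
 *     int fd = open("/dev/vsock", O_RDONLY);
 *     unsigned int cid;
 *     ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 *
 * which reaches this transport callback via vsock_dev_do_ioctl() below.
 */
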
/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the
 * hash table (vsock_unbound_sockets).  Bound sockets are added to the hash
 * table in the bucket that their local address hashes to
 * (vsock_bound_sockets(addr) represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets.  The hash
 * function mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES        24

#define VSOCK_HASH(addr)        ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets     (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
        (((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst) \
        (&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
        vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

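/* Worked example (assuming VSOCK_HASH_SIZE == 251, its value in
 * include/net/af_vsock.h): a socket bound to port 1024 hashes to bucket
 * 1024 % 251 == 20 and so lives on &vsock_bind_table[20], while every
 * unbound socket sits on &vsock_bind_table[251].
 */
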
struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);

/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
        struct sock *sk = sk_vsock(vsk);
        struct sockaddr_vm local_addr;

        if (vsock_addr_bound(&vsk->local_addr))
                return 0;
        vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
        return __vsock_bind(sk, &local_addr);
}

static int __init vsock_init_tables(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
                INIT_LIST_HEAD(&vsock_bind_table[i]);

        for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
                INIT_LIST_HEAD(&vsock_connected_table[i]);

        return 0;
}

static void __vsock_insert_bound(struct list_head *list,
                                 struct vsock_sock *vsk)
{
        sock_hold(&vsk->sk);
        list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
                                     struct vsock_sock *vsk)
{
        sock_hold(&vsk->sk);
        list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
        list_del_init(&vsk->bound_table);
        sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
        list_del_init(&vsk->connected_table);
        sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
        struct vsock_sock *vsk;

        list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
                if (addr->svm_port == vsk->local_addr.svm_port)
                        return sk_vsock(vsk);

        return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
                                                  struct sockaddr_vm *dst)
{
        struct vsock_sock *vsk;

        list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
                            connected_table) {
                if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
                    dst->svm_port == vsk->local_addr.svm_port) {
                        return sk_vsock(vsk);
                }
        }

        return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
        spin_lock_bh(&vsock_table_lock);
        __vsock_insert_bound(vsock_unbound_sockets, vsk);
        spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
        struct list_head *list = vsock_connected_sockets(
                &vsk->remote_addr, &vsk->local_addr);

        spin_lock_bh(&vsock_table_lock);
        __vsock_insert_connected(list, vsk);
        spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
        spin_lock_bh(&vsock_table_lock);
        __vsock_remove_bound(vsk);
        spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
        spin_lock_bh(&vsock_table_lock);
        __vsock_remove_connected(vsk);
        spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
        struct sock *sk;

        spin_lock_bh(&vsock_table_lock);
        sk = __vsock_find_bound_socket(addr);
        if (sk)
                sock_hold(sk);

        spin_unlock_bh(&vsock_table_lock);

        return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
                                         struct sockaddr_vm *dst)
{
        struct sock *sk;

        spin_lock_bh(&vsock_table_lock);
        sk = __vsock_find_connected_socket(src, dst);
        if (sk)
                sock_hold(sk);

        spin_unlock_bh(&vsock_table_lock);

        return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

static bool vsock_in_bound_table(struct vsock_sock *vsk)
{
        bool ret;

        spin_lock_bh(&vsock_table_lock);
        ret = __vsock_in_bound_table(vsk);
        spin_unlock_bh(&vsock_table_lock);

        return ret;
}

static bool vsock_in_connected_table(struct vsock_sock *vsk)
{
        bool ret;

        spin_lock_bh(&vsock_table_lock);
        ret = __vsock_in_connected_table(vsk);
        spin_unlock_bh(&vsock_table_lock);

        return ret;
}

void vsock_remove_sock(struct vsock_sock *vsk)
{
        if (vsock_in_bound_table(vsk))
                vsock_remove_bound(vsk);

        if (vsock_in_connected_table(vsk))
                vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
        int i;

        spin_lock_bh(&vsock_table_lock);

        for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
                struct vsock_sock *vsk;
                list_for_each_entry(vsk, &vsock_connected_table[i],
                                    connected_table)
                        fn(sk_vsock(vsk));
        }

        spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
        struct vsock_sock *vlistener;
        struct vsock_sock *vpending;

        vlistener = vsock_sk(listener);
        vpending = vsock_sk(pending);

        sock_hold(pending);
        sock_hold(listener);
        list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
        struct vsock_sock *vpending = vsock_sk(pending);

        list_del_init(&vpending->pending_links);
        sock_put(listener);
        sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
        struct vsock_sock *vlistener;
        struct vsock_sock *vconnected;

        vlistener = vsock_sk(listener);
        vconnected = vsock_sk(connected);

        sock_hold(connected);
        sock_hold(listener);
        list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static struct sock *vsock_dequeue_accept(struct sock *listener)
{
        struct vsock_sock *vlistener;
        struct vsock_sock *vconnected;

        vlistener = vsock_sk(listener);

        if (list_empty(&vlistener->accept_queue))
                return NULL;

        vconnected = list_entry(vlistener->accept_queue.next,
                                struct vsock_sock, accept_queue);

        list_del_init(&vconnected->accept_queue);
        sock_put(listener);
        /* The caller will need a reference on the connected socket so we let
         * it call sock_put().
         */

        return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
        return transport->shutdown(vsock_sk(sk), mode);
}

static void vsock_pending_work(struct work_struct *work)
{
        struct sock *sk;
        struct sock *listener;
        struct vsock_sock *vsk;
        bool cleanup;

        vsk = container_of(work, struct vsock_sock, pending_work.work);
        sk = sk_vsock(vsk);
        listener = vsk->listener;
        cleanup = true;

        lock_sock(listener);
        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (vsock_is_pending(sk)) {
                vsock_remove_pending(listener, sk);

                listener->sk_ack_backlog--;
        } else if (!vsk->rejected) {
                /* We are not on the pending list and accept() did not reject
                 * us, so we must have been accepted by our user process.  We
                 * just need to drop our references to the sockets and be on
                 * our way.
                 */
                cleanup = false;
                goto out;
        }

        /* We need to remove ourselves from the global connected sockets list
         * so incoming packets can't find this socket, and to reduce the
         * reference count.
         */
        if (vsock_in_connected_table(vsk))
                vsock_remove_connected(vsk);

        sk->sk_state = TCP_CLOSE;

out:
        release_sock(sk);
        release_sock(listener);
        if (cleanup)
                sock_put(sk);

        sock_put(sk);
        sock_put(listener);
}

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_stream(struct vsock_sock *vsk,
                               struct sockaddr_vm *addr)
{
        static u32 port = LAST_RESERVED_PORT + 1;
        struct sockaddr_vm new_addr;

        vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

        if (addr->svm_port == VMADDR_PORT_ANY) {
                bool found = false;
                unsigned int i;

                for (i = 0; i < MAX_PORT_RETRIES; i++) {
                        if (port <= LAST_RESERVED_PORT)
                                port = LAST_RESERVED_PORT + 1;

                        new_addr.svm_port = port++;

                        if (!__vsock_find_bound_socket(&new_addr)) {
                                found = true;
                                break;
                        }
                }

                if (!found)
                        return -EADDRNOTAVAIL;
        } else {
                /* If port is in reserved range, ensure caller
                 * has necessary privileges.
                 */
                if (addr->svm_port <= LAST_RESERVED_PORT &&
                    !capable(CAP_NET_BIND_SERVICE)) {
                        return -EACCES;
                }

                if (__vsock_find_bound_socket(&new_addr))
                        return -EADDRINUSE;
        }

        vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

        /* Remove stream sockets from the unbound list and add them to the
         * hash table for easy lookup by their addresses.  The unbound list
         * is simply an extra entry at the end of the hash table, a trick
         * used by AF_UNIX.
         */
        __vsock_remove_bound(vsk);
        __vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

        return 0;
}

static int __vsock_bind_dgram(struct vsock_sock *vsk,
                              struct sockaddr_vm *addr)
{
        return transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
        struct vsock_sock *vsk = vsock_sk(sk);
        u32 cid;
        int retval;

        /* First ensure this socket isn't already bound. */
        if (vsock_addr_bound(&vsk->local_addr))
                return -EINVAL;

        /* Now bind to the provided address or select appropriate values if
         * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that
         * like AF_INET prevents binding to a non-local IP address (in most
         * cases), we only allow binding to the local CID.
         */
        cid = transport->get_local_cid();
        if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
                return -EADDRNOTAVAIL;

        switch (sk->sk_socket->type) {
        case SOCK_STREAM:
                spin_lock_bh(&vsock_table_lock);
                retval = __vsock_bind_stream(vsk, addr);
                spin_unlock_bh(&vsock_table_lock);
                break;

        case SOCK_DGRAM:
                retval = __vsock_bind_dgram(vsk, addr);
                break;

        default:
                retval = -EINVAL;
                break;
        }

        return retval;
}

static void vsock_connect_timeout(struct work_struct *work);

struct sock *__vsock_create(struct net *net,
                            struct socket *sock,
                            struct sock *parent,
                            gfp_t priority,
                            unsigned short type,
                            int kern)
{
        struct sock *sk;
        struct vsock_sock *psk;
        struct vsock_sock *vsk;

        sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
        if (!sk)
                return NULL;

        sock_init_data(sock, sk);

        /* sk->sk_type is normally set in sock_init_data, but only if sock is
         * non-NULL.  We make sure that our sockets always have a type by
         * setting it here if needed.
         */
        if (!sock)
                sk->sk_type = type;

        vsk = vsock_sk(sk);
        vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
        vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

        sk->sk_destruct = vsock_sk_destruct;
        sk->sk_backlog_rcv = vsock_queue_rcv_skb;
        sock_reset_flag(sk, SOCK_DONE);

        INIT_LIST_HEAD(&vsk->bound_table);
        INIT_LIST_HEAD(&vsk->connected_table);
        vsk->listener = NULL;
        INIT_LIST_HEAD(&vsk->pending_links);
        INIT_LIST_HEAD(&vsk->accept_queue);
        vsk->rejected = false;
        vsk->sent_request = false;
        vsk->ignore_connecting_rst = false;
        vsk->peer_shutdown = 0;
        INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
        INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);

        psk = parent ? vsock_sk(parent) : NULL;
        if (parent) {
                vsk->trusted = psk->trusted;
                vsk->owner = get_cred(psk->owner);
                vsk->connect_timeout = psk->connect_timeout;
        } else {
                vsk->trusted = capable(CAP_NET_ADMIN);
                vsk->owner = get_current_cred();
                vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
        }

        if (transport->init(vsk, psk) < 0) {
                sk_free(sk);
                return NULL;
        }

        if (sock)
                vsock_insert_unbound(vsk);

        return sk;
}
EXPORT_SYMBOL_GPL(__vsock_create);

static void __vsock_release(struct sock *sk)
{
        if (sk) {
                struct sk_buff *skb;
                struct sock *pending;
                struct vsock_sock *vsk;

                vsk = vsock_sk(sk);
                pending = NULL; /* Compiler warning. */

                transport->release(vsk);

                lock_sock(sk);
                sock_orphan(sk);
                sk->sk_shutdown = SHUTDOWN_MASK;

                while ((skb = skb_dequeue(&sk->sk_receive_queue)))
                        kfree_skb(skb);

                /* Clean up any sockets that never were accepted. */
                while ((pending = vsock_dequeue_accept(sk)) != NULL) {
                        __vsock_release(pending);
                        sock_put(pending);
                }

                release_sock(sk);
                sock_put(sk);
        }
}

static void vsock_sk_destruct(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        transport->destruct(vsk);

        /* When clearing these addresses, there's no need to set the family
         * and possibly register the address family with the kernel.
         */
        vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
        vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

        put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int err;

        err = sock_queue_rcv_skb(sk, skb);
        if (err)
                kfree_skb(skb);

        return err;
}

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
        return transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
        return transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

static int vsock_release(struct socket *sock)
{
        __vsock_release(sock->sk);
        sock->sk = NULL;
        sock->state = SS_FREE;

        return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        int err;
        struct sock *sk;
        struct sockaddr_vm *vm_addr;

        sk = sock->sk;

        if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
                return -EINVAL;

        lock_sock(sk);
        err = __vsock_bind(sk, vm_addr);
        release_sock(sk);

        return err;
}

static int vsock_getname(struct socket *sock,
                         struct sockaddr *addr, int peer)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;
        struct sockaddr_vm *vm_addr;

        sk = sock->sk;
        vsk = vsock_sk(sk);
        err = 0;

        lock_sock(sk);

        if (peer) {
                if (sock->state != SS_CONNECTED) {
                        err = -ENOTCONN;
                        goto out;
                }
                vm_addr = &vsk->remote_addr;
        } else {
                vm_addr = &vsk->local_addr;
        }

        if (!vm_addr) {
                err = -EINVAL;
                goto out;
        }

        /* sys_getsockname() and sys_getpeername() pass us a
         * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
         * that macro is defined in socket.c instead of .h, so we hardcode
         * its value here.
         */
        BUILD_BUG_ON(sizeof(*vm_addr) > 128);
        memcpy(addr, vm_addr, sizeof(*vm_addr));
        err = sizeof(*vm_addr);

out:
        release_sock(sk);
        return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
        int err;
        struct sock *sk;

        /* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
         * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
         * here like the other address families do.  Note also that the
         * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
         * which is what we want.
         */
        mode++;

        if ((mode & ~SHUTDOWN_MASK) || !mode)
                return -EINVAL;

        /* If this is a STREAM socket and it is not connected then bail out
         * immediately.  If it is a DGRAM socket then we must first kick the
         * socket so that it wakes up from any sleeping calls, for example
         * recv(), and then afterwards return the error.
         */

        sk = sock->sk;
        if (sock->state == SS_UNCONNECTED) {
                err = -ENOTCONN;
                if (sk->sk_type == SOCK_STREAM)
                        return err;
        } else {
                sock->state = SS_DISCONNECTING;
                err = 0;
        }

        /* Receive and send shutdowns are treated alike. */
        mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
        if (mode) {
                lock_sock(sk);
                sk->sk_shutdown |= mode;
                sk->sk_state_change(sk);
                release_sock(sk);

                if (sk->sk_type == SOCK_STREAM) {
                        sock_reset_flag(sk, SOCK_DONE);
                        vsock_send_shutdown(sk, mode);
                }
        }

        return err;
}

static __poll_t vsock_poll(struct file *file, struct socket *sock,
                           poll_table *wait)
{
        struct sock *sk;
        __poll_t mask;
        struct vsock_sock *vsk;

        sk = sock->sk;
        vsk = vsock_sk(sk);

        poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        if (sk->sk_err)
                /* Signify that there has been an error on this socket. */
                mask |= EPOLLERR;

        /* INET sockets treat local write shutdown and peer write shutdown as
         * a case of EPOLLHUP set.
         */
        if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
            ((sk->sk_shutdown & SEND_SHUTDOWN) &&
             (vsk->peer_shutdown & SEND_SHUTDOWN))) {
                mask |= EPOLLHUP;
        }

        if (sk->sk_shutdown & RCV_SHUTDOWN ||
            vsk->peer_shutdown & SEND_SHUTDOWN) {
                mask |= EPOLLRDHUP;
        }

        if (sock->type == SOCK_DGRAM) {
                /* For datagram sockets we can read if there is something in
                 * the queue and write as long as the socket isn't shutdown
                 * for sending.
                 */
                if (!skb_queue_empty(&sk->sk_receive_queue) ||
                    (sk->sk_shutdown & RCV_SHUTDOWN)) {
                        mask |= EPOLLIN | EPOLLRDNORM;
                }

                if (!(sk->sk_shutdown & SEND_SHUTDOWN))
                        mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

        } else if (sock->type == SOCK_STREAM) {
                lock_sock(sk);

                /* Listening sockets that have connections in their accept
                 * queue can be read.
                 */
                if (sk->sk_state == TCP_LISTEN
                    && !vsock_is_accept_queue_empty(sk))
                        mask |= EPOLLIN | EPOLLRDNORM;

                /* If there is something in the queue then we can read. */
                if (transport->stream_is_active(vsk) &&
                    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
                        bool data_ready_now = false;
                        int ret = transport->notify_poll_in(
                                        vsk, 1, &data_ready_now);
                        if (ret < 0) {
                                mask |= EPOLLERR;
                        } else {
                                if (data_ready_now)
                                        mask |= EPOLLIN | EPOLLRDNORM;
                        }
                }

                /* Sockets whose connections have been closed, reset, or
                 * terminated should also be considered read, and we check
                 * the shutdown flag for that.
                 */
                if (sk->sk_shutdown & RCV_SHUTDOWN ||
                    vsk->peer_shutdown & SEND_SHUTDOWN) {
                        mask |= EPOLLIN | EPOLLRDNORM;
                }

                /* Connected sockets that can produce data can be written. */
                if (sk->sk_state == TCP_ESTABLISHED) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                                bool space_avail_now = false;
                                int ret = transport->notify_poll_out(
                                                vsk, 1, &space_avail_now);
                                if (ret < 0) {
                                        mask |= EPOLLERR;
                                } else {
                                        if (space_avail_now)
                                                /* Remove EPOLLWRBAND since
                                                 * INET sockets are not
                                                 * setting it.
                                                 */
                                                mask |= EPOLLOUT | EPOLLWRNORM;
                                }
                        }
                }

                /* Simulate INET socket poll behaviors, which sets
                 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to
                 * read, but local send is not shutdown.
                 */
                if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN))
                                mask |= EPOLLOUT | EPOLLWRNORM;
                }

                release_sock(sk);
        }

        return mask;
}

static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
                               size_t len)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;
        struct sockaddr_vm *remote_addr;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        /* For now, MSG_DONTWAIT is always assumed... */
        err = 0;
        sk = sock->sk;
        vsk = vsock_sk(sk);

        lock_sock(sk);

        err = vsock_auto_bind(vsk);
        if (err)
                goto out;

        /* If the provided message contains an address, use that.  Otherwise
         * fall back on the socket's remote handle (if it has been connected).
         */
        if (msg->msg_name &&
            vsock_addr_cast(msg->msg_name, msg->msg_namelen,
                            &remote_addr) == 0) {
                /* Ensure this address is of the right type and is a valid
                 * destination.
                 */

                if (remote_addr->svm_cid == VMADDR_CID_ANY)
                        remote_addr->svm_cid = transport->get_local_cid();

                if (!vsock_addr_bound(remote_addr)) {
                        err = -EINVAL;
                        goto out;
                }
        } else if (sock->state == SS_CONNECTED) {
                remote_addr = &vsk->remote_addr;

                if (remote_addr->svm_cid == VMADDR_CID_ANY)
                        remote_addr->svm_cid = transport->get_local_cid();

                /* XXX Should connect() or this function ensure remote_addr
                 * is bound?
                 */
                if (!vsock_addr_bound(&vsk->remote_addr)) {
                        err = -EINVAL;
                        goto out;
                }
        } else {
                err = -EINVAL;
                goto out;
        }

        if (!transport->dgram_allow(remote_addr->svm_cid,
                                    remote_addr->svm_port)) {
                err = -EINVAL;
                goto out;
        }

        err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
        release_sock(sk);
        return err;
}

static int vsock_dgram_connect(struct socket *sock,
                               struct sockaddr *addr, int addr_len, int flags)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;
        struct sockaddr_vm *remote_addr;

        sk = sock->sk;
        vsk = vsock_sk(sk);

        err = vsock_addr_cast(addr, addr_len, &remote_addr);
        if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
                lock_sock(sk);
                vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
                                VMADDR_PORT_ANY);
                sock->state = SS_UNCONNECTED;
                release_sock(sk);
                return 0;
        } else if (err != 0)
                return -EINVAL;

        lock_sock(sk);

        err = vsock_auto_bind(vsk);
        if (err)
                goto out;

        if (!transport->dgram_allow(remote_addr->svm_cid,
                                    remote_addr->svm_port)) {
                err = -EINVAL;
                goto out;
        }

        memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
        sock->state = SS_CONNECTED;

out:
        release_sock(sk);
        return err;
}

static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
                               size_t len, int flags)
{
        return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags);
}

static const struct proto_ops vsock_dgram_ops = {
        .family = PF_VSOCK,
        .owner = THIS_MODULE,
        .release = vsock_release,
        .bind = vsock_bind,
        .connect = vsock_dgram_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = vsock_getname,
        .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = vsock_shutdown,
        .setsockopt = sock_no_setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = vsock_dgram_sendmsg,
        .recvmsg = vsock_dgram_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
        if (!transport->cancel_pkt)
                return -EOPNOTSUPP;

        return transport->cancel_pkt(vsk);
}

static void vsock_connect_timeout(struct work_struct *work)
{
        struct sock *sk;
        struct vsock_sock *vsk;
        int cancel = 0;

        vsk = container_of(work, struct vsock_sock, connect_work.work);
        sk = sk_vsock(vsk);

        lock_sock(sk);
        if (sk->sk_state == TCP_SYN_SENT &&
            (sk->sk_shutdown != SHUTDOWN_MASK)) {
                sk->sk_state = TCP_CLOSE;
                sk->sk_err = ETIMEDOUT;
                sk->sk_error_report(sk);
                cancel = 1;
        }
        release_sock(sk);
        if (cancel)
                vsock_transport_cancel_pkt(vsk);

        sock_put(sk);
}

static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
                                int addr_len, int flags)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;
        struct sockaddr_vm *remote_addr;
        long timeout;
        DEFINE_WAIT(wait);

        err = 0;
        sk = sock->sk;
        vsk = vsock_sk(sk);

        lock_sock(sk);

        /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
        switch (sock->state) {
        case SS_CONNECTED:
                err = -EISCONN;
                goto out;
        case SS_DISCONNECTING:
                err = -EINVAL;
                goto out;
        case SS_CONNECTING:
                /* This continues on so we can move sock into the SS_CONNECTED
                 * state once the connection has completed (at which point err
                 * will be set to zero also).  Otherwise, we will either wait
                 * for the connection or return -EALREADY should this be a
                 * non-blocking call.
                 */
                err = -EALREADY;
                break;
        default:
                if ((sk->sk_state == TCP_LISTEN) ||
                    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
                        err = -EINVAL;
                        goto out;
                }

                /* The hypervisor and well-known contexts do not have socket
                 * endpoints.
                 */
                if (!transport->stream_allow(remote_addr->svm_cid,
                                             remote_addr->svm_port)) {
                        err = -ENETUNREACH;
                        goto out;
                }

                /* Set the remote address that we are connecting to. */
                memcpy(&vsk->remote_addr, remote_addr,
                       sizeof(vsk->remote_addr));

                err = vsock_auto_bind(vsk);
                if (err)
                        goto out;

                sk->sk_state = TCP_SYN_SENT;

                err = transport->connect(vsk);
                if (err < 0)
                        goto out;

                /* Mark sock as connecting and set the error code to in
                 * progress in case this is a non-blocking connect.
                 */
                sock->state = SS_CONNECTING;
                err = -EINPROGRESS;
        }

        /* The receive path will handle all communication until we are able
         * to enter the connected state.  Here we wait for the connection to
         * be completed or a notification of an error.
         */
        timeout = vsk->connect_timeout;
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

        while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
                if (flags & O_NONBLOCK) {
                        /* If we're not going to block, we schedule a timeout
                         * function to generate a timeout on the connection
                         * attempt, in case the peer doesn't respond in a
                         * timely manner.  We hold on to the socket until the
                         * timeout fires.
                         */
                        sock_hold(sk);
                        schedule_delayed_work(&vsk->connect_work, timeout);

                        /* Skip ahead to preserve error code set above. */
                        goto out_wait;
                }

                release_sock(sk);
                timeout = schedule_timeout(timeout);
                lock_sock(sk);

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeout);
                        sk->sk_state = TCP_CLOSE;
                        sock->state = SS_UNCONNECTED;
                        vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
                } else if (timeout == 0) {
                        err = -ETIMEDOUT;
                        sk->sk_state = TCP_CLOSE;
                        sock->state = SS_UNCONNECTED;
                        vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
                }

                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        }

        if (sk->sk_err) {
                err = -sk->sk_err;
                sk->sk_state = TCP_CLOSE;
                sock->state = SS_UNCONNECTED;
        } else {
                err = 0;
        }

out_wait:
        finish_wait(sk_sleep(sk), &wait);
out:
        release_sock(sk);
        return err;
}

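/* Usage note (illustrative, not from the original source): with O_NONBLOCK
 * the caller sees connect() fail with EINPROGRESS and typically waits for
 * EPOLLOUT via poll()/epoll before checking SO_ERROR, while the connect_work
 * timer scheduled above guarantees the attempt times out if the peer never
 * responds.
 */
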
static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
                        bool kern)
{
        struct sock *listener;
        int err;
        struct sock *connected;
        struct vsock_sock *vconnected;
        long timeout;
        DEFINE_WAIT(wait);

        err = 0;
        listener = sock->sk;

        lock_sock(listener);

        if (sock->type != SOCK_STREAM) {
                err = -EOPNOTSUPP;
                goto out;
        }

        if (listener->sk_state != TCP_LISTEN) {
                err = -EINVAL;
                goto out;
        }

        /* Wait for children sockets to appear; these are the new sockets
         * created upon connection establishment.
         */
        timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
        prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);

        while ((connected = vsock_dequeue_accept(listener)) == NULL &&
               listener->sk_err == 0) {
                release_sock(listener);
                timeout = schedule_timeout(timeout);
                finish_wait(sk_sleep(listener), &wait);
                lock_sock(listener);

                if (signal_pending(current)) {
                        err = sock_intr_errno(timeout);
                        goto out;
                } else if (timeout == 0) {
                        err = -EAGAIN;
                        goto out;
                }

                prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
        }
        finish_wait(sk_sleep(listener), &wait);

        if (listener->sk_err)
                err = -listener->sk_err;

        if (connected) {
                listener->sk_ack_backlog--;

                lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
                vconnected = vsock_sk(connected);

                /* If the listener socket has received an error, then we
                 * should reject this socket and return.  Note that we simply
                 * mark the socket rejected, drop our reference, and let the
                 * cleanup function handle the cleanup; the fact that we
                 * found it in the listener's accept queue guarantees that
                 * the cleanup function hasn't run yet.
                 */
                if (err) {
                        vconnected->rejected = true;
                } else {
                        newsock->state = SS_CONNECTED;
                        sock_graft(connected, newsock);
                }

                release_sock(connected);
                sock_put(connected);
        }

out:
        release_sock(listener);
        return err;
}

static int vsock_listen(struct socket *sock, int backlog)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;

        sk = sock->sk;

        lock_sock(sk);

        if (sock->type != SOCK_STREAM) {
                err = -EOPNOTSUPP;
                goto out;
        }

        if (sock->state != SS_UNCONNECTED) {
                err = -EINVAL;
                goto out;
        }

        vsk = vsock_sk(sk);

        if (!vsock_addr_bound(&vsk->local_addr)) {
                err = -EINVAL;
                goto out;
        }

        sk->sk_max_ack_backlog = backlog;
        sk->sk_state = TCP_LISTEN;

        err = 0;

out:
        release_sock(sk);
        return err;
}

static int vsock_stream_setsockopt(struct socket *sock,
                                   int level,
                                   int optname,
                                   char __user *optval,
                                   unsigned int optlen)
{
        int err;
        struct sock *sk;
        struct vsock_sock *vsk;
        u64 val;

        if (level != AF_VSOCK)
                return -ENOPROTOOPT;

#define COPY_IN(_v)                                               \
        do {                                                      \
                if (optlen < sizeof(_v)) {                        \
                        err = -EINVAL;                            \
                        goto exit;                                \
                }                                                 \
                if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \
                        err = -EFAULT;                            \
                        goto exit;                                \
                }                                                 \
        } while (0)

        err = 0;
        sk = sock->sk;
        vsk = vsock_sk(sk);

        lock_sock(sk);

        switch (optname) {
        case SO_VM_SOCKETS_BUFFER_SIZE:
                COPY_IN(val);
                transport->set_buffer_size(vsk, val);
                break;

        case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
                COPY_IN(val);
                transport->set_max_buffer_size(vsk, val);
                break;

        case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
                COPY_IN(val);
                transport->set_min_buffer_size(vsk, val);
                break;

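        /* SO_VM_SOCKETS_CONNECT_TIMEOUT below converts a struct timeval to
         * jiffies, rounding fractional seconds up.  Worked example (an
         * illustration, not from the original source): with HZ == 250 a
         * jiffy is 4000 us, so tv = { .tv_sec = 1, .tv_usec = 500000 }
         * yields 1 * 250 + DIV_ROUND_UP(500000, 4000) == 375 jiffies.
         */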
        case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
                struct timeval tv;
                COPY_IN(tv);
                if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
                    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
                        vsk->connect_timeout = tv.tv_sec * HZ +
                            DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
                        if (vsk->connect_timeout == 0)
                                vsk->connect_timeout =
                                    VSOCK_DEFAULT_CONNECT_TIMEOUT;

                } else {
                        err = -ERANGE;
                }
                break;
        }

        default:
                err = -ENOPROTOOPT;
                break;
        }

#undef COPY_IN

exit:
        release_sock(sk);
        return err;
}

static int vsock_stream_getsockopt(struct socket *sock,
                                   int level, int optname,
                                   char __user *optval,
                                   int __user *optlen)
{
        int err;
        int len;
        struct sock *sk;
        struct vsock_sock *vsk;
        u64 val;

        if (level != AF_VSOCK)
                return -ENOPROTOOPT;

        err = get_user(len, optlen);
        if (err != 0)
                return err;

#define COPY_OUT(_v)                                      \
        do {                                              \
                if (len < sizeof(_v))                     \
                        return -EINVAL;                   \
                                                          \
                len = sizeof(_v);                         \
                if (copy_to_user(optval, &_v, len) != 0)  \
                        return -EFAULT;                   \
                                                          \
        } while (0)

        err = 0;
        sk = sock->sk;
        vsk = vsock_sk(sk);

        switch (optname) {
        case SO_VM_SOCKETS_BUFFER_SIZE:
                val = transport->get_buffer_size(vsk);
                COPY_OUT(val);
                break;

        case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
                val = transport->get_max_buffer_size(vsk);
                COPY_OUT(val);
                break;

        case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
                val = transport->get_min_buffer_size(vsk);
                COPY_OUT(val);
                break;

        case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
                struct timeval tv;
                tv.tv_sec = vsk->connect_timeout / HZ;
                tv.tv_usec =
                    (vsk->connect_timeout -
                     tv.tv_sec * HZ) * (1000000 / HZ);
                COPY_OUT(tv);
                break;
        }
        default:
                return -ENOPROTOOPT;
        }

        err = put_user(len, optlen);
        if (err != 0)
                return -EFAULT;

#undef COPY_OUT

        return 0;
}

static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
                                size_t len)
{
        struct sock *sk;
        struct vsock_sock *vsk;
        ssize_t total_written;
        long timeout;
        int err;
        struct vsock_transport_send_notify_data send_data;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        sk = sock->sk;
        vsk = vsock_sk(sk);
        total_written = 0;
        err = 0;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        lock_sock(sk);

        /* Callers should not provide a destination with stream sockets. */
        if (msg->msg_namelen) {
                err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
                goto out;
        }

        /* Send data only if both sides are not shutdown in the direction. */
        if (sk->sk_shutdown & SEND_SHUTDOWN ||
            vsk->peer_shutdown & RCV_SHUTDOWN) {
                err = -EPIPE;
                goto out;
        }

        if (sk->sk_state != TCP_ESTABLISHED ||
            !vsock_addr_bound(&vsk->local_addr)) {
                err = -ENOTCONN;
                goto out;
        }

        if (!vsock_addr_bound(&vsk->remote_addr)) {
                err = -EDESTADDRREQ;
                goto out;
        }

        /* Wait for room in the produce queue to enqueue our user's data. */
        timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

        err = transport->notify_send_init(vsk, &send_data);
        if (err < 0)
                goto out;

        while (total_written < len) {
                ssize_t written;

                add_wait_queue(sk_sleep(sk), &wait);
                while (vsock_stream_has_space(vsk) == 0 &&
                       sk->sk_err == 0 &&
                       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
                       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

                        /* Don't wait for non-blocking sockets. */
                        if (timeout == 0) {
                                err = -EAGAIN;
                                remove_wait_queue(sk_sleep(sk), &wait);
                                goto out_err;
                        }

                        err = transport->notify_send_pre_block(vsk, &send_data);
                        if (err < 0) {
                                remove_wait_queue(sk_sleep(sk), &wait);
                                goto out_err;
                        }

                        release_sock(sk);
                        timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
                        lock_sock(sk);
                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeout);
                                remove_wait_queue(sk_sleep(sk), &wait);
                                goto out_err;
                        } else if (timeout == 0) {
                                err = -EAGAIN;
                                remove_wait_queue(sk_sleep(sk), &wait);
                                goto out_err;
                        }
                }
                remove_wait_queue(sk_sleep(sk), &wait);

                /* These checks occur both as part of and after the loop
                 * conditional since we need to check before and after
                 * sleeping.
                 */
                if (sk->sk_err) {
                        err = -sk->sk_err;
                        goto out_err;
                } else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
                           (vsk->peer_shutdown & RCV_SHUTDOWN)) {
                        err = -EPIPE;
                        goto out_err;
                }

                err = transport->notify_send_pre_enqueue(vsk, &send_data);
                if (err < 0)
                        goto out_err;

                /* Note that enqueue will only write as many bytes as are
                 * free in the produce queue, so we don't need to ensure len
                 * is smaller than the queue size.  It is the caller's
                 * responsibility to check how many bytes we were able to
                 * send.
                 */

                written = transport->stream_enqueue(
                                vsk, msg,
                                len - total_written);
                if (written < 0) {
                        err = -ENOMEM;
                        goto out_err;
                }

                total_written += written;

                err = transport->notify_send_post_enqueue(
                                vsk, written, &send_data);
                if (err < 0)
                        goto out_err;
        }

out_err:
        if (total_written > 0)
                err = total_written;
out:
        release_sock(sk);
        return err;
}

static int
vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                     int flags)
{
        struct sock *sk;
        struct vsock_sock *vsk;
        int err;
        size_t target;
        ssize_t copied;
        long timeout;
        struct vsock_transport_recv_notify_data recv_data;

        DEFINE_WAIT(wait);

        sk = sock->sk;
        vsk = vsock_sk(sk);
        err = 0;

        lock_sock(sk);

        if (sk->sk_state != TCP_ESTABLISHED) {
                /* Recvmsg is supposed to return 0 if a peer performs an
                 * orderly shutdown.  Differentiate between that case and
                 * when a peer has not connected or a local shutdown occurred
                 * with the SOCK_DONE flag.
                 */
                if (sock_flag(sk, SOCK_DONE))
                        err = 0;
                else
                        err = -ENOTCONN;

                goto out;
        }

        if (flags & MSG_OOB) {
                err = -EOPNOTSUPP;
                goto out;
        }

        /* We don't check peer_shutdown flag here since peer may actually
         * shut down, but there can be data in the queue that a local socket
         * can receive.
         */
        if (sk->sk_shutdown & RCV_SHUTDOWN) {
                err = 0;
                goto out;
        }

        /* It is valid on Linux to pass in a zero-length receive buffer.
         * This is not an error.  We may as well bail out now.
         */
        if (!len) {
                err = 0;
                goto out;
        }

        /* We must not copy less than target bytes into the user's buffer
         * before returning successfully, so we wait for the consume queue to
         * have that much data to consume before dequeueing.  Note that this
         * makes it impossible to handle cases where target is greater than
         * the queue size.
         */
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
        if (target >= transport->stream_rcvhiwat(vsk)) {
                err = -ENOMEM;
                goto out;
        }
        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
        copied = 0;

        err = transport->notify_recv_init(vsk, target, &recv_data);
        if (err < 0)
                goto out;

        while (1) {
                s64 ready;

                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                ready = vsock_stream_has_data(vsk);

                if (ready == 0) {
                        if (sk->sk_err != 0 ||
                            (sk->sk_shutdown & RCV_SHUTDOWN) ||
                            (vsk->peer_shutdown & SEND_SHUTDOWN)) {
                                finish_wait(sk_sleep(sk), &wait);
                                break;
                        }
                        /* Don't wait for non-blocking sockets. */
                        if (timeout == 0) {
                                err = -EAGAIN;
                                finish_wait(sk_sleep(sk), &wait);
                                break;
                        }

                        err = transport->notify_recv_pre_block(
                                        vsk, target, &recv_data);
                        if (err < 0) {
                                finish_wait(sk_sleep(sk), &wait);
                                break;
                        }
                        release_sock(sk);
                        timeout = schedule_timeout(timeout);
                        lock_sock(sk);

                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeout);
                                finish_wait(sk_sleep(sk), &wait);
                                break;
                        } else if (timeout == 0) {
                                err = -EAGAIN;
                                finish_wait(sk_sleep(sk), &wait);
                                break;
                        }
                } else {
                        ssize_t read;

                        finish_wait(sk_sleep(sk), &wait);

                        if (ready < 0) {
                                /* Invalid queue pair content.  XXX This
                                 * should be changed to a connection reset in
                                 * a later change.
                                 */

                                err = -ENOMEM;
                                goto out;
                        }

                        err = transport->notify_recv_pre_dequeue(
                                        vsk, target, &recv_data);
                        if (err < 0)
                                break;

                        read = transport->stream_dequeue(
                                        vsk, msg,
                                        len - copied, flags);
                        if (read < 0) {
                                err = -ENOMEM;
                                break;
                        }

                        copied += read;

                        err = transport->notify_recv_post_dequeue(
                                        vsk, target, read,
                                        !(flags & MSG_PEEK), &recv_data);
                        if (err < 0)
                                goto out;

                        if (read >= target || flags & MSG_PEEK)
                                break;

                        target -= read;
                }
        }

        if (sk->sk_err)
                err = -sk->sk_err;
        else if (sk->sk_shutdown & RCV_SHUTDOWN)
                err = 0;

        if (copied > 0)
                err = copied;

out:
        release_sock(sk);
        return err;
}

static const struct proto_ops vsock_stream_ops = {
        .family = PF_VSOCK,
        .owner = THIS_MODULE,
        .release = vsock_release,
        .bind = vsock_bind,
        .connect = vsock_stream_connect,
        .socketpair = sock_no_socketpair,
        .accept = vsock_accept,
        .getname = vsock_getname,
        .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = vsock_listen,
        .shutdown = vsock_shutdown,
        .setsockopt = vsock_stream_setsockopt,
        .getsockopt = vsock_stream_getsockopt,
        .sendmsg = vsock_stream_sendmsg,
        .recvmsg = vsock_stream_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

static int vsock_create(struct net *net, struct socket *sock,
                        int protocol, int kern)
{
        if (!sock)
                return -EINVAL;

        if (protocol && protocol != PF_VSOCK)
                return -EPROTONOSUPPORT;

        switch (sock->type) {
        case SOCK_DGRAM:
                sock->ops = &vsock_dgram_ops;
                break;
        case SOCK_STREAM:
                sock->ops = &vsock_stream_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        sock->state = SS_UNCONNECTED;

        return __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern) ? 0 : -ENOMEM;
}

static const struct net_proto_family vsock_family_ops = {
        .family = AF_VSOCK,
        .create = vsock_create,
        .owner = THIS_MODULE,
};

static long vsock_dev_do_ioctl(struct file *filp,
                               unsigned int cmd, void __user *ptr)
{
        u32 __user *p = ptr;
        int retval = 0;

        switch (cmd) {
        case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
                if (put_user(transport->get_local_cid(), p) != 0)
                        retval = -EFAULT;
                break;

        default:
                pr_err("Unknown ioctl %d\n", cmd);
                retval = -EINVAL;
        }

        return retval;
}

static long vsock_dev_ioctl(struct file *filp,
                            unsigned int cmd, unsigned long arg)
{
        return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
                                   unsigned int cmd, unsigned long arg)
{
        return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif

static const struct file_operations vsock_device_ops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vsock_dev_compat_ioctl,
#endif
        .open           = nonseekable_open,
};

static struct miscdevice vsock_device = {
        .name           = "vsock",
        .fops           = &vsock_device_ops,
};

int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
{
        int err = mutex_lock_interruptible(&vsock_register_mutex);

        if (err)
                return err;

        if (transport) {
                err = -EBUSY;
                goto err_busy;
        }

        /* Transport must be the owner of the protocol so that it can't
         * unload while there are open sockets.
         */
        vsock_proto.owner = owner;
        transport = t;

        vsock_device.minor = MISC_DYNAMIC_MINOR;
        err = misc_register(&vsock_device);
        if (err) {
                pr_err("Failed to register misc device\n");
                goto err_reset_transport;
        }

        err = proto_register(&vsock_proto, 1);  /* we want our slab */
        if (err) {
                pr_err("Cannot register vsock protocol\n");
                goto err_deregister_misc;
        }

        err = sock_register(&vsock_family_ops);
        if (err) {
                pr_err("could not register af_vsock (%d) address family: %d\n",
                       AF_VSOCK, err);
                goto err_unregister_proto;
        }

        mutex_unlock(&vsock_register_mutex);
        return 0;

err_unregister_proto:
        proto_unregister(&vsock_proto);
err_deregister_misc:
        misc_deregister(&vsock_device);
err_reset_transport:
        transport = NULL;
err_busy:
        mutex_unlock(&vsock_register_mutex);
        return err;
}
EXPORT_SYMBOL_GPL(__vsock_core_init);

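/* A transport typically registers itself from its module init using the
 * vsock_core_init() wrapper declared in <net/af_vsock.h> (illustrative
 * sketch; "my_transport" and "my_init" are made-up names):
 *
 *     static const struct vsock_transport my_transport = { ... };
 *
 *     static int __init my_init(void)
 *     {
 *             return vsock_core_init(&my_transport);
 *     }
 *
 * and calls vsock_core_exit() from its module exit path.
 */
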
void vsock_core_exit(void)
{
        mutex_lock(&vsock_register_mutex);

        misc_deregister(&vsock_device);
        sock_unregister(AF_VSOCK);
        proto_unregister(&vsock_proto);

        /* We do not want the assignment below re-ordered. */
        mb();
        transport = NULL;

        mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_exit);

const struct vsock_transport *vsock_core_get_transport(void)
{
        /* vsock_register_mutex not taken since only the transport uses this
         * function and only while registered.
         */
        return transport;
}
EXPORT_SYMBOL_GPL(vsock_core_get_transport);

static void __exit vsock_exit(void)
{
        /* Do nothing.  This function makes this module removable. */
}

module_init(vsock_init_tables);
module_exit(vsock_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");