// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <net/sock.h>

#include "vmci_transport_notify.h"

#define PKT_FIELD(vsk, field_name) \
	(vmci_trans(vsk)->notify.pkt_q_state.field_name)
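
/* A note on the accessor above: PKT_FIELD(vsk, peer_waiting_write), for
 * example, expands to
 * (vmci_trans(vsk)->notify.pkt_q_state.peer_waiting_write), i.e. all of the
 * per-socket notification state used below lives in the transport's
 * pkt_q_state block.
 */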

static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
{
	bool retval;
	u64 notify_limit;

	if (!PKT_FIELD(vsk, peer_waiting_write))
		return false;

	/* When the sender blocks, we take that as a sign that the sender is
	 * faster than the receiver. To reduce the transmit rate of the sender,
	 * we delay the sending of the read notification by decreasing the
	 * write_notify_window. The notification is delayed until the number of
	 * bytes used in the queue drops below the write_notify_window.
	 */
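
	/* A worked example of the shrink below (values are illustrative,
	 * assuming 4 KiB pages): with write_notify_min_window == 4096 and a
	 * current window of 16384, the first blocked write detected shrinks
	 * the window to 12288. The window never drops below
	 * write_notify_min_window.
	 */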
	if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
		PKT_FIELD(vsk, peer_waiting_write_detected) = true;
		if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
			PKT_FIELD(vsk, write_notify_window) =
			    PKT_FIELD(vsk, write_notify_min_window);
		} else {
			PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
			if (PKT_FIELD(vsk, write_notify_window) <
			    PKT_FIELD(vsk, write_notify_min_window))
				PKT_FIELD(vsk, write_notify_window) =
				    PKT_FIELD(vsk, write_notify_min_window);
		}
	}

	notify_limit = vmci_trans(vsk)->consume_size -
		PKT_FIELD(vsk, write_notify_window);

	/* The notify_limit is used to delay notifications in the case where
	 * flow control is enabled. Below, the test is expressed in terms of
	 * free space in the queue: if free_space > ConsumeSize -
	 * write_notify_window then notify. An alternate way of expressing this
	 * is to rewrite the expression to use the data ready in the receive
	 * queue: if write_notify_window > bufferReady then notify, as
	 * free_space == ConsumeSize - bufferReady.
	 */
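
	/* For example (illustrative numbers): with consume_size == 65536 and
	 * write_notify_window == 8192, notify_limit == 57344. Free space
	 * exceeds 57344 exactly when fewer than 8192 bytes remain buffered,
	 * so the two formulations above are equivalent.
	 */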

	retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
		notify_limit;

	if (retval) {
		/* Once we notify the peer, we reset the detected flag so the
		 * next wait will again cause a decrease in the window size.
		 */
		PKT_FIELD(vsk, peer_waiting_write_detected) = false;
	}
	return retval;
}

static void
vmci_transport_handle_read(struct sock *sk,
			   struct vmci_transport_packet *pkt,
			   bool bottom_half,
			   struct sockaddr_vm *dst, struct sockaddr_vm *src)
{
	/* The peer has read from its queue; wake any blocked writers. */
	sk->sk_write_space(sk);
}

static void
vmci_transport_handle_wrote(struct sock *sk,
			    struct vmci_transport_packet *pkt,
			    bool bottom_half,
			    struct sockaddr_vm *dst, struct sockaddr_vm *src)
{
	/* The peer has written into our consume queue; data is ready. */
	sk->sk_data_ready(sk);
}
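
/* Grow the window by one page per blocking wait, capped at the size of the
 * consume queue. E.g. (illustrative, 4 KiB pages): an 8192-byte window
 * becomes 12288 after the next blocking read, and never exceeds
 * consume_size.
 */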
static void vsock_block_update_write_window(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (PKT_FIELD(vsk, write_notify_window) < vmci_trans(vsk)->consume_size)
		PKT_FIELD(vsk, write_notify_window) =
		    min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
			vmci_trans(vsk)->consume_size);
}

static int vmci_transport_send_read_notification(struct sock *sk)
{
	struct vsock_sock *vsk;
	bool sent_read;
	unsigned int retries;
	int err;

	vsk = vsock_sk(sk);
	sent_read = false;
	retries = 0;
	err = 0;

	if (vmci_transport_notify_waiting_write(vsk)) {
		/* Notify the peer that we have read, retrying the send on
		 * failure up to our maximum value. XXX For now we just log
		 * the failure, but later we should schedule a work item to
		 * handle the resend until it succeeds. That would require
		 * keeping track of work items in the vsk and cleaning them up
		 * upon socket close.
		 */
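		/* The loop below gives up once the peer has shut down its
		 * receive side, the send succeeds, or
		 * VMCI_TRANSPORT_MAX_DGRAM_RESENDS attempts have been made.
		 */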
		while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
		       !sent_read &&
		       retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			err = vmci_transport_send_read(sk);
			if (err >= 0)
				sent_read = true;

			retries++;
		}

		if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_read)
			pr_err("%p unable to send read notification to peer\n",
			       sk);
		else
			PKT_FIELD(vsk, peer_waiting_write) = false;
	}
	return err;
}

static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
	PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
	PKT_FIELD(vsk, peer_waiting_write) = false;
	PKT_FIELD(vsk, peer_waiting_write_detected) = false;
}

static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
{
	PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
	PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
	PKT_FIELD(vsk, peer_waiting_write) = false;
	PKT_FIELD(vsk, peer_waiting_write_detected) = false;
}

static int
vmci_transport_notify_pkt_poll_in(struct sock *sk,
				  size_t target, bool *data_ready_now)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsock_stream_has_data(vsk)) {
		*data_ready_now = true;
	} else {
		/* We can't read right now because there is nothing in the
		 * queue. Ask for notifications when there is something to
		 * read.
		 */
		if (sk->sk_state == TCP_ESTABLISHED)
			vsock_block_update_write_window(sk);
		*data_ready_now = false;
	}

	return 0;
}

static int
vmci_transport_notify_pkt_poll_out(struct sock *sk,
				   size_t target, bool *space_avail_now)
{
	s64 produce_q_free_space;
	struct vsock_sock *vsk = vsock_sk(sk);

	produce_q_free_space = vsock_stream_has_space(vsk);
	if (produce_q_free_space > 0) {
		*space_avail_now = true;
		return 0;
	} else if (produce_q_free_space == 0) {
		/* This is a connected socket but we can't currently send data.
		 * Nothing else to do.
		 */
		*space_avail_now = false;
	}

	return 0;
}

static int
vmci_transport_notify_pkt_recv_init(
				struct sock *sk,
				size_t target,
				struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	data->consume_head = 0;
	data->produce_tail = 0;
	data->notify_on_block = false;

	if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
		PKT_FIELD(vsk, write_notify_min_window) = target + 1;
		if (PKT_FIELD(vsk, write_notify_window) <
		    PKT_FIELD(vsk, write_notify_min_window)) {
			/* If the current window is smaller than the new
			 * minimal window size, we need to reevaluate whether
			 * we need to notify the sender. If the number of ready
			 * bytes is smaller than the new window, we need to
			 * send a notification to the sender before we block.
			 */
			PKT_FIELD(vsk, write_notify_window) =
			    PKT_FIELD(vsk, write_notify_min_window);
			data->notify_on_block = true;
		}
	}

	return 0;
}
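
/* Context for the target + 1 minimum above (illustrative numbers): a
 * blocking recv for target == 4096 bytes raises write_notify_min_window to
 * 4097, so read notifications keep flowing to the sender whenever fewer
 * than 4097 bytes are buffered, i.e. before this receiver could block
 * waiting for them.
 */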

static int
vmci_transport_notify_pkt_recv_pre_block(
				struct sock *sk,
				size_t target,
				struct vmci_transport_recv_notify_data *data)
{
	int err = 0;

	vsock_block_update_write_window(sk);

	if (data->notify_on_block) {
		err = vmci_transport_send_read_notification(sk);
		if (err < 0)
			return err;
		data->notify_on_block = false;
	}

	return err;
}

static int
vmci_transport_notify_pkt_recv_post_dequeue(
				struct sock *sk,
				size_t target,
				ssize_t copied,
				bool data_read,
				struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk;
	int err;
	bool was_full = false;
	u64 free_space;

	vsk = vsock_sk(sk);
	err = 0;

	if (data_read) {
		smp_mb();

		free_space =
		    vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair);
		was_full = free_space == copied;

		if (was_full)
			PKT_FIELD(vsk, peer_waiting_write) = true;

		err = vmci_transport_send_read_notification(sk);
		if (err < 0)
			return err;

		/* See the comment in
		 * vmci_transport_notify_pkt_send_post_enqueue().
		 */
		sk->sk_data_ready(sk);
	}

	return err;
}

static int
vmci_transport_notify_pkt_send_init(
			struct sock *sk,
			struct vmci_transport_send_notify_data *data)
{
	data->consume_head = 0;
	data->produce_tail = 0;

	return 0;
}

static int
vmci_transport_notify_pkt_send_post_enqueue(
				struct sock *sk,
				ssize_t written,
				struct vmci_transport_send_notify_data *data)
{
	int err = 0;
	struct vsock_sock *vsk;
	bool sent_wrote = false;
	bool was_empty;
	int retries = 0;

	vsk = vsock_sk(sk);

	smp_mb();

	was_empty =
	    vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) == written;
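
	/* was_empty holds exactly when the bytes just enqueued are the only
	 * bytes in the produce queue, i.e. the queue was empty before this
	 * write. Only then might the peer be blocked waiting for data, so
	 * only then is a wrote notification sent below.
	 */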
	if (was_empty) {
		while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
		       !sent_wrote &&
		       retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			err = vmci_transport_send_wrote(sk);
			if (err >= 0)
				sent_wrote = true;

			retries++;
		}
	}

	if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_wrote) {
		pr_err("%p unable to send wrote notification to peer\n",
		       sk);
		return err;
	}

	return err;
}

static void
vmci_transport_notify_pkt_handle_pkt(
			struct sock *sk,
			struct vmci_transport_packet *pkt,
			bool bottom_half,
			struct sockaddr_vm *dst,
			struct sockaddr_vm *src, bool *pkt_processed)
{
	bool processed = false;

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
		vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
		processed = true;
		break;
	case VMCI_TRANSPORT_PACKET_TYPE_READ:
		vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
		processed = true;
		break;
	}

	if (pkt_processed)
		*pkt_processed = processed;
}

static void vmci_transport_notify_pkt_process_request(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
	if (vmci_trans(vsk)->consume_size <
		PKT_FIELD(vsk, write_notify_min_window))
		PKT_FIELD(vsk, write_notify_min_window) =
			vmci_trans(vsk)->consume_size;
}

static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
	if (vmci_trans(vsk)->consume_size <
		PKT_FIELD(vsk, write_notify_min_window))
		PKT_FIELD(vsk, write_notify_min_window) =
			vmci_trans(vsk)->consume_size;
}

static int
vmci_transport_notify_pkt_recv_pre_dequeue(
				struct sock *sk,
				size_t target,
				struct vmci_transport_recv_notify_data *data)
{
	return 0; /* NOP for QState. */
}

static int
vmci_transport_notify_pkt_send_pre_block(
				struct sock *sk,
				struct vmci_transport_send_notify_data *data)
{
	return 0; /* NOP for QState. */
}

static int
vmci_transport_notify_pkt_send_pre_enqueue(
				struct sock *sk,
				struct vmci_transport_send_notify_data *data)
{
	return 0; /* NOP for QState. */
}

/* Socket always on control packet based operations. */
const struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
	.socket_init = vmci_transport_notify_pkt_socket_init,
	.socket_destruct = vmci_transport_notify_pkt_socket_destruct,
	.poll_in = vmci_transport_notify_pkt_poll_in,
	.poll_out = vmci_transport_notify_pkt_poll_out,
	.handle_notify_pkt = vmci_transport_notify_pkt_handle_pkt,
	.recv_init = vmci_transport_notify_pkt_recv_init,
	.recv_pre_block = vmci_transport_notify_pkt_recv_pre_block,
	.recv_pre_dequeue = vmci_transport_notify_pkt_recv_pre_dequeue,
	.recv_post_dequeue = vmci_transport_notify_pkt_recv_post_dequeue,
	.send_init = vmci_transport_notify_pkt_send_init,
	.send_pre_block = vmci_transport_notify_pkt_send_pre_block,
	.send_pre_enqueue = vmci_transport_notify_pkt_send_pre_enqueue,
	.send_post_enqueue = vmci_transport_notify_pkt_send_post_enqueue,
	.process_request = vmci_transport_notify_pkt_process_request,
	.process_negotiate = vmci_transport_notify_pkt_process_negotiate,
};
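
/* Usage note (context, not part of this file): this table is one of the
 * vmci_transport_notify_ops implementations; the transport is expected to
 * point vmci_trans(vsk)->notify_ops at it when the peer negotiates the
 * newer queue-state notification protocol, and to fall back to the legacy
 * vmci_transport_notify_pkt_ops otherwise.
 */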