// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"
#define SMC_TX_WORK_DELAY	0
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */

/***************************** sndbuf producer *******************************/
/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
/* Wakeup sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}
/* blocks sndbuf producer until at least one byte of free space available
 * or urgent Byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->killed ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			/* ensure EPOLLOUT is subsequently generated */
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
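
/* Producers parked in smc_tx_wait() are woken via
 * smc->sk.sk_write_space() == smc_tx_write_space(): when a CDC send
 * completes, smc_cdc_tx_handler() returns the acknowledged bytes to
 * sndbuf_space and calls smc_tx_sndbuf_nonfull() above.
 */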
static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}
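
/* Corking is observed on the internal CLC/TCP socket: smc_setsockopt()
 * forwards socket options such as TCP_CORK to smc->clcsock, so an
 * application-level (illustrative)
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_TCP, TCP_CORK, &on, sizeof(on));
 *
 * makes the check above return true.
 */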
/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}
	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->killed)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			if (send_done)
				return send_done;
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc)
				goto out_err;
			continue;
		}
		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
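		/* Worked example (illustrative numbers): with a 16384 byte
		 * sndbuf, tx_cnt_prep == 15360 and copylen == 2048, the 1st
		 * iteration copies 1024 bytes at offset 15360 (up to the
		 * ring end) and the 2nd the remaining 1024 bytes at offset
		 * 0; in the unwrapped case chunk_len_sum == copylen already
		 * ends the loop after the 1st chunk.
		 */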
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
		    (atomic_read(&conn->sndbuf_space) >
						(conn->sndbuf_desc->len >> 1)))
			/* for a corked socket defer the RDMA writes if there
			 * is still sufficient sndbuf_space available
			 */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_CORK_DELAY);
		else
			smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}
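
/* Note on the corked path above: the deferred tx_work lands in
 * smc_tx_work(), so deferred data is flushed after SMC_TX_CORK_DELAY
 * (250 ms) at the latest, even if user space never uncorks the socket.
 */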
/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	struct smc_ism_position pos;
	int rc;

	memset(&pos, 0, sizeof(pos));
	pos.token = conn->peer_token;
	pos.index = conn->peer_rmbe_idx;
	pos.offset = conn->tx_off + offset;
	pos.signal = signal;
	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}
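
/* SMC-D moves data without RDMA: smc_ism_write() hands the chunk to the
 * ISM device, which copies it into the peer's DMB slot named by
 * (token, index, offset). A write failure is treated like a peer abort.
 */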
/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_rdma_wr *rdma_wr)
{
	struct smc_link_group *lgr = conn->lgr;
	struct smc_link *link;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr->wr.num_sge = num_sges;
	rdma_wr->remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
	if (rc)
		smc_lgr_terminate_sched(lgr);
	return rc;
}
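
/* remote_addr composition: the peer's rtoken gives the start of its whole
 * RMB; conn->tx_off selects this connection's RMBE within the RMB, and
 * peer_rmbe_offset is the write position within that RMBE.
 */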
/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}
/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len,
			       struct smc_rdma_wr *wr_rdma_buf)
{
	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		struct ib_sge *sge =
			wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;

		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sge[srcchunk].addr = dma_addr + src_off;
			sge[srcchunk].length = src_len;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges,
				       &wr_rdma_buf->wr_tx_rdma[dstchunk]);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}
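
/* Worked example (illustrative numbers): len == 3000, source wrapping
 * after 1000 bytes (src_off == 15384 in a 16384 byte sndbuf), destination
 * wrapping after 2000 bytes: dstchunk 0 posts one RDMA write carrying two
 * sges (1000 bytes @ 15384 and 1000 bytes @ 0); dstchunk 1 posts a single
 * sge (1000 bytes @ 1000) to RMBE offset 0.
 */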
/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}
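
/* Same two-level chunking as the SMC-R helper above, but each source chunk
 * becomes its own smcd_tx_ism_write(), and dst_off is skewed by
 * sizeof(struct smcd_cdc_msg) because the start of an SMC-D RMBE holds the
 * CDC message rather than data.
 */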
/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn,
			      struct smc_rdma_wr *wr_rdma_buf)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len, wr_rdma_buf);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
							/* dst: peer RMBE */
	smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */

	return 0;
}
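
/* Wrap-case intuition: prod.wrap == cons.wrap means producer and consumer
 * are on the same lap of the peer's RMBE, so the filled area is contiguous
 * and the free space wraps around the ring end (up to two dst chunks);
 * on different laps the free space is one contiguous stretch and a single
 * dst chunk of the entire len suffices.
 */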
/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	struct smc_rdma_wr *wr_rdma_buf;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED)
				return sock_error(&smc->sk);
			if (conn->killed)
				return -EPIPE;
			rc = 0;
			mod_delayed_work(system_wq, &conn->tx_work,
					 SMC_TX_WORK_DELAY);
		}
		return rc;
	}

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present) {
		rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
		if (rc) {
			smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}
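
/* -EBUSY from smc_cdc_get_free_slot() means all work request slots are in
 * flight; unless the socket already failed, the data is not lost but sent
 * later from smc_tx_work() via the requeued tx_work (rc stays 0).
 */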
static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn, NULL);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}
int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	if (conn->killed ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
		return -EPIPE;	/* connection being aborted */
	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	if (!rc) {
		/* trigger socket release if connection is closing */
		struct smc_sock *smc = container_of(conn, struct smc_sock,
						    conn);
		smc_close_wake_tx_prepared(smc);
	}
	return rc;
}
/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	if (smc->sk.sk_err)
		goto out;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
	release_sock(&smc->sk);
}
void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff_large(conn->rmb_desc->len,
						  &cfed, &prod);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if (conn->killed ||
		    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
			return;
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    !conn->killed) {
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}
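
/* The condition above batches consumer cursor updates: a CDC message goes
 * out only on explicit request, on force, or once more than
 * rmbe_update_limit bytes are unconfirmed while the sender is low on space
 * or write_blocked; this avoids one CDC message per read call.
 */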
/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}