// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 * handles flow control
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/spinlock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

/********************************** send *************************************/
/* handler for send/transmission completion of a CDC msg */
static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
	struct smc_connection *conn = cdcpend->conn;
	struct smc_sock *smc;
	int diff;

	if (!conn)
		/* already dismissed */
		return;

	smc = container_of(conn, struct smc_sock, conn);
	bh_lock_sock(&smc->sk);
	if (!wc_status) {
		diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
				     &cdcpend->conn->tx_curs_fin,
				     &cdcpend->cursor);
		/* sndbuf_space is decreased in smc_sendmsg */
		smp_mb__before_atomic();
		atomic_add(diff, &cdcpend->conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn);
	}
	smc_tx_sndbuf_nonfull(smc);
	bh_unlock_sock(&smc->sk);
}
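/* Reserve a send work request buffer and its pending descriptor on the
 * connection's link for one CDC message. Returns -EPIPE if the connection
 * has been killed in the meantime.
 */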
int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_wr_buf **wr_buf,
			  struct smc_rdma_wr **wr_rdma_buf,
			  struct smc_cdc_tx_pend **pend)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
				     wr_rdma_buf,
				     (struct smc_wr_tx_pend_priv **)pend);
	if (conn->killed)
		/* abnormal termination */
		rc = -EPIPE;
	return rc;
}
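/* Snapshot the current send state (cursors and CDC sequence number) in the
 * pending descriptor, so the tx completion handler can tell how much send
 * buffer space this message confirms.
 */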
static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
					    struct smc_cdc_tx_pend *pend)
{
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
	BUILD_BUG_ON_MSG(
		offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)");
	pend->conn = conn;
	pend->cursor = conn->tx_curs_sent;
	pend->p_cursor = conn->local_tx_ctrl.prod;
	pend->ctrl_seq = conn->tx_cdc_seq;
}
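/* Build a CDC message from the connection's host-format control data and
 * post it on the link; on success, remember the consumer cursor that was
 * confirmed to the peer.
 */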
int smc_cdc_msg_send(struct smc_connection *conn,
		     struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend)
{
	union smc_host_cursor cfed;
	struct smc_link *link;
	int rc;

	link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_cdc_add_pending_send(conn, pend);

	conn->tx_cdc_seq++;
	conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
	smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
	if (!rc) {
		smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}

	return rc;
}
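/* Get a free send slot and send a CDC message on an SMC-R connection,
 * serialized against other senders via conn->send_lock.
 */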
static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend);
	if (rc)
		return rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	spin_unlock_bh(&conn->send_lock);
	return rc;
}
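/* Send a CDC message for either connection variant: write into the ISM
 * device for SMC-D, or post a work request on the RoCE link for SMC-R.
 */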
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	int rc;

	if (!conn->lgr || (conn->lgr->is_smcd && conn->lgr->peer_shutdown))
		return -EPIPE;

	if (conn->lgr->is_smcd) {
		spin_lock_bh(&conn->send_lock);
		rc = smcd_cdc_msg_send(conn);
		spin_unlock_bh(&conn->send_lock);
	} else {
		rc = smcr_cdc_get_slot_and_msg_send(conn);
	}

	return rc;
}
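/* match callback for smc_wr_tx_dismiss_slots(): select the pending CDC
 * sends that belong to the given connection
 */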
static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
			      unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	return cdc_pend->conn == conn;
}
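/* dismiss callback: detach the pending send from its connection, so the
 * tx completion handler treats it as already dismissed
 */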
static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
{
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	cdc_pend->conn = NULL;
}
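/* Dismiss all CDC sends still pending for this connection, e.g. when the
 * connection is being terminated.
 */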
void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
				smc_cdc_tx_filter, smc_cdc_tx_dismisser,
				(unsigned long)conn);
}
/* Send an SMC-D CDC header.
 * This increments the free space available in our send buffer.
 * Also update the confirmed receive buffer with what was sent to the peer.
 */
int smcd_cdc_msg_send(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	union smc_host_cursor curs;
	struct smcd_cdc_msg cdc;
	int rc, diff;

	memset(&cdc, 0, sizeof(cdc));
	cdc.common.type = SMC_CDC_MSG_TYPE;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
	cdc.prod.wrap = curs.wrap;
	cdc.prod.count = curs.count;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
	cdc.cons.wrap = curs.wrap;
	cdc.cons.count = curs.count;
	cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
	cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
	rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
	if (rc)
		return rc;
	smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
	conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	/* Calculate transmitted data and increment free send buffer space */
	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
			     &conn->tx_curs_sent);
	/* increased by confirmed number of bytes */
	smp_mb__before_atomic();
	atomic_add(diff, &conn->sndbuf_space);
	/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
	smp_mb__after_atomic();
	smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);

	smc_tx_sndbuf_nonfull(smc);
	return rc;
}
/********************************* receive ***********************************/
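/* serial number arithmetic on the 16-bit CDC sequence numbers: true if
 * seq1 precedes seq2, taking wraparound into account
 */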
static inline bool smc_cdc_before(u16 seq1, u16 seq2)
{
	return (s16)(seq1 - seq2) < 0;
}
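/* Note the arrival of urgent data: remember the producer cursor and the
 * urgent byte itself, and signal SIGURG to the socket owner.
 */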
static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
					    int *diff_prod)
{
	struct smc_connection *conn = &smc->conn;
	char *base;

	/* new data included urgent business */
	smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
	conn->urg_state = SMC_URG_VALID;
	if (!sock_flag(&smc->sk, SOCK_URGINLINE))
		/* we'll skip the urgent byte, so don't account for it */
		(*diff_prod)--;
	base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
	if (conn->urg_curs.count)
		conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
	else
		conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1);
	sk_send_sigurg(&smc->sk);
}
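/* Apply a received CDC message to the local connection state: free peer
 * RMBE space confirmed by the consumer cursor, account newly produced
 * bytes, wake readers and writers, and act on close/abort indications.
 */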
static void smc_cdc_msg_recv_action(struct smc_sock *smc,
				    struct smc_cdc_msg *cdc)
{
	union smc_host_cursor cons_old, prod_old;
	struct smc_connection *conn = &smc->conn;
	int diff_cons, diff_prod;

	smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
	smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
	smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);

	diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
				  &conn->local_rx_ctrl.cons);
	if (diff_cons) {
		/* peer_rmbe_space is decreased during data transfer with RDMA
		 * write
		 */
		smp_mb__before_atomic();
		atomic_add(diff_cons, &conn->peer_rmbe_space);
		/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
		smp_mb__after_atomic();
	}

	diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
				  &conn->local_rx_ctrl.prod);
	if (diff_prod) {
		if (conn->local_rx_ctrl.prod_flags.urg_data_present)
			smc_cdc_handle_urg_data_arrival(smc, &diff_prod);
		/* bytes_to_rcv is decreased in smc_recvmsg */
		smp_mb__before_atomic();
		atomic_add(diff_prod, &conn->bytes_to_rcv);
		/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
		smp_mb__after_atomic();
		smc->sk.sk_data_ready(&smc->sk);
	} else {
		if (conn->local_rx_ctrl.prod_flags.write_blocked)
			smc->sk.sk_data_ready(&smc->sk);
		if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
			conn->urg_state = SMC_URG_NOTYET;
	}

	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
	if ((diff_cons && smc_tx_prepared_sends(conn)) ||
	    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    conn->local_rx_ctrl.prod_flags.urg_data_pending)
		smc_tx_sndbuf_nonempty(conn);

	if (diff_cons && conn->urg_tx_pend &&
	    atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
		/* urg data confirmed by peer, indicate we're ready for more */
		conn->urg_tx_pend = false;
		smc->sk.sk_write_space(&smc->sk);
	}

	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		smc->sk.sk_err = ECONNRESET;
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	}
	if (smc_cdc_rxed_any_close_or_senddone(conn)) {
		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
		if (smc->clcsock && smc->clcsock->sk)
			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
		sock_set_flag(&smc->sk, SOCK_DONE);
		sock_hold(&smc->sk); /* sock_put in close_work */
		if (!schedule_work(&conn->close_work))
			sock_put(&smc->sk);
	}
}
/* called under tasklet context */
static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
{
	sock_hold(&smc->sk);
	bh_lock_sock(&smc->sk);
	smc_cdc_msg_recv_action(smc, cdc);
	bh_unlock_sock(&smc->sk);
	sock_put(&smc->sk); /* no free sk in softirq-context */
}
/* Tasklet handler for this connection. Scheduled by the ISM device IRQ
 * handler to indicate an update in the DMBE.
 *
 * Context:
 * - tasklet context
 */
static void smcd_cdc_rx_tsklet(unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smcd_cdc_msg *data_cdc;
	struct smcd_cdc_msg cdc;
	struct smc_sock *smc;

	if (!conn || conn->killed)
		return;

	data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
	smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
	smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
	smc = container_of(conn, struct smc_sock, conn);
	smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}
/* Initialize receive tasklet. Called from ISM device IRQ handler to start
 * receiver side.
 */
void smcd_cdc_rx_init(struct smc_connection *conn)
{
	tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
}
/***************************** init, exit, misc ******************************/
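/* receive completion handler for CDC messages arriving on an SMC-R link:
 * validate the message, look up the connection via its token in the link
 * group, and pass the message on unless its sequence number is stale
 */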
static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_cdc_msg *cdc = buf;
	struct smc_connection *conn;
	struct smc_link_group *lgr;
	struct smc_sock *smc;

	if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
		return; /* short message */
	if (cdc->len != SMC_WR_TX_SIZE)
		return; /* invalid message */

	/* lookup connection */
	lgr = smc_get_lgr(link);
	read_lock_bh(&lgr->conns_lock);
	conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
	read_unlock_bh(&lgr->conns_lock);
	if (!conn)
		return;
	smc = container_of(conn, struct smc_sock, conn);

	if (!cdc->prod_flags.failover_validation) {
		if (smc_cdc_before(ntohs(cdc->seqno),
				   conn->local_rx_ctrl.seqno))
			/* received seqno is old */
			return;
	}
	smc_cdc_msg_recv(smc, cdc);
}
static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = {
	{
		.handler = smc_cdc_rx_handler,
		.type = SMC_CDC_MSG_TYPE
	},
	{
		.handler = NULL,
	}
};
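/* register the CDC message type handler(s) with the smc_wr receive
 * dispatcher; called once at module init
 */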
int __init smc_cdc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_cdc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}