/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#ifndef SMC_CDC_H
#define SMC_CDC_H

#include <linux/kernel.h> /* max_t */
#include <linux/atomic.h>
#include <linux/in.h>
#include <linux/compiler.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_wr.h"
24 #define SMC_CDC_MSG_TYPE 0xFE
26 /* in network byte order */
27 union smc_cdc_cursor
{ /* SMC cursor */
33 #ifdef KERNEL_HAS_ATOMIC64
34 atomic64_t acurs
; /* for atomic processing */
36 u64 acurs
; /* for atomic processing */
40 /* in network byte order */
42 struct smc_wr_rx_hdr common
; /* .type = 0xFE */
46 union smc_cdc_cursor prod
;
47 union smc_cdc_cursor cons
; /* piggy backed "ack" */
48 struct smc_cdc_producer_flags prod_flags
;
49 struct smc_cdc_conn_state_flags conn_state_flags
;
53 /* SMC-D cursor format */
54 union smcd_cdc_cursor
{
58 struct smc_cdc_producer_flags prod_flags
;
59 struct smc_cdc_conn_state_flags conn_state_flags
;
61 #ifdef KERNEL_HAS_ATOMIC64
62 atomic64_t acurs
; /* for atomic processing */
64 u64 acurs
; /* for atomic processing */
68 /* CDC message for SMC-D */
70 struct smc_wr_rx_hdr common
; /* Type = 0xFE */
72 union smcd_cdc_cursor prod
;
73 union smcd_cdc_cursor cons
;
77 static inline bool smc_cdc_rxed_any_close(struct smc_connection
*conn
)
79 return conn
->local_rx_ctrl
.conn_state_flags
.peer_conn_abort
||
80 conn
->local_rx_ctrl
.conn_state_flags
.peer_conn_closed
;
83 static inline bool smc_cdc_rxed_any_close_or_senddone(
84 struct smc_connection
*conn
)
86 return smc_cdc_rxed_any_close(conn
) ||
87 conn
->local_rx_ctrl
.conn_state_flags
.peer_done_writing
;
90 static inline void smc_curs_add(int size
, union smc_host_cursor
*curs
,
94 if (curs
->count
>= size
) {
100 /* SMC cursors are 8 bytes long and require atomic reading and writing */
101 static inline u64
smc_curs_read(union smc_host_cursor
*curs
,
102 struct smc_connection
*conn
)
104 #ifndef KERNEL_HAS_ATOMIC64
108 spin_lock_irqsave(&conn
->acurs_lock
, flags
);
110 spin_unlock_irqrestore(&conn
->acurs_lock
, flags
);
113 return atomic64_read(&curs
->acurs
);
117 /* Copy cursor src into tgt */
118 static inline void smc_curs_copy(union smc_host_cursor
*tgt
,
119 union smc_host_cursor
*src
,
120 struct smc_connection
*conn
)
122 #ifndef KERNEL_HAS_ATOMIC64
125 spin_lock_irqsave(&conn
->acurs_lock
, flags
);
126 tgt
->acurs
= src
->acurs
;
127 spin_unlock_irqrestore(&conn
->acurs_lock
, flags
);
129 atomic64_set(&tgt
->acurs
, atomic64_read(&src
->acurs
));
133 static inline void smc_curs_copy_net(union smc_cdc_cursor
*tgt
,
134 union smc_cdc_cursor
*src
,
135 struct smc_connection
*conn
)
137 #ifndef KERNEL_HAS_ATOMIC64
140 spin_lock_irqsave(&conn
->acurs_lock
, flags
);
141 tgt
->acurs
= src
->acurs
;
142 spin_unlock_irqrestore(&conn
->acurs_lock
, flags
);
144 atomic64_set(&tgt
->acurs
, atomic64_read(&src
->acurs
));
148 static inline void smcd_curs_copy(union smcd_cdc_cursor
*tgt
,
149 union smcd_cdc_cursor
*src
,
150 struct smc_connection
*conn
)
152 #ifndef KERNEL_HAS_ATOMIC64
155 spin_lock_irqsave(&conn
->acurs_lock
, flags
);
156 tgt
->acurs
= src
->acurs
;
157 spin_unlock_irqrestore(&conn
->acurs_lock
, flags
);
159 atomic64_set(&tgt
->acurs
, atomic64_read(&src
->acurs
));
163 /* calculate cursor difference between old and new, where old <= new */
164 static inline int smc_curs_diff(unsigned int size
,
165 union smc_host_cursor
*old
,
166 union smc_host_cursor
*new)
168 if (old
->wrap
!= new->wrap
)
170 ((size
- old
->count
) + new->count
));
172 return max_t(int, 0, (new->count
- old
->count
));
175 /* calculate cursor difference between old and new - returns negative
176 * value in case old > new
178 static inline int smc_curs_comp(unsigned int size
,
179 union smc_host_cursor
*old
,
180 union smc_host_cursor
*new)
182 if (old
->wrap
> new->wrap
||
183 (old
->wrap
== new->wrap
&& old
->count
> new->count
))
184 return -smc_curs_diff(size
, new, old
);
185 return smc_curs_diff(size
, old
, new);
188 static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor
*peer
,
189 union smc_host_cursor
*local
,
190 struct smc_connection
*conn
)
192 union smc_host_cursor temp
;
194 smc_curs_copy(&temp
, local
, conn
);
195 peer
->count
= htonl(temp
.count
);
196 peer
->wrap
= htons(temp
.wrap
);
197 /* peer->reserved = htons(0); must be ensured by caller */
200 static inline void smc_host_msg_to_cdc(struct smc_cdc_msg
*peer
,
201 struct smc_host_cdc_msg
*local
,
202 struct smc_connection
*conn
)
204 peer
->common
.type
= local
->common
.type
;
205 peer
->len
= local
->len
;
206 peer
->seqno
= htons(local
->seqno
);
207 peer
->token
= htonl(local
->token
);
208 smc_host_cursor_to_cdc(&peer
->prod
, &local
->prod
, conn
);
209 smc_host_cursor_to_cdc(&peer
->cons
, &local
->cons
, conn
);
210 peer
->prod_flags
= local
->prod_flags
;
211 peer
->conn_state_flags
= local
->conn_state_flags
;
214 static inline void smc_cdc_cursor_to_host(union smc_host_cursor
*local
,
215 union smc_cdc_cursor
*peer
,
216 struct smc_connection
*conn
)
218 union smc_host_cursor temp
, old
;
219 union smc_cdc_cursor net
;
221 smc_curs_copy(&old
, local
, conn
);
222 smc_curs_copy_net(&net
, peer
, conn
);
223 temp
.count
= ntohl(net
.count
);
224 temp
.wrap
= ntohs(net
.wrap
);
225 if ((old
.wrap
> temp
.wrap
) && temp
.wrap
)
227 if ((old
.wrap
== temp
.wrap
) &&
228 (old
.count
> temp
.count
))
230 smc_curs_copy(local
, &temp
, conn
);
233 static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg
*local
,
234 struct smc_cdc_msg
*peer
,
235 struct smc_connection
*conn
)
237 local
->common
.type
= peer
->common
.type
;
238 local
->len
= peer
->len
;
239 local
->seqno
= ntohs(peer
->seqno
);
240 local
->token
= ntohl(peer
->token
);
241 smc_cdc_cursor_to_host(&local
->prod
, &peer
->prod
, conn
);
242 smc_cdc_cursor_to_host(&local
->cons
, &peer
->cons
, conn
);
243 local
->prod_flags
= peer
->prod_flags
;
244 local
->conn_state_flags
= peer
->conn_state_flags
;
247 static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg
*local
,
248 struct smcd_cdc_msg
*peer
)
250 union smc_host_cursor temp
;
252 temp
.wrap
= peer
->prod
.wrap
;
253 temp
.count
= peer
->prod
.count
;
254 atomic64_set(&local
->prod
.acurs
, atomic64_read(&temp
.acurs
));
256 temp
.wrap
= peer
->cons
.wrap
;
257 temp
.count
= peer
->cons
.count
;
258 atomic64_set(&local
->cons
.acurs
, atomic64_read(&temp
.acurs
));
259 local
->prod_flags
= peer
->cons
.prod_flags
;
260 local
->conn_state_flags
= peer
->cons
.conn_state_flags
;
263 static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg
*local
,
264 struct smc_cdc_msg
*peer
,
265 struct smc_connection
*conn
)
267 if (conn
->lgr
->is_smcd
)
268 smcd_cdc_msg_to_host(local
, (struct smcd_cdc_msg
*)peer
);
270 smcr_cdc_msg_to_host(local
, peer
, conn
);
/* opaque per-message tx pend descriptor; definition not visible here */
struct smc_cdc_tx_pend;

/* reserve a send buffer and a tx pend slot for a CDC message */
int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_wr_buf **wr_buf,
			  struct smc_cdc_tx_pend **pend);
/* drop all pending CDC tx slots of this connection */
void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
/* send a CDC message using a previously reserved buffer/slot */
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend);
/* combined reserve-and-send helper */
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
/* SMC-D variant of CDC message send */
int smcd_cdc_msg_send(struct smc_connection *conn);
int smc_cdc_init(void) __init;
/* initialize SMC-D CDC receive handling for this connection */
void smcd_cdc_rx_init(struct smc_connection *conn);
286 #endif /* SMC_CDC_H */