// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright 2021 Marvell. All rights reserved. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/storage_common.h>
#include <linux/qed/nvmetcp_common.h>
#include <linux/qed/qed_nvmetcp_if.h>
#include "qed_nvmetcp_fw_funcs.h"
#define NVMETCP_NUM_SGES_IN_CACHE 0x4
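/*
 * An SGL is considered "slow" when it has more SGEs than the fast-path
 * threshold and contains small middle SGEs; such tasks are steered to the
 * FW slow-IO path below.
 */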
bool nvmetcp_is_slow_sgl(u16 num_sges, bool small_mid_sge)
{
	return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
}
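/*
 * Copy the SGL parameters into the task context and cache up to
 * NVMETCP_NUM_SGES_IN_CACHE leading SGEs directly in the context descriptor.
 */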
void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
			   struct scsi_cached_sges *ctx_data_desc,
			   struct storage_sgl_task_params *sgl_params)
{
	u8 num_sges_to_init = (u8)(sgl_params->num_sges > NVMETCP_NUM_SGES_IN_CACHE ?
				   NVMETCP_NUM_SGES_IN_CACHE : sgl_params->num_sges);
	u8 sge_index;

	/* sgl params */
	ctx_sgl_params->sgl_addr.lo = cpu_to_le32(sgl_params->sgl_phys_addr.lo);
	ctx_sgl_params->sgl_addr.hi = cpu_to_le32(sgl_params->sgl_phys_addr.hi);
	ctx_sgl_params->sgl_total_length = cpu_to_le32(sgl_params->total_buffer_size);
	ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_params->num_sges);

	/* Cache the leading SGEs in the task context */
	for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
		ctx_data_desc->sge[sge_index].sge_addr.lo =
			cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.lo);
		ctx_data_desc->sge[sge_index].sge_addr.hi =
			cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.hi);
		ctx_data_desc->sge[sge_index].sge_len =
			cpu_to_le32(sgl_params->sgl[sge_index].sge_len);
	}
}
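/* The task size is the Tx IO size for writes and the Rx IO size otherwise. */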
static inline u32 calc_rw_task_size(struct nvmetcp_task_params *task_params,
				    enum nvmetcp_task_type task_type)
{
	u32 io_size;

	if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE)
		io_size = task_params->tx_io_size;
	else
		io_size = task_params->rx_io_size;

	if (unlikely(!io_size))
		return 0;

	return io_size;
}
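/*
 * Build the SQ element for the task: the WQE type, SGE count and
 * continuation length are derived from the task type and the transmit SGL.
 */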
static inline void init_sqe(struct nvmetcp_task_params *task_params,
			    struct storage_sgl_task_params *sgl_task_params,
			    enum nvmetcp_task_type task_type)
{
	if (!task_params->sqe)
		return;

	memset(task_params->sqe, 0, sizeof(*task_params->sqe));
	task_params->sqe->task_id = cpu_to_le16(task_params->itid);

	switch (task_type) {
	case NVMETCP_TASK_TYPE_HOST_WRITE: {
		u32 buf_size = 0;
		u32 num_sges = 0;

		SET_FIELD(task_params->sqe->contlen_cdbsize,
			  NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_NORMAL);
		if (task_params->tx_io_size) {
			if (task_params->send_write_incapsule)
				buf_size = calc_rw_task_size(task_params, task_type);

			if (nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
						sgl_task_params->small_mid_sge))
				num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO;
			else
				num_sges = min((u16)sgl_task_params->num_sges,
					       (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
		}
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);
		SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);
	}
		break;

	case NVMETCP_TASK_TYPE_HOST_READ: {
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_NORMAL);
		SET_FIELD(task_params->sqe->contlen_cdbsize,
			  NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
	}
		break;

	case NVMETCP_TASK_TYPE_INIT_CONN_REQUEST: {
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_MIDDLE_PATH);

		if (task_params->tx_io_size) {
			SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN,
				  task_params->tx_io_size);
			SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES,
				  min((u16)sgl_task_params->num_sges,
				      (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
		}
	}
		break;

	case NVMETCP_TASK_TYPE_CLEANUP:
		SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
			  NVMETCP_WQE_TYPE_TASK_CLEANUP);

	default:
		break;
	}
}
/* The following function initializes the NVMeTCP task params */
static inline void
init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context,
			 struct nvmetcp_task_params *task_params,
			 enum nvmetcp_task_type task_type)
{
	context->ystorm_st_context.state.cccid = task_params->host_cccid;
	SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1);
	context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo);
	context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi);
}
/* The following function initializes default values for all tasks */
static inline void
init_default_nvmetcp_task(struct nvmetcp_task_params *task_params,
			  void *pdu_header, void *nvme_cmd,
			  enum nvmetcp_task_type task_type)
{
	struct e5_nvmetcp_task_context *context = task_params->context;
	const u8 val_byte = context->mstorm_ag_context.cdu_validation;
	u8 dw_index;

	memset(context, 0, sizeof(*context));
	init_nvmetcp_task_params(context, task_params,
				 (enum nvmetcp_task_type)task_type);

	/* Swapping requirements used below, will be removed in future FW versions */
	if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE ||
	    task_type == NVMETCP_TASK_TYPE_HOST_READ) {
		for (dw_index = 0;
		     dw_index < QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
		     dw_index++)
			context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
				cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));

		for (dw_index = QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
		     dw_index < QED_NVMETCP_CMD_HDR_SIZE / sizeof(u32);
		     dw_index++)
			context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
				cpu_to_le32(__swab32(((u32 *)nvme_cmd)[dw_index - 2]));
	} else {
		for (dw_index = 0;
		     dw_index < QED_NVMETCP_NON_IO_HDR_SIZE / sizeof(u32);
		     dw_index++)
			context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
				cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
	}

	/* M-Storm Context: */
	context->mstorm_ag_context.cdu_validation = val_byte;
	context->mstorm_st_context.task_type = (u8)(task_type);
	context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid);

	/* Ustorm Context: */
	SET_FIELD(context->ustorm_ag_context.flags1, E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV, 1);
	context->ustorm_st_context.task_type = (u8)(task_type);
	context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
	context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
}
/* The following function initializes the U-Storm Task Contexts */
static inline void
init_ustorm_task_contexts(struct ustorm_nvmetcp_task_st_ctx *ustorm_st_context,
			  struct e5_ustorm_nvmetcp_task_ag_ctx *ustorm_ag_context,
			  u32 remaining_recv_len,
			  u32 expected_data_transfer_len, u8 num_sges,
			  bool tx_dif_conn_err_en)
{
	/* Remaining data to be received in bytes. Used in validations */
	ustorm_st_context->rem_rcv_len = cpu_to_le32(remaining_recv_len);
	ustorm_ag_context->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
	ustorm_st_context->exp_data_transfer_len = cpu_to_le32(expected_data_transfer_len);
	SET_FIELD(ustorm_st_context->reg1_map, REG1_NUM_SGES, num_sges);
	SET_FIELD(ustorm_ag_context->flags2, E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN,
		  tx_dif_conn_err_en ? 1 : 0);
}
/* The following function initializes Local Completion Contexts */
static inline void
set_local_completion_context(struct e5_nvmetcp_task_context *context)
{
	SET_FIELD(context->ystorm_st_context.state.flags,
		  YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP, 1);
	SET_FIELD(context->ustorm_st_context.flags,
		  USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP, 1);
}
/* Common Fastpath task init function */
static inline void
init_rw_nvmetcp_task(struct nvmetcp_task_params *task_params,
		     enum nvmetcp_task_type task_type,
		     void *pdu_header, void *nvme_cmd,
		     struct storage_sgl_task_params *sgl_task_params)
{
	struct e5_nvmetcp_task_context *context = task_params->context;
	u32 task_size = calc_rw_task_size(task_params, task_type);
	bool slow_io = false;
	u8 num_sges = 0;

	init_default_nvmetcp_task(task_params, pdu_header, nvme_cmd, task_type);

	/* Tx/Rx: */
	if (task_params->tx_io_size) {
		/* if data to transmit: */
		init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
				      &context->ystorm_st_context.state.data_desc,
				      sgl_task_params);
		slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
					      sgl_task_params->small_mid_sge);
		num_sges =
			(u8)(!slow_io ? min((u32)sgl_task_params->num_sges,
					    (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
					    NVMETCP_WQE_NUM_SGES_SLOWIO);
		if (slow_io) {
			SET_FIELD(context->ystorm_st_context.state.flags,
				  YSTORM_NVMETCP_TASK_STATE_SLOW_IO, 1);
		}
	} else if (task_params->rx_io_size) {
		/* if data to receive: */
		init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
				      &context->mstorm_st_context.data_desc,
				      sgl_task_params);
		num_sges =
			(u8)(!nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
						  sgl_task_params->small_mid_sge) ?
			     min((u32)sgl_task_params->num_sges,
				 (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
			     NVMETCP_WQE_NUM_SGES_SLOWIO);
		context->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
	}

	/* Ustorm context: */
	init_ustorm_task_contexts(&context->ustorm_st_context,
				  &context->ustorm_ag_context,
				  /* Remaining Receive length is the Task Size */
				  task_size,
				  /* The size of the transmitted task */
				  task_size,
				  num_sges,
				  false);

	/* Set exp_data_acked */
	if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) {
		if (task_params->send_write_incapsule)
			context->ustorm_ag_context.exp_data_acked = task_size;
		else
			context->ustorm_ag_context.exp_data_acked = 0;
	} else if (task_type == NVMETCP_TASK_TYPE_HOST_READ) {
		context->ustorm_ag_context.exp_data_acked = 0;
	}

	context->ustorm_ag_context.exp_cont_len = 0;
	init_sqe(task_params, sgl_task_params, task_type);
}
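/* Host read task: the common fastpath init with the HOST_READ task type */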
static void
init_common_initiator_read_task(struct nvmetcp_task_params *task_params,
				struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				struct nvme_command *nvme_cmd,
				struct storage_sgl_task_params *sgl_task_params)
{
	init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_READ,
			     cmd_pdu_header, nvme_cmd, sgl_task_params);
}
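/* Exported entry point for initializing host read tasks */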
void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
				 struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				 struct nvme_command *nvme_cmd,
				 struct storage_sgl_task_params *sgl_task_params)
{
	init_common_initiator_read_task(task_params, (void *)cmd_pdu_header,
					(void *)nvme_cmd, sgl_task_params);
}
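/*
 * Illustrative call sequence (a minimal sketch; the surrounding setup and
 * variable names are hypothetical, only the nvmetcp_task_params fields used
 * below appear in this file):
 *
 *	struct nvmetcp_task_params params = {};
 *
 *	params.context = task_ctx;	// e5_nvmetcp_task_context buffer
 *	params.sqe = sqe;		// free SQE slot on the SQ ring
 *	params.itid = itid;		// FW task id
 *	params.conn_icid = icid;	// connection context id
 *	params.host_cccid = cccid;	// host command id
 *	params.cq_rss_number = cq_idx;	// target completion queue
 *	params.rx_io_size = data_len;	// bytes to read
 *	init_nvmetcp_host_read_task(&params, cmd_pdu, nvme_cmd, &sgl_params);
 */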
static void
init_common_initiator_write_task(struct nvmetcp_task_params *task_params,
				 struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				 struct nvme_command *nvme_cmd,
				 struct storage_sgl_task_params *sgl_task_params)
{
	init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_WRITE,
			     cmd_pdu_header, nvme_cmd, sgl_task_params);
}
void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
				  struct nvme_tcp_cmd_pdu *cmd_pdu_header,
				  struct nvme_command *nvme_cmd,
				  struct storage_sgl_task_params *sgl_task_params)
{
	init_common_initiator_write_task(task_params, (void *)cmd_pdu_header,
					 (void *)nvme_cmd, sgl_task_params);
}
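/*
 * Login (ICReq) task init: unlike the fastpath tasks, both a Tx and an Rx
 * SGL may be supplied, and the SQE is built as a middle-path WQE.
 */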
static void
init_common_login_request_task(struct nvmetcp_task_params *task_params,
			       void *login_req_pdu_header,
			       struct storage_sgl_task_params *tx_sgl_task_params,
			       struct storage_sgl_task_params *rx_sgl_task_params)
{
	struct e5_nvmetcp_task_context *context = task_params->context;

	init_default_nvmetcp_task(task_params, (void *)login_req_pdu_header, NULL,
				  NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);

	/* Ustorm Context: */
	init_ustorm_task_contexts(&context->ustorm_st_context,
				  &context->ustorm_ag_context,
				  /* Remaining Receive length is the Task Size */
				  task_params->rx_io_size ?
				  rx_sgl_task_params->total_buffer_size : 0,
				  /* The size of the transmitted task */
				  task_params->tx_io_size ?
				  tx_sgl_task_params->total_buffer_size : 0,
				  0, /* num_sges */
				  0); /* tx_dif_conn_err_en */

	/* SGL context: */
	if (task_params->tx_io_size)
		init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
				      &context->ystorm_st_context.state.data_desc,
				      tx_sgl_task_params);
	if (task_params->rx_io_size)
		init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
				      &context->mstorm_st_context.data_desc,
				      rx_sgl_task_params);

	context->mstorm_st_context.rem_task_size =
		cpu_to_le32(task_params->rx_io_size ?
			    rx_sgl_task_params->total_buffer_size : 0);

	init_sqe(task_params, tx_sgl_task_params, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
}
/* The following function initializes the Login task in Host mode */
void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
				     struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
				     struct storage_sgl_task_params *tx_sgl_task_params,
				     struct storage_sgl_task_params *rx_sgl_task_params)
{
	init_common_login_request_task(task_params, init_conn_req_pdu_hdr,
				       tx_sgl_task_params, rx_sgl_task_params);
}
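/* Cleanup posts a TASK_CLEANUP SQE only; no task context is initialized. */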
void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params)
{
	init_sqe(task_params, NULL, NVMETCP_TASK_TYPE_CLEANUP);
}