crypto: arm64/aes-ghash - yield NEON after every block of input
[linux/fpc-iii.git] / drivers / scsi / qedi / qedi_fw_api.c
bloba269da1a6c75cdd432fc07c5defecec3c752575e
1 /* QLogic iSCSI Offload Driver
2 * Copyright (c) 2016 Cavium Inc.
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include "qedi_hsi.h"
12 #include <linux/qed/qed_if.h>
14 #include "qedi_fw_iscsi.h"
15 #include "qedi_fw_scsi.h"
17 #define SCSI_NUM_SGES_IN_CACHE 0x4
19 static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
21 return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
24 static
25 void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
26 struct scsi_cached_sges *ctx_data_desc,
27 struct scsi_sgl_task_params *sgl_task_params)
29 u8 sge_index;
30 u8 num_sges;
31 u32 val;
33 num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
34 SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
36 /* sgl params */
37 val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
38 ctx_sgl_params->sgl_addr.lo = val;
39 val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
40 ctx_sgl_params->sgl_addr.hi = val;
41 val = cpu_to_le32(sgl_task_params->total_buffer_size);
42 ctx_sgl_params->sgl_total_length = val;
43 ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
45 for (sge_index = 0; sge_index < num_sges; sge_index++) {
46 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
47 ctx_data_desc->sge[sge_index].sge_addr.lo = val;
48 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
49 ctx_data_desc->sge[sge_index].sge_addr.hi = val;
50 val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
51 ctx_data_desc->sge[sge_index].sge_len = val;
55 static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
56 enum iscsi_task_type task_type,
57 struct scsi_sgl_task_params *sgl_task_params,
58 struct scsi_dif_task_params *dif_task_params)
60 u32 io_size;
62 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
63 task_type == ISCSI_TASK_TYPE_TARGET_READ)
64 io_size = task_params->tx_io_size;
65 else
66 io_size = task_params->rx_io_size;
68 if (!io_size)
69 return 0;
71 if (!dif_task_params)
72 return io_size;
74 return !dif_task_params->dif_on_network ?
75 io_size : sgl_task_params->total_buffer_size;
78 static void
79 init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
80 struct scsi_dif_task_params *dif_task_params)
82 if (!dif_task_params)
83 return;
85 SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
86 dif_task_params->dif_block_size_log);
87 SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
88 dif_task_params->dif_on_network ? 1 : 0);
89 SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
90 dif_task_params->dif_on_host ? 1 : 0);
/* Build the submission queue element (SQE) for a task.  Cleanup requests
 * only carry the task id and a cleanup WQE type; otherwise the WQE type,
 * SGE count, continuation length and (when the command carries AHS) the
 * extended-CDB size are derived from the task type and parameters.
 */
static void init_sqe(struct iscsi_task_params *task_params,
		     struct scsi_sgl_task_params *sgl_task_params,
		     struct scsi_dif_task_params *dif_task_params,
		     struct iscsi_common_hdr *pdu_header,
		     struct scsi_initiator_cmd_params *cmd_params,
		     enum iscsi_task_type task_type,
		     bool is_cleanup)
{
	if (!task_params->sqe)
		return;

	memset(task_params->sqe, 0, sizeof(*task_params->sqe));
	task_params->sqe->task_id = cpu_to_le16(task_params->itid);
	if (is_cleanup) {
		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_TASK_CLEANUP);
		return;
	}

	switch (task_type) {
	case ISCSI_TASK_TYPE_INITIATOR_WRITE:
	{
		u32 buf_size = 0;
		u32 num_sges = 0;

		init_dif_context_flags(&task_params->sqe->prot_flags,
				       dif_task_params);

		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_NORMAL);

		if (task_params->tx_io_size) {
			buf_size = calc_rw_task_size(task_params, task_type,
						     sgl_task_params,
						     dif_task_params);

			/* Slow SGLs are handed to firmware as a whole. */
			if (scsi_is_slow_sgl(sgl_task_params->num_sges,
					     sgl_task_params->small_mid_sge))
				num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
			else
				num_sges = min(sgl_task_params->num_sges,
					       (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
		}

		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
		SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
			  buf_size);

		/* Non-zero AHS length implies an extended CDB. */
		if (GET_FIELD(pdu_header->hdr_second_dword,
			      ISCSI_CMD_HDR_TOTAL_AHS_LEN))
			SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE,
				  cmd_params->extended_cdb_sge.sge_len);
	}
		break;
	case ISCSI_TASK_TYPE_INITIATOR_READ:
		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_NORMAL);

		if (GET_FIELD(pdu_header->hdr_second_dword,
			      ISCSI_CMD_HDR_TOTAL_AHS_LEN))
			SET_FIELD(task_params->sqe->contlen_cdbsize,
				  ISCSI_WQE_CDB_SIZE,
				  cmd_params->extended_cdb_sge.sge_len);
		break;
	case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
	case ISCSI_TASK_TYPE_MIDPATH:
	{
		bool advance_statsn = true;

		if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
			SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
				  ISCSI_WQE_TYPE_LOGIN);
		else
			SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
				  ISCSI_WQE_TYPE_MIDDLE_PATH);

		/* StatSN only advances for PDUs that solicit a response:
		 * text responses and NOP-INs that expect a NOP-OUT reply.
		 */
		if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
			u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
					      ISCSI_COMMON_HDR_OPCODE);

			if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
			    (opcode != ISCSI_OPCODE_NOP_IN ||
			     pdu_header->itt == ISCSI_TTT_ALL_ONES))
				advance_statsn = false;
		}

		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
			  advance_statsn ? 1 : 0);

		if (task_params->tx_io_size) {
			SET_FIELD(task_params->sqe->contlen_cdbsize,
				  ISCSI_WQE_CONT_LEN, task_params->tx_io_size);

			if (scsi_is_slow_sgl(sgl_task_params->num_sges,
					     sgl_task_params->small_mid_sge))
				SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
					  ISCSI_WQE_NUM_SGES_SLOWIO);
			else
				SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
					  min(sgl_task_params->num_sges,
					      (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
		}
	}
		break;
	default:
		break;
	}
}
202 static void init_default_iscsi_task(struct iscsi_task_params *task_params,
203 struct data_hdr *pdu_header,
204 enum iscsi_task_type task_type)
206 struct e4_iscsi_task_context *context;
207 u32 val;
208 u16 index;
209 u8 val_byte;
211 context = task_params->context;
212 val_byte = context->mstorm_ag_context.cdu_validation;
213 memset(context, 0, sizeof(*context));
214 context->mstorm_ag_context.cdu_validation = val_byte;
216 for (index = 0; index <
217 ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
218 index++) {
219 val = cpu_to_le32(pdu_header->data[index]);
220 context->ystorm_st_context.pdu_hdr.data.data[index] = val;
223 context->mstorm_st_context.task_type = task_type;
224 context->mstorm_ag_context.task_cid =
225 cpu_to_le16(task_params->conn_icid);
227 SET_FIELD(context->ustorm_ag_context.flags1,
228 E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
230 context->ustorm_st_context.task_type = task_type;
231 context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
232 context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
235 static
236 void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
237 struct scsi_initiator_cmd_params *cmd)
239 union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
240 u32 val;
242 if (!cmd->extended_cdb_sge.sge_len)
243 return;
245 SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
246 ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
247 cmd->extended_cdb_sge.sge_len);
248 val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
249 ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
250 val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
251 ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
252 val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
253 ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len = val;
256 static
257 void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
258 struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
259 u32 remaining_recv_len, u32 expected_data_transfer_len,
260 u8 num_sges, bool tx_dif_conn_err_en)
262 u32 val;
264 ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
265 ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
266 val = cpu_to_le32(expected_data_transfer_len);
267 ustorm_st_cxt->exp_data_transfer_len = val;
268 SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
269 SET_FIELD(ustorm_ag_cxt->flags2,
270 E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
271 tx_dif_conn_err_en ? 1 : 0);
/* Set the per-task-type expected-acked / continuation lengths in the
 * ustorm context, and flag AHS presence for initiator commands.
 */
static
void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
					struct iscsi_conn_params *conn_params,
					enum iscsi_task_type task_type,
					u32 task_size,
					u32 exp_data_transfer_len,
					u8 total_ahs_length)
{
	u32 max_unsolicited_data = 0, val;

	if (total_ahs_length &&
	    (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
	     task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
		SET_FIELD(context->ustorm_st_context.flags2,
			  USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);

	switch (task_type) {
	case ISCSI_TASK_TYPE_INITIATOR_WRITE:
		/* Unsolicited data is bounded by FirstBurstLength, further
		 * capped by MaxSendPDULength when only ImmediateData is
		 * allowed (InitialR2T=Yes).
		 */
		if (!conn_params->initial_r2t)
			max_unsolicited_data = conn_params->first_burst_length;
		else if (conn_params->immediate_data)
			max_unsolicited_data =
					min(conn_params->first_burst_length,
					    conn_params->max_send_pdu_length);

		/* With AHS, only the header extension counts as pre-acked. */
		context->ustorm_ag_context.exp_data_acked =
				cpu_to_le32(total_ahs_length == 0 ?
					    min(exp_data_transfer_len,
						max_unsolicited_data) :
					    ((u32)(total_ahs_length +
						   ISCSI_AHS_CNTL_SIZE)));
		break;
	case ISCSI_TASK_TYPE_TARGET_READ:
		val = cpu_to_le32(exp_data_transfer_len);
		context->ustorm_ag_context.exp_data_acked = val;
		break;
	case ISCSI_TASK_TYPE_INITIATOR_READ:
		context->ustorm_ag_context.exp_data_acked =
				cpu_to_le32((total_ahs_length == 0 ? 0 :
					     total_ahs_length +
					     ISCSI_AHS_CNTL_SIZE));
		break;
	case ISCSI_TASK_TYPE_TARGET_WRITE:
		val = cpu_to_le32(task_size);
		context->ustorm_ag_context.exp_cont_len = val;
		break;
	default:
		break;
	}
}
/* Program the receive (RDIF) and transmit (TDIF) protection-information
 * contexts from the task's DIF parameters.  Nothing is programmed unless
 * DIF is enabled on both the host and the network interface.
 */
static
void init_rtdif_task_context(struct rdif_task_context *rdif_context,
			     struct tdif_task_context *tdif_context,
			     struct scsi_dif_task_params *dif_task_params,
			     enum iscsi_task_type task_type)
{
	u32 val;

	if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
		return;

	/* RDIF covers the receive direction: target write / initiator read. */
	if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
	    task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
		rdif_context->app_tag_value =
			cpu_to_le16(dif_task_params->application_tag);
		rdif_context->partial_crc_value = cpu_to_le16(0xffff);
		val = cpu_to_le32(dif_task_params->initial_ref_tag);
		rdif_context->initial_ref_tag = val;
		rdif_context->app_tag_mask =
			cpu_to_le16(dif_task_params->application_tag_mask);
		SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
			  dif_task_params->crc_seed ? 1 : 0);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
			  dif_task_params->host_guard_type);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_PROTECTION_TYPE,
			  dif_task_params->protection_type);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
		SET_FIELD(rdif_context->flags0,
			  RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
			  dif_task_params->keep_ref_tag_const ? 1 : 0);
		/* Validation applies only when DIF travels on the wire. */
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
			  (dif_task_params->validate_app_tag &&
			   dif_task_params->dif_on_network) ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_VALIDATE_GUARD,
			  (dif_task_params->validate_guard &&
			   dif_task_params->dif_on_network) ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
			  (dif_task_params->validate_ref_tag &&
			   dif_task_params->dif_on_network) ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_HOST_INTERFACE,
			  dif_task_params->dif_on_host ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
			  dif_task_params->dif_on_network ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_GUARD,
			  dif_task_params->forward_guard ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
			  dif_task_params->forward_app_tag ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
			  dif_task_params->forward_ref_tag ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
		/* Interval size is encoded relative to a 512-byte block
		 * (log2(block) - 9).
		 */
		SET_FIELD(rdif_context->flags1,
			  RDIF_TASK_CONTEXT_INTERVAL_SIZE,
			  dif_task_params->dif_block_size_log - 9);
		SET_FIELD(rdif_context->state,
			  RDIF_TASK_CONTEXT_REF_TAG_MASK,
			  dif_task_params->ref_tag_mask);
		SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
			  dif_task_params->ignore_app_tag);
	}

	/* TDIF covers the transmit direction: target read / initiator write. */
	if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
	    task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
		tdif_context->app_tag_value =
			cpu_to_le16(dif_task_params->application_tag);
		tdif_context->partial_crc_value_b =
			cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
		tdif_context->partial_crc_value_a =
			cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
		SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
			  dif_task_params->crc_seed ? 1 : 0);

		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
			  dif_task_params->tx_dif_conn_err_en ? 1 : 0);
		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
			  dif_task_params->forward_guard ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
			  dif_task_params->forward_app_tag ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
			  dif_task_params->forward_ref_tag ? 1 : 0);
		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
			  dif_task_params->dif_block_size_log - 9);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_HOST_INTERFACE,
			  dif_task_params->dif_on_host ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
			  dif_task_params->dif_on_network ? 1 : 0);
		val = cpu_to_le32(dif_task_params->initial_ref_tag);
		tdif_context->initial_ref_tag = val;
		tdif_context->app_tag_mask =
			cpu_to_le16(dif_task_params->application_tag_mask);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
			  dif_task_params->host_guard_type);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_PROTECTION_TYPE,
			  dif_task_params->protection_type);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
			  dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
			  dif_task_params->keep_ref_tag_const ? 1 : 0);
		/* On transmit, validation applies on the host interface side. */
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_GUARD,
			  (dif_task_params->validate_guard &&
			   dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
			  (dif_task_params->validate_app_tag &&
			   dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
			  (dif_task_params->validate_ref_tag &&
			   dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_REF_TAG_MASK,
			  dif_task_params->ref_tag_mask);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
			  dif_task_params->ignore_app_tag ? 1 : 0);
	}
}
474 static void set_local_completion_context(struct e4_iscsi_task_context *context)
476 SET_FIELD(context->ystorm_st_context.state.flags,
477 YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
478 SET_FIELD(context->ustorm_st_context.flags,
479 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
/* Initialize a read/write task: default context, per-direction SGL/DIF
 * programming, ustorm transfer lengths, optional RDIF/TDIF contexts and
 * the SQE.  Returns 0 on success.
 */
static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
			      enum iscsi_task_type task_type,
			      struct iscsi_conn_params *conn_params,
			      struct iscsi_common_hdr *pdu_header,
			      struct scsi_sgl_task_params *sgl_task_params,
			      struct scsi_initiator_cmd_params *cmd_params,
			      struct scsi_dif_task_params *dif_task_params)
{
	u32 exp_data_transfer_len = conn_params->max_burst_length;
	struct e4_iscsi_task_context *cxt;
	bool slow_io = false;
	u32 task_size, val;
	u8 num_sges = 0;

	task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
				      dif_task_params);

	init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
				task_type);

	cxt = task_params->context;

	if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
		set_local_completion_context(cxt);
	} else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
		/* R2T: desired length starts at the R2T buffer offset. */
		val = cpu_to_le32(task_size +
				  ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
		cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
		cxt->mstorm_st_context.expected_itt =
			cpu_to_le32(pdu_header->itt);
	} else {
		/* Initiator read/write command. */
		val = cpu_to_le32(task_size);
		cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
			val;
		init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
						     cmd_params);
		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
		cxt->mstorm_st_context.sense_db.lo = val;

		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
		cxt->mstorm_st_context.sense_db.hi = val;
	}

	if (task_params->tx_io_size) {
		/* Transmit path: ystorm/ustorm DIF flags and ystorm SGL. */
		init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
				       dif_task_params);
		init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
				       dif_task_params);
		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
				      &cxt->ystorm_st_context.state.data_desc,
				      sgl_task_params);

		slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
					   sgl_task_params->small_mid_sge);

		num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
					    (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
					    ISCSI_WQE_NUM_SGES_SLOWIO;

		if (slow_io) {
			SET_FIELD(cxt->ystorm_st_context.state.flags,
				  YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
		}
	} else if (task_params->rx_io_size) {
		/* Receive path: mstorm DIF flags, SGL and remaining size. */
		init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
				       dif_task_params);
		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
				      &cxt->mstorm_st_context.data_desc,
				      sgl_task_params);
		num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
				sgl_task_params->small_mid_sge) ?
				min_t(u16, sgl_task_params->num_sges,
				      (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
				ISCSI_WQE_NUM_SGES_SLOWIO;
		cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
	}

	/* Only target writes may expect more than the task size. */
	if (exp_data_transfer_len > task_size ||
	    task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
		exp_data_transfer_len = task_size;

	init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
				  &task_params->context->ustorm_ag_context,
				  task_size, exp_data_transfer_len, num_sges,
				  dif_task_params ?
				  dif_task_params->tx_dif_conn_err_en : false);

	set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
					   task_type, task_size,
					   exp_data_transfer_len,
					   GET_FIELD(pdu_header->hdr_second_dword,
						     ISCSI_CMD_HDR_TOTAL_AHS_LEN));

	if (dif_task_params)
		init_rtdif_task_context(&task_params->context->rdif_context,
					&task_params->context->tdif_context,
					dif_task_params, task_type);

	init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
		 cmd_params, task_type, false);

	return 0;
}
587 int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
588 struct iscsi_conn_params *conn_params,
589 struct scsi_initiator_cmd_params *cmd_params,
590 struct iscsi_cmd_hdr *cmd_header,
591 struct scsi_sgl_task_params *tx_sgl_params,
592 struct scsi_sgl_task_params *rx_sgl_params,
593 struct scsi_dif_task_params *dif_task_params)
595 if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
596 return init_rw_iscsi_task(task_params,
597 ISCSI_TASK_TYPE_INITIATOR_WRITE,
598 conn_params,
599 (struct iscsi_common_hdr *)cmd_header,
600 tx_sgl_params, cmd_params,
601 dif_task_params);
602 else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ) ||
603 (task_params->rx_io_size == 0 && task_params->tx_io_size == 0))
604 return init_rw_iscsi_task(task_params,
605 ISCSI_TASK_TYPE_INITIATOR_READ,
606 conn_params,
607 (struct iscsi_common_hdr *)cmd_header,
608 rx_sgl_params, cmd_params,
609 dif_task_params);
610 else
611 return -1;
614 int init_initiator_login_request_task(struct iscsi_task_params *task_params,
615 struct iscsi_login_req_hdr *login_header,
616 struct scsi_sgl_task_params *tx_params,
617 struct scsi_sgl_task_params *rx_params)
619 struct e4_iscsi_task_context *cxt;
621 cxt = task_params->context;
623 init_default_iscsi_task(task_params,
624 (struct data_hdr *)login_header,
625 ISCSI_TASK_TYPE_MIDPATH);
627 init_ustorm_task_contexts(&cxt->ustorm_st_context,
628 &cxt->ustorm_ag_context,
629 task_params->rx_io_size ?
630 rx_params->total_buffer_size : 0,
631 task_params->tx_io_size ?
632 tx_params->total_buffer_size : 0, 0,
635 if (task_params->tx_io_size)
636 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
637 &cxt->ystorm_st_context.state.data_desc,
638 tx_params);
640 if (task_params->rx_io_size)
641 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
642 &cxt->mstorm_st_context.data_desc,
643 rx_params);
645 cxt->mstorm_st_context.rem_task_size =
646 cpu_to_le32(task_params->rx_io_size ?
647 rx_params->total_buffer_size : 0);
649 init_sqe(task_params, tx_params, NULL,
650 (struct iscsi_common_hdr *)login_header, NULL,
651 ISCSI_TASK_TYPE_MIDPATH, false);
653 return 0;
656 int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
657 struct iscsi_nop_out_hdr *nop_out_pdu_header,
658 struct scsi_sgl_task_params *tx_sgl_task_params,
659 struct scsi_sgl_task_params *rx_sgl_task_params)
661 struct e4_iscsi_task_context *cxt;
663 cxt = task_params->context;
665 init_default_iscsi_task(task_params,
666 (struct data_hdr *)nop_out_pdu_header,
667 ISCSI_TASK_TYPE_MIDPATH);
669 if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
670 set_local_completion_context(task_params->context);
672 if (task_params->tx_io_size)
673 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
674 &cxt->ystorm_st_context.state.data_desc,
675 tx_sgl_task_params);
677 if (task_params->rx_io_size)
678 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
679 &cxt->mstorm_st_context.data_desc,
680 rx_sgl_task_params);
682 init_ustorm_task_contexts(&cxt->ustorm_st_context,
683 &cxt->ustorm_ag_context,
684 task_params->rx_io_size ?
685 rx_sgl_task_params->total_buffer_size : 0,
686 task_params->tx_io_size ?
687 tx_sgl_task_params->total_buffer_size : 0,
688 0, 0);
690 cxt->mstorm_st_context.rem_task_size =
691 cpu_to_le32(task_params->rx_io_size ?
692 rx_sgl_task_params->total_buffer_size :
695 init_sqe(task_params, tx_sgl_task_params, NULL,
696 (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
697 ISCSI_TASK_TYPE_MIDPATH, false);
699 return 0;
702 int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
703 struct iscsi_logout_req_hdr *logout_hdr,
704 struct scsi_sgl_task_params *tx_params,
705 struct scsi_sgl_task_params *rx_params)
707 struct e4_iscsi_task_context *cxt;
709 cxt = task_params->context;
711 init_default_iscsi_task(task_params,
712 (struct data_hdr *)logout_hdr,
713 ISCSI_TASK_TYPE_MIDPATH);
715 if (task_params->tx_io_size)
716 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
717 &cxt->ystorm_st_context.state.data_desc,
718 tx_params);
720 if (task_params->rx_io_size)
721 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
722 &cxt->mstorm_st_context.data_desc,
723 rx_params);
725 init_ustorm_task_contexts(&cxt->ustorm_st_context,
726 &cxt->ustorm_ag_context,
727 task_params->rx_io_size ?
728 rx_params->total_buffer_size : 0,
729 task_params->tx_io_size ?
730 tx_params->total_buffer_size : 0,
731 0, 0);
733 cxt->mstorm_st_context.rem_task_size =
734 cpu_to_le32(task_params->rx_io_size ?
735 rx_params->total_buffer_size : 0);
737 init_sqe(task_params, tx_params, NULL,
738 (struct iscsi_common_hdr *)logout_hdr, NULL,
739 ISCSI_TASK_TYPE_MIDPATH, false);
741 return 0;
744 int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
745 struct iscsi_tmf_request_hdr *tmf_header)
747 init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
748 ISCSI_TASK_TYPE_MIDPATH);
750 init_sqe(task_params, NULL, NULL,
751 (struct iscsi_common_hdr *)tmf_header, NULL,
752 ISCSI_TASK_TYPE_MIDPATH, false);
754 return 0;
757 int init_initiator_text_request_task(struct iscsi_task_params *task_params,
758 struct iscsi_text_request_hdr *text_header,
759 struct scsi_sgl_task_params *tx_params,
760 struct scsi_sgl_task_params *rx_params)
762 struct e4_iscsi_task_context *cxt;
764 cxt = task_params->context;
766 init_default_iscsi_task(task_params,
767 (struct data_hdr *)text_header,
768 ISCSI_TASK_TYPE_MIDPATH);
770 if (task_params->tx_io_size)
771 init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
772 &cxt->ystorm_st_context.state.data_desc,
773 tx_params);
775 if (task_params->rx_io_size)
776 init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
777 &cxt->mstorm_st_context.data_desc,
778 rx_params);
780 cxt->mstorm_st_context.rem_task_size =
781 cpu_to_le32(task_params->rx_io_size ?
782 rx_params->total_buffer_size : 0);
784 init_ustorm_task_contexts(&cxt->ustorm_st_context,
785 &cxt->ustorm_ag_context,
786 task_params->rx_io_size ?
787 rx_params->total_buffer_size : 0,
788 task_params->tx_io_size ?
789 tx_params->total_buffer_size : 0, 0, 0);
791 init_sqe(task_params, tx_params, NULL,
792 (struct iscsi_common_hdr *)text_header, NULL,
793 ISCSI_TASK_TYPE_MIDPATH, false);
795 return 0;
798 int init_cleanup_task(struct iscsi_task_params *task_params)
800 init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
801 true);
802 return 0;