1 // SPDX-License-Identifier: GPL-2.0-only
/* QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 */
6 #include <linux/types.h>
7 #include <asm/byteorder.h>
9 #include <linux/qed/qed_if.h>
11 #include "qedi_fw_iscsi.h"
12 #include "qedi_fw_scsi.h"
14 #define SCSI_NUM_SGES_IN_CACHE 0x4
16 static bool scsi_is_slow_sgl(u16 num_sges
, bool small_mid_sge
)
18 return (num_sges
> SCSI_NUM_SGES_SLOW_SGL_THR
&& small_mid_sge
);
22 void init_scsi_sgl_context(struct scsi_sgl_params
*ctx_sgl_params
,
23 struct scsi_cached_sges
*ctx_data_desc
,
24 struct scsi_sgl_task_params
*sgl_task_params
)
30 num_sges
= (sgl_task_params
->num_sges
> SCSI_NUM_SGES_IN_CACHE
) ?
31 SCSI_NUM_SGES_IN_CACHE
: sgl_task_params
->num_sges
;
34 val
= cpu_to_le32(sgl_task_params
->sgl_phys_addr
.lo
);
35 ctx_sgl_params
->sgl_addr
.lo
= val
;
36 val
= cpu_to_le32(sgl_task_params
->sgl_phys_addr
.hi
);
37 ctx_sgl_params
->sgl_addr
.hi
= val
;
38 val
= cpu_to_le32(sgl_task_params
->total_buffer_size
);
39 ctx_sgl_params
->sgl_total_length
= val
;
40 ctx_sgl_params
->sgl_num_sges
= cpu_to_le16(sgl_task_params
->num_sges
);
42 for (sge_index
= 0; sge_index
< num_sges
; sge_index
++) {
43 val
= cpu_to_le32(sgl_task_params
->sgl
[sge_index
].sge_addr
.lo
);
44 ctx_data_desc
->sge
[sge_index
].sge_addr
.lo
= val
;
45 val
= cpu_to_le32(sgl_task_params
->sgl
[sge_index
].sge_addr
.hi
);
46 ctx_data_desc
->sge
[sge_index
].sge_addr
.hi
= val
;
47 val
= cpu_to_le32(sgl_task_params
->sgl
[sge_index
].sge_len
);
48 ctx_data_desc
->sge
[sge_index
].sge_len
= val
;
52 static u32
calc_rw_task_size(struct iscsi_task_params
*task_params
,
53 enum iscsi_task_type task_type
,
54 struct scsi_sgl_task_params
*sgl_task_params
,
55 struct scsi_dif_task_params
*dif_task_params
)
59 if (task_type
== ISCSI_TASK_TYPE_INITIATOR_WRITE
||
60 task_type
== ISCSI_TASK_TYPE_TARGET_READ
)
61 io_size
= task_params
->tx_io_size
;
63 io_size
= task_params
->rx_io_size
;
71 return !dif_task_params
->dif_on_network
?
72 io_size
: sgl_task_params
->total_buffer_size
;
76 init_dif_context_flags(struct iscsi_dif_flags
*ctx_dif_flags
,
77 struct scsi_dif_task_params
*dif_task_params
)
82 SET_FIELD(ctx_dif_flags
->flags
, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG
,
83 dif_task_params
->dif_block_size_log
);
84 SET_FIELD(ctx_dif_flags
->flags
, ISCSI_DIF_FLAGS_DIF_TO_PEER
,
85 dif_task_params
->dif_on_network
? 1 : 0);
86 SET_FIELD(ctx_dif_flags
->flags
, ISCSI_DIF_FLAGS_HOST_INTERFACE
,
87 dif_task_params
->dif_on_host
? 1 : 0);
90 static void init_sqe(struct iscsi_task_params
*task_params
,
91 struct scsi_sgl_task_params
*sgl_task_params
,
92 struct scsi_dif_task_params
*dif_task_params
,
93 struct iscsi_common_hdr
*pdu_header
,
94 struct scsi_initiator_cmd_params
*cmd_params
,
95 enum iscsi_task_type task_type
,
98 if (!task_params
->sqe
)
101 memset(task_params
->sqe
, 0, sizeof(*task_params
->sqe
));
102 task_params
->sqe
->task_id
= cpu_to_le16(task_params
->itid
);
104 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
105 ISCSI_WQE_TYPE_TASK_CLEANUP
);
110 case ISCSI_TASK_TYPE_INITIATOR_WRITE
:
115 init_dif_context_flags(&task_params
->sqe
->prot_flags
,
118 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
119 ISCSI_WQE_TYPE_NORMAL
);
121 if (task_params
->tx_io_size
) {
122 buf_size
= calc_rw_task_size(task_params
, task_type
,
126 if (scsi_is_slow_sgl(sgl_task_params
->num_sges
,
127 sgl_task_params
->small_mid_sge
))
128 num_sges
= ISCSI_WQE_NUM_SGES_SLOWIO
;
130 num_sges
= min(sgl_task_params
->num_sges
,
131 (u16
)SCSI_NUM_SGES_SLOW_SGL_THR
);
134 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_NUM_SGES
,
136 SET_FIELD(task_params
->sqe
->contlen_cdbsize
, ISCSI_WQE_CONT_LEN
,
139 if (GET_FIELD(pdu_header
->hdr_second_dword
,
140 ISCSI_CMD_HDR_TOTAL_AHS_LEN
))
141 SET_FIELD(task_params
->sqe
->contlen_cdbsize
,
143 cmd_params
->extended_cdb_sge
.sge_len
);
146 case ISCSI_TASK_TYPE_INITIATOR_READ
:
147 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
148 ISCSI_WQE_TYPE_NORMAL
);
150 if (GET_FIELD(pdu_header
->hdr_second_dword
,
151 ISCSI_CMD_HDR_TOTAL_AHS_LEN
))
152 SET_FIELD(task_params
->sqe
->contlen_cdbsize
,
154 cmd_params
->extended_cdb_sge
.sge_len
);
156 case ISCSI_TASK_TYPE_LOGIN_RESPONSE
:
157 case ISCSI_TASK_TYPE_MIDPATH
:
159 bool advance_statsn
= true;
161 if (task_type
== ISCSI_TASK_TYPE_LOGIN_RESPONSE
)
162 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
163 ISCSI_WQE_TYPE_LOGIN
);
165 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
166 ISCSI_WQE_TYPE_MIDDLE_PATH
);
168 if (task_type
== ISCSI_TASK_TYPE_MIDPATH
) {
169 u8 opcode
= GET_FIELD(pdu_header
->hdr_first_byte
,
170 ISCSI_COMMON_HDR_OPCODE
);
172 if (opcode
!= ISCSI_OPCODE_TEXT_RESPONSE
&&
173 (opcode
!= ISCSI_OPCODE_NOP_IN
||
174 pdu_header
->itt
== ISCSI_TTT_ALL_ONES
))
175 advance_statsn
= false;
178 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_RESPONSE
,
179 advance_statsn
? 1 : 0);
181 if (task_params
->tx_io_size
) {
182 SET_FIELD(task_params
->sqe
->contlen_cdbsize
,
183 ISCSI_WQE_CONT_LEN
, task_params
->tx_io_size
);
185 if (scsi_is_slow_sgl(sgl_task_params
->num_sges
,
186 sgl_task_params
->small_mid_sge
))
187 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_NUM_SGES
,
188 ISCSI_WQE_NUM_SGES_SLOWIO
);
190 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_NUM_SGES
,
191 min(sgl_task_params
->num_sges
,
192 (u16
)SCSI_NUM_SGES_SLOW_SGL_THR
));
201 static void init_default_iscsi_task(struct iscsi_task_params
*task_params
,
202 struct data_hdr
*pdu_header
,
203 enum iscsi_task_type task_type
)
205 struct e4_iscsi_task_context
*context
;
210 context
= task_params
->context
;
211 val_byte
= context
->mstorm_ag_context
.cdu_validation
;
212 memset(context
, 0, sizeof(*context
));
213 context
->mstorm_ag_context
.cdu_validation
= val_byte
;
215 for (index
= 0; index
<
216 ARRAY_SIZE(context
->ystorm_st_context
.pdu_hdr
.data
.data
);
218 val
= cpu_to_le32(pdu_header
->data
[index
]);
219 context
->ystorm_st_context
.pdu_hdr
.data
.data
[index
] = val
;
222 context
->mstorm_st_context
.task_type
= task_type
;
223 context
->mstorm_ag_context
.task_cid
=
224 cpu_to_le16(task_params
->conn_icid
);
226 SET_FIELD(context
->ustorm_ag_context
.flags1
,
227 E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV
, 1);
229 context
->ustorm_st_context
.task_type
= task_type
;
230 context
->ustorm_st_context
.cq_rss_number
= task_params
->cq_rss_number
;
231 context
->ustorm_ag_context
.icid
= cpu_to_le16(task_params
->conn_icid
);
235 void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx
*ystc
,
236 struct scsi_initiator_cmd_params
*cmd
)
238 union iscsi_task_hdr
*ctx_pdu_hdr
= &ystc
->pdu_hdr
;
241 if (!cmd
->extended_cdb_sge
.sge_len
)
244 SET_FIELD(ctx_pdu_hdr
->ext_cdb_cmd
.hdr_second_dword
,
245 ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE
,
246 cmd
->extended_cdb_sge
.sge_len
);
247 val
= cpu_to_le32(cmd
->extended_cdb_sge
.sge_addr
.lo
);
248 ctx_pdu_hdr
->ext_cdb_cmd
.cdb_sge
.sge_addr
.lo
= val
;
249 val
= cpu_to_le32(cmd
->extended_cdb_sge
.sge_addr
.hi
);
250 ctx_pdu_hdr
->ext_cdb_cmd
.cdb_sge
.sge_addr
.hi
= val
;
251 val
= cpu_to_le32(cmd
->extended_cdb_sge
.sge_len
);
252 ctx_pdu_hdr
->ext_cdb_cmd
.cdb_sge
.sge_len
= val
;
256 void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx
*ustorm_st_cxt
,
257 struct e4_ustorm_iscsi_task_ag_ctx
*ustorm_ag_cxt
,
258 u32 remaining_recv_len
, u32 expected_data_transfer_len
,
259 u8 num_sges
, bool tx_dif_conn_err_en
)
263 ustorm_st_cxt
->rem_rcv_len
= cpu_to_le32(remaining_recv_len
);
264 ustorm_ag_cxt
->exp_data_acked
= cpu_to_le32(expected_data_transfer_len
);
265 val
= cpu_to_le32(expected_data_transfer_len
);
266 ustorm_st_cxt
->exp_data_transfer_len
= val
;
267 SET_FIELD(ustorm_st_cxt
->reg1
.reg1_map
, ISCSI_REG1_NUM_SGES
, num_sges
);
268 SET_FIELD(ustorm_ag_cxt
->flags2
,
269 E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN
,
270 tx_dif_conn_err_en
? 1 : 0);
274 void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context
*context
,
275 struct iscsi_conn_params
*conn_params
,
276 enum iscsi_task_type task_type
,
278 u32 exp_data_transfer_len
,
281 u32 max_unsolicited_data
= 0, val
;
283 if (total_ahs_length
&&
284 (task_type
== ISCSI_TASK_TYPE_INITIATOR_WRITE
||
285 task_type
== ISCSI_TASK_TYPE_INITIATOR_READ
))
286 SET_FIELD(context
->ustorm_st_context
.flags2
,
287 USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST
, 1);
290 case ISCSI_TASK_TYPE_INITIATOR_WRITE
:
291 if (!conn_params
->initial_r2t
)
292 max_unsolicited_data
= conn_params
->first_burst_length
;
293 else if (conn_params
->immediate_data
)
294 max_unsolicited_data
=
295 min(conn_params
->first_burst_length
,
296 conn_params
->max_send_pdu_length
);
298 context
->ustorm_ag_context
.exp_data_acked
=
299 cpu_to_le32(total_ahs_length
== 0 ?
300 min(exp_data_transfer_len
,
301 max_unsolicited_data
) :
302 ((u32
)(total_ahs_length
+
303 ISCSI_AHS_CNTL_SIZE
)));
305 case ISCSI_TASK_TYPE_TARGET_READ
:
306 val
= cpu_to_le32(exp_data_transfer_len
);
307 context
->ustorm_ag_context
.exp_data_acked
= val
;
309 case ISCSI_TASK_TYPE_INITIATOR_READ
:
310 context
->ustorm_ag_context
.exp_data_acked
=
311 cpu_to_le32((total_ahs_length
== 0 ? 0 :
313 ISCSI_AHS_CNTL_SIZE
));
315 case ISCSI_TASK_TYPE_TARGET_WRITE
:
316 val
= cpu_to_le32(task_size
);
317 context
->ustorm_ag_context
.exp_cont_len
= val
;
325 void init_rtdif_task_context(struct rdif_task_context
*rdif_context
,
326 struct tdif_task_context
*tdif_context
,
327 struct scsi_dif_task_params
*dif_task_params
,
328 enum iscsi_task_type task_type
)
332 if (!dif_task_params
->dif_on_network
|| !dif_task_params
->dif_on_host
)
335 if (task_type
== ISCSI_TASK_TYPE_TARGET_WRITE
||
336 task_type
== ISCSI_TASK_TYPE_INITIATOR_READ
) {
337 rdif_context
->app_tag_value
=
338 cpu_to_le16(dif_task_params
->application_tag
);
339 rdif_context
->partial_crc_value
= cpu_to_le16(0xffff);
340 val
= cpu_to_le32(dif_task_params
->initial_ref_tag
);
341 rdif_context
->initial_ref_tag
= val
;
342 rdif_context
->app_tag_mask
=
343 cpu_to_le16(dif_task_params
->application_tag_mask
);
344 SET_FIELD(rdif_context
->flags0
, RDIF_TASK_CONTEXT_CRC_SEED
,
345 dif_task_params
->crc_seed
? 1 : 0);
346 SET_FIELD(rdif_context
->flags0
,
347 RDIF_TASK_CONTEXT_HOST_GUARD_TYPE
,
348 dif_task_params
->host_guard_type
);
349 SET_FIELD(rdif_context
->flags0
,
350 RDIF_TASK_CONTEXT_PROTECTION_TYPE
,
351 dif_task_params
->protection_type
);
352 SET_FIELD(rdif_context
->flags0
,
353 RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID
, 1);
354 SET_FIELD(rdif_context
->flags0
,
355 RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST
,
356 dif_task_params
->keep_ref_tag_const
? 1 : 0);
357 SET_FIELD(rdif_context
->flags1
,
358 RDIF_TASK_CONTEXT_VALIDATE_APP_TAG
,
359 (dif_task_params
->validate_app_tag
&&
360 dif_task_params
->dif_on_network
) ? 1 : 0);
361 SET_FIELD(rdif_context
->flags1
,
362 RDIF_TASK_CONTEXT_VALIDATE_GUARD
,
363 (dif_task_params
->validate_guard
&&
364 dif_task_params
->dif_on_network
) ? 1 : 0);
365 SET_FIELD(rdif_context
->flags1
,
366 RDIF_TASK_CONTEXT_VALIDATE_REF_TAG
,
367 (dif_task_params
->validate_ref_tag
&&
368 dif_task_params
->dif_on_network
) ? 1 : 0);
369 SET_FIELD(rdif_context
->flags1
,
370 RDIF_TASK_CONTEXT_HOST_INTERFACE
,
371 dif_task_params
->dif_on_host
? 1 : 0);
372 SET_FIELD(rdif_context
->flags1
,
373 RDIF_TASK_CONTEXT_NETWORK_INTERFACE
,
374 dif_task_params
->dif_on_network
? 1 : 0);
375 SET_FIELD(rdif_context
->flags1
,
376 RDIF_TASK_CONTEXT_FORWARD_GUARD
,
377 dif_task_params
->forward_guard
? 1 : 0);
378 SET_FIELD(rdif_context
->flags1
,
379 RDIF_TASK_CONTEXT_FORWARD_APP_TAG
,
380 dif_task_params
->forward_app_tag
? 1 : 0);
381 SET_FIELD(rdif_context
->flags1
,
382 RDIF_TASK_CONTEXT_FORWARD_REF_TAG
,
383 dif_task_params
->forward_ref_tag
? 1 : 0);
384 SET_FIELD(rdif_context
->flags1
,
385 RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK
,
386 dif_task_params
->forward_app_tag_with_mask
? 1 : 0);
387 SET_FIELD(rdif_context
->flags1
,
388 RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK
,
389 dif_task_params
->forward_ref_tag_with_mask
? 1 : 0);
390 SET_FIELD(rdif_context
->flags1
,
391 RDIF_TASK_CONTEXT_INTERVAL_SIZE
,
392 dif_task_params
->dif_block_size_log
- 9);
393 SET_FIELD(rdif_context
->state
,
394 RDIF_TASK_CONTEXT_REF_TAG_MASK
,
395 dif_task_params
->ref_tag_mask
);
396 SET_FIELD(rdif_context
->state
, RDIF_TASK_CONTEXT_IGNORE_APP_TAG
,
397 dif_task_params
->ignore_app_tag
);
400 if (task_type
== ISCSI_TASK_TYPE_TARGET_READ
||
401 task_type
== ISCSI_TASK_TYPE_INITIATOR_WRITE
) {
402 tdif_context
->app_tag_value
=
403 cpu_to_le16(dif_task_params
->application_tag
);
404 tdif_context
->partial_crc_value_b
=
405 cpu_to_le16(dif_task_params
->crc_seed
? 0xffff : 0x0000);
406 tdif_context
->partial_crc_value_a
=
407 cpu_to_le16(dif_task_params
->crc_seed
? 0xffff : 0x0000);
408 SET_FIELD(tdif_context
->flags0
, TDIF_TASK_CONTEXT_CRC_SEED
,
409 dif_task_params
->crc_seed
? 1 : 0);
411 SET_FIELD(tdif_context
->flags0
,
412 TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP
,
413 dif_task_params
->tx_dif_conn_err_en
? 1 : 0);
414 SET_FIELD(tdif_context
->flags1
, TDIF_TASK_CONTEXT_FORWARD_GUARD
,
415 dif_task_params
->forward_guard
? 1 : 0);
416 SET_FIELD(tdif_context
->flags1
,
417 TDIF_TASK_CONTEXT_FORWARD_APP_TAG
,
418 dif_task_params
->forward_app_tag
? 1 : 0);
419 SET_FIELD(tdif_context
->flags1
,
420 TDIF_TASK_CONTEXT_FORWARD_REF_TAG
,
421 dif_task_params
->forward_ref_tag
? 1 : 0);
422 SET_FIELD(tdif_context
->flags1
, TDIF_TASK_CONTEXT_INTERVAL_SIZE
,
423 dif_task_params
->dif_block_size_log
- 9);
424 SET_FIELD(tdif_context
->flags1
,
425 TDIF_TASK_CONTEXT_HOST_INTERFACE
,
426 dif_task_params
->dif_on_host
? 1 : 0);
427 SET_FIELD(tdif_context
->flags1
,
428 TDIF_TASK_CONTEXT_NETWORK_INTERFACE
,
429 dif_task_params
->dif_on_network
? 1 : 0);
430 val
= cpu_to_le32(dif_task_params
->initial_ref_tag
);
431 tdif_context
->initial_ref_tag
= val
;
432 tdif_context
->app_tag_mask
=
433 cpu_to_le16(dif_task_params
->application_tag_mask
);
434 SET_FIELD(tdif_context
->flags0
,
435 TDIF_TASK_CONTEXT_HOST_GUARD_TYPE
,
436 dif_task_params
->host_guard_type
);
437 SET_FIELD(tdif_context
->flags0
,
438 TDIF_TASK_CONTEXT_PROTECTION_TYPE
,
439 dif_task_params
->protection_type
);
440 SET_FIELD(tdif_context
->flags0
,
441 TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID
,
442 dif_task_params
->initial_ref_tag_is_valid
? 1 : 0);
443 SET_FIELD(tdif_context
->flags0
,
444 TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST
,
445 dif_task_params
->keep_ref_tag_const
? 1 : 0);
446 SET_FIELD(tdif_context
->flags1
,
447 TDIF_TASK_CONTEXT_VALIDATE_GUARD
,
448 (dif_task_params
->validate_guard
&&
449 dif_task_params
->dif_on_host
) ? 1 : 0);
450 SET_FIELD(tdif_context
->flags1
,
451 TDIF_TASK_CONTEXT_VALIDATE_APP_TAG
,
452 (dif_task_params
->validate_app_tag
&&
453 dif_task_params
->dif_on_host
) ? 1 : 0);
454 SET_FIELD(tdif_context
->flags1
,
455 TDIF_TASK_CONTEXT_VALIDATE_REF_TAG
,
456 (dif_task_params
->validate_ref_tag
&&
457 dif_task_params
->dif_on_host
) ? 1 : 0);
458 SET_FIELD(tdif_context
->flags1
,
459 TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK
,
460 dif_task_params
->forward_app_tag_with_mask
? 1 : 0);
461 SET_FIELD(tdif_context
->flags1
,
462 TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK
,
463 dif_task_params
->forward_ref_tag_with_mask
? 1 : 0);
464 SET_FIELD(tdif_context
->flags1
,
465 TDIF_TASK_CONTEXT_REF_TAG_MASK
,
466 dif_task_params
->ref_tag_mask
);
467 SET_FIELD(tdif_context
->flags0
,
468 TDIF_TASK_CONTEXT_IGNORE_APP_TAG
,
469 dif_task_params
->ignore_app_tag
? 1 : 0);
473 static void set_local_completion_context(struct e4_iscsi_task_context
*context
)
475 SET_FIELD(context
->ystorm_st_context
.state
.flags
,
476 YSTORM_ISCSI_TASK_STATE_LOCAL_COMP
, 1);
477 SET_FIELD(context
->ustorm_st_context
.flags
,
478 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP
, 1);
481 static int init_rw_iscsi_task(struct iscsi_task_params
*task_params
,
482 enum iscsi_task_type task_type
,
483 struct iscsi_conn_params
*conn_params
,
484 struct iscsi_common_hdr
*pdu_header
,
485 struct scsi_sgl_task_params
*sgl_task_params
,
486 struct scsi_initiator_cmd_params
*cmd_params
,
487 struct scsi_dif_task_params
*dif_task_params
)
489 u32 exp_data_transfer_len
= conn_params
->max_burst_length
;
490 struct e4_iscsi_task_context
*cxt
;
491 bool slow_io
= false;
495 task_size
= calc_rw_task_size(task_params
, task_type
, sgl_task_params
,
498 init_default_iscsi_task(task_params
, (struct data_hdr
*)pdu_header
,
501 cxt
= task_params
->context
;
504 if (task_type
== ISCSI_TASK_TYPE_TARGET_READ
) {
505 set_local_completion_context(cxt
);
506 } else if (task_type
== ISCSI_TASK_TYPE_TARGET_WRITE
) {
507 val
= cpu_to_le32(task_size
+
508 ((struct iscsi_r2t_hdr
*)pdu_header
)->buffer_offset
);
509 cxt
->ystorm_st_context
.pdu_hdr
.r2t
.desired_data_trns_len
= val
;
510 cxt
->mstorm_st_context
.expected_itt
=
511 cpu_to_le32(pdu_header
->itt
);
513 val
= cpu_to_le32(task_size
);
514 cxt
->ystorm_st_context
.pdu_hdr
.cmd
.expected_transfer_length
=
516 init_initiator_rw_cdb_ystorm_context(&cxt
->ystorm_st_context
,
518 val
= cpu_to_le32(cmd_params
->sense_data_buffer_phys_addr
.lo
);
519 cxt
->mstorm_st_context
.sense_db
.lo
= val
;
521 val
= cpu_to_le32(cmd_params
->sense_data_buffer_phys_addr
.hi
);
522 cxt
->mstorm_st_context
.sense_db
.hi
= val
;
525 if (task_params
->tx_io_size
) {
526 init_dif_context_flags(&cxt
->ystorm_st_context
.state
.dif_flags
,
528 init_dif_context_flags(&cxt
->ustorm_st_context
.dif_flags
,
530 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
531 &cxt
->ystorm_st_context
.state
.data_desc
,
534 slow_io
= scsi_is_slow_sgl(sgl_task_params
->num_sges
,
535 sgl_task_params
->small_mid_sge
);
537 num_sges
= !slow_io
? min_t(u16
, sgl_task_params
->num_sges
,
538 (u16
)SCSI_NUM_SGES_SLOW_SGL_THR
) :
539 ISCSI_WQE_NUM_SGES_SLOWIO
;
542 SET_FIELD(cxt
->ystorm_st_context
.state
.flags
,
543 YSTORM_ISCSI_TASK_STATE_SLOW_IO
, 1);
545 } else if (task_params
->rx_io_size
) {
546 init_dif_context_flags(&cxt
->mstorm_st_context
.dif_flags
,
548 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
549 &cxt
->mstorm_st_context
.data_desc
,
551 num_sges
= !scsi_is_slow_sgl(sgl_task_params
->num_sges
,
552 sgl_task_params
->small_mid_sge
) ?
553 min_t(u16
, sgl_task_params
->num_sges
,
554 (u16
)SCSI_NUM_SGES_SLOW_SGL_THR
) :
555 ISCSI_WQE_NUM_SGES_SLOWIO
;
556 cxt
->mstorm_st_context
.rem_task_size
= cpu_to_le32(task_size
);
559 if (exp_data_transfer_len
> task_size
||
560 task_type
!= ISCSI_TASK_TYPE_TARGET_WRITE
)
561 exp_data_transfer_len
= task_size
;
563 init_ustorm_task_contexts(&task_params
->context
->ustorm_st_context
,
564 &task_params
->context
->ustorm_ag_context
,
565 task_size
, exp_data_transfer_len
, num_sges
,
567 dif_task_params
->tx_dif_conn_err_en
: false);
569 set_rw_exp_data_acked_and_cont_len(task_params
->context
, conn_params
,
570 task_type
, task_size
,
571 exp_data_transfer_len
,
572 GET_FIELD(pdu_header
->hdr_second_dword
,
573 ISCSI_CMD_HDR_TOTAL_AHS_LEN
));
576 init_rtdif_task_context(&task_params
->context
->rdif_context
,
577 &task_params
->context
->tdif_context
,
578 dif_task_params
, task_type
);
580 init_sqe(task_params
, sgl_task_params
, dif_task_params
, pdu_header
,
581 cmd_params
, task_type
, false);
586 int init_initiator_rw_iscsi_task(struct iscsi_task_params
*task_params
,
587 struct iscsi_conn_params
*conn_params
,
588 struct scsi_initiator_cmd_params
*cmd_params
,
589 struct iscsi_cmd_hdr
*cmd_header
,
590 struct scsi_sgl_task_params
*tx_sgl_params
,
591 struct scsi_sgl_task_params
*rx_sgl_params
,
592 struct scsi_dif_task_params
*dif_task_params
)
594 if (GET_FIELD(cmd_header
->flags_attr
, ISCSI_CMD_HDR_WRITE
))
595 return init_rw_iscsi_task(task_params
,
596 ISCSI_TASK_TYPE_INITIATOR_WRITE
,
598 (struct iscsi_common_hdr
*)cmd_header
,
599 tx_sgl_params
, cmd_params
,
601 else if (GET_FIELD(cmd_header
->flags_attr
, ISCSI_CMD_HDR_READ
) ||
602 (task_params
->rx_io_size
== 0 && task_params
->tx_io_size
== 0))
603 return init_rw_iscsi_task(task_params
,
604 ISCSI_TASK_TYPE_INITIATOR_READ
,
606 (struct iscsi_common_hdr
*)cmd_header
,
607 rx_sgl_params
, cmd_params
,
613 int init_initiator_login_request_task(struct iscsi_task_params
*task_params
,
614 struct iscsi_login_req_hdr
*login_header
,
615 struct scsi_sgl_task_params
*tx_params
,
616 struct scsi_sgl_task_params
*rx_params
)
618 struct e4_iscsi_task_context
*cxt
;
620 cxt
= task_params
->context
;
622 init_default_iscsi_task(task_params
,
623 (struct data_hdr
*)login_header
,
624 ISCSI_TASK_TYPE_MIDPATH
);
626 init_ustorm_task_contexts(&cxt
->ustorm_st_context
,
627 &cxt
->ustorm_ag_context
,
628 task_params
->rx_io_size
?
629 rx_params
->total_buffer_size
: 0,
630 task_params
->tx_io_size
?
631 tx_params
->total_buffer_size
: 0, 0,
634 if (task_params
->tx_io_size
)
635 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
636 &cxt
->ystorm_st_context
.state
.data_desc
,
639 if (task_params
->rx_io_size
)
640 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
641 &cxt
->mstorm_st_context
.data_desc
,
644 cxt
->mstorm_st_context
.rem_task_size
=
645 cpu_to_le32(task_params
->rx_io_size
?
646 rx_params
->total_buffer_size
: 0);
648 init_sqe(task_params
, tx_params
, NULL
,
649 (struct iscsi_common_hdr
*)login_header
, NULL
,
650 ISCSI_TASK_TYPE_MIDPATH
, false);
655 int init_initiator_nop_out_task(struct iscsi_task_params
*task_params
,
656 struct iscsi_nop_out_hdr
*nop_out_pdu_header
,
657 struct scsi_sgl_task_params
*tx_sgl_task_params
,
658 struct scsi_sgl_task_params
*rx_sgl_task_params
)
660 struct e4_iscsi_task_context
*cxt
;
662 cxt
= task_params
->context
;
664 init_default_iscsi_task(task_params
,
665 (struct data_hdr
*)nop_out_pdu_header
,
666 ISCSI_TASK_TYPE_MIDPATH
);
668 if (nop_out_pdu_header
->itt
== ISCSI_ITT_ALL_ONES
)
669 set_local_completion_context(task_params
->context
);
671 if (task_params
->tx_io_size
)
672 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
673 &cxt
->ystorm_st_context
.state
.data_desc
,
676 if (task_params
->rx_io_size
)
677 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
678 &cxt
->mstorm_st_context
.data_desc
,
681 init_ustorm_task_contexts(&cxt
->ustorm_st_context
,
682 &cxt
->ustorm_ag_context
,
683 task_params
->rx_io_size
?
684 rx_sgl_task_params
->total_buffer_size
: 0,
685 task_params
->tx_io_size
?
686 tx_sgl_task_params
->total_buffer_size
: 0,
689 cxt
->mstorm_st_context
.rem_task_size
=
690 cpu_to_le32(task_params
->rx_io_size
?
691 rx_sgl_task_params
->total_buffer_size
:
694 init_sqe(task_params
, tx_sgl_task_params
, NULL
,
695 (struct iscsi_common_hdr
*)nop_out_pdu_header
, NULL
,
696 ISCSI_TASK_TYPE_MIDPATH
, false);
701 int init_initiator_logout_request_task(struct iscsi_task_params
*task_params
,
702 struct iscsi_logout_req_hdr
*logout_hdr
,
703 struct scsi_sgl_task_params
*tx_params
,
704 struct scsi_sgl_task_params
*rx_params
)
706 struct e4_iscsi_task_context
*cxt
;
708 cxt
= task_params
->context
;
710 init_default_iscsi_task(task_params
,
711 (struct data_hdr
*)logout_hdr
,
712 ISCSI_TASK_TYPE_MIDPATH
);
714 if (task_params
->tx_io_size
)
715 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
716 &cxt
->ystorm_st_context
.state
.data_desc
,
719 if (task_params
->rx_io_size
)
720 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
721 &cxt
->mstorm_st_context
.data_desc
,
724 init_ustorm_task_contexts(&cxt
->ustorm_st_context
,
725 &cxt
->ustorm_ag_context
,
726 task_params
->rx_io_size
?
727 rx_params
->total_buffer_size
: 0,
728 task_params
->tx_io_size
?
729 tx_params
->total_buffer_size
: 0,
732 cxt
->mstorm_st_context
.rem_task_size
=
733 cpu_to_le32(task_params
->rx_io_size
?
734 rx_params
->total_buffer_size
: 0);
736 init_sqe(task_params
, tx_params
, NULL
,
737 (struct iscsi_common_hdr
*)logout_hdr
, NULL
,
738 ISCSI_TASK_TYPE_MIDPATH
, false);
743 int init_initiator_tmf_request_task(struct iscsi_task_params
*task_params
,
744 struct iscsi_tmf_request_hdr
*tmf_header
)
746 init_default_iscsi_task(task_params
, (struct data_hdr
*)tmf_header
,
747 ISCSI_TASK_TYPE_MIDPATH
);
749 init_sqe(task_params
, NULL
, NULL
,
750 (struct iscsi_common_hdr
*)tmf_header
, NULL
,
751 ISCSI_TASK_TYPE_MIDPATH
, false);
756 int init_initiator_text_request_task(struct iscsi_task_params
*task_params
,
757 struct iscsi_text_request_hdr
*text_header
,
758 struct scsi_sgl_task_params
*tx_params
,
759 struct scsi_sgl_task_params
*rx_params
)
761 struct e4_iscsi_task_context
*cxt
;
763 cxt
= task_params
->context
;
765 init_default_iscsi_task(task_params
,
766 (struct data_hdr
*)text_header
,
767 ISCSI_TASK_TYPE_MIDPATH
);
769 if (task_params
->tx_io_size
)
770 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
771 &cxt
->ystorm_st_context
.state
.data_desc
,
774 if (task_params
->rx_io_size
)
775 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
776 &cxt
->mstorm_st_context
.data_desc
,
779 cxt
->mstorm_st_context
.rem_task_size
=
780 cpu_to_le32(task_params
->rx_io_size
?
781 rx_params
->total_buffer_size
: 0);
783 init_ustorm_task_contexts(&cxt
->ustorm_st_context
,
784 &cxt
->ustorm_ag_context
,
785 task_params
->rx_io_size
?
786 rx_params
->total_buffer_size
: 0,
787 task_params
->tx_io_size
?
788 tx_params
->total_buffer_size
: 0, 0, 0);
790 init_sqe(task_params
, tx_params
, NULL
,
791 (struct iscsi_common_hdr
*)text_header
, NULL
,
792 ISCSI_TASK_TYPE_MIDPATH
, false);
797 int init_cleanup_task(struct iscsi_task_params
*task_params
)
799 init_sqe(task_params
, NULL
, NULL
, NULL
, NULL
, ISCSI_TASK_TYPE_MIDPATH
,