/* QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
12 #include <linux/qed/qed_if.h>
14 #include "qedi_fw_iscsi.h"
15 #include "qedi_fw_scsi.h"
17 #define SCSI_NUM_SGES_IN_CACHE 0x4
19 static bool scsi_is_slow_sgl(u16 num_sges
, bool small_mid_sge
)
21 return (num_sges
> SCSI_NUM_SGES_SLOW_SGL_THR
&& small_mid_sge
);
25 void init_scsi_sgl_context(struct scsi_sgl_params
*ctx_sgl_params
,
26 struct scsi_cached_sges
*ctx_data_desc
,
27 struct scsi_sgl_task_params
*sgl_task_params
)
33 num_sges
= (sgl_task_params
->num_sges
> SCSI_NUM_SGES_IN_CACHE
) ?
34 SCSI_NUM_SGES_IN_CACHE
: sgl_task_params
->num_sges
;
37 val
= cpu_to_le32(sgl_task_params
->sgl_phys_addr
.lo
);
38 ctx_sgl_params
->sgl_addr
.lo
= val
;
39 val
= cpu_to_le32(sgl_task_params
->sgl_phys_addr
.hi
);
40 ctx_sgl_params
->sgl_addr
.hi
= val
;
41 val
= cpu_to_le32(sgl_task_params
->total_buffer_size
);
42 ctx_sgl_params
->sgl_total_length
= val
;
43 ctx_sgl_params
->sgl_num_sges
= cpu_to_le16(sgl_task_params
->num_sges
);
45 for (sge_index
= 0; sge_index
< num_sges
; sge_index
++) {
46 val
= cpu_to_le32(sgl_task_params
->sgl
[sge_index
].sge_addr
.lo
);
47 ctx_data_desc
->sge
[sge_index
].sge_addr
.lo
= val
;
48 val
= cpu_to_le32(sgl_task_params
->sgl
[sge_index
].sge_addr
.hi
);
49 ctx_data_desc
->sge
[sge_index
].sge_addr
.hi
= val
;
50 val
= cpu_to_le32(sgl_task_params
->sgl
[sge_index
].sge_len
);
51 ctx_data_desc
->sge
[sge_index
].sge_len
= val
;
55 static u32
calc_rw_task_size(struct iscsi_task_params
*task_params
,
56 enum iscsi_task_type task_type
,
57 struct scsi_sgl_task_params
*sgl_task_params
,
58 struct scsi_dif_task_params
*dif_task_params
)
62 if (task_type
== ISCSI_TASK_TYPE_INITIATOR_WRITE
||
63 task_type
== ISCSI_TASK_TYPE_TARGET_READ
)
64 io_size
= task_params
->tx_io_size
;
66 io_size
= task_params
->rx_io_size
;
74 return !dif_task_params
->dif_on_network
?
75 io_size
: sgl_task_params
->total_buffer_size
;
79 init_dif_context_flags(struct iscsi_dif_flags
*ctx_dif_flags
,
80 struct scsi_dif_task_params
*dif_task_params
)
85 SET_FIELD(ctx_dif_flags
->flags
, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG
,
86 dif_task_params
->dif_block_size_log
);
87 SET_FIELD(ctx_dif_flags
->flags
, ISCSI_DIF_FLAGS_DIF_TO_PEER
,
88 dif_task_params
->dif_on_network
? 1 : 0);
89 SET_FIELD(ctx_dif_flags
->flags
, ISCSI_DIF_FLAGS_HOST_INTERFACE
,
90 dif_task_params
->dif_on_host
? 1 : 0);
93 static void init_sqe(struct iscsi_task_params
*task_params
,
94 struct scsi_sgl_task_params
*sgl_task_params
,
95 struct scsi_dif_task_params
*dif_task_params
,
96 struct iscsi_common_hdr
*pdu_header
,
97 struct scsi_initiator_cmd_params
*cmd_params
,
98 enum iscsi_task_type task_type
,
101 if (!task_params
->sqe
)
104 memset(task_params
->sqe
, 0, sizeof(*task_params
->sqe
));
105 task_params
->sqe
->task_id
= cpu_to_le16(task_params
->itid
);
107 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
108 ISCSI_WQE_TYPE_TASK_CLEANUP
);
113 case ISCSI_TASK_TYPE_INITIATOR_WRITE
:
118 init_dif_context_flags(&task_params
->sqe
->prot_flags
,
121 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
122 ISCSI_WQE_TYPE_NORMAL
);
124 if (task_params
->tx_io_size
) {
125 buf_size
= calc_rw_task_size(task_params
, task_type
,
129 if (scsi_is_slow_sgl(sgl_task_params
->num_sges
,
130 sgl_task_params
->small_mid_sge
))
131 num_sges
= ISCSI_WQE_NUM_SGES_SLOWIO
;
133 num_sges
= min(sgl_task_params
->num_sges
,
134 (u16
)SCSI_NUM_SGES_SLOW_SGL_THR
);
137 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_NUM_SGES
,
139 SET_FIELD(task_params
->sqe
->contlen_cdbsize
, ISCSI_WQE_CONT_LEN
,
142 if (GET_FIELD(pdu_header
->hdr_second_dword
,
143 ISCSI_CMD_HDR_TOTAL_AHS_LEN
))
144 SET_FIELD(task_params
->sqe
->contlen_cdbsize
,
146 cmd_params
->extended_cdb_sge
.sge_len
);
149 case ISCSI_TASK_TYPE_INITIATOR_READ
:
150 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
151 ISCSI_WQE_TYPE_NORMAL
);
153 if (GET_FIELD(pdu_header
->hdr_second_dword
,
154 ISCSI_CMD_HDR_TOTAL_AHS_LEN
))
155 SET_FIELD(task_params
->sqe
->contlen_cdbsize
,
157 cmd_params
->extended_cdb_sge
.sge_len
);
159 case ISCSI_TASK_TYPE_LOGIN_RESPONSE
:
160 case ISCSI_TASK_TYPE_MIDPATH
:
162 bool advance_statsn
= true;
164 if (task_type
== ISCSI_TASK_TYPE_LOGIN_RESPONSE
)
165 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
166 ISCSI_WQE_TYPE_LOGIN
);
168 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_WQE_TYPE
,
169 ISCSI_WQE_TYPE_MIDDLE_PATH
);
171 if (task_type
== ISCSI_TASK_TYPE_MIDPATH
) {
172 u8 opcode
= GET_FIELD(pdu_header
->hdr_first_byte
,
173 ISCSI_COMMON_HDR_OPCODE
);
175 if (opcode
!= ISCSI_OPCODE_TEXT_RESPONSE
&&
176 (opcode
!= ISCSI_OPCODE_NOP_IN
||
177 pdu_header
->itt
== ISCSI_TTT_ALL_ONES
))
178 advance_statsn
= false;
181 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_RESPONSE
,
182 advance_statsn
? 1 : 0);
184 if (task_params
->tx_io_size
) {
185 SET_FIELD(task_params
->sqe
->contlen_cdbsize
,
186 ISCSI_WQE_CONT_LEN
, task_params
->tx_io_size
);
188 if (scsi_is_slow_sgl(sgl_task_params
->num_sges
,
189 sgl_task_params
->small_mid_sge
))
190 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_NUM_SGES
,
191 ISCSI_WQE_NUM_SGES_SLOWIO
);
193 SET_FIELD(task_params
->sqe
->flags
, ISCSI_WQE_NUM_SGES
,
194 min(sgl_task_params
->num_sges
,
195 (u16
)SCSI_NUM_SGES_SLOW_SGL_THR
));
204 static void init_default_iscsi_task(struct iscsi_task_params
*task_params
,
205 struct data_hdr
*pdu_header
,
206 enum iscsi_task_type task_type
)
208 struct e4_iscsi_task_context
*context
;
213 context
= task_params
->context
;
214 val_byte
= context
->mstorm_ag_context
.cdu_validation
;
215 memset(context
, 0, sizeof(*context
));
216 context
->mstorm_ag_context
.cdu_validation
= val_byte
;
218 for (index
= 0; index
<
219 ARRAY_SIZE(context
->ystorm_st_context
.pdu_hdr
.data
.data
);
221 val
= cpu_to_le32(pdu_header
->data
[index
]);
222 context
->ystorm_st_context
.pdu_hdr
.data
.data
[index
] = val
;
225 context
->mstorm_st_context
.task_type
= task_type
;
226 context
->mstorm_ag_context
.task_cid
=
227 cpu_to_le16(task_params
->conn_icid
);
229 SET_FIELD(context
->ustorm_ag_context
.flags1
,
230 E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV
, 1);
232 context
->ustorm_st_context
.task_type
= task_type
;
233 context
->ustorm_st_context
.cq_rss_number
= task_params
->cq_rss_number
;
234 context
->ustorm_ag_context
.icid
= cpu_to_le16(task_params
->conn_icid
);
238 void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx
*ystc
,
239 struct scsi_initiator_cmd_params
*cmd
)
241 union iscsi_task_hdr
*ctx_pdu_hdr
= &ystc
->pdu_hdr
;
244 if (!cmd
->extended_cdb_sge
.sge_len
)
247 SET_FIELD(ctx_pdu_hdr
->ext_cdb_cmd
.hdr_second_dword
,
248 ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE
,
249 cmd
->extended_cdb_sge
.sge_len
);
250 val
= cpu_to_le32(cmd
->extended_cdb_sge
.sge_addr
.lo
);
251 ctx_pdu_hdr
->ext_cdb_cmd
.cdb_sge
.sge_addr
.lo
= val
;
252 val
= cpu_to_le32(cmd
->extended_cdb_sge
.sge_addr
.hi
);
253 ctx_pdu_hdr
->ext_cdb_cmd
.cdb_sge
.sge_addr
.hi
= val
;
254 val
= cpu_to_le32(cmd
->extended_cdb_sge
.sge_len
);
255 ctx_pdu_hdr
->ext_cdb_cmd
.cdb_sge
.sge_len
= val
;
259 void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx
*ustorm_st_cxt
,
260 struct e4_ustorm_iscsi_task_ag_ctx
*ustorm_ag_cxt
,
261 u32 remaining_recv_len
, u32 expected_data_transfer_len
,
262 u8 num_sges
, bool tx_dif_conn_err_en
)
266 ustorm_st_cxt
->rem_rcv_len
= cpu_to_le32(remaining_recv_len
);
267 ustorm_ag_cxt
->exp_data_acked
= cpu_to_le32(expected_data_transfer_len
);
268 val
= cpu_to_le32(expected_data_transfer_len
);
269 ustorm_st_cxt
->exp_data_transfer_len
= val
;
270 SET_FIELD(ustorm_st_cxt
->reg1
.reg1_map
, ISCSI_REG1_NUM_SGES
, num_sges
);
271 SET_FIELD(ustorm_ag_cxt
->flags2
,
272 E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN
,
273 tx_dif_conn_err_en
? 1 : 0);
277 void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context
*context
,
278 struct iscsi_conn_params
*conn_params
,
279 enum iscsi_task_type task_type
,
281 u32 exp_data_transfer_len
,
284 u32 max_unsolicited_data
= 0, val
;
286 if (total_ahs_length
&&
287 (task_type
== ISCSI_TASK_TYPE_INITIATOR_WRITE
||
288 task_type
== ISCSI_TASK_TYPE_INITIATOR_READ
))
289 SET_FIELD(context
->ustorm_st_context
.flags2
,
290 USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST
, 1);
293 case ISCSI_TASK_TYPE_INITIATOR_WRITE
:
294 if (!conn_params
->initial_r2t
)
295 max_unsolicited_data
= conn_params
->first_burst_length
;
296 else if (conn_params
->immediate_data
)
297 max_unsolicited_data
=
298 min(conn_params
->first_burst_length
,
299 conn_params
->max_send_pdu_length
);
301 context
->ustorm_ag_context
.exp_data_acked
=
302 cpu_to_le32(total_ahs_length
== 0 ?
303 min(exp_data_transfer_len
,
304 max_unsolicited_data
) :
305 ((u32
)(total_ahs_length
+
306 ISCSI_AHS_CNTL_SIZE
)));
308 case ISCSI_TASK_TYPE_TARGET_READ
:
309 val
= cpu_to_le32(exp_data_transfer_len
);
310 context
->ustorm_ag_context
.exp_data_acked
= val
;
312 case ISCSI_TASK_TYPE_INITIATOR_READ
:
313 context
->ustorm_ag_context
.exp_data_acked
=
314 cpu_to_le32((total_ahs_length
== 0 ? 0 :
316 ISCSI_AHS_CNTL_SIZE
));
318 case ISCSI_TASK_TYPE_TARGET_WRITE
:
319 val
= cpu_to_le32(task_size
);
320 context
->ustorm_ag_context
.exp_cont_len
= val
;
328 void init_rtdif_task_context(struct rdif_task_context
*rdif_context
,
329 struct tdif_task_context
*tdif_context
,
330 struct scsi_dif_task_params
*dif_task_params
,
331 enum iscsi_task_type task_type
)
335 if (!dif_task_params
->dif_on_network
|| !dif_task_params
->dif_on_host
)
338 if (task_type
== ISCSI_TASK_TYPE_TARGET_WRITE
||
339 task_type
== ISCSI_TASK_TYPE_INITIATOR_READ
) {
340 rdif_context
->app_tag_value
=
341 cpu_to_le16(dif_task_params
->application_tag
);
342 rdif_context
->partial_crc_value
= cpu_to_le16(0xffff);
343 val
= cpu_to_le32(dif_task_params
->initial_ref_tag
);
344 rdif_context
->initial_ref_tag
= val
;
345 rdif_context
->app_tag_mask
=
346 cpu_to_le16(dif_task_params
->application_tag_mask
);
347 SET_FIELD(rdif_context
->flags0
, RDIF_TASK_CONTEXT_CRC_SEED
,
348 dif_task_params
->crc_seed
? 1 : 0);
349 SET_FIELD(rdif_context
->flags0
,
350 RDIF_TASK_CONTEXT_HOST_GUARD_TYPE
,
351 dif_task_params
->host_guard_type
);
352 SET_FIELD(rdif_context
->flags0
,
353 RDIF_TASK_CONTEXT_PROTECTION_TYPE
,
354 dif_task_params
->protection_type
);
355 SET_FIELD(rdif_context
->flags0
,
356 RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID
, 1);
357 SET_FIELD(rdif_context
->flags0
,
358 RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST
,
359 dif_task_params
->keep_ref_tag_const
? 1 : 0);
360 SET_FIELD(rdif_context
->flags1
,
361 RDIF_TASK_CONTEXT_VALIDATE_APP_TAG
,
362 (dif_task_params
->validate_app_tag
&&
363 dif_task_params
->dif_on_network
) ? 1 : 0);
364 SET_FIELD(rdif_context
->flags1
,
365 RDIF_TASK_CONTEXT_VALIDATE_GUARD
,
366 (dif_task_params
->validate_guard
&&
367 dif_task_params
->dif_on_network
) ? 1 : 0);
368 SET_FIELD(rdif_context
->flags1
,
369 RDIF_TASK_CONTEXT_VALIDATE_REF_TAG
,
370 (dif_task_params
->validate_ref_tag
&&
371 dif_task_params
->dif_on_network
) ? 1 : 0);
372 SET_FIELD(rdif_context
->flags1
,
373 RDIF_TASK_CONTEXT_HOST_INTERFACE
,
374 dif_task_params
->dif_on_host
? 1 : 0);
375 SET_FIELD(rdif_context
->flags1
,
376 RDIF_TASK_CONTEXT_NETWORK_INTERFACE
,
377 dif_task_params
->dif_on_network
? 1 : 0);
378 SET_FIELD(rdif_context
->flags1
,
379 RDIF_TASK_CONTEXT_FORWARD_GUARD
,
380 dif_task_params
->forward_guard
? 1 : 0);
381 SET_FIELD(rdif_context
->flags1
,
382 RDIF_TASK_CONTEXT_FORWARD_APP_TAG
,
383 dif_task_params
->forward_app_tag
? 1 : 0);
384 SET_FIELD(rdif_context
->flags1
,
385 RDIF_TASK_CONTEXT_FORWARD_REF_TAG
,
386 dif_task_params
->forward_ref_tag
? 1 : 0);
387 SET_FIELD(rdif_context
->flags1
,
388 RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK
,
389 dif_task_params
->forward_app_tag_with_mask
? 1 : 0);
390 SET_FIELD(rdif_context
->flags1
,
391 RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK
,
392 dif_task_params
->forward_ref_tag_with_mask
? 1 : 0);
393 SET_FIELD(rdif_context
->flags1
,
394 RDIF_TASK_CONTEXT_INTERVAL_SIZE
,
395 dif_task_params
->dif_block_size_log
- 9);
396 SET_FIELD(rdif_context
->state
,
397 RDIF_TASK_CONTEXT_REF_TAG_MASK
,
398 dif_task_params
->ref_tag_mask
);
399 SET_FIELD(rdif_context
->state
, RDIF_TASK_CONTEXT_IGNORE_APP_TAG
,
400 dif_task_params
->ignore_app_tag
);
403 if (task_type
== ISCSI_TASK_TYPE_TARGET_READ
||
404 task_type
== ISCSI_TASK_TYPE_INITIATOR_WRITE
) {
405 tdif_context
->app_tag_value
=
406 cpu_to_le16(dif_task_params
->application_tag
);
407 tdif_context
->partial_crc_value_b
=
408 cpu_to_le16(dif_task_params
->crc_seed
? 0xffff : 0x0000);
409 tdif_context
->partial_crc_value_a
=
410 cpu_to_le16(dif_task_params
->crc_seed
? 0xffff : 0x0000);
411 SET_FIELD(tdif_context
->flags0
, TDIF_TASK_CONTEXT_CRC_SEED
,
412 dif_task_params
->crc_seed
? 1 : 0);
414 SET_FIELD(tdif_context
->flags0
,
415 TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP
,
416 dif_task_params
->tx_dif_conn_err_en
? 1 : 0);
417 SET_FIELD(tdif_context
->flags1
, TDIF_TASK_CONTEXT_FORWARD_GUARD
,
418 dif_task_params
->forward_guard
? 1 : 0);
419 SET_FIELD(tdif_context
->flags1
,
420 TDIF_TASK_CONTEXT_FORWARD_APP_TAG
,
421 dif_task_params
->forward_app_tag
? 1 : 0);
422 SET_FIELD(tdif_context
->flags1
,
423 TDIF_TASK_CONTEXT_FORWARD_REF_TAG
,
424 dif_task_params
->forward_ref_tag
? 1 : 0);
425 SET_FIELD(tdif_context
->flags1
, TDIF_TASK_CONTEXT_INTERVAL_SIZE
,
426 dif_task_params
->dif_block_size_log
- 9);
427 SET_FIELD(tdif_context
->flags1
,
428 TDIF_TASK_CONTEXT_HOST_INTERFACE
,
429 dif_task_params
->dif_on_host
? 1 : 0);
430 SET_FIELD(tdif_context
->flags1
,
431 TDIF_TASK_CONTEXT_NETWORK_INTERFACE
,
432 dif_task_params
->dif_on_network
? 1 : 0);
433 val
= cpu_to_le32(dif_task_params
->initial_ref_tag
);
434 tdif_context
->initial_ref_tag
= val
;
435 tdif_context
->app_tag_mask
=
436 cpu_to_le16(dif_task_params
->application_tag_mask
);
437 SET_FIELD(tdif_context
->flags0
,
438 TDIF_TASK_CONTEXT_HOST_GUARD_TYPE
,
439 dif_task_params
->host_guard_type
);
440 SET_FIELD(tdif_context
->flags0
,
441 TDIF_TASK_CONTEXT_PROTECTION_TYPE
,
442 dif_task_params
->protection_type
);
443 SET_FIELD(tdif_context
->flags0
,
444 TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID
,
445 dif_task_params
->initial_ref_tag_is_valid
? 1 : 0);
446 SET_FIELD(tdif_context
->flags0
,
447 TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST
,
448 dif_task_params
->keep_ref_tag_const
? 1 : 0);
449 SET_FIELD(tdif_context
->flags1
,
450 TDIF_TASK_CONTEXT_VALIDATE_GUARD
,
451 (dif_task_params
->validate_guard
&&
452 dif_task_params
->dif_on_host
) ? 1 : 0);
453 SET_FIELD(tdif_context
->flags1
,
454 TDIF_TASK_CONTEXT_VALIDATE_APP_TAG
,
455 (dif_task_params
->validate_app_tag
&&
456 dif_task_params
->dif_on_host
) ? 1 : 0);
457 SET_FIELD(tdif_context
->flags1
,
458 TDIF_TASK_CONTEXT_VALIDATE_REF_TAG
,
459 (dif_task_params
->validate_ref_tag
&&
460 dif_task_params
->dif_on_host
) ? 1 : 0);
461 SET_FIELD(tdif_context
->flags1
,
462 TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK
,
463 dif_task_params
->forward_app_tag_with_mask
? 1 : 0);
464 SET_FIELD(tdif_context
->flags1
,
465 TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK
,
466 dif_task_params
->forward_ref_tag_with_mask
? 1 : 0);
467 SET_FIELD(tdif_context
->flags1
,
468 TDIF_TASK_CONTEXT_REF_TAG_MASK
,
469 dif_task_params
->ref_tag_mask
);
470 SET_FIELD(tdif_context
->flags0
,
471 TDIF_TASK_CONTEXT_IGNORE_APP_TAG
,
472 dif_task_params
->ignore_app_tag
? 1 : 0);
476 static void set_local_completion_context(struct e4_iscsi_task_context
*context
)
478 SET_FIELD(context
->ystorm_st_context
.state
.flags
,
479 YSTORM_ISCSI_TASK_STATE_LOCAL_COMP
, 1);
480 SET_FIELD(context
->ustorm_st_context
.flags
,
481 USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP
, 1);
484 static int init_rw_iscsi_task(struct iscsi_task_params
*task_params
,
485 enum iscsi_task_type task_type
,
486 struct iscsi_conn_params
*conn_params
,
487 struct iscsi_common_hdr
*pdu_header
,
488 struct scsi_sgl_task_params
*sgl_task_params
,
489 struct scsi_initiator_cmd_params
*cmd_params
,
490 struct scsi_dif_task_params
*dif_task_params
)
492 u32 exp_data_transfer_len
= conn_params
->max_burst_length
;
493 struct e4_iscsi_task_context
*cxt
;
494 bool slow_io
= false;
498 task_size
= calc_rw_task_size(task_params
, task_type
, sgl_task_params
,
501 init_default_iscsi_task(task_params
, (struct data_hdr
*)pdu_header
,
504 cxt
= task_params
->context
;
507 if (task_type
== ISCSI_TASK_TYPE_TARGET_READ
) {
508 set_local_completion_context(cxt
);
509 } else if (task_type
== ISCSI_TASK_TYPE_TARGET_WRITE
) {
510 val
= cpu_to_le32(task_size
+
511 ((struct iscsi_r2t_hdr
*)pdu_header
)->buffer_offset
);
512 cxt
->ystorm_st_context
.pdu_hdr
.r2t
.desired_data_trns_len
= val
;
513 cxt
->mstorm_st_context
.expected_itt
=
514 cpu_to_le32(pdu_header
->itt
);
516 val
= cpu_to_le32(task_size
);
517 cxt
->ystorm_st_context
.pdu_hdr
.cmd
.expected_transfer_length
=
519 init_initiator_rw_cdb_ystorm_context(&cxt
->ystorm_st_context
,
521 val
= cpu_to_le32(cmd_params
->sense_data_buffer_phys_addr
.lo
);
522 cxt
->mstorm_st_context
.sense_db
.lo
= val
;
524 val
= cpu_to_le32(cmd_params
->sense_data_buffer_phys_addr
.hi
);
525 cxt
->mstorm_st_context
.sense_db
.hi
= val
;
528 if (task_params
->tx_io_size
) {
529 init_dif_context_flags(&cxt
->ystorm_st_context
.state
.dif_flags
,
531 init_dif_context_flags(&cxt
->ustorm_st_context
.dif_flags
,
533 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
534 &cxt
->ystorm_st_context
.state
.data_desc
,
537 slow_io
= scsi_is_slow_sgl(sgl_task_params
->num_sges
,
538 sgl_task_params
->small_mid_sge
);
540 num_sges
= !slow_io
? min_t(u16
, sgl_task_params
->num_sges
,
541 (u16
)SCSI_NUM_SGES_SLOW_SGL_THR
) :
542 ISCSI_WQE_NUM_SGES_SLOWIO
;
545 SET_FIELD(cxt
->ystorm_st_context
.state
.flags
,
546 YSTORM_ISCSI_TASK_STATE_SLOW_IO
, 1);
548 } else if (task_params
->rx_io_size
) {
549 init_dif_context_flags(&cxt
->mstorm_st_context
.dif_flags
,
551 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
552 &cxt
->mstorm_st_context
.data_desc
,
554 num_sges
= !scsi_is_slow_sgl(sgl_task_params
->num_sges
,
555 sgl_task_params
->small_mid_sge
) ?
556 min_t(u16
, sgl_task_params
->num_sges
,
557 (u16
)SCSI_NUM_SGES_SLOW_SGL_THR
) :
558 ISCSI_WQE_NUM_SGES_SLOWIO
;
559 cxt
->mstorm_st_context
.rem_task_size
= cpu_to_le32(task_size
);
562 if (exp_data_transfer_len
> task_size
||
563 task_type
!= ISCSI_TASK_TYPE_TARGET_WRITE
)
564 exp_data_transfer_len
= task_size
;
566 init_ustorm_task_contexts(&task_params
->context
->ustorm_st_context
,
567 &task_params
->context
->ustorm_ag_context
,
568 task_size
, exp_data_transfer_len
, num_sges
,
570 dif_task_params
->tx_dif_conn_err_en
: false);
572 set_rw_exp_data_acked_and_cont_len(task_params
->context
, conn_params
,
573 task_type
, task_size
,
574 exp_data_transfer_len
,
575 GET_FIELD(pdu_header
->hdr_second_dword
,
576 ISCSI_CMD_HDR_TOTAL_AHS_LEN
));
579 init_rtdif_task_context(&task_params
->context
->rdif_context
,
580 &task_params
->context
->tdif_context
,
581 dif_task_params
, task_type
);
583 init_sqe(task_params
, sgl_task_params
, dif_task_params
, pdu_header
,
584 cmd_params
, task_type
, false);
589 int init_initiator_rw_iscsi_task(struct iscsi_task_params
*task_params
,
590 struct iscsi_conn_params
*conn_params
,
591 struct scsi_initiator_cmd_params
*cmd_params
,
592 struct iscsi_cmd_hdr
*cmd_header
,
593 struct scsi_sgl_task_params
*tx_sgl_params
,
594 struct scsi_sgl_task_params
*rx_sgl_params
,
595 struct scsi_dif_task_params
*dif_task_params
)
597 if (GET_FIELD(cmd_header
->flags_attr
, ISCSI_CMD_HDR_WRITE
))
598 return init_rw_iscsi_task(task_params
,
599 ISCSI_TASK_TYPE_INITIATOR_WRITE
,
601 (struct iscsi_common_hdr
*)cmd_header
,
602 tx_sgl_params
, cmd_params
,
604 else if (GET_FIELD(cmd_header
->flags_attr
, ISCSI_CMD_HDR_READ
) ||
605 (task_params
->rx_io_size
== 0 && task_params
->tx_io_size
== 0))
606 return init_rw_iscsi_task(task_params
,
607 ISCSI_TASK_TYPE_INITIATOR_READ
,
609 (struct iscsi_common_hdr
*)cmd_header
,
610 rx_sgl_params
, cmd_params
,
616 int init_initiator_login_request_task(struct iscsi_task_params
*task_params
,
617 struct iscsi_login_req_hdr
*login_header
,
618 struct scsi_sgl_task_params
*tx_params
,
619 struct scsi_sgl_task_params
*rx_params
)
621 struct e4_iscsi_task_context
*cxt
;
623 cxt
= task_params
->context
;
625 init_default_iscsi_task(task_params
,
626 (struct data_hdr
*)login_header
,
627 ISCSI_TASK_TYPE_MIDPATH
);
629 init_ustorm_task_contexts(&cxt
->ustorm_st_context
,
630 &cxt
->ustorm_ag_context
,
631 task_params
->rx_io_size
?
632 rx_params
->total_buffer_size
: 0,
633 task_params
->tx_io_size
?
634 tx_params
->total_buffer_size
: 0, 0,
637 if (task_params
->tx_io_size
)
638 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
639 &cxt
->ystorm_st_context
.state
.data_desc
,
642 if (task_params
->rx_io_size
)
643 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
644 &cxt
->mstorm_st_context
.data_desc
,
647 cxt
->mstorm_st_context
.rem_task_size
=
648 cpu_to_le32(task_params
->rx_io_size
?
649 rx_params
->total_buffer_size
: 0);
651 init_sqe(task_params
, tx_params
, NULL
,
652 (struct iscsi_common_hdr
*)login_header
, NULL
,
653 ISCSI_TASK_TYPE_MIDPATH
, false);
658 int init_initiator_nop_out_task(struct iscsi_task_params
*task_params
,
659 struct iscsi_nop_out_hdr
*nop_out_pdu_header
,
660 struct scsi_sgl_task_params
*tx_sgl_task_params
,
661 struct scsi_sgl_task_params
*rx_sgl_task_params
)
663 struct e4_iscsi_task_context
*cxt
;
665 cxt
= task_params
->context
;
667 init_default_iscsi_task(task_params
,
668 (struct data_hdr
*)nop_out_pdu_header
,
669 ISCSI_TASK_TYPE_MIDPATH
);
671 if (nop_out_pdu_header
->itt
== ISCSI_ITT_ALL_ONES
)
672 set_local_completion_context(task_params
->context
);
674 if (task_params
->tx_io_size
)
675 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
676 &cxt
->ystorm_st_context
.state
.data_desc
,
679 if (task_params
->rx_io_size
)
680 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
681 &cxt
->mstorm_st_context
.data_desc
,
684 init_ustorm_task_contexts(&cxt
->ustorm_st_context
,
685 &cxt
->ustorm_ag_context
,
686 task_params
->rx_io_size
?
687 rx_sgl_task_params
->total_buffer_size
: 0,
688 task_params
->tx_io_size
?
689 tx_sgl_task_params
->total_buffer_size
: 0,
692 cxt
->mstorm_st_context
.rem_task_size
=
693 cpu_to_le32(task_params
->rx_io_size
?
694 rx_sgl_task_params
->total_buffer_size
:
697 init_sqe(task_params
, tx_sgl_task_params
, NULL
,
698 (struct iscsi_common_hdr
*)nop_out_pdu_header
, NULL
,
699 ISCSI_TASK_TYPE_MIDPATH
, false);
704 int init_initiator_logout_request_task(struct iscsi_task_params
*task_params
,
705 struct iscsi_logout_req_hdr
*logout_hdr
,
706 struct scsi_sgl_task_params
*tx_params
,
707 struct scsi_sgl_task_params
*rx_params
)
709 struct e4_iscsi_task_context
*cxt
;
711 cxt
= task_params
->context
;
713 init_default_iscsi_task(task_params
,
714 (struct data_hdr
*)logout_hdr
,
715 ISCSI_TASK_TYPE_MIDPATH
);
717 if (task_params
->tx_io_size
)
718 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
719 &cxt
->ystorm_st_context
.state
.data_desc
,
722 if (task_params
->rx_io_size
)
723 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
724 &cxt
->mstorm_st_context
.data_desc
,
727 init_ustorm_task_contexts(&cxt
->ustorm_st_context
,
728 &cxt
->ustorm_ag_context
,
729 task_params
->rx_io_size
?
730 rx_params
->total_buffer_size
: 0,
731 task_params
->tx_io_size
?
732 tx_params
->total_buffer_size
: 0,
735 cxt
->mstorm_st_context
.rem_task_size
=
736 cpu_to_le32(task_params
->rx_io_size
?
737 rx_params
->total_buffer_size
: 0);
739 init_sqe(task_params
, tx_params
, NULL
,
740 (struct iscsi_common_hdr
*)logout_hdr
, NULL
,
741 ISCSI_TASK_TYPE_MIDPATH
, false);
746 int init_initiator_tmf_request_task(struct iscsi_task_params
*task_params
,
747 struct iscsi_tmf_request_hdr
*tmf_header
)
749 init_default_iscsi_task(task_params
, (struct data_hdr
*)tmf_header
,
750 ISCSI_TASK_TYPE_MIDPATH
);
752 init_sqe(task_params
, NULL
, NULL
,
753 (struct iscsi_common_hdr
*)tmf_header
, NULL
,
754 ISCSI_TASK_TYPE_MIDPATH
, false);
759 int init_initiator_text_request_task(struct iscsi_task_params
*task_params
,
760 struct iscsi_text_request_hdr
*text_header
,
761 struct scsi_sgl_task_params
*tx_params
,
762 struct scsi_sgl_task_params
*rx_params
)
764 struct e4_iscsi_task_context
*cxt
;
766 cxt
= task_params
->context
;
768 init_default_iscsi_task(task_params
,
769 (struct data_hdr
*)text_header
,
770 ISCSI_TASK_TYPE_MIDPATH
);
772 if (task_params
->tx_io_size
)
773 init_scsi_sgl_context(&cxt
->ystorm_st_context
.state
.sgl_params
,
774 &cxt
->ystorm_st_context
.state
.data_desc
,
777 if (task_params
->rx_io_size
)
778 init_scsi_sgl_context(&cxt
->mstorm_st_context
.sgl_params
,
779 &cxt
->mstorm_st_context
.data_desc
,
782 cxt
->mstorm_st_context
.rem_task_size
=
783 cpu_to_le32(task_params
->rx_io_size
?
784 rx_params
->total_buffer_size
: 0);
786 init_ustorm_task_contexts(&cxt
->ustorm_st_context
,
787 &cxt
->ustorm_ag_context
,
788 task_params
->rx_io_size
?
789 rx_params
->total_buffer_size
: 0,
790 task_params
->tx_io_size
?
791 tx_params
->total_buffer_size
: 0, 0, 0);
793 init_sqe(task_params
, tx_params
, NULL
,
794 (struct iscsi_common_hdr
*)text_header
, NULL
,
795 ISCSI_TASK_TYPE_MIDPATH
, false);
800 int init_cleanup_task(struct iscsi_task_params
*task_params
)
802 init_sqe(task_params
, NULL
, NULL
, NULL
, NULL
, ISCSI_TASK_TYPE_MIDPATH
,