// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static int qla_start_scsi_type6(srb_t *sp);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
    uint16_t cflags;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct scsi_qla_host *vha = sp->vha;

    cflags = 0;

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cflags = CF_WRITE;
        vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        vha->qla_stats.output_requests++;
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cflags = CF_READ;
        vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        vha->qla_stats.input_requests++;
    }
    return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 3) {
        iocbs += (dsds - 3) / 7;
        if ((dsds - 3) % 7)
            iocbs++;
    }
    return (iocbs);
}
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 2) {
        iocbs += (dsds - 2) / 5;
        if ((dsds - 2) % 5)
            iocbs++;
    }
    return (iocbs);
}
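/*
 * Illustrative worked example (not part of the original driver code):
 * with dsds = 12 on a 64-bit capable ISP, the Command Type 3 IOCB holds
 * the first 2 descriptors and each Continuation Type 1 IOCB holds 5
 * more, so iocbs = 1 + (12 - 2) / 5 = 3, with one extra IOCB added
 * whenever the division leaves a remainder.
 */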
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
    cont_entry_t *cont_pkt;
    struct req_que *req = vha->req;

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }

    cont_pkt = (cont_entry_t *)req->ring_ptr;

    /* Load packet defaults. */
    put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

    return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
    cont_a64_entry_t *cont_pkt;

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }

    cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

    /* Load packet defaults. */
    put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
        CONTINUE_A64_TYPE, &cont_pkt->entry_type);

    return (cont_pkt);
}
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);

    /* We always use DIFF Bundling for best performance */
    *fw_prot_opts = 0;

    /* Translate SCSI opcode to a protection opcode */
    switch (scsi_get_prot_op(cmd)) {
    case SCSI_PROT_READ_STRIP:
        *fw_prot_opts |= PO_MODE_DIF_REMOVE;
        break;
    case SCSI_PROT_WRITE_INSERT:
        *fw_prot_opts |= PO_MODE_DIF_INSERT;
        break;
    case SCSI_PROT_READ_INSERT:
        *fw_prot_opts |= PO_MODE_DIF_INSERT;
        break;
    case SCSI_PROT_WRITE_STRIP:
        *fw_prot_opts |= PO_MODE_DIF_REMOVE;
        break;
    case SCSI_PROT_READ_PASS:
    case SCSI_PROT_WRITE_PASS:
        if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
            *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
        else
            *fw_prot_opts |= PO_MODE_DIF_PASS;
        break;
    default:    /* Normal Request */
        *fw_prot_opts |= PO_MODE_DIF_PASS;
        break;
    }

    if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
        *fw_prot_opts |= PO_DISABLE_GUARD_CHECK;

    return scsi_prot_sg_count(cmd);
}
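/*
 * Illustrative mapping (not part of the original driver code): a
 * WRITE_PASS command carrying SCSI_PROT_IP_CHECKSUM in prot_flags gets
 * PO_MODE_DIF_TCP_CKSUM; if SCSI_PROT_GUARD_CHECK is clear it also gets
 * PO_DISABLE_GUARD_CHECK. The return value, scsi_prot_sg_count(cmd),
 * lets the caller decide whether protection DSDs must be mapped at all.
 */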
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
    uint16_t avail_dsds;
    struct dsd32 *cur_dsd;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *sg;
    int i;

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type 2 IOCB */
    put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = cpu_to_le32(0);
        return;
    }

    vha = sp->vha;
    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

    /* Three DSDs are available in the Command Type 2 IOCB */
    avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
    cur_dsd = cmd_pkt->dsd32;

    /* Load data segments */
    scsi_for_each_sg(cmd, sg, tot_dsds, i) {
        cont_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Seven DSDs are available in the Continuation
             * Type 0 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
            cur_dsd = cont_pkt->dsd;
            avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
        }

        append_dsd32(&cur_dsd, sg);
        avail_dsds--;
    }
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
    uint16_t avail_dsds;
    struct dsd64 *cur_dsd;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *sg;
    int i;

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type 3 IOCB */
    put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = cpu_to_le32(0);
        return;
    }

    vha = sp->vha;
    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

    /* Two DSDs are available in the Command Type 3 IOCB */
    avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
    cur_dsd = cmd_pkt->dsd64;

    /* Load data segments */
    scsi_for_each_sg(cmd, sg, tot_dsds, i) {
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
            cur_dsd = cont_pkt->dsd;
            avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
        }

        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }
}
/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
    uint32_t index, handle = req->current_outstanding_cmd;

    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            return handle;
    }

    return 0;
}
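/*
 * Illustrative walk (not part of the original driver code): with
 * num_outstanding_cmds = 4 and current_outstanding_cmd = 3, the loop
 * wraps from handle 3 back to 1 and then probes 2 and 3, returning the
 * first slot whose outstanding_cmds[] entry is NULL. Handle 0 is never
 * handed out; it doubles as the "array full" return value.
 */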
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    uint32_t *clr_ptr;
    uint32_t handle;
    cmd_entry_t *cmd_pkt;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct device_reg_2xxx __iomem *reg;
    struct qla_hw_data *ha;
    struct req_que *req;
    struct rsp_que *rsp;

    /* Setup device pointers. */
    vha = sp->vha;
    ha = vha->hw;
    reg = &ha->iobase->isp;
    cmd = GET_CMD_SP(sp);
    req = ha->req_q_map[0];
    rsp = ha->rsp_q_map[0];
    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
            QLA_SUCCESS)
            return (QLA_FUNCTION_FAILED);
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;

    /* Calculate the number of request entries needed. */
    req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
    if (req->cnt < (req_cnt + 2)) {
        cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
        /* If still no head room then bail out */
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;
    }

    /* Build command packet */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    cmd_pkt = (cmd_entry_t *)req->ring_ptr;
    cmd_pkt->handle = handle;
    /* Zero out remaining portion of packet. */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set target ID and LUN number */
    SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
    cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
    cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
    cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

    /* Build IOCB segments */
    ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
    rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));  /* PCI Posting. */

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla2x00_process_response_queue(rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return (QLA_SUCCESS);

queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (QLA_FUNCTION_FAILED);
}
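/*
 * Illustrative ring-space arithmetic (not part of the original driver
 * code): with req->length = 128, req->ring_index = 100 and a hardware
 * out pointer cnt = 20, the free count is 128 - (100 - 20) = 48
 * entries; if the out pointer were ahead of ring_index, it would be
 * cnt - ring_index. The req_cnt + 2 test keeps two entries of headroom
 * so a full ring is never mistaken for an empty one.
 */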
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
    struct qla_hw_data *ha = vha->hw;
    device_reg_t *reg = ISP_QUE_REG(ha, req->id);

    if (IS_P3P_TYPE(ha)) {
        qla82xx_start_iocbs(vha);
    } else {
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
            req->ring_index = 0;
            req->ring_ptr = req->ring;
        } else
            req->ring_ptr++;

        /* Set chip new ring index. */
        if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
            wrt_reg_dword(req->req_q_in, req->ring_index);
        } else if (IS_QLA83XX(ha)) {
            wrt_reg_dword(req->req_q_in, req->ring_index);
            rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
        } else if (IS_QLAFX00(ha)) {
            wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
            rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
            QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
        } else if (IS_FWI2_CAPABLE(ha)) {
            wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
            rd_reg_dword_relaxed(&reg->isp24.req_q_in);
        } else {
            wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
                req->ring_index);
            rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
        }
    }
}
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
    mrk_entry_t *mrk;
    struct mrk_entry_24xx *mrk24 = NULL;
    struct req_que *req = qpair->req;
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
    if (mrk == NULL) {
        ql_log(ql_log_warn, base_vha, 0x3026,
            "Failed to allocate Marker IOCB.\n");

        return (QLA_FUNCTION_FAILED);
    }

    mrk24 = (struct mrk_entry_24xx *)mrk;

    mrk->entry_type = MARKER_TYPE;
    mrk->modifier = type;
    if (type != MK_SYNC_ALL) {
        if (IS_FWI2_CAPABLE(ha)) {
            mrk24->nport_handle = cpu_to_le16(loop_id);
            int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
            host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
            mrk24->vp_index = vha->vp_idx;
        } else {
            SET_TARGET_ID(ha, mrk->target, loop_id);
            mrk->lun = cpu_to_le16((uint16_t)lun);
        }
    }

    if (IS_FWI2_CAPABLE(ha))
        mrk24->handle = QLA_SKIP_HANDLE;
    wmb();

    qla2x00_start_iocbs(vha, req);

    return (QLA_SUCCESS);
}
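/*
 * Illustrative usage (not part of the original driver code): the
 * submission paths below check vha->marker_needed, which is set after
 * resets, and issue qla2x00_marker(vha, ha->base_qpair, 0, 0,
 * MK_SYNC_ALL) before queuing new commands so the firmware first
 * synchronizes all outstanding exchanges.
 */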
int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
    int ret;
    unsigned long flags = 0;

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
    if (ha_locked) {
        if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                MK_SYNC_ALL) != QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
    } else {
        if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                MK_SYNC_ALL) != QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
    }
    vha->marker_needed = 0;

    return QLA_SUCCESS;
}
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
    struct dsd64 *cur_dsd = NULL, *next_dsd;
    struct scsi_cmnd *cmd;
    struct scatterlist *cur_seg;
    uint8_t avail_dsds;
    uint8_t first_iocb = 1;
    uint32_t dsd_list_len;
    struct dsd_dma *dsd_ptr;
    struct ct6_dsd *ctx;
    struct qla_qpair *qpair = sp->qpair;

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type 3 IOCB */
    put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE ||
        tot_dsds == 0) {
        cmd_pkt->byte_count = cpu_to_le32(0);
        return 0;
    }

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
        qpair->counters.output_bytes += scsi_bufflen(cmd);
        qpair->counters.output_requests++;
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
        qpair->counters.input_bytes += scsi_bufflen(cmd);
        qpair->counters.input_requests++;
    }

    cur_seg = scsi_sglist(cmd);
    ctx = &sp->u.scmd.ct6_ctx;

    while (tot_dsds) {
        avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
            QLA_DSDS_PER_IOCB : tot_dsds;
        tot_dsds -= avail_dsds;
        dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

        dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list);
        next_dsd = dsd_ptr->dsd_addr;
        list_del(&dsd_ptr->list);
        qpair->dsd_avail--;
        list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
        ctx->dsd_use_cnt++;
        qpair->dsd_inuse++;

        if (first_iocb) {
            first_iocb = 0;
            put_unaligned_le64(dsd_ptr->dsd_list_dma,
                &cmd_pkt->fcp_dsd.address);
            cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
        } else {
            put_unaligned_le64(dsd_ptr->dsd_list_dma,
                &cur_dsd->address);
            cur_dsd->length = cpu_to_le32(dsd_list_len);
            cur_dsd++;
        }
        cur_dsd = next_dsd;
        while (avail_dsds) {
            append_dsd64(&cur_dsd, cur_seg);
            cur_seg = sg_next(cur_seg);
            avail_dsds--;
        }
    }

    /* Null termination */
    cur_dsd->address = 0;
    cur_dsd->length = 0;
    cur_dsd++;
    cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
    return 0;
}
/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of dsd list needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
    uint16_t dsd_lists = 0;

    dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
    if (dsds % QLA_DSDS_PER_IOCB)
        dsd_lists++;
    return dsd_lists;
}
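/*
 * Illustrative worked example (not part of the original driver code,
 * and assuming the driver's QLA_DSDS_PER_IOCB value of 37): for
 * dsds = 100, 100 / 37 = 2 with a remainder, so three DSD lists are
 * needed.
 */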
/*
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
    uint16_t avail_dsds;
    struct dsd64 *cur_dsd;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *sg;
    int i;
    struct qla_qpair *qpair = sp->qpair;

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type 3 IOCB */
    put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = cpu_to_le32(0);
        return;
    }

    vha = sp->vha;

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
        qpair->counters.output_bytes += scsi_bufflen(cmd);
        qpair->counters.output_requests++;
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
        qpair->counters.input_bytes += scsi_bufflen(cmd);
        qpair->counters.input_requests++;
    }

    /* One DSD is available in the Command Type 3 IOCB */
    avail_dsds = 1;
    cur_dsd = &cmd_pkt->dsd;

    /* Load data segments */
    scsi_for_each_sg(cmd, sg, tot_dsds, i) {
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
            cur_dsd = cont_pkt->dsd;
            avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
        }

        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }
}
struct fw_dif_context {
    __le32 ref_tag;
    __le16 app_tag;
    uint8_t ref_tag_mask[4];    /* Validation/Replacement Mask*/
    uint8_t app_tag_mask[2];    /* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);

    pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));

    if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
        qla2x00_hba_err_chk_enabled(sp)) {
        pkt->ref_tag_mask[0] = 0xff;
        pkt->ref_tag_mask[1] = 0xff;
        pkt->ref_tag_mask[2] = 0xff;
        pkt->ref_tag_mask[3] = 0xff;
    }

    pkt->app_tag = cpu_to_le16(0);
    pkt->app_tag_mask[0] = 0x0;
    pkt->app_tag_mask[1] = 0x0;
}
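/*
 * Illustrative note (not part of the original driver code):
 * scsi_prot_ref_tag() supplies the 32-bit reference tag; setting every
 * ref_tag_mask byte to 0xff asks the firmware to check/replace all four
 * tag bytes, while the zeroed app_tag and app_tag_mask leave the
 * application tag unchecked.
 */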
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
    struct scatterlist *sg;
    uint32_t cumulative_partial, sg_len;
    dma_addr_t sg_dma_addr;

    if (sgx->num_bytes == sgx->tot_bytes)
        return 0;

    sg = sgx->cur_sg;
    cumulative_partial = sgx->tot_partial;

    sg_dma_addr = sg_dma_address(sg);
    sg_len = sg_dma_len(sg);

    sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

    if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
        sgx->dma_len = (blk_sz - cumulative_partial);
        sgx->tot_partial = 0;
        sgx->num_bytes += blk_sz;
        *partial = 0;
    } else {
        sgx->dma_len = sg_len - sgx->bytes_consumed;
        sgx->tot_partial += sgx->dma_len;
        *partial = 1;
    }

    sgx->bytes_consumed += sgx->dma_len;

    if (sg_len == sgx->bytes_consumed) {
        sg = sg_next(sg);
        sgx->num_sg++;
        sgx->cur_sg = sg;
        sgx->bytes_consumed = 0;
    }

    return 1;
}
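/*
 * Illustrative split (not part of the original driver code): with
 * blk_sz = 512 and scatterlist entries of 300 and 724 bytes, the first
 * call returns a 300-byte partial, the second returns the 212 bytes
 * completing the block (tot_partial resets to 0), and the next call
 * returns the remaining 512 bytes of the second entry as one full
 * protection interval.
 */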
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
    void *next_dsd;
    uint8_t avail_dsds = 0;
    uint32_t dsd_list_len;
    struct dsd_dma *dsd_ptr;
    struct scatterlist *sg_prot;
    struct dsd64 *cur_dsd = dsd;
    uint16_t used_dsds = tot_dsds;
    uint32_t prot_int; /* protection interval */
    uint32_t partial;
    struct qla2_sgx sgx;
    dma_addr_t sle_dma;
    uint32_t sle_dma_len, tot_prot_dma_len = 0;
    struct scsi_cmnd *cmd;

    memset(&sgx, 0, sizeof(struct qla2_sgx));
    if (sp) {
        cmd = GET_CMD_SP(sp);
        prot_int = scsi_prot_interval(cmd);

        sgx.tot_bytes = scsi_bufflen(cmd);
        sgx.cur_sg = scsi_sglist(cmd);
        sgx.sp = sp;

        sg_prot = scsi_prot_sglist(cmd);
    } else if (tc) {
        prot_int = tc->blk_sz;
        sgx.tot_bytes = tc->bufflen;
        sgx.cur_sg = tc->sg;
        sg_prot = tc->prot_sg;
    } else {
        BUG();
        return 1;
    }

    while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

        sle_dma = sgx.dma_addr;
        sle_dma_len = sgx.dma_len;
alloc_and_fill:
        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                QLA_DSDS_PER_IOCB : used_dsds;
            dsd_list_len = (avail_dsds + 1) * 12;
            used_dsds -= avail_dsds;

            /* allocate tracking DS */
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr)
                return 1;

            /* allocate new list */
            dsd_ptr->dsd_addr = next_dsd =
                dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                    &dsd_ptr->dsd_list_dma);

            if (!next_dsd) {
                /*
                 * Need to cleanup only this dsd_ptr, rest
                 * will be done by sp_free_dma()
                 */
                kfree(dsd_ptr);
                return 1;
            }

            if (sp) {
                list_add_tail(&dsd_ptr->list,
                    &sp->u.scmd.crc_ctx->dsd_list);

                sp->flags |= SRB_CRC_CTX_DSD_VALID;
            } else {
                list_add_tail(&dsd_ptr->list,
                    &(tc->ctx->dsd_list));
                *tc->ctx_dsd_alloced = 1;
            }

            /* add new list to cmd iocb or last list */
            put_unaligned_le64(dsd_ptr->dsd_list_dma,
                &cur_dsd->address);
            cur_dsd->length = cpu_to_le32(dsd_list_len);
            cur_dsd = next_dsd;
        }
        put_unaligned_le64(sle_dma, &cur_dsd->address);
        cur_dsd->length = cpu_to_le32(sle_dma_len);
        cur_dsd++;
        avail_dsds--;

        if (partial == 0) {
            /* Got a full protection interval */
            sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
            sle_dma_len = 8;

            tot_prot_dma_len += sle_dma_len;
            if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                tot_prot_dma_len = 0;
                sg_prot = sg_next(sg_prot);
            }

            partial = 1; /* So as to not re-enter this block */
            goto alloc_and_fill;
        }
    }
    /* Null termination */
    cur_dsd->address = 0;
    cur_dsd->length = 0;
    cur_dsd++;
    return 0;
}
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
    void *next_dsd;
    uint8_t avail_dsds = 0;
    uint32_t dsd_list_len;
    struct dsd_dma *dsd_ptr;
    struct scatterlist *sg, *sgl;
    struct dsd64 *cur_dsd = dsd;
    int i;
    uint16_t used_dsds = tot_dsds;
    struct scsi_cmnd *cmd;

    if (sp) {
        cmd = GET_CMD_SP(sp);
        sgl = scsi_sglist(cmd);
    } else if (tc) {
        sgl = tc->sg;
    } else {
        BUG();
        return 1;
    }

    for_each_sg(sgl, sg, tot_dsds, i) {
        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                QLA_DSDS_PER_IOCB : used_dsds;
            dsd_list_len = (avail_dsds + 1) * 12;
            used_dsds -= avail_dsds;

            /* allocate tracking DS */
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr)
                return 1;

            /* allocate new list */
            dsd_ptr->dsd_addr = next_dsd =
                dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                    &dsd_ptr->dsd_list_dma);

            if (!next_dsd) {
                /*
                 * Need to cleanup only this dsd_ptr, rest
                 * will be done by sp_free_dma()
                 */
                kfree(dsd_ptr);
                return 1;
            }

            if (sp) {
                list_add_tail(&dsd_ptr->list,
                    &sp->u.scmd.crc_ctx->dsd_list);

                sp->flags |= SRB_CRC_CTX_DSD_VALID;
            } else {
                list_add_tail(&dsd_ptr->list,
                    &(tc->ctx->dsd_list));
                *tc->ctx_dsd_alloced = 1;
            }

            /* add new list to cmd iocb or last list */
            put_unaligned_le64(dsd_ptr->dsd_list_dma,
                &cur_dsd->address);
            cur_dsd->length = cpu_to_le32(dsd_list_len);
            cur_dsd = next_dsd;
        }
        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }
    /* Null termination */
    cur_dsd->address = 0;
    cur_dsd->length = 0;
    cur_dsd++;
    return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
    struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
    struct scatterlist *sg, *sgl;
    struct crc_context *difctx = NULL;
    struct scsi_qla_host *vha;
    uint dsd_list_len;
    uint avail_dsds = 0;
    uint used_dsds = tot_dsds;
    bool dif_local_dma_alloc = false;
    bool direction_to_device = false;
    int i;

    if (sp) {
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        sgl = scsi_prot_sglist(cmd);
        vha = sp->vha;
        difctx = sp->u.scmd.crc_ctx;
        direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
            "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
            __func__, cmd, difctx, sp);
    } else if (tc) {
        vha = tc->vha;
        sgl = tc->prot_sg;
        difctx = tc->ctx;
        direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
    } else {
        BUG();
        return 1;
    }

    ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
        "%s: enter (write=%u)\n", __func__, direction_to_device);

    /* if initiator doing write or target doing read */
    if (direction_to_device) {
        for_each_sg(sgl, sg, tot_dsds, i) {
            u64 sle_phys = sg_phys(sg);

            /* If SGE addr + len flips bits in upper 32-bits */
            if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
                    "%s: page boundary crossing (phys=%llx len=%x)\n",
                    __func__, sle_phys, sg->length);

                if (difctx) {
                    ha->dif_bundle_crossed_pages++;
                    dif_local_dma_alloc = true;
                } else {
                    ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                        vha, 0xe022,
                        "%s: difctx pointer is NULL\n",
                        __func__);
                }
                break;
            }
        }
        ha->dif_bundle_writes++;
    } else {
        ha->dif_bundle_reads++;
    }

    if (ql2xdifbundlinginternalbuffers)
        dif_local_dma_alloc = direction_to_device;

    if (dif_local_dma_alloc) {
        u32 track_difbundl_buf = 0;
        u32 ldma_sg_len = 0;
        u8 ldma_needed = 1;

        difctx->no_dif_bundl = 0;
        difctx->dif_bundl_len = 0;

        /* Track DSD buffers */
        INIT_LIST_HEAD(&difctx->ldif_dsd_list);
        /* Track local DMA buffers */
        INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

        for_each_sg(sgl, sg, tot_dsds, i) {
            u32 sglen = sg_dma_len(sg);

            ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
                "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
                __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
                difctx->dif_bundl_len, ldma_needed);

            while (sglen) {
                u32 xfrlen = 0;

                if (ldma_needed) {
                    /*
                     * Allocate list item to store
                     * the DMA buffers
                     */
                    dsd_ptr = kzalloc(sizeof(*dsd_ptr),
                        GFP_ATOMIC);
                    if (!dsd_ptr) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe024,
                            "%s: failed alloc dsd_ptr\n",
                            __func__);
                        return 1;
                    }
                    ha->dif_bundle_kallocs++;

                    /* allocate dma buffer */
                    dsd_ptr->dsd_addr = dma_pool_alloc
                        (ha->dif_bundl_pool, GFP_ATOMIC,
                         &dsd_ptr->dsd_list_dma);
                    if (!dsd_ptr->dsd_addr) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe024,
                            "%s: failed alloc ->dsd_ptr\n",
                            __func__);
                        /*
                         * need to cleanup only this
                         * dsd_ptr rest will be done
                         * by sp_free_dma()
                         */
                        kfree(dsd_ptr);
                        ha->dif_bundle_kallocs--;
                        return 1;
                    }
                    ha->dif_bundle_dma_allocs++;
                    ldma_needed = 0;
                    difctx->no_dif_bundl++;
                    list_add_tail(&dsd_ptr->list,
                        &difctx->ldif_dma_hndl_list);
                }

                /* xfrlen is min of dma pool size and sglen */
                xfrlen = (sglen >
                    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
                    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
                    sglen;

                /* replace with local allocated dma buffer */
                sg_pcopy_to_buffer(sgl, sg_nents(sgl),
                    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
                    difctx->dif_bundl_len);
                difctx->dif_bundl_len += xfrlen;
                sglen -= xfrlen;
                ldma_sg_len += xfrlen;
                if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
                    sg_is_last(sg)) {
                    ldma_needed = 1;
                    ldma_sg_len = 0;
                }
            }
        }

        track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
            "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
            difctx->dif_bundl_len, difctx->no_dif_bundl,
            track_difbundl_buf);

        if (sp)
            sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
        else
            tc->prot_flags = DIF_BUNDL_DMA_VALID;

        list_for_each_entry_safe(dif_dsd, nxt_dsd,
            &difctx->ldif_dma_hndl_list, list) {
            u32 sglen = (difctx->dif_bundl_len >
                DIF_BUNDLING_DMA_POOL_SIZE) ?
                DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

            BUG_ON(track_difbundl_buf == 0);

            /* Allocate additional continuation packets? */
            if (avail_dsds == 0) {
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
                    0xe024,
                    "%s: adding continuation iocb's\n",
                    __func__);
                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : used_dsds;
                dsd_list_len = (avail_dsds + 1) * 12;
                used_dsds -= avail_dsds;

                /* allocate tracking DS */
                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                if (!dsd_ptr) {
                    ql_dbg(ql_dbg_tgt, vha, 0xe026,
                        "%s: failed alloc dsd_ptr\n",
                        __func__);
                    return 1;
                }
                ha->dif_bundle_kallocs++;

                difctx->no_ldif_dsd++;
                /* allocate new list */
                dsd_ptr->dsd_addr =
                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                        &dsd_ptr->dsd_list_dma);
                if (!dsd_ptr->dsd_addr) {
                    ql_dbg(ql_dbg_tgt, vha, 0xe026,
                        "%s: failed alloc ->dsd_addr\n",
                        __func__);
                    /*
                     * need to cleanup only this dsd_ptr
                     * rest will be done by sp_free_dma()
                     */
                    kfree(dsd_ptr);
                    ha->dif_bundle_kallocs--;
                    return 1;
                }
                ha->dif_bundle_dma_allocs++;

                if (sp) {
                    list_add_tail(&dsd_ptr->list,
                        &difctx->ldif_dsd_list);
                    sp->flags |= SRB_CRC_CTX_DSD_VALID;
                } else {
                    list_add_tail(&dsd_ptr->list,
                        &difctx->ldif_dsd_list);
                    tc->ctx_dsd_alloced = 1;
                }

                /* add new list to cmd iocb or last list */
                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                    &cur_dsd->address);
                cur_dsd->length = cpu_to_le32(dsd_list_len);
                cur_dsd = dsd_ptr->dsd_addr;
            }
            put_unaligned_le64(dif_dsd->dsd_list_dma,
                &cur_dsd->address);
            cur_dsd->length = cpu_to_le32(sglen);
            cur_dsd++;
            avail_dsds--;
            difctx->dif_bundl_len -= sglen;
            track_difbundl_buf--;
        }

        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
            "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
            difctx->no_ldif_dsd, difctx->no_dif_bundl);
    } else {
        for_each_sg(sgl, sg, tot_dsds, i) {
            /* Allocate additional continuation packets? */
            if (avail_dsds == 0) {
                avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : used_dsds;
                dsd_list_len = (avail_dsds + 1) * 12;
                used_dsds -= avail_dsds;

                /* allocate tracking DS */
                dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
                if (!dsd_ptr) {
                    ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                        vha, 0xe027,
                        "%s: failed alloc dsd_dma...\n",
                        __func__);
                    return 1;
                }

                /* allocate new list */
                dsd_ptr->dsd_addr =
                    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                        &dsd_ptr->dsd_list_dma);
                if (!dsd_ptr->dsd_addr) {
                    /* need to cleanup only this dsd_ptr */
                    /* rest will be done by sp_free_dma() */
                    kfree(dsd_ptr);
                    return 1;
                }

                if (sp) {
                    list_add_tail(&dsd_ptr->list,
                        &difctx->dsd_list);
                    sp->flags |= SRB_CRC_CTX_DSD_VALID;
                } else {
                    list_add_tail(&dsd_ptr->list,
                        &difctx->dsd_list);
                    tc->ctx_dsd_alloced = 1;
                }

                /* add new list to cmd iocb or last list */
                put_unaligned_le64(dsd_ptr->dsd_list_dma,
                    &cur_dsd->address);
                cur_dsd->length = cpu_to_le32(dsd_list_len);
                cur_dsd = dsd_ptr->dsd_addr;
            }
            append_dsd64(&cur_dsd, sg);
            avail_dsds--;
        }
    }
    /* Null termination */
    cur_dsd->address = 0;
    cur_dsd->length = 0;
    cur_dsd++;
    return 0;
}
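/*
 * Illustrative sizing note (not part of the original driver code): when
 * protection data is bounced through the local DMA pool, each bounce
 * buffer holds at most DIF_BUNDLING_DMA_POOL_SIZE bytes, so a
 * dif_bundl_len of 3 * DIF_BUNDLING_DMA_POOL_SIZE + 100 is carved into
 * four buffers, and the ldif walk above turns each buffer into one DSD
 * entry.
 */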
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
    struct dsd64 *cur_dsd;
    __be32 *fcp_dl;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    uint32_t total_bytes = 0;
    uint32_t data_bytes;
    uint32_t dif_bytes;
    uint8_t bundling = 1;
    uint16_t blk_size;
    struct crc_context *crc_ctx_pkt = NULL;
    struct qla_hw_data *ha;
    uint8_t additional_fcpcdb_len;
    uint16_t fcp_cmnd_len;
    struct fcp_cmnd *fcp_cmnd;
    dma_addr_t crc_ctx_dma;

    cmd = GET_CMD_SP(sp);

    /* Update entry type to indicate Command Type CRC_2 IOCB */
    put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

    vha = sp->vha;
    ha = vha->hw;

    /* No data transfer */
    data_bytes = scsi_bufflen(cmd);
    if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = cpu_to_le32(0);
        return QLA_SUCCESS;
    }

    cmd_pkt->vp_index = sp->vha->vp_idx;

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cmd_pkt->control_flags =
            cpu_to_le16(CF_WRITE_DATA);
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cmd_pkt->control_flags =
            cpu_to_le16(CF_READ_DATA);
    }

    if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
        (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
        (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
        (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
        bundling = 0;

    /* Allocate CRC context from global pool */
    crc_ctx_pkt = sp->u.scmd.crc_ctx =
        dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

    if (!crc_ctx_pkt)
        goto crc_queuing_error;

    crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

    sp->flags |= SRB_CRC_CTX_DMA_VALID;

    /* Set handle */
    crc_ctx_pkt->handle = cmd_pkt->handle;

    INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

    qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
        &crc_ctx_pkt->ref_tag, tot_prot_dsds);

    put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
    cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

    /* Determine SCSI command length -- align to 4 byte boundary */
    if (cmd->cmd_len > 16) {
        additional_fcpcdb_len = cmd->cmd_len - 16;
        if ((cmd->cmd_len % 4) != 0) {
            /* SCSI cmd > 16 bytes must be multiple of 4 */
            goto crc_queuing_error;
        }
        fcp_cmnd_len = 12 + cmd->cmd_len + 4;
    } else {
        additional_fcpcdb_len = 0;
        fcp_cmnd_len = 12 + 16 + 4;
    }

    fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

    fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
    if (cmd->sc_data_direction == DMA_TO_DEVICE)
        fcp_cmnd->additional_cdb_len |= 1;
    else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
        fcp_cmnd->additional_cdb_len |= 2;

    int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
    memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
    cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
    put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
        &cmd_pkt->fcp_cmnd_dseg_address);
    fcp_cmnd->task_management = 0;
    fcp_cmnd->task_attribute = TSK_SIMPLE;

    cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

    /* Compute dif len and adjust data len to include protection */
    dif_bytes = 0;
    blk_size = cmd->device->sector_size;
    dif_bytes = (data_bytes / blk_size) * 8;

    switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
    case SCSI_PROT_READ_INSERT:
    case SCSI_PROT_WRITE_STRIP:
        total_bytes = data_bytes;
        data_bytes += dif_bytes;
        break;

    case SCSI_PROT_READ_STRIP:
    case SCSI_PROT_WRITE_INSERT:
    case SCSI_PROT_READ_PASS:
    case SCSI_PROT_WRITE_PASS:
        total_bytes = data_bytes + dif_bytes;
        break;
    default:
        BUG();
    }

    if (!qla2x00_hba_err_chk_enabled(sp))
        fw_prot_opts |= 0x10; /* Disable Guard tag checking */
    /* HBA error checking enabled */
    else if (IS_PI_UNINIT_CAPABLE(ha)) {
        if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
            || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
            SCSI_PROT_DIF_TYPE2))
            fw_prot_opts |= BIT_10;
        else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
            SCSI_PROT_DIF_TYPE3)
            fw_prot_opts |= BIT_11;
    }

    if (!bundling) {
        cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
    } else {
        /*
         * Configure Bundling if we need to fetch interleaving
         * protection PCI accesses
         */
        fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
        crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
        crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
            tot_prot_dsds);
        cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
    }

    /* Finish the common fields of CRC pkt */
    crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
    crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
    crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
    crc_ctx_pkt->guard_seed = cpu_to_le16(0);
    /* Fibre channel byte count */
    cmd_pkt->byte_count = cpu_to_le32(total_bytes);
    fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
        additional_fcpcdb_len);
    *fcp_dl = htonl(total_bytes);

    if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = cpu_to_le32(0);
        return QLA_SUCCESS;
    }
    /* Walks data segments */

    cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

    if (!bundling && tot_prot_dsds) {
        if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
            cur_dsd, tot_dsds, NULL))
            goto crc_queuing_error;
    } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
        (tot_dsds - tot_prot_dsds), NULL))
        goto crc_queuing_error;

    if (bundling && tot_prot_dsds) {
        /* Walks dif segments */
        cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
        cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
        if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
            tot_prot_dsds, NULL))
            goto crc_queuing_error;
    }
    return QLA_SUCCESS;

crc_queuing_error:
    /* Cleanup will be performed by the caller */

    return QLA_FUNCTION_FAILED;
}
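/*
 * Illustrative worked example (not part of the original driver code):
 * for a 64 KiB transfer on a 512-byte-sector device, dif_bytes =
 * (65536 / 512) * 8 = 1024 bytes of T10 PI, so a WRITE_PASS command
 * reports total_bytes = 65536 + 1024 = 66560 in fcp_dl, while a
 * WRITE_STRIP command keeps total_bytes = 65536 on the wire and only
 * data_bytes grows by the PI that the HBA strips.
 */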
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t handle;
    struct cmd_type_7 *cmd_pkt;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct req_que *req = NULL;
    struct rsp_que *rsp;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct scsi_qla_host *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;

    if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
        return qla28xx_start_scsi_edif(sp);

    /* Setup device pointers. */
    req = vha->req;
    rsp = req->rsp;

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
            QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;
    req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

    sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
    sp->iores.exch_cnt = 1;
    sp->iores.iocb_cnt = req_cnt;
    if (qla_get_fw_resources(sp->qpair, &sp->iores))
        goto queuing_error;

    if (req->cnt < (req_cnt + 2)) {
        if (IS_SHADOW_REG_CAPABLE(ha)) {
            cnt = *req->out_ptr;
        } else {
            cnt = rd_reg_dword_relaxed(req->req_q_out);
            if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                goto queuing_error;
        }

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;
    }

    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
    cmd_pkt->handle = make_handle(req->id, handle);

    /* Zero out remaining portion of packet. */
    /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
    cmd_pkt->vp_index = sp->vha->vp_idx;

    int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    cmd_pkt->task = TSK_SIMPLE;

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
    host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

    cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

    /* Build IOCB segments */
    qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();
    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->qpair->cmd_cnt++;
    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    wrt_reg_dword(req->req_q_in, req->ring_index);

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_SUCCESS;

queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    qla_put_fw_resources(sp->qpair, &sp->iores);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t handle;
    uint16_t cnt;
    uint16_t req_cnt = 0;
    uint16_t tot_dsds;
    uint16_t tot_prot_dsds;
    uint16_t fw_prot_opts = 0;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct scsi_qla_host *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct cmd_type_crc_2 *cmd_pkt;
    uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

    /* Only process protection or >16 cdb in this routine */
    if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
        if (cmd->cmd_len <= 16)
            return qla24xx_start_scsi(sp);
        else
            return qla_start_scsi_type6(sp);
    }

    /* Setup device pointers. */
    req = vha->req;
    rsp = req->rsp;

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
            QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0)
        goto queuing_error;

    /* Compute number of required data segments */
    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
        else
            sp->flags |= SRB_DMA_VALID;

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
            struct qla2_sgx sgx;
            uint32_t partial;

            memset(&sgx, 0, sizeof(struct qla2_sgx));
            sgx.tot_bytes = scsi_bufflen(cmd);
            sgx.cur_sg = scsi_sglist(cmd);
            sgx.sp = sp;

            nseg = 0;
            while (qla24xx_get_one_block_sg(
                cmd->device->sector_size, &sgx, &partial))
                nseg++;
        }
    } else
        nseg = 0;

    /* number of required data segments */
    tot_dsds = nseg;

    /* Compute number of required protection segments */
    if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
            scsi_prot_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
        else
            sp->flags |= SRB_CRC_PROT_DMA_VALID;

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
            nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
        }
    } else {
        nseg = 0;
    }

    req_cnt = 1;
    /* Total Data and protection sg segment(s) */
    tot_prot_dsds = nseg;
    tot_dsds += nseg;

    sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
    sp->iores.exch_cnt = 1;
    sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
    if (qla_get_fw_resources(sp->qpair, &sp->iores))
        goto queuing_error;

    if (req->cnt < (req_cnt + 2)) {
        if (IS_SHADOW_REG_CAPABLE(ha)) {
            cnt = *req->out_ptr;
        } else {
            cnt = rd_reg_dword_relaxed(req->req_q_out);
            if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                goto queuing_error;
        }
        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;
    }

    status |= QDSS_GOT_Q_SPACE;

    /* Build header part of command packet (excluding the OPCODE). */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    /* Fill-in common area */
    cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
    cmd_pkt->handle = make_handle(req->id, handle);

    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

    int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    /* Total Data and protection segment(s) */
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Build IOCB segments and adjust for data protection segments */
    if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
        req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
        QLA_SUCCESS)
        goto queuing_error;

    cmd_pkt->entry_count = (uint8_t)req_cnt;
    /* Specify response queue number where completion should happen */
    cmd_pkt->entry_status = (uint8_t) rsp->id;
    cmd_pkt->timeout = cpu_to_le16(0);
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->qpair->cmd_cnt++;
    /* Set chip new ring index. */
    wrt_reg_dword(req->req_q_in, req->ring_index);

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_SUCCESS;

queuing_error:
    if (status & QDSS_GOT_Q_SPACE) {
        req->outstanding_cmds[handle] = NULL;
        req->cnt += req_cnt;
    }
    /* Cleanup will be performed by the caller (queuecommand) */

    qla_put_fw_resources(sp->qpair, &sp->iores);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}
/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t handle;
    struct cmd_type_7 *cmd_pkt;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct req_que *req = NULL;
    struct rsp_que *rsp;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_qpair *qpair = sp->qpair;

    if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
        return qla28xx_start_scsi_edif(sp);

    /* Acquire qpair specific lock */
    spin_lock_irqsave(&qpair->qp_lock, flags);

    /* Setup qpair pointers */
    req = qpair->req;
    rsp = qpair->rsp;

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
            QLA_SUCCESS) {
            spin_unlock_irqrestore(&qpair->qp_lock, flags);
            return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;
    }

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;
    req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

    sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
    sp->iores.exch_cnt = 1;
    sp->iores.iocb_cnt = req_cnt;
    if (qla_get_fw_resources(sp->qpair, &sp->iores))
        goto queuing_error;

    if (req->cnt < (req_cnt + 2)) {
        if (IS_SHADOW_REG_CAPABLE(ha)) {
            cnt = *req->out_ptr;
        } else {
            cnt = rd_reg_dword_relaxed(req->req_q_out);
            if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                goto queuing_error;
        }

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;
    }

    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
    cmd_pkt->handle = make_handle(req->id, handle);

    /* Zero out remaining portion of packet. */
    /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
    cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

    int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    cmd_pkt->task = TSK_SIMPLE;

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
    host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

    cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

    /* Build IOCB segments */
    qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();
    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->qpair->cmd_cnt++;
    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    wrt_reg_dword(req->req_q_in, req->ring_index);

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&qpair->qp_lock, flags);

    return QLA_SUCCESS;

queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    qla_put_fw_resources(sp->qpair, &sp->iores);
    spin_unlock_irqrestore(&qpair->qp_lock, flags);

    return QLA_FUNCTION_FAILED;
}
/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t handle;
    uint16_t cnt;
    uint16_t req_cnt = 0;
    uint16_t tot_dsds;
    uint16_t tot_prot_dsds;
    uint16_t fw_prot_opts = 0;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct cmd_type_crc_2 *cmd_pkt;
    uint32_t status = 0;
    struct qla_qpair *qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

    /* Check for host side state */
    if (!qpair->online) {
        cmd->result = DID_NO_CONNECT << 16;
        return QLA_INTERFACE_ERROR;
    }

    if (!qpair->difdix_supported &&
        scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
        cmd->result = DID_NO_CONNECT << 16;
        return QLA_INTERFACE_ERROR;
    }

    /* Only process protection or >16 cdb in this routine */
    if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
        if (cmd->cmd_len <= 16)
            return qla2xxx_start_scsi_mq(sp);
        else
            return qla_start_scsi_type6(sp);
    }

    spin_lock_irqsave(&qpair->qp_lock, flags);

    /* Setup qpair pointers */
    rsp = qpair->rsp;
    req = qpair->req;

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
            QLA_SUCCESS) {
            spin_unlock_irqrestore(&qpair->qp_lock, flags);
            return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;
    }

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0)
        goto queuing_error;

    /* Compute number of required data segments */
    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
        else
            sp->flags |= SRB_DMA_VALID;

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
            struct qla2_sgx sgx;
            uint32_t partial;

            memset(&sgx, 0, sizeof(struct qla2_sgx));
            sgx.tot_bytes = scsi_bufflen(cmd);
            sgx.cur_sg = scsi_sglist(cmd);
            sgx.sp = sp;

            nseg = 0;
            while (qla24xx_get_one_block_sg(
                cmd->device->sector_size, &sgx, &partial))
                nseg++;
        }
    } else
        nseg = 0;

    /* number of required data segments */
    tot_dsds = nseg;

    /* Compute number of required protection segments */
    if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
            scsi_prot_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
        else
            sp->flags |= SRB_CRC_PROT_DMA_VALID;

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
            nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
        }
    } else {
        nseg = 0;
    }

    req_cnt = 1;
    /* Total Data and protection sg segment(s) */
    tot_prot_dsds = nseg;
    tot_dsds += nseg;

    sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
    sp->iores.exch_cnt = 1;
    sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
    if (qla_get_fw_resources(sp->qpair, &sp->iores))
        goto queuing_error;

    if (req->cnt < (req_cnt + 2)) {
        if (IS_SHADOW_REG_CAPABLE(ha)) {
            cnt = *req->out_ptr;
        } else {
            cnt = rd_reg_dword_relaxed(req->req_q_out);
            if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                goto queuing_error;
        }

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;
    }

    status |= QDSS_GOT_Q_SPACE;

    /* Build header part of command packet (excluding the OPCODE). */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    /* Fill-in common area */
    cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
    cmd_pkt->handle = make_handle(req->id, handle);

    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

    int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    /* Total Data and protection segment(s) */
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Build IOCB segments and adjust for data protection segments */
    if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
        req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
        QLA_SUCCESS)
        goto queuing_error;

    cmd_pkt->entry_count = (uint8_t)req_cnt;
    cmd_pkt->timeout = cpu_to_le16(0);
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->qpair->cmd_cnt++;
    /* Set chip new ring index. */
    wrt_reg_dword(req->req_q_in, req->ring_index);

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&qpair->qp_lock, flags);

    return QLA_SUCCESS;

queuing_error:
    if (status & QDSS_GOT_Q_SPACE) {
        req->outstanding_cmds[handle] = NULL;
        req->cnt += req_cnt;
    }
    /* Cleanup will be performed by the caller (queuecommand) */

    qla_put_fw_resources(sp->qpair, &sp->iores);
    spin_unlock_irqrestore(&qpair->qp_lock, flags);

    return QLA_FUNCTION_FAILED;
}
2277 /* Generic Control-SRB manipulation functions. */
2279 /* hardware_lock assumed to be held. */
void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
    scsi_qla_host_t *vha = qpair->vha;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = qpair->req;
    device_reg_t *reg = ISP_QUE_REG(ha, req->id);
    uint32_t handle;
    request_t *pkt;
    uint16_t cnt, req_cnt;

    pkt = NULL;
    req_cnt = 1;
    handle = 0;

    if (sp && (sp->type != SRB_SCSI_CMD)) {
        /* Adjust entry-counts as needed. */
        req_cnt = sp->iocbs;
    }

    /* Check for room on request queue. */
    if (req->cnt < req_cnt + 2) {
        if (qpair->use_shadow_reg)
            cnt = *req->out_ptr;
        else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
            IS_QLA28XX(ha))
            cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
        else if (IS_P3P_TYPE(ha))
            cnt = rd_reg_dword(reg->isp82.req_q_out);
        else if (IS_FWI2_CAPABLE(ha))
            cnt = rd_reg_dword(&reg->isp24.req_q_out);
        else if (IS_QLAFX00(ha))
            cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
        else
            cnt = qla2x00_debounce_register(
                ISP_REQ_Q_OUT(ha, &reg->isp));

        if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
            qla_schedule_eeh_work(vha);
            return NULL;
        }

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
    }
    if (req->cnt < req_cnt + 2)
        goto queuing_error;

    if (sp) {
        handle = qla2xxx_get_next_handle(req);
        if (handle == 0) {
            ql_log(ql_log_warn, vha, 0x700b,
                "No room on outstanding cmd array.\n");
            goto queuing_error;
        }

        /* Prep command array. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
    }

    /* Prep packet */
    req->cnt -= req_cnt;
    pkt = req->ring_ptr;
    memset(pkt, 0, REQUEST_ENTRY_SIZE);
    if (IS_QLAFX00(ha)) {
        wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
        wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
    } else {
        pkt->entry_count = req_cnt;
        pkt->handle = handle;
    }

    return pkt;

queuing_error:
    qpair->tgt_counters.num_alloc_iocb_failed++;
    return pkt;
}
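/**
 * qla2x00_alloc_iocbs_ready() - Allocate an IOCB entry if the ISP is ready.
 * @qpair: queue pair to allocate from
 * @sp: SRB the entry is for, or NULL
 *
 * Returns NULL when an ISP reset is active; otherwise behaves like
 * __qla2x00_alloc_iocbs().
 */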
void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
    scsi_qla_host_t *vha = qpair->vha;

    if (qla2x00_reset_active(vha))
        return NULL;

    return __qla2x00_alloc_iocbs(qpair, sp);
}

void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
    return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}
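/**
 * qla24xx_prli_iocb() - Build a PRLI (process login) port IOCB.
 * @sp: SRB carrying the PRLI request
 * @logio: IOCB entry to fill in
 */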
static void
qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    struct srb_iocb *lio = &sp->u.iocb_cmd;

    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
    if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
        logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
        if (sp->vha->flags.nvme_first_burst)
            logio->io_parameter[0] =
                cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
        if (sp->vha->flags.nvme2_enabled) {
            /* Set service parameter BIT_7 for NVME CONF support */
            logio->io_parameter[0] |=
                cpu_to_le32(NVME_PRLI_SP_CONF);
            /* Set service parameter BIT_8 for SLER support */
            logio->io_parameter[0] |=
                cpu_to_le32(NVME_PRLI_SP_SLER);
            /* Set service parameter BIT_9 for PI control support */
            logio->io_parameter[0] |=
                cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
        }
    }

    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->vha->vp_idx;
}
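/**
 * qla24xx_login_iocb() - Build a PLOGI (or PRLI-only) port IOCB.
 * @sp: SRB carrying the login request
 * @logio: IOCB entry to fill in
 */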
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    struct srb_iocb *lio = &sp->u.iocb_cmd;

    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);

    if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
        logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
    } else {
        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
            logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
            logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
        if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
            logio->control_flags |=
                cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
            logio->io_parameter[0] =
                cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
        }
    }
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->vha->hw;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    uint16_t opts;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
    opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
    opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
    if (HAS_EXTENDED_IDS(ha)) {
        mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
        mbx->mb10 = cpu_to_le16(opts);
    } else {
        mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
    }
    mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
    mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
        sp->fcport->d_id.b.al_pa);
    mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    u16 control_flags = LCF_COMMAND_LOGO;

    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;

    if (sp->fcport->explicit_logout) {
        control_flags |= LCF_EXPL_LOGO | LCF_FREE_NPORT;
    } else {
        control_flags |= LCF_IMPL_LOGO;

        if (!sp->fcport->keep_nport_handle)
            control_flags |= LCF_FREE_NPORT;
    }

    logio->control_flags = cpu_to_le16(control_flags);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
    mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
        cpu_to_le16(sp->fcport->loop_id) :
        cpu_to_le16(sp->fcport->loop_id << 8);
    mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
    mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
        sp->fcport->d_id.b.al_pa);
    mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
    /* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
    if (HAS_EXTENDED_IDS(ha)) {
        mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
        mbx->mb10 = cpu_to_le16(BIT_0);
    } else {
        mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
    }
    mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
    mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
    mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
    mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
    mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}
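/**
 * qla24xx_tm_iocb() - Build a task-management IOCB.
 * @sp: SRB carrying the TM request
 * @tsk: IOCB entry to fill in
 *
 * The timeout is derived from the adapter R_A_TOV; the LUN field is only
 * populated for LUN-scoped functions (LUN reset and task-set operations).
 */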
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
    uint32_t flags;
    uint64_t lun;
    struct fc_port *fcport = sp->fcport;
    scsi_qla_host_t *vha = fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct srb_iocb *iocb = &sp->u.iocb_cmd;
    struct req_que *req = sp->qpair->req;

    flags = iocb->u.tmf.flags;
    lun = iocb->u.tmf.lun;

    tsk->entry_type = TSK_MGMT_IOCB_TYPE;
    tsk->entry_count = 1;
    tsk->handle = make_handle(req->id, tsk->handle);
    tsk->nport_handle = cpu_to_le16(fcport->loop_id);
    tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
    tsk->control_flags = cpu_to_le32(flags);
    tsk->port_id[0] = fcport->d_id.b.al_pa;
    tsk->port_id[1] = fcport->d_id.b.area;
    tsk->port_id[2] = fcport->d_id.b.domain;
    tsk->vp_index = fcport->vha->vp_idx;

    if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET |
        TCF_CLEAR_TASK_SET | TCF_CLEAR_ACA)) {
        int_to_scsilun(lun, &tsk->lun);
        host_to_fcp_swap((uint8_t *)&tsk->lun,
            sizeof(tsk->lun));
    }
}
void qla2x00_async_done(struct srb *sp, int res)
{
    if (del_timer(&sp->u.iocb_cmd.timer)) {
        /*
         * Successfully cancelled the timeout handler
         * ref: TMR
         */
        if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
            return;
    }
    sp->async_done(sp, res);
}
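/**
 * qla2x00_sp_release() - kref release callback for an SRB.
 * @kref: embedded kref of the SRB being released
 *
 * Frees CT passthrough DMA buffers that were not borrowed from
 * fcport->ct_desc.ct_sns, then hands the SRB to its ->free() method.
 */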
void qla2x00_sp_release(struct kref *kref)
{
    struct srb *sp = container_of(kref, struct srb, cmd_kref);
    struct scsi_qla_host *vha = sp->vha;

    switch (sp->type) {
    case SRB_CT_PTHRU_CMD:
        /* GPSC & GFPNID use fcport->ct_desc.ct_sns for both req & rsp */
        if (sp->u.iocb_cmd.u.ctarg.req &&
            (!sp->fcport ||
             sp->u.iocb_cmd.u.ctarg.req != sp->fcport->ct_desc.ct_sns)) {
            dma_free_coherent(&vha->hw->pdev->dev,
                sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                sp->u.iocb_cmd.u.ctarg.req,
                sp->u.iocb_cmd.u.ctarg.req_dma);
            sp->u.iocb_cmd.u.ctarg.req = NULL;
        }
        if (sp->u.iocb_cmd.u.ctarg.rsp &&
            (!sp->fcport ||
             sp->u.iocb_cmd.u.ctarg.rsp != sp->fcport->ct_desc.ct_sns)) {
            dma_free_coherent(&vha->hw->pdev->dev,
                sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                sp->u.iocb_cmd.u.ctarg.rsp,
                sp->u.iocb_cmd.u.ctarg.rsp_dma);
            sp->u.iocb_cmd.u.ctarg.rsp = NULL;
        }
        break;
    default:
        break;
    }

    sp->free(sp);
}
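/**
 * qla2x00_init_async_sp() - Common initialization for an async SRB.
 * @sp: SRB to initialize
 * @tmo: command timeout, in seconds
 * @done: completion callback invoked via qla2x00_async_done()
 */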
void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
    void (*done)(struct srb *sp, int res))
{
    timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
    sp->done = qla2x00_async_done;
    sp->async_done = done;
    sp->free = qla2x00_sp_free;
    sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
    sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
    if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
        init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
    sp->start_timer = 1;
}
static void qla2x00_els_dcmd_sp_free(srb_t *sp)
{
    struct srb_iocb *elsio = &sp->u.iocb_cmd;

    qla2x00_free_fcport(sp->fcport);

    if (elsio->u.els_logo.els_logo_pyld)
        dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
            elsio->u.els_logo.els_logo_pyld,
            elsio->u.els_logo.els_logo_pyld_dma);

    del_timer(&elsio->timer);
    qla2x00_rel_sp(sp);
}
static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
    srb_t *sp = data;
    fc_port_t *fcport = sp->fcport;
    struct scsi_qla_host *vha = sp->vha;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    unsigned long flags = 0;
    int res, h;

    ql_dbg(ql_dbg_io, vha, 0x3069,
        "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
        sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
        fcport->d_id.b.al_pa);

    /* Abort the exchange */
    res = qla24xx_async_abort_cmd(sp, false);
    if (res) {
        ql_dbg(ql_dbg_io, vha, 0x3070,
            "mbx abort_command failed.\n");
        spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
        for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
            if (sp->qpair->req->outstanding_cmds[h] == sp) {
                sp->qpair->req->outstanding_cmds[h] = NULL;
                break;
            }
        }
        spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
        complete(&lio->u.els_logo.comp);
    } else {
        ql_dbg(ql_dbg_io, vha, 0x3071,
            "mbx abort_command success.\n");
    }
}
static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
{
    fc_port_t *fcport = sp->fcport;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    struct scsi_qla_host *vha = sp->vha;

    ql_dbg(ql_dbg_io, vha, 0x3072,
        "%s hdl=%x, portid=%02x%02x%02x done\n",
        sp->name, sp->handle, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa);

    complete(&lio->u.els_logo.comp);
}
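/**
 * qla24xx_els_dcmd_iocb() - Issue a driver-generated ELS (e.g. LOGO).
 * @vha: host the ELS originates from
 * @els_opcode: ELS command opcode to send
 * @remote_did: destination N_Port ID
 *
 * Allocates a temporary fcport and SRB, sends the ELS payload and waits
 * for completion.
 */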
int
qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
    port_id_t remote_did)
{
    srb_t *sp;
    fc_port_t *fcport = NULL;
    struct srb_iocb *elsio = NULL;
    struct qla_hw_data *ha = vha->hw;
    struct els_logo_payload logo_pyld;
    int rval = QLA_SUCCESS;

    fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (!fcport) {
        ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
        return -ENOMEM;
    }

    /* Alloc SRB structure
     * ref: INIT
     */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        qla2x00_free_fcport(fcport);
        ql_log(ql_log_info, vha, 0x70e6,
            "SRB allocation failed\n");
        return -ENOMEM;
    }

    elsio = &sp->u.iocb_cmd;
    fcport->loop_id = 0xFFFF;
    fcport->d_id.b.domain = remote_did.b.domain;
    fcport->d_id.b.area = remote_did.b.area;
    fcport->d_id.b.al_pa = remote_did.b.al_pa;

    ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

    sp->type = SRB_ELS_DCMD;
    sp->name = "ELS_DCMD";
    sp->fcport = fcport;
    qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT,
        qla2x00_els_dcmd_sp_done);
    sp->free = qla2x00_els_dcmd_sp_free;
    sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout;
    init_completion(&sp->u.iocb_cmd.u.els_logo.comp);

    elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
        DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
        GFP_KERNEL);

    if (!elsio->u.els_logo.els_logo_pyld) {
        /* ref: INIT */
        kref_put(&sp->cmd_kref, qla2x00_sp_release);
        qla2x00_free_fcport(fcport);
        return QLA_FUNCTION_FAILED;
    }

    memset(&logo_pyld, 0, sizeof(struct els_logo_payload));

    elsio->u.els_logo.els_cmd = els_opcode;
    logo_pyld.opcode = els_opcode;
    logo_pyld.s_id[0] = vha->d_id.b.al_pa;
    logo_pyld.s_id[1] = vha->d_id.b.area;
    logo_pyld.s_id[2] = vha->d_id.b.domain;
    host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
    memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);

    memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
        sizeof(struct els_logo_payload));
    ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
    ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
        elsio->u.els_logo.els_logo_pyld,
        sizeof(*elsio->u.els_logo.els_logo_pyld));

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        /* ref: INIT */
        kref_put(&sp->cmd_kref, qla2x00_sp_release);
        qla2x00_free_fcport(fcport);
        return QLA_FUNCTION_FAILED;
    }

    ql_dbg(ql_dbg_io, vha, 0x3074,
        "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
        sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa);

    wait_for_completion(&elsio->u.els_logo.comp);

    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);
    return rval;
}
static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
    scsi_qla_host_t *vha = sp->vha;
    struct srb_iocb *elsio = &sp->u.iocb_cmd;

    els_iocb->entry_type = ELS_IOCB_TYPE;
    els_iocb->entry_count = 1;
    els_iocb->sys_define = 0;
    els_iocb->entry_status = 0;
    els_iocb->handle = sp->handle;
    els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    els_iocb->tx_dsd_count = cpu_to_le16(1);
    els_iocb->vp_index = vha->vp_idx;
    els_iocb->sof_type = EST_SOFI3;
    els_iocb->rx_dsd_count = 0;
    els_iocb->opcode = elsio->u.els_logo.els_cmd;

    els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
    els_iocb->d_id[1] = sp->fcport->d_id.b.area;
    els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
    /* For SID the byte order is different than DID */
    els_iocb->s_id[1] = vha->d_id.b.al_pa;
    els_iocb->s_id[2] = vha->d_id.b.area;
    els_iocb->s_id[0] = vha->d_id.b.domain;

    if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
        if (vha->hw->flags.edif_enabled)
            els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN);
        else
            els_iocb->control_flags = 0;
        els_iocb->tx_byte_count = els_iocb->tx_len =
            cpu_to_le32(sizeof(struct els_plogi_payload));
        put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
            &els_iocb->tx_address);
        els_iocb->rx_dsd_count = cpu_to_le16(1);
        els_iocb->rx_byte_count = els_iocb->rx_len =
            cpu_to_le32(sizeof(struct els_plogi_payload));
        put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
            &els_iocb->rx_address);

        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
            "PLOGI ELS IOCB:\n");
        ql_dump_buffer(ql_log_info, vha, 0x0109,
            (uint8_t *)els_iocb,
            sizeof(*els_iocb));
    } else {
        els_iocb->tx_byte_count =
            cpu_to_le32(sizeof(struct els_logo_payload));
        put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
            &els_iocb->tx_address);
        els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));

        els_iocb->rx_byte_count = 0;
        els_iocb->rx_address = 0;
        els_iocb->rx_len = 0;
        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
            "LOGO ELS IOCB:");
        ql_dump_buffer(ql_log_info, vha, 0x010b,
            els_iocb,
            sizeof(*els_iocb));
    }

    sp->vha->qla_stats.control_requests++;
}
static void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
    srb_t *sp = data;
    fc_port_t *fcport = sp->fcport;
    struct scsi_qla_host *vha = sp->vha;
    unsigned long flags = 0;
    int res, h;

    ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
        "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
        sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);

    /* Abort the exchange */
    res = qla24xx_async_abort_cmd(sp, false);
    ql_dbg(ql_dbg_io, vha, 0x3070,
        "mbx abort_command %s\n",
        (res == QLA_SUCCESS) ? "successful" : "failed");
    if (res) {
        spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
        for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
            if (sp->qpair->req->outstanding_cmds[h] == sp) {
                sp->qpair->req->outstanding_cmds[h] = NULL;
                break;
            }
        }
        spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
        sp->done(sp, QLA_FUNCTION_TIMEOUT);
    }
}
void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
{
    if (els_plogi->els_plogi_pyld)
        dma_free_coherent(&vha->hw->pdev->dev,
            els_plogi->tx_size,
            els_plogi->els_plogi_pyld,
            els_plogi->els_plogi_pyld_dma);

    if (els_plogi->els_resp_pyld)
        dma_free_coherent(&vha->hw->pdev->dev,
            els_plogi->rx_size,
            els_plogi->els_resp_pyld,
            els_plogi->els_resp_pyld_dma);
}
static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
{
    fc_port_t *fcport = sp->fcport;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    struct scsi_qla_host *vha = sp->vha;
    struct event_arg ea;
    struct qla_work_evt *e;
    struct fc_port *conflict_fcport;
    port_id_t cid;	/* conflict Nport id */
    const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
    u16 lid;

    ql_dbg(ql_dbg_disc, vha, 0x3072,
        "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
        sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);

    fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
    /* For edif, set logout on delete to ensure any residual key from FW is flushed.*/
    fcport->logout_on_delete = 1;
    fcport->chip_reset = vha->hw->base_qpair->chip_reset;

    if (sp->flags & SRB_WAKEUP_ON_COMP) {
        complete(&lio->u.els_plogi.comp);
    } else {
        switch (le32_to_cpu(fw_status[0])) {
        case CS_DATA_UNDERRUN:
        case CS_COMPLETE:
            memset(&ea, 0, sizeof(ea));
            ea.fcport = fcport;
            ea.rc = res;
            qla_handle_els_plogi_done(vha, &ea);
            break;

        case CS_IOCB_ERROR:
            switch (le32_to_cpu(fw_status[1])) {
            case LSC_SCODE_PORTID_USED:
                lid = le32_to_cpu(fw_status[2]) & 0xffff;
                qlt_find_sess_invalidate_other(vha,
                    wwn_to_u64(fcport->port_name),
                    fcport->d_id, lid, &conflict_fcport);
                if (conflict_fcport) {
                    /*
                     * Another fcport shares the same
                     * loop_id & nport id; conflict
                     * fcport needs to finish cleanup
                     * before this fcport can proceed
                     * to login.
                     */
                    conflict_fcport->conflict = fcport;
                    fcport->login_pause = 1;
                    ql_dbg(ql_dbg_disc, vha, 0x20ed,
                        "%s %d %8phC pid %06x inuse with lid %#x.\n",
                        __func__, __LINE__, fcport->port_name,
                        fcport->d_id.b24, lid);
                } else {
                    ql_dbg(ql_dbg_disc, vha, 0x20ed,
                        "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
                        __func__, __LINE__, fcport->port_name,
                        fcport->d_id.b24, lid);
                    qla2x00_clear_loop_id(fcport);
                    set_bit(lid, vha->hw->loop_id_map);
                    fcport->loop_id = lid;
                    fcport->keep_nport_handle = 0;
                    qlt_schedule_sess_for_deletion(fcport);
                }
                break;

            case LSC_SCODE_NPORT_USED:
                cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
                    & 0xff;
                cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
                    & 0xff;
                cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
                cid.b.rsvd_1 = 0;

                ql_dbg(ql_dbg_disc, vha, 0x20ec,
                    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
                    __func__, __LINE__, fcport->port_name,
                    fcport->loop_id, cid.b24);
                set_bit(fcport->loop_id,
                    vha->hw->loop_id_map);
                fcport->loop_id = FC_NO_LOOP_ID;
                qla24xx_post_gnl_work(vha, fcport);
                break;

            case LSC_SCODE_NOXCB:
                vha->hw->exch_starvation++;
                if (vha->hw->exch_starvation > 5) {
                    ql_log(ql_log_warn, vha, 0xd046,
                        "Exchange starvation. Resetting RISC\n");
                    vha->hw->exch_starvation = 0;
                    set_bit(ISP_ABORT_NEEDED,
                        &vha->dpc_flags);
                    qla2xxx_wake_dpc(vha);
                    break;
                }
                fallthrough;
            default:
                ql_dbg(ql_dbg_disc, vha, 0x20eb,
                    "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
                    __func__, sp->fcport->port_name,
                    fw_status[0], fw_status[1], fw_status[2]);

                fcport->flags &= ~FCF_ASYNC_SENT;
                qlt_schedule_sess_for_deletion(fcport);
                break;
            }
            break;

        default:
            ql_dbg(ql_dbg_disc, vha, 0x20eb,
                "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
                __func__, sp->fcport->port_name,
                fw_status[0], fw_status[1], fw_status[2]);

            sp->fcport->flags &= ~FCF_ASYNC_SENT;
            qlt_schedule_sess_for_deletion(fcport);
            break;
        }

        e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
        if (!e) {
            struct srb_iocb *elsio = &sp->u.iocb_cmd;

            qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
            /* ref: INIT */
            kref_put(&sp->cmd_kref, qla2x00_sp_release);
            return;
        }
        e->u.iosb.sp = sp;
        qla2x00_post_work(vha, e);
    }
}
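/**
 * qla24xx_els_dcmd2_iocb() - Issue a driver-generated ELS PLOGI.
 * @vha: host the PLOGI originates from
 * @els_opcode: ELS command opcode (ELS_DCMD_PLOGI)
 * @fcport: remote port to log into
 * @wait: whether the caller will wait for completion
 *
 * Allocates request/response payload buffers, builds the PLOGI payload
 * from the firmware login template and starts the SRB.  The @fcport and
 * @wait parameters are reconstructed from the function body; the original
 * parameter list was lost in extraction.
 */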
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
    fc_port_t *fcport, bool wait)
{
    srb_t *sp;
    struct srb_iocb *elsio = NULL;
    struct qla_hw_data *ha = vha->hw;
    int rval = QLA_SUCCESS;
    void *ptr, *resp_ptr;

    /* Alloc SRB structure
     * ref: INIT
     */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        ql_log(ql_log_info, vha, 0x70e6,
            "SRB allocation failed\n");
        fcport->flags &= ~FCF_ASYNC_ACTIVE;
        return -ENOMEM;
    }

    fcport->flags |= FCF_ASYNC_SENT;
    qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
    elsio = &sp->u.iocb_cmd;
    ql_dbg(ql_dbg_io, vha, 0x3073,
        "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);

    sp->type = SRB_ELS_DCMD;
    sp->name = "ELS_DCMD";
    sp->fcport = fcport;
    qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2,
        qla2x00_els_dcmd2_sp_done);
    sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;

    elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;

    ptr = elsio->u.els_plogi.els_plogi_pyld =
        dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
            &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);

    if (!elsio->u.els_plogi.els_plogi_pyld) {
        rval = QLA_FUNCTION_FAILED;
        goto out;
    }

    resp_ptr = elsio->u.els_plogi.els_resp_pyld =
        dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
            &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);

    if (!elsio->u.els_plogi.els_resp_pyld) {
        rval = QLA_FUNCTION_FAILED;
        goto out;
    }

    ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);

    memset(ptr, 0, sizeof(struct els_plogi_payload));
    memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
    memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
        (void *)&ha->plogi_els_payld + offsetof(struct fc_els_flogi, fl_csp),
        sizeof(ha->plogi_els_payld) - offsetof(struct fc_els_flogi, fl_csp));

    elsio->u.els_plogi.els_cmd = els_opcode;
    elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;

    if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
        struct fc_els_flogi *p = ptr;

        p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
    }

    ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
    ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
        (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
        sizeof(*elsio->u.els_plogi.els_plogi_pyld));

    init_completion(&elsio->u.els_plogi.comp);
    if (wait)
        sp->flags |= SRB_WAKEUP_ON_COMP;

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        fcport->flags |= FCF_LOGIN_NEEDED;
        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        goto out;
    }

    ql_dbg(ql_dbg_disc, vha, 0x3074,
        "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
        sp->name, sp->handle, fcport->loop_id,
        fcport->d_id.b24, vha->d_id.b24);

    return rval;

out:
    qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);

    fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
    qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
    return rval;
}
/* it is assumed the qpair lock is held */
void qla_els_pt_iocb(struct scsi_qla_host *vha,
    struct els_entry_24xx *els_iocb,
    struct qla_els_pt_arg *a)
{
    els_iocb->entry_type = ELS_IOCB_TYPE;
    els_iocb->entry_count = 1;
    els_iocb->sys_define = 0;
    els_iocb->entry_status = 0;
    els_iocb->handle = QLA_SKIP_HANDLE;
    els_iocb->nport_handle = a->nport_handle;
    els_iocb->rx_xchg_address = a->rx_xchg_address;
    els_iocb->tx_dsd_count = cpu_to_le16(1);
    els_iocb->vp_index = a->vp_idx;
    els_iocb->sof_type = EST_SOFI3;
    els_iocb->rx_dsd_count = cpu_to_le16(0);
    els_iocb->opcode = a->els_opcode;

    els_iocb->d_id[0] = a->did.b.al_pa;
    els_iocb->d_id[1] = a->did.b.area;
    els_iocb->d_id[2] = a->did.b.domain;
    /* For SID the byte order is different than DID */
    els_iocb->s_id[1] = vha->d_id.b.al_pa;
    els_iocb->s_id[2] = vha->d_id.b.area;
    els_iocb->s_id[0] = vha->d_id.b.domain;

    els_iocb->control_flags = cpu_to_le16(a->control_flags);

    els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
    els_iocb->tx_len = cpu_to_le32(a->tx_len);
    put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);

    els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
    els_iocb->rx_len = cpu_to_le32(a->rx_len);
    put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
    struct bsg_job *bsg_job = sp->u.bsg_job;
    struct fc_bsg_request *bsg_request = bsg_job->request;

    els_iocb->entry_type = ELS_IOCB_TYPE;
    els_iocb->entry_count = 1;
    els_iocb->sys_define = 0;
    els_iocb->entry_status = 0;
    els_iocb->handle = sp->handle;
    els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
    els_iocb->vp_index = sp->vha->vp_idx;
    els_iocb->sof_type = EST_SOFI3;
    els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);

    els_iocb->opcode =
        sp->type == SRB_ELS_CMD_RPT ?
        bsg_request->rqst_data.r_els.els_code :
        bsg_request->rqst_data.h_els.command_code;
    els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
    els_iocb->d_id[1] = sp->fcport->d_id.b.area;
    els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
    els_iocb->control_flags = 0;
    els_iocb->rx_byte_count =
        cpu_to_le32(bsg_job->reply_payload.payload_len);
    els_iocb->tx_byte_count =
        cpu_to_le32(bsg_job->request_payload.payload_len);

    put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
        &els_iocb->tx_address);
    els_iocb->tx_len = cpu_to_le32(sg_dma_len
        (bsg_job->request_payload.sg_list));

    put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
        &els_iocb->rx_address);
    els_iocb->rx_len = cpu_to_le32(sg_dma_len
        (bsg_job->reply_payload.sg_list));

    sp->vha->qla_stats.control_requests++;
}
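/**
 * qla2x00_ct_iocb() - Build a CT passthrough IOCB for a bsg job (pre-24xx).
 * @sp: SRB carrying the bsg job
 * @ct_iocb: MS IOCB entry to fill in
 *
 * Reply-payload segments beyond the first are placed in Continuation
 * Type 1 IOCBs, five DSDs per continuation entry.
 */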
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
    uint16_t avail_dsds;
    struct dsd64 *cur_dsd;
    struct scatterlist *sg;
    int index;
    uint16_t tot_dsds;
    scsi_qla_host_t *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct bsg_job *bsg_job = sp->u.bsg_job;
    int entry_count = 1;

    memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
    ct_iocb->entry_type = CT_IOCB_TYPE;
    ct_iocb->entry_status = 0;
    ct_iocb->handle1 = sp->handle;
    SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
    ct_iocb->status = cpu_to_le16(0);
    ct_iocb->control_flags = cpu_to_le16(0);
    ct_iocb->timeout = 0;
    ct_iocb->cmd_dsd_count =
        cpu_to_le16(bsg_job->request_payload.sg_cnt);
    ct_iocb->total_dsd_count =
        cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
    ct_iocb->req_bytecount =
        cpu_to_le32(bsg_job->request_payload.payload_len);
    ct_iocb->rsp_bytecount =
        cpu_to_le32(bsg_job->reply_payload.payload_len);

    put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
        &ct_iocb->req_dsd.address);
    ct_iocb->req_dsd.length = ct_iocb->req_bytecount;

    put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
        &ct_iocb->rsp_dsd.address);
    ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;

    avail_dsds = 1;
    cur_dsd = &ct_iocb->rsp_dsd;
    index = 0;
    tot_dsds = bsg_job->reply_payload.sg_cnt;

    for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                vha->hw->req_q_map[0]);
            cur_dsd = cont_pkt->dsd;
            avail_dsds = 5;
            entry_count++;
        }

        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }
    ct_iocb->entry_count = entry_count;

    sp->vha->qla_stats.control_requests++;
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
    uint16_t avail_dsds;
    struct dsd64 *cur_dsd;
    struct scatterlist *sg;
    int index;
    uint16_t cmd_dsds, rsp_dsds;
    scsi_qla_host_t *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct bsg_job *bsg_job = sp->u.bsg_job;
    int entry_count = 1;
    cont_a64_entry_t *cont_pkt = NULL;

    ct_iocb->entry_type = CT_IOCB_TYPE;
    ct_iocb->entry_status = 0;
    ct_iocb->sys_define = 0;
    ct_iocb->handle = sp->handle;

    ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    ct_iocb->vp_index = sp->vha->vp_idx;
    ct_iocb->comp_status = cpu_to_le16(0);

    cmd_dsds = bsg_job->request_payload.sg_cnt;
    rsp_dsds = bsg_job->reply_payload.sg_cnt;

    ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
    ct_iocb->timeout = 0;
    ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
    ct_iocb->cmd_byte_count =
        cpu_to_le32(bsg_job->request_payload.payload_len);

    avail_dsds = 2;
    cur_dsd = ct_iocb->dsd;
    index = 0;

    for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(
                vha, ha->req_q_map[0]);
            cur_dsd = cont_pkt->dsd;
            avail_dsds = 5;
            entry_count++;
        }

        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }

    index = 0;

    for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                ha->req_q_map[0]);
            cur_dsd = cont_pkt->dsd;
            avail_dsds = 5;
            entry_count++;
        }

        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }
    ct_iocb->entry_count = entry_count;
}
/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla82xx_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    struct scsi_cmnd *cmd;
    uint32_t *clr_ptr;
    uint32_t handle;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct device_reg_82xx __iomem *reg;
    uint32_t dbval;
    __be32 *fcp_dl;
    uint8_t additional_cdb_len;
    struct ct6_dsd *ctx;
    struct scsi_qla_host *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    struct qla_qpair *qpair = sp->qpair;

    /* Setup device pointers. */
    reg = &ha->iobase->isp82;
    cmd = GET_CMD_SP(sp);
    req = vha->req;
    rsp = ha->rsp_q_map[0];

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    dbval = 0x04 | (ha->portnum << 5);

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, ha->base_qpair,
            0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x300c,
                "qla2x00_marker failed for cmd=%p.\n", cmd);
            return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;

    if (tot_dsds > ql2xshiftctondsd) {
        struct cmd_type_6 *cmd_pkt;
        uint16_t more_dsd_lists = 0;
        struct dsd_dma *dsd_ptr;
        uint16_t i;

        more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
        if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
            ql_dbg(ql_dbg_io, vha, 0x300d,
                "Num of DSD list %d is more than %d for cmd=%p.\n",
                more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN,
                cmd);
            goto queuing_error;
        }

        if (more_dsd_lists <= qpair->dsd_avail)
            goto sufficient_dsds;
        else
            more_dsd_lists -= qpair->dsd_avail;

        for (i = 0; i < more_dsd_lists; i++) {
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr) {
                ql_log(ql_log_fatal, vha, 0x300e,
                    "Failed to allocate memory for dsd_dma "
                    "for cmd=%p.\n", cmd);
                goto queuing_error;
            }

            dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
            if (!dsd_ptr->dsd_addr) {
                kfree(dsd_ptr);
                ql_log(ql_log_fatal, vha, 0x300f,
                    "Failed to allocate memory for dsd_addr "
                    "for cmd=%p.\n", cmd);
                goto queuing_error;
            }
            list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
            qpair->dsd_avail++;
        }

sufficient_dsds:
        req_cnt = 1;

        if (req->cnt < (req_cnt + 2)) {
            cnt = (uint16_t)rd_reg_dword_relaxed(
                &reg->req_q_out[0]);
            if (req->ring_index < cnt)
                req->cnt = cnt - req->ring_index;
            else
                req->cnt = req->length -
                    (req->ring_index - cnt);
            if (req->cnt < (req_cnt + 2))
                goto queuing_error;
        }

        ctx = &sp->u.scmd.ct6_ctx;

        memset(ctx, 0, sizeof(struct ct6_dsd));
        ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
            GFP_ATOMIC, &ctx->fcp_cmnd_dma);
        if (!ctx->fcp_cmnd) {
            ql_log(ql_log_fatal, vha, 0x3011,
                "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
            goto queuing_error;
        }

        /* Initialize the DSD list and dma handle */
        INIT_LIST_HEAD(&ctx->dsd_list);
        ctx->dsd_use_cnt = 0;

        if (cmd->cmd_len > 16) {
            additional_cdb_len = cmd->cmd_len - 16;
            if ((cmd->cmd_len % 4) != 0) {
                /* SCSI command bigger than 16 bytes must be
                 * a multiple of 4
                 */
                ql_log(ql_log_warn, vha, 0x3012,
                    "scsi cmd len %d not multiple of 4 "
                    "for cmd=%p.\n", cmd->cmd_len, cmd);
                goto queuing_error_fcp_cmnd;
            }
            ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
            additional_cdb_len = 0;
            ctx->fcp_cmnd_len = 12 + 16 + 4;
        }

        cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
        cmd_pkt->handle = make_handle(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number*/
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Build IOCB segments */
        if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
            goto queuing_error_fcp_cmnd;

        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* build FCP_CMND IU */
        int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
        ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

        if (cmd->sc_data_direction == DMA_TO_DEVICE)
            ctx->fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
            ctx->fcp_cmnd->additional_cdb_len |= 2;

        /* Populate the FCP_PRIO. */
        if (ha->flags.fcp_prio_enabled)
            ctx->fcp_cmnd->task_attribute |=
                sp->fcport->fcp_prio << 3;

        memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

        fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
            additional_cdb_len);
        *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
        put_unaligned_le64(ctx->fcp_cmnd_dma,
            &cmd_pkt->fcp_cmnd_dseg_address);

        sp->flags |= SRB_FCP_CMND_DMA_VALID;
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where
         * completion should happen
         */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
    } else {
        struct cmd_type_7 *cmd_pkt;

        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
            cnt = (uint16_t)rd_reg_dword_relaxed(
                &reg->req_q_out[0]);
            if (req->ring_index < cnt)
                req->cnt = cnt - req->ring_index;
            else
                req->cnt = req->length -
                    (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = make_handle(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number*/
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->vha->vp_idx;

        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
            sizeof(cmd_pkt->lun));

        /* Populate the FCP_PRIO. */
        if (ha->flags.fcp_prio_enabled)
            cmd_pkt->task |= sp->fcport->fcp_prio << 3;

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where
         * completion should happen.
         */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
    }
    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    /* write, read and verify logic */
    dbval = dbval | (req->id << 8) | (req->ring_index << 16);
    if (ql2xdbwr)
        qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
    else {
        wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
        wmb();
        while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
            wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
            wmb();
        }
    }

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_SUCCESS;

queuing_error_fcp_cmnd:
    dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    if (sp->u.scmd.crc_ctx) {
        mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
        sp->u.scmd.crc_ctx = NULL;
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
    struct srb_iocb *aio = &sp->u.iocb_cmd;
    scsi_qla_host_t *vha = sp->vha;
    struct req_que *req = sp->qpair->req;
    srb_t *orig_sp = sp->cmd_sp;

    memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
    abt_iocb->entry_type = ABORT_IOCB_TYPE;
    abt_iocb->entry_count = 1;
    abt_iocb->handle = make_handle(req->id, sp->handle);
    if (sp->fcport) {
        abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
        abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
    }
    abt_iocb->handle_to_abort =
        make_handle(le16_to_cpu(aio->u.abt.req_que_no),
            aio->u.abt.cmd_hndl);
    abt_iocb->vp_index = vha->vp_idx;
    abt_iocb->req_que_no = aio->u.abt.req_que_no;

    /* need to pass original sp */
    if (orig_sp)
        qla_nvme_abort_set_option(abt_iocb, orig_sp);

    /* Send the command to the firmware */
    wmb();
}
static void
qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
    int i, sz;

    mbx->entry_type = MBX_IOCB_TYPE;
    mbx->handle = sp->handle;
    sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));

    for (i = 0; i < sz; i++)
        mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
}
static void
qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
{
    sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
    qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
    ct_pkt->handle = sp->handle;
}
static void qla2x00_send_notify_ack_iocb(srb_t *sp,
    struct nack_to_isp *nack)
{
    struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;

    nack->entry_type = NOTIFY_ACK_TYPE;
    nack->entry_count = 1;
    nack->ox_id = ntfy->ox_id;

    nack->u.isp24.handle = sp->handle;
    nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
    if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
        nack->u.isp24.flags = ntfy->u.isp24.flags &
            cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
    }
    nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
    nack->u.isp24.status = ntfy->u.isp24.status;
    nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
    nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
    nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
    nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
    nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
    nack->u.isp24.srr_flags = 0;
    nack->u.isp24.srr_reject_code = 0;
    nack->u.isp24.srr_reject_code_expl = 0;
    nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

    if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
        (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
        sp->vha->hw->flags.edif_enabled) {
        ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
            "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
            sp->name, sp->handle, sp->fcport->loop_id,
            sp->fcport->d_id.b24);
        nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
    }
}
/*
 * Build NVME LS request
 */
static void
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
    struct srb_iocb *nvme;

    nvme = &sp->u.iocb_cmd;
    cmd_pkt->entry_type = PT_LS4_REQUEST;
    cmd_pkt->entry_count = 1;
    cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
    cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

    if (sp->unsol_rsp) {
        cmd_pkt->control_flags =
            cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT);
        cmd_pkt->nport_handle = nvme->u.nvme.nport_handle;
        cmd_pkt->exchange_address = nvme->u.nvme.exchange_address;
    } else {
        cmd_pkt->control_flags =
            cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->rx_dseg_count = cpu_to_le16(1);
        cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
        cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
        put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
    }

    cmd_pkt->tx_dseg_count = cpu_to_le16(1);
    cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
    cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
    put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
}
static void
qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
{
    int map, pos;

    vce->entry_type = VP_CTRL_IOCB_TYPE;
    vce->handle = sp->handle;
    vce->entry_count = 1;
    vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
    vce->vp_count = cpu_to_le16(1);

    /*
     * index map in firmware starts with 1; decrement index
     * this is ok as we never use index 0
     */
    map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
    pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
    vce->vp_idx_map[map] |= 1 << pos;
}
static void
qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags =
        cpu_to_le16(LCF_COMMAND_PRLO | LCF_IMPL_PRLO);

    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->fcport->vha->vp_idx;
}
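/**
 * qla_get_iocbs_resource() - Reserve firmware IOCB/exchange resources.
 * @sp: SRB about to be started
 *
 * Classifies the SRB type into the IOCB and exchange resources it will
 * consume and reserves them, forcing critical commands (e.g. logout,
 * abort, task management) through even when the limit is reached.
 */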
static int qla_get_iocbs_resource(struct srb *sp)
{
    bool get_exch;
    bool push_it_through = false;

    if (!ql2xenforce_iocb_limit) {
        sp->iores.res_type = RESOURCE_NONE;
        return 0;
    }
    sp->iores.res_type = RESOURCE_NONE;

    switch (sp->type) {
    case SRB_TM_CMD:
    case SRB_PRLI_CMD:
    case SRB_ADISC_CMD:
        push_it_through = true;
        fallthrough;
    case SRB_LOGIN_CMD:
    case SRB_ELS_CMD_RPT:
    case SRB_ELS_CMD_HST:
    case SRB_ELS_CMD_HST_NOLOGIN:
    case SRB_CT_CMD:
    case SRB_NVME_LS:
    case SRB_ELS_DCMD:
        get_exch = true;
        break;

    case SRB_FXIOCB_DCMD:
    case SRB_FXIOCB_BCMD:
        sp->iores.res_type = RESOURCE_NONE;
        return 0;

    case SRB_SA_UPDATE:
    case SRB_SA_REPLACE:
    case SRB_MB_IOCB:
    case SRB_ABT_CMD:
    case SRB_NACK_PLOGI:
    case SRB_NACK_PRLI:
    case SRB_NACK_LOGO:
    case SRB_LOGOUT_CMD:
    case SRB_CTRL_VP:
    case SRB_MARKER:
    default:
        push_it_through = true;
        get_exch = false;
    }

    sp->iores.res_type |= RESOURCE_IOCB;
    sp->iores.iocb_cnt = 1;
    if (get_exch) {
        sp->iores.res_type |= RESOURCE_EXCH;
        sp->iores.exch_cnt = 1;
    }
    if (push_it_through)
        sp->iores.res_type |= RESOURCE_FORCE;

    return qla_get_fw_resources(sp->qpair, &sp->iores);
}
static void
qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
{
    mrk->entry_type = MARKER_TYPE;
    mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
    mrk->handle = make_handle(sp->qpair->req->id, sp->handle);
    if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
        mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
        int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
        host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
        mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index;
    }
}
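/**
 * qla2x00_start_sp() - Build and post the IOCB for a control-path SRB.
 * @sp: SRB to start
 *
 * Reserves firmware resources, allocates a ring entry, dispatches to the
 * type-specific IOCB builder and rings the request-queue doorbell.
 * Returns 0 on success, -EIO when the adapter is EEH-busy, or -EAGAIN
 * when resources or ring space are unavailable.
 */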
int
qla2x00_start_sp(srb_t *sp)
{
    int rval = QLA_SUCCESS;
    scsi_qla_host_t *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_qpair *qp = sp->qpair;
    void *pkt;
    unsigned long flags;

    if (vha->hw->flags.eeh_busy)
        return -EIO;

    spin_lock_irqsave(qp->qp_lock_ptr, flags);
    rval = qla_get_iocbs_resource(sp);
    if (rval) {
        spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
        return -EAGAIN;
    }

    pkt = qla2x00_alloc_iocbs_ready(sp->qpair, sp);
    if (!pkt) {
        rval = -EAGAIN;
        ql_log(ql_log_warn, vha, 0x700c,
            "qla2x00_alloc_iocbs failed.\n");
        goto done;
    }

    switch (sp->type) {
    case SRB_LOGIN_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_login_iocb(sp, pkt) :
            qla2x00_login_iocb(sp, pkt);
        break;
    case SRB_PRLI_CMD:
        qla24xx_prli_iocb(sp, pkt);
        break;
    case SRB_LOGOUT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_logout_iocb(sp, pkt) :
            qla2x00_logout_iocb(sp, pkt);
        break;
    case SRB_ELS_CMD_RPT:
    case SRB_ELS_CMD_HST:
        qla24xx_els_iocb(sp, pkt);
        break;
    case SRB_ELS_CMD_HST_NOLOGIN:
        qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
        ((struct els_entry_24xx *)pkt)->handle = sp->handle;
        break;
    case SRB_CT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_ct_iocb(sp, pkt) :
            qla2x00_ct_iocb(sp, pkt);
        break;
    case SRB_ADISC_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_adisc_iocb(sp, pkt) :
            qla2x00_adisc_iocb(sp, pkt);
        break;
    case SRB_TM_CMD:
        IS_QLAFX00(ha) ?
            qlafx00_tm_iocb(sp, pkt) :
            qla24xx_tm_iocb(sp, pkt);
        break;
    case SRB_FXIOCB_DCMD:
    case SRB_FXIOCB_BCMD:
        qlafx00_fxdisc_iocb(sp, pkt);
        break;
    case SRB_NVME_LS:
        qla_nvme_ls(sp, pkt);
        break;
    case SRB_ABT_CMD:
        IS_QLAFX00(ha) ?
            qlafx00_abort_iocb(sp, pkt) :
            qla24xx_abort_iocb(sp, pkt);
        break;
    case SRB_ELS_DCMD:
        qla24xx_els_logo_iocb(sp, pkt);
        break;
    case SRB_CT_PTHRU_CMD:
        qla2x00_ctpthru_cmd_iocb(sp, pkt);
        break;
    case SRB_MB_IOCB:
        qla2x00_mb_iocb(sp, pkt);
        break;
    case SRB_NACK_PLOGI:
    case SRB_NACK_PRLI:
    case SRB_NACK_LOGO:
        qla2x00_send_notify_ack_iocb(sp, pkt);
        break;
    case SRB_CTRL_VP:
        qla25xx_ctrlvp_iocb(sp, pkt);
        break;
    case SRB_PRLO_CMD:
        qla24xx_prlo_iocb(sp, pkt);
        break;
    case SRB_SA_UPDATE:
        qla24xx_sa_update_iocb(sp, pkt);
        break;
    case SRB_SA_REPLACE:
        qla24xx_sa_replace_iocb(sp, pkt);
        break;
    case SRB_MARKER:
        qla_marker_iocb(sp, pkt);
        break;
    default:
        break;
    }

    if (sp->start_timer) {
        /* ref: TMR timer ref
         * This code should sit just before the start_iocbs call.
         * It makes sure the caller doesn't have to do a kref_put
         * even on failure.
         */
        kref_get(&sp->cmd_kref);
        add_timer(&sp->u.iocb_cmd.timer);
    }

    wmb();
    qla2x00_start_iocbs(vha, qp->req);
done:
    if (rval)
        qla_put_fw_resources(sp->qpair, &sp->iores);
    spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
    return rval;
}
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
    uint16_t avail_dsds;
    struct dsd64 *cur_dsd;
    uint32_t req_data_len = 0;
    uint32_t rsp_data_len = 0;
    struct scatterlist *sg;
    int index;
    int entry_count = 1;
    struct bsg_job *bsg_job = sp->u.bsg_job;

    /* Update entry type to indicate bidir command */
    put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);

    /* Set the transfer direction; in this case set both flags.
     * Also set the BD_WRAP_BACK flag; firmware takes care of
     * assigning DID=SID for outgoing packets.
     */
    cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
    cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
    cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
        BD_WRAP_BACK);

    req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
    cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
    cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
    cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

    vha->bidi_stats.transfer_bytes += req_data_len;
    vha->bidi_stats.io_count++;

    vha->qla_stats.output_bytes += req_data_len;
    vha->qla_stats.output_requests++;

    /* Only one DSD is available for a bidirectional IOCB; the remaining
     * DSDs are bundled in continuation IOCBs.
     */
    avail_dsds = 1;
    cur_dsd = &cmd_pkt->fcp_dsd;

    index = 0;

    for_each_sg(bsg_job->request_payload.sg_list, sg,
        bsg_job->request_payload.sg_cnt, index) {
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets */
        if (avail_dsds == 0) {
            /* Continuation Type 1 IOCB can accommodate
             * 5 DSDs
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
            cur_dsd = cont_pkt->dsd;
            avail_dsds = 5;
            entry_count++;
        }
        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }
    /* For a read request the DSD always goes to a continuation IOCB
     * and follows the write DSDs. If there is room on the current IOCB
     * it is added there; otherwise a new continuation IOCB is
     * allocated.
     */
    for_each_sg(bsg_job->reply_payload.sg_list, sg,
        bsg_job->reply_payload.sg_cnt, index) {
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets */
        if (avail_dsds == 0) {
            /* Continuation Type 1 IOCB can accommodate
             * 5 DSDs
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
            cur_dsd = cont_pkt->dsd;
            avail_dsds = 5;
            entry_count++;
        }
        append_dsd64(&cur_dsd, sg);
        avail_dsds--;
    }
    /* This value should match the number of IOCBs required for this cmd */
    cmd_pkt->entry_count = entry_count;
}
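/**
 * qla2x00_start_bidir() - Queue a bidirectional (loopback) command.
 * @sp: command to send to the ISP
 * @vha: host the command is issued on
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns an EXT_STATUS_* code: EXT_STATUS_OK on success, EXT_STATUS_BUSY
 * when no handle or ring space is available, or EXT_STATUS_MAILBOX if the
 * preceding marker IOCB fails.
 */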
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    uint32_t handle;
    uint16_t req_cnt;
    uint16_t cnt;
    uint32_t *clr_ptr;
    struct cmd_bidir *cmd_pkt = NULL;
    struct rsp_que *rsp;
    struct req_que *req;
    int rval = EXT_STATUS_OK;

    rsp = ha->rsp_q_map[0];
    req = vha->req;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, ha->base_qpair,
            0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
            return EXT_STATUS_MAILBOX;
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    handle = qla2xxx_get_next_handle(req);
    if (handle == 0) {
        rval = EXT_STATUS_BUSY;
        goto queuing_error;
    }

    /* Calculate number of IOCB required */
    req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

    /* Check for room on request queue. */
    if (req->cnt < req_cnt + 2) {
        if (IS_SHADOW_REG_CAPABLE(ha)) {
            cnt = *req->out_ptr;
        } else {
            cnt = rd_reg_dword_relaxed(req->req_q_out);
            if (qla2x00_check_reg16_for_disconnect(vha, cnt))
                goto queuing_error;
        }

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
    }
    if (req->cnt < req_cnt + 2) {
        rval = EXT_STATUS_BUSY;
        goto queuing_error;
    }

    cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
    cmd_pkt->handle = make_handle(req->id, handle);

    /* Zero out remaining portion of packet. */
    /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    /* Set NPORT-ID (of vha)*/
    cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
    cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
    cmd_pkt->port_id[1] = vha->d_id.b.area;
    cmd_pkt->port_id[2] = vha->d_id.b.domain;

    qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
    cmd_pkt->entry_status = (uint8_t) rsp->id;
    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    req->cnt -= req_cnt;

    /* Send the command to the firmware */
    wmb();
    qla2x00_start_iocbs(vha, req);
queuing_error:
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return rval;
}
4230 * qla_start_scsi_type6() - Send a SCSI command to the ISP
4231 * @sp: command to send to the ISP
4233 * Returns non-zero if a failure occurred, else zero.
4236 qla_start_scsi_type6(srb_t
*sp
)
4239 unsigned long flags
;
4242 struct cmd_type_6
*cmd_pkt
;
4246 struct req_que
*req
= NULL
;
4247 struct rsp_que
*rsp
;
4248 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
4249 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
4250 struct qla_hw_data
*ha
= vha
->hw
;
4251 struct qla_qpair
*qpair
= sp
->qpair
;
4252 uint16_t more_dsd_lists
= 0;
4253 struct dsd_dma
*dsd_ptr
;
4256 uint8_t additional_cdb_len
;
4257 struct ct6_dsd
*ctx
;
4259 /* Acquire qpair specific lock */
4260 spin_lock_irqsave(&qpair
->qp_lock
, flags
);
4262 /* Setup qpair pointers */
4266 /* So we know we haven't pci_map'ed anything yet */
4269 /* Send marker if required */
4270 if (vha
->marker_needed
!= 0) {
4271 if (__qla2x00_marker(vha
, qpair
, 0, 0, MK_SYNC_ALL
) != QLA_SUCCESS
) {
4272 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
4273 return QLA_FUNCTION_FAILED
;
4275 vha
->marker_needed
= 0;
4278 handle
= qla2xxx_get_next_handle(req
);
4282 /* Map the sg table so we have an accurate count of sg entries needed */
4283 if (scsi_sg_count(cmd
)) {
4284 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
4285 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
4286 if (unlikely(!nseg
))
4294 /* eventhough driver only need 1 T6 IOCB, FW still convert DSD to Continueation IOCB */
4295 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
4297 sp
->iores
.res_type
= RESOURCE_IOCB
| RESOURCE_EXCH
;
4298 sp
->iores
.exch_cnt
= 1;
4299 sp
->iores
.iocb_cnt
= req_cnt
;
4301 if (qla_get_fw_resources(sp
->qpair
, &sp
->iores
))
4304 more_dsd_lists
= qla24xx_calc_dsd_lists(tot_dsds
);
4305 if ((more_dsd_lists
+ qpair
->dsd_inuse
) >= NUM_DSD_CHAIN
) {
4306 ql_dbg(ql_dbg_io
, vha
, 0x3028,
4307 "Num of DSD list %d is than %d for cmd=%p.\n",
4308 more_dsd_lists
+ qpair
->dsd_inuse
, NUM_DSD_CHAIN
, cmd
);
	if (more_dsd_lists <= qpair->dsd_avail)
		goto sufficient_dsds;
	else
		more_dsd_lists -= qpair->dsd_avail;
	for (i = 0; i < more_dsd_lists; i++) {
		dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
		if (!dsd_ptr) {
			ql_log(ql_log_fatal, vha, 0x3029,
			    "Failed to allocate memory for dsd_dma for cmd=%p.\n",
			    cmd);
			goto queuing_error;
		}
		INIT_LIST_HEAD(&dsd_ptr->list);

		dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
		    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
		if (!dsd_ptr->dsd_addr) {
			kfree(dsd_ptr);
			ql_log(ql_log_fatal, vha, 0x302a,
			    "Failed to allocate memory for dsd_addr for cmd=%p.\n",
			    cmd);
			goto queuing_error;
		}
		list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
		qpair->dsd_avail++;
	}
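	/*
	 * The loop above pre-allocates only the shortfall of DSD lists.
	 * Type 6 commands place their data-segment descriptors in these
	 * external host-memory lists (chained via dl_dma_pool) rather
	 * than in Continuation IOCBs on the request ring, which is why
	 * req_cnt is reset to a single entry below.
	 */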
sufficient_dsds:
	req_cnt = 1;

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}
	ctx = &sp->u.scmd.ct6_ctx;

	memset(ctx, 0, sizeof(struct ct6_dsd));
	ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
	    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
	if (!ctx->fcp_cmnd) {
		ql_log(ql_log_fatal, vha, 0x3031,
		    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
		goto queuing_error;
	}
	/* Initialize the DSD list and dma handle. */
	INIT_LIST_HEAD(&ctx->dsd_list);
	ctx->dsd_use_cnt = 0;
	if (cmd->cmd_len > 16) {
		additional_cdb_len = cmd->cmd_len - 16;
		if (cmd->cmd_len % 4 || cmd->cmd_len > QLA_CDB_BUF_SIZE) {
			/*
			 * A CDB longer than 16 bytes must be a multiple
			 * of 4 bytes and must fit in the command buffer.
			 */
			ql_log(ql_log_warn, vha, 0x3033,
			    "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
			    cmd->cmd_len, cmd);
			goto queuing_error_fcp_cmnd;
		}
		ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_cdb_len = 0;
		ctx->fcp_cmnd_len = 12 + 16 + 4;
	}
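	/*
	 * fcp_cmnd_len arithmetic -- the FCP_CMND IU laid out below is a
	 * 12-byte fixed header, the CDB, then the 4-byte FCP_DL transfer
	 * length filled in further down (lengths in bytes):
	 *
	 *   +---------+-----+------+-----+------+----------+--------+
	 *   | LUN (8) | CRN | attr | tmf | acdb | CDB (n)  | FCP_DL |
	 *   +---------+-----+------+-----+------+----------+--------+
	 *
	 * hence fcp_cmnd_len = 12 + cdb_len + 4.
	 */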
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;
	/* Build IOCB segments. */
	qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds);

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
	/* Build the FCP_CMND IU. */
	int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
	ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		ctx->fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		ctx->fcp_cmnd->additional_cdb_len |= 2;
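	/*
	 * The additional_cdb_len byte doubles as FCP_CNTL direction bits:
	 * bits 7:2 hold the extra CDB length in 4-byte words, while bit 0
	 * (WRDATA) and bit 1 (RDDATA) encode the transfer direction. Since
	 * the byte count is a multiple of 4 it already sits correctly in
	 * bits 7:2, so the direction flags can simply be OR'ed in.
	 */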
	/* Populate the FCP_PRIO. */
	if (ha->flags.fcp_prio_enabled)
		ctx->fcp_cmnd->task_attribute |=
		    sp->fcport->fcp_prio << 3;

	memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + additional_cdb_len);
	*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
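	/*
	 * FCP_DL is the total expected transfer length, stored big-endian
	 * immediately after the (possibly extended) CDB -- hence the
	 * __be32 pointer and htonl() rather than cpu_to_le32().
	 */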
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
	put_unaligned_le64(ctx->fcp_cmnd_dma,
	    &cmd_pkt->fcp_cmnd_dseg_address);

	sp->flags |= SRB_FCP_CMND_DMA_VALID;
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
	/* Set total IOCB entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}
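	/*
	 * The wmb() above ensures every store to the IOCB and the ring is
	 * visible before the req_q_in doorbell write below hands the new
	 * ring index -- and with it ownership of the entry -- to the
	 * firmware.
	 */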
	sp->qpair->cmd_cnt++;
	sp->flags |= SRB_DMA_VALID;
	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);
	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);
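	/*
	 * In RIO/ZIO (reduced/zero interrupt operation) mode the firmware
	 * may post completions without raising an interrupt, so the
	 * response ring is polled opportunistically here while the qpair
	 * lock is still held.
	 */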
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;
queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
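	/*
	 * Error unwind: queuing_error_fcp_cmnd releases the FCP_CMND DMA
	 * buffer and then falls through to queuing_error, which undoes the
	 * remaining setup (DMA mapping, firmware resources, CRC context)
	 * in reverse order of acquisition.
	 */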
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);
	qla_put_fw_resources(sp->qpair, &sp->iores);
	if (sp->u.scmd.crc_ctx) {
		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
		sp->u.scmd.crc_ctx = NULL;
	}
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}