2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
12 #include <scsi/scsi_tcq.h>
14 static void qla25xx_set_que(srb_t
*, struct rsp_que
**);
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
19 * Returns the proper CF_* direction based on CDB.
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t
*sp
)
25 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
29 /* Set transfer direction */
30 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
32 sp
->fcport
->vha
->hw
->qla_stats
.output_bytes
+=
34 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
36 sp
->fcport
->vha
->hw
->qla_stats
.input_bytes
+=
43 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
44 * Continuation Type 0 IOCBs to allocate.
46 * @dsds: number of data segment decriptors needed
48 * Returns the number of IOCB entries needed to store @dsds.
51 qla2x00_calc_iocbs_32(uint16_t dsds
)
57 iocbs
+= (dsds
- 3) / 7;
65 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
66 * Continuation Type 1 IOCBs to allocate.
68 * @dsds: number of data segment decriptors needed
70 * Returns the number of IOCB entries needed to store @dsds.
73 qla2x00_calc_iocbs_64(uint16_t dsds
)
79 iocbs
+= (dsds
- 2) / 5;
87 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
90 * Returns a pointer to the Continuation Type 0 IOCB packet.
92 static inline cont_entry_t
*
93 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host
*vha
)
95 cont_entry_t
*cont_pkt
;
96 struct req_que
*req
= vha
->req
;
97 /* Adjust ring index. */
99 if (req
->ring_index
== req
->length
) {
101 req
->ring_ptr
= req
->ring
;
106 cont_pkt
= (cont_entry_t
*)req
->ring_ptr
;
108 /* Load packet defaults. */
109 *((uint32_t *)(&cont_pkt
->entry_type
)) =
110 __constant_cpu_to_le32(CONTINUE_TYPE
);
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
119 * Returns a pointer to the continuation type 1 IOCB packet.
121 static inline cont_a64_entry_t
*
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t
*vha
, struct req_que
*req
)
124 cont_a64_entry_t
*cont_pkt
;
126 /* Adjust ring index. */
128 if (req
->ring_index
== req
->length
) {
130 req
->ring_ptr
= req
->ring
;
135 cont_pkt
= (cont_a64_entry_t
*)req
->ring_ptr
;
137 /* Load packet defaults. */
138 *((uint32_t *)(&cont_pkt
->entry_type
)) =
139 __constant_cpu_to_le32(CONTINUE_A64_TYPE
);
145 qla24xx_configure_prot_mode(srb_t
*sp
, uint16_t *fw_prot_opts
)
147 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
148 uint8_t guard
= scsi_host_get_guard(cmd
->device
->host
);
150 /* We only support T10 DIF right now */
151 if (guard
!= SHOST_DIX_GUARD_CRC
) {
152 ql_dbg(ql_dbg_io
, sp
->fcport
->vha
, 0x3007,
153 "Unsupported guard: %d for cmd=%p.\n", guard
, cmd
);
157 /* We always use DIFF Bundling for best performance */
160 /* Translate SCSI opcode to a protection opcode */
161 switch (scsi_get_prot_op(cmd
)) {
162 case SCSI_PROT_READ_STRIP
:
163 *fw_prot_opts
|= PO_MODE_DIF_REMOVE
;
165 case SCSI_PROT_WRITE_INSERT
:
166 *fw_prot_opts
|= PO_MODE_DIF_INSERT
;
168 case SCSI_PROT_READ_INSERT
:
169 *fw_prot_opts
|= PO_MODE_DIF_INSERT
;
171 case SCSI_PROT_WRITE_STRIP
:
172 *fw_prot_opts
|= PO_MODE_DIF_REMOVE
;
174 case SCSI_PROT_READ_PASS
:
175 *fw_prot_opts
|= PO_MODE_DIF_PASS
;
177 case SCSI_PROT_WRITE_PASS
:
178 *fw_prot_opts
|= PO_MODE_DIF_PASS
;
180 default: /* Normal Request */
181 *fw_prot_opts
|= PO_MODE_DIF_PASS
;
185 return scsi_prot_sg_count(cmd
);
189 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
190 * capable IOCB types.
192 * @sp: SRB command to process
193 * @cmd_pkt: Command type 2 IOCB
194 * @tot_dsds: Total number of segments to transfer
196 void qla2x00_build_scsi_iocbs_32(srb_t
*sp
, cmd_entry_t
*cmd_pkt
,
201 scsi_qla_host_t
*vha
;
202 struct scsi_cmnd
*cmd
;
203 struct scatterlist
*sg
;
206 cmd
= GET_CMD_SP(sp
);
208 /* Update entry type to indicate Command Type 2 IOCB */
209 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
210 __constant_cpu_to_le32(COMMAND_TYPE
);
212 /* No data transfer */
213 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
214 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
218 vha
= sp
->fcport
->vha
;
219 cmd_pkt
->control_flags
|= cpu_to_le16(qla2x00_get_cmd_direction(sp
));
221 /* Three DSDs are available in the Command Type 2 IOCB */
223 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
225 /* Load data segments */
226 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
227 cont_entry_t
*cont_pkt
;
229 /* Allocate additional continuation packets? */
230 if (avail_dsds
== 0) {
232 * Seven DSDs are available in the Continuation
235 cont_pkt
= qla2x00_prep_cont_type0_iocb(vha
);
236 cur_dsd
= (uint32_t *)&cont_pkt
->dseg_0_address
;
240 *cur_dsd
++ = cpu_to_le32(sg_dma_address(sg
));
241 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
247 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
248 * capable IOCB types.
250 * @sp: SRB command to process
251 * @cmd_pkt: Command type 3 IOCB
252 * @tot_dsds: Total number of segments to transfer
254 void qla2x00_build_scsi_iocbs_64(srb_t
*sp
, cmd_entry_t
*cmd_pkt
,
259 scsi_qla_host_t
*vha
;
260 struct scsi_cmnd
*cmd
;
261 struct scatterlist
*sg
;
264 cmd
= GET_CMD_SP(sp
);
266 /* Update entry type to indicate Command Type 3 IOCB */
267 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
268 __constant_cpu_to_le32(COMMAND_A64_TYPE
);
270 /* No data transfer */
271 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
272 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
276 vha
= sp
->fcport
->vha
;
277 cmd_pkt
->control_flags
|= cpu_to_le16(qla2x00_get_cmd_direction(sp
));
279 /* Two DSDs are available in the Command Type 3 IOCB */
281 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
283 /* Load data segments */
284 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
286 cont_a64_entry_t
*cont_pkt
;
288 /* Allocate additional continuation packets? */
289 if (avail_dsds
== 0) {
291 * Five DSDs are available in the Continuation
294 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, vha
->req
);
295 cur_dsd
= (uint32_t *)cont_pkt
->dseg_0_address
;
299 sle_dma
= sg_dma_address(sg
);
300 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
301 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
302 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
308 * qla2x00_start_scsi() - Send a SCSI command to the ISP
309 * @sp: command to send to the ISP
311 * Returns non-zero if a failure occurred, else zero.
314 qla2x00_start_scsi(srb_t
*sp
)
318 scsi_qla_host_t
*vha
;
319 struct scsi_cmnd
*cmd
;
323 cmd_entry_t
*cmd_pkt
;
327 struct device_reg_2xxx __iomem
*reg
;
328 struct qla_hw_data
*ha
;
333 /* Setup device pointers. */
335 vha
= sp
->fcport
->vha
;
337 reg
= &ha
->iobase
->isp
;
338 cmd
= GET_CMD_SP(sp
);
339 req
= ha
->req_q_map
[0];
340 rsp
= ha
->rsp_q_map
[0];
341 /* So we know we haven't pci_map'ed anything yet */
344 /* Send marker if required */
345 if (vha
->marker_needed
!= 0) {
346 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
348 return (QLA_FUNCTION_FAILED
);
350 vha
->marker_needed
= 0;
353 /* Acquire ring specific lock */
354 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
356 /* Check for room in outstanding command list. */
357 handle
= req
->current_outstanding_cmd
;
358 for (index
= 1; index
< MAX_OUTSTANDING_COMMANDS
; index
++) {
360 if (handle
== MAX_OUTSTANDING_COMMANDS
)
362 if (!req
->outstanding_cmds
[handle
])
365 if (index
== MAX_OUTSTANDING_COMMANDS
)
368 /* Map the sg table so we have an accurate count of sg entries needed */
369 if (scsi_sg_count(cmd
)) {
370 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
371 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
379 /* Calculate the number of request entries needed. */
380 req_cnt
= ha
->isp_ops
->calc_req_entries(tot_dsds
);
381 if (req
->cnt
< (req_cnt
+ 2)) {
382 cnt
= RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha
, reg
));
383 if (req
->ring_index
< cnt
)
384 req
->cnt
= cnt
- req
->ring_index
;
386 req
->cnt
= req
->length
-
387 (req
->ring_index
- cnt
);
389 if (req
->cnt
< (req_cnt
+ 2))
392 /* Build command packet */
393 req
->current_outstanding_cmd
= handle
;
394 req
->outstanding_cmds
[handle
] = sp
;
396 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
399 cmd_pkt
= (cmd_entry_t
*)req
->ring_ptr
;
400 cmd_pkt
->handle
= handle
;
401 /* Zero out remaining portion of packet. */
402 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
403 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
404 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
406 /* Set target ID and LUN number*/
407 SET_TARGET_ID(ha
, cmd_pkt
->target
, sp
->fcport
->loop_id
);
408 cmd_pkt
->lun
= cpu_to_le16(cmd
->device
->lun
);
410 /* Update tagged queuing modifier */
411 if (scsi_populate_tag_msg(cmd
, tag
)) {
413 case HEAD_OF_QUEUE_TAG
:
414 cmd_pkt
->control_flags
=
415 __constant_cpu_to_le16(CF_HEAD_TAG
);
417 case ORDERED_QUEUE_TAG
:
418 cmd_pkt
->control_flags
=
419 __constant_cpu_to_le16(CF_ORDERED_TAG
);
422 cmd_pkt
->control_flags
=
423 __constant_cpu_to_le16(CF_SIMPLE_TAG
);
428 /* Load SCSI command packet. */
429 memcpy(cmd_pkt
->scsi_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
430 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
432 /* Build IOCB segments */
433 ha
->isp_ops
->build_iocbs(sp
, cmd_pkt
, tot_dsds
);
435 /* Set total data segment count. */
436 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
439 /* Adjust ring index. */
441 if (req
->ring_index
== req
->length
) {
443 req
->ring_ptr
= req
->ring
;
447 sp
->flags
|= SRB_DMA_VALID
;
449 /* Set chip new ring index. */
450 WRT_REG_WORD(ISP_REQ_Q_IN(ha
, reg
), req
->ring_index
);
451 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha
, reg
)); /* PCI Posting. */
453 /* Manage unprocessed RIO/ZIO commands in response queue. */
454 if (vha
->flags
.process_response_queue
&&
455 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
456 qla2x00_process_response_queue(rsp
);
458 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
459 return (QLA_SUCCESS
);
465 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
467 return (QLA_FUNCTION_FAILED
);
471 * qla2x00_start_iocbs() - Execute the IOCB command
474 qla2x00_start_iocbs(struct scsi_qla_host
*vha
, struct req_que
*req
)
476 struct qla_hw_data
*ha
= vha
->hw
;
477 device_reg_t __iomem
*reg
= ISP_QUE_REG(ha
, req
->id
);
479 if (IS_QLA82XX(ha
)) {
480 qla82xx_start_iocbs(vha
);
482 /* Adjust ring index. */
484 if (req
->ring_index
== req
->length
) {
486 req
->ring_ptr
= req
->ring
;
490 /* Set chip new ring index. */
491 if (ha
->mqenable
|| IS_QLA83XX(ha
)) {
492 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
493 RD_REG_DWORD_RELAXED(&ha
->iobase
->isp24
.hccr
);
494 } else if (IS_FWI2_CAPABLE(ha
)) {
495 WRT_REG_DWORD(®
->isp24
.req_q_in
, req
->ring_index
);
496 RD_REG_DWORD_RELAXED(®
->isp24
.req_q_in
);
498 WRT_REG_WORD(ISP_REQ_Q_IN(ha
, ®
->isp
),
500 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha
, ®
->isp
));
506 * qla2x00_marker() - Send a marker IOCB to the firmware.
510 * @type: marker modifier
512 * Can be called from both normal and interrupt context.
514 * Returns non-zero if a failure occurred, else zero.
517 __qla2x00_marker(struct scsi_qla_host
*vha
, struct req_que
*req
,
518 struct rsp_que
*rsp
, uint16_t loop_id
,
519 uint16_t lun
, uint8_t type
)
522 struct mrk_entry_24xx
*mrk24
;
523 struct qla_hw_data
*ha
= vha
->hw
;
524 scsi_qla_host_t
*base_vha
= pci_get_drvdata(ha
->pdev
);
527 req
= ha
->req_q_map
[0];
528 mrk
= (mrk_entry_t
*)qla2x00_alloc_iocbs(vha
, 0);
530 ql_log(ql_log_warn
, base_vha
, 0x3026,
531 "Failed to allocate Marker IOCB.\n");
533 return (QLA_FUNCTION_FAILED
);
536 mrk
->entry_type
= MARKER_TYPE
;
537 mrk
->modifier
= type
;
538 if (type
!= MK_SYNC_ALL
) {
539 if (IS_FWI2_CAPABLE(ha
)) {
540 mrk24
= (struct mrk_entry_24xx
*) mrk
;
541 mrk24
->nport_handle
= cpu_to_le16(loop_id
);
542 mrk24
->lun
[1] = LSB(lun
);
543 mrk24
->lun
[2] = MSB(lun
);
544 host_to_fcp_swap(mrk24
->lun
, sizeof(mrk24
->lun
));
545 mrk24
->vp_index
= vha
->vp_idx
;
546 mrk24
->handle
= MAKE_HANDLE(req
->id
, mrk24
->handle
);
548 SET_TARGET_ID(ha
, mrk
->target
, loop_id
);
549 mrk
->lun
= cpu_to_le16(lun
);
554 qla2x00_start_iocbs(vha
, req
);
556 return (QLA_SUCCESS
);
560 qla2x00_marker(struct scsi_qla_host
*vha
, struct req_que
*req
,
561 struct rsp_que
*rsp
, uint16_t loop_id
, uint16_t lun
,
565 unsigned long flags
= 0;
567 spin_lock_irqsave(&vha
->hw
->hardware_lock
, flags
);
568 ret
= __qla2x00_marker(vha
, req
, rsp
, loop_id
, lun
, type
);
569 spin_unlock_irqrestore(&vha
->hw
->hardware_lock
, flags
);
575 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
576 * Continuation Type 1 IOCBs to allocate.
578 * @dsds: number of data segment decriptors needed
580 * Returns the number of IOCB entries needed to store @dsds.
583 qla24xx_calc_iocbs(scsi_qla_host_t
*vha
, uint16_t dsds
)
589 iocbs
+= (dsds
- 1) / 5;
597 qla24xx_build_scsi_type_6_iocbs(srb_t
*sp
, struct cmd_type_6
*cmd_pkt
,
600 uint32_t *cur_dsd
= NULL
;
601 scsi_qla_host_t
*vha
;
602 struct qla_hw_data
*ha
;
603 struct scsi_cmnd
*cmd
;
604 struct scatterlist
*cur_seg
;
608 uint8_t first_iocb
= 1;
609 uint32_t dsd_list_len
;
610 struct dsd_dma
*dsd_ptr
;
613 cmd
= GET_CMD_SP(sp
);
615 /* Update entry type to indicate Command Type 3 IOCB */
616 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
617 __constant_cpu_to_le32(COMMAND_TYPE_6
);
619 /* No data transfer */
620 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
621 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
625 vha
= sp
->fcport
->vha
;
628 /* Set transfer direction */
629 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
630 cmd_pkt
->control_flags
=
631 __constant_cpu_to_le16(CF_WRITE_DATA
);
632 ha
->qla_stats
.output_bytes
+= scsi_bufflen(cmd
);
633 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
634 cmd_pkt
->control_flags
=
635 __constant_cpu_to_le16(CF_READ_DATA
);
636 ha
->qla_stats
.input_bytes
+= scsi_bufflen(cmd
);
639 cur_seg
= scsi_sglist(cmd
);
640 ctx
= GET_CMD_CTX_SP(sp
);
643 avail_dsds
= (tot_dsds
> QLA_DSDS_PER_IOCB
) ?
644 QLA_DSDS_PER_IOCB
: tot_dsds
;
645 tot_dsds
-= avail_dsds
;
646 dsd_list_len
= (avail_dsds
+ 1) * QLA_DSD_SIZE
;
648 dsd_ptr
= list_first_entry(&ha
->gbl_dsd_list
,
649 struct dsd_dma
, list
);
650 next_dsd
= dsd_ptr
->dsd_addr
;
651 list_del(&dsd_ptr
->list
);
653 list_add_tail(&dsd_ptr
->list
, &ctx
->dsd_list
);
659 dsd_seg
= (uint32_t *)&cmd_pkt
->fcp_data_dseg_address
;
660 *dsd_seg
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
661 *dsd_seg
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
662 cmd_pkt
->fcp_data_dseg_len
= cpu_to_le32(dsd_list_len
);
664 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
665 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
666 *cur_dsd
++ = cpu_to_le32(dsd_list_len
);
668 cur_dsd
= (uint32_t *)next_dsd
;
672 sle_dma
= sg_dma_address(cur_seg
);
673 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
674 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
675 *cur_dsd
++ = cpu_to_le32(sg_dma_len(cur_seg
));
676 cur_seg
= sg_next(cur_seg
);
681 /* Null termination */
685 cmd_pkt
->control_flags
|= CF_DATA_SEG_DESCR_ENABLE
;
690 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
691 * for Command Type 6.
693 * @dsds: number of data segment decriptors needed
695 * Returns the number of dsd list needed to store @dsds.
698 qla24xx_calc_dsd_lists(uint16_t dsds
)
700 uint16_t dsd_lists
= 0;
702 dsd_lists
= (dsds
/QLA_DSDS_PER_IOCB
);
703 if (dsds
% QLA_DSDS_PER_IOCB
)
710 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
713 * @sp: SRB command to process
714 * @cmd_pkt: Command type 3 IOCB
715 * @tot_dsds: Total number of segments to transfer
718 qla24xx_build_scsi_iocbs(srb_t
*sp
, struct cmd_type_7
*cmd_pkt
,
723 scsi_qla_host_t
*vha
;
724 struct scsi_cmnd
*cmd
;
725 struct scatterlist
*sg
;
729 cmd
= GET_CMD_SP(sp
);
731 /* Update entry type to indicate Command Type 3 IOCB */
732 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
733 __constant_cpu_to_le32(COMMAND_TYPE_7
);
735 /* No data transfer */
736 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
737 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
741 vha
= sp
->fcport
->vha
;
744 /* Set transfer direction */
745 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
746 cmd_pkt
->task_mgmt_flags
=
747 __constant_cpu_to_le16(TMF_WRITE_DATA
);
748 sp
->fcport
->vha
->hw
->qla_stats
.output_bytes
+=
750 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
751 cmd_pkt
->task_mgmt_flags
=
752 __constant_cpu_to_le16(TMF_READ_DATA
);
753 sp
->fcport
->vha
->hw
->qla_stats
.input_bytes
+=
757 /* One DSD is available in the Command Type 3 IOCB */
759 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
761 /* Load data segments */
763 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
765 cont_a64_entry_t
*cont_pkt
;
767 /* Allocate additional continuation packets? */
768 if (avail_dsds
== 0) {
770 * Five DSDs are available in the Continuation
773 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, vha
->req
);
774 cur_dsd
= (uint32_t *)cont_pkt
->dseg_0_address
;
778 sle_dma
= sg_dma_address(sg
);
779 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
780 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
781 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
786 struct fw_dif_context
{
789 uint8_t ref_tag_mask
[4]; /* Validation/Replacement Mask*/
790 uint8_t app_tag_mask
[2]; /* Validation/Replacement Mask*/
794 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
798 qla24xx_set_t10dif_tags(srb_t
*sp
, struct fw_dif_context
*pkt
,
799 unsigned int protcnt
)
801 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
802 scsi_qla_host_t
*vha
= shost_priv(cmd
->device
->host
);
804 switch (scsi_get_prot_type(cmd
)) {
805 case SCSI_PROT_DIF_TYPE0
:
807 * No check for ql2xenablehba_err_chk, as it would be an
808 * I/O error if hba tag generation is not done.
810 pkt
->ref_tag
= cpu_to_le32((uint32_t)
811 (0xffffffff & scsi_get_lba(cmd
)));
813 if (!qla2x00_hba_err_chk_enabled(sp
))
816 pkt
->ref_tag_mask
[0] = 0xff;
817 pkt
->ref_tag_mask
[1] = 0xff;
818 pkt
->ref_tag_mask
[2] = 0xff;
819 pkt
->ref_tag_mask
[3] = 0xff;
823 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
824 * match LBA in CDB + N
826 case SCSI_PROT_DIF_TYPE2
:
827 pkt
->app_tag
= __constant_cpu_to_le16(0);
828 pkt
->app_tag_mask
[0] = 0x0;
829 pkt
->app_tag_mask
[1] = 0x0;
831 pkt
->ref_tag
= cpu_to_le32((uint32_t)
832 (0xffffffff & scsi_get_lba(cmd
)));
834 if (!qla2x00_hba_err_chk_enabled(sp
))
837 /* enable ALL bytes of the ref tag */
838 pkt
->ref_tag_mask
[0] = 0xff;
839 pkt
->ref_tag_mask
[1] = 0xff;
840 pkt
->ref_tag_mask
[2] = 0xff;
841 pkt
->ref_tag_mask
[3] = 0xff;
844 /* For Type 3 protection: 16 bit GUARD only */
845 case SCSI_PROT_DIF_TYPE3
:
846 pkt
->ref_tag_mask
[0] = pkt
->ref_tag_mask
[1] =
847 pkt
->ref_tag_mask
[2] = pkt
->ref_tag_mask
[3] =
852 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
855 case SCSI_PROT_DIF_TYPE1
:
856 pkt
->ref_tag
= cpu_to_le32((uint32_t)
857 (0xffffffff & scsi_get_lba(cmd
)));
858 pkt
->app_tag
= __constant_cpu_to_le16(0);
859 pkt
->app_tag_mask
[0] = 0x0;
860 pkt
->app_tag_mask
[1] = 0x0;
862 if (!qla2x00_hba_err_chk_enabled(sp
))
865 /* enable ALL bytes of the ref tag */
866 pkt
->ref_tag_mask
[0] = 0xff;
867 pkt
->ref_tag_mask
[1] = 0xff;
868 pkt
->ref_tag_mask
[2] = 0xff;
869 pkt
->ref_tag_mask
[3] = 0xff;
873 ql_dbg(ql_dbg_io
, vha
, 0x3009,
874 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
875 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
876 pkt
->ref_tag
, pkt
->app_tag
, protcnt
, (int)scsi_get_lba(cmd
),
877 scsi_get_prot_type(cmd
), cmd
);
881 dma_addr_t dma_addr
; /* OUT */
882 uint32_t dma_len
; /* OUT */
884 uint32_t tot_bytes
; /* IN */
885 struct scatterlist
*cur_sg
; /* IN */
887 /* for book keeping, bzero on initial invocation */
888 uint32_t bytes_consumed
;
890 uint32_t tot_partial
;
898 qla24xx_get_one_block_sg(uint32_t blk_sz
, struct qla2_sgx
*sgx
,
901 struct scatterlist
*sg
;
902 uint32_t cumulative_partial
, sg_len
;
903 dma_addr_t sg_dma_addr
;
905 if (sgx
->num_bytes
== sgx
->tot_bytes
)
909 cumulative_partial
= sgx
->tot_partial
;
911 sg_dma_addr
= sg_dma_address(sg
);
912 sg_len
= sg_dma_len(sg
);
914 sgx
->dma_addr
= sg_dma_addr
+ sgx
->bytes_consumed
;
916 if ((cumulative_partial
+ (sg_len
- sgx
->bytes_consumed
)) >= blk_sz
) {
917 sgx
->dma_len
= (blk_sz
- cumulative_partial
);
918 sgx
->tot_partial
= 0;
919 sgx
->num_bytes
+= blk_sz
;
922 sgx
->dma_len
= sg_len
- sgx
->bytes_consumed
;
923 sgx
->tot_partial
+= sgx
->dma_len
;
927 sgx
->bytes_consumed
+= sgx
->dma_len
;
929 if (sg_len
== sgx
->bytes_consumed
) {
933 sgx
->bytes_consumed
= 0;
940 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data
*ha
, srb_t
*sp
,
941 uint32_t *dsd
, uint16_t tot_dsds
)
944 uint8_t avail_dsds
= 0;
945 uint32_t dsd_list_len
;
946 struct dsd_dma
*dsd_ptr
;
947 struct scatterlist
*sg_prot
;
948 uint32_t *cur_dsd
= dsd
;
949 uint16_t used_dsds
= tot_dsds
;
955 uint32_t sle_dma_len
, tot_prot_dma_len
= 0;
956 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
958 prot_int
= cmd
->device
->sector_size
;
960 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
961 sgx
.tot_bytes
= scsi_bufflen(cmd
);
962 sgx
.cur_sg
= scsi_sglist(cmd
);
965 sg_prot
= scsi_prot_sglist(cmd
);
967 while (qla24xx_get_one_block_sg(prot_int
, &sgx
, &partial
)) {
969 sle_dma
= sgx
.dma_addr
;
970 sle_dma_len
= sgx
.dma_len
;
972 /* Allocate additional continuation packets? */
973 if (avail_dsds
== 0) {
974 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
975 QLA_DSDS_PER_IOCB
: used_dsds
;
976 dsd_list_len
= (avail_dsds
+ 1) * 12;
977 used_dsds
-= avail_dsds
;
979 /* allocate tracking DS */
980 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
984 /* allocate new list */
985 dsd_ptr
->dsd_addr
= next_dsd
=
986 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
987 &dsd_ptr
->dsd_list_dma
);
991 * Need to cleanup only this dsd_ptr, rest
992 * will be done by sp_free_dma()
998 list_add_tail(&dsd_ptr
->list
,
999 &((struct crc_context
*)sp
->u
.scmd
.ctx
)->dsd_list
);
1001 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1003 /* add new list to cmd iocb or last list */
1004 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1005 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1006 *cur_dsd
++ = dsd_list_len
;
1007 cur_dsd
= (uint32_t *)next_dsd
;
1009 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1010 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1011 *cur_dsd
++ = cpu_to_le32(sle_dma_len
);
1015 /* Got a full protection interval */
1016 sle_dma
= sg_dma_address(sg_prot
) + tot_prot_dma_len
;
1019 tot_prot_dma_len
+= sle_dma_len
;
1020 if (tot_prot_dma_len
== sg_dma_len(sg_prot
)) {
1021 tot_prot_dma_len
= 0;
1022 sg_prot
= sg_next(sg_prot
);
1025 partial
= 1; /* So as to not re-enter this block */
1026 goto alloc_and_fill
;
1029 /* Null termination */
1037 qla24xx_walk_and_build_sglist(struct qla_hw_data
*ha
, srb_t
*sp
, uint32_t *dsd
,
1041 uint8_t avail_dsds
= 0;
1042 uint32_t dsd_list_len
;
1043 struct dsd_dma
*dsd_ptr
;
1044 struct scatterlist
*sg
;
1045 uint32_t *cur_dsd
= dsd
;
1047 uint16_t used_dsds
= tot_dsds
;
1048 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1049 scsi_qla_host_t
*vha
= shost_priv(cmd
->device
->host
);
1053 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
1056 /* Allocate additional continuation packets? */
1057 if (avail_dsds
== 0) {
1058 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
1059 QLA_DSDS_PER_IOCB
: used_dsds
;
1060 dsd_list_len
= (avail_dsds
+ 1) * 12;
1061 used_dsds
-= avail_dsds
;
1063 /* allocate tracking DS */
1064 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
1068 /* allocate new list */
1069 dsd_ptr
->dsd_addr
= next_dsd
=
1070 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
1071 &dsd_ptr
->dsd_list_dma
);
1075 * Need to cleanup only this dsd_ptr, rest
1076 * will be done by sp_free_dma()
1082 list_add_tail(&dsd_ptr
->list
,
1083 &((struct crc_context
*)sp
->u
.scmd
.ctx
)->dsd_list
);
1085 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1087 /* add new list to cmd iocb or last list */
1088 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1089 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1090 *cur_dsd
++ = dsd_list_len
;
1091 cur_dsd
= (uint32_t *)next_dsd
;
1093 sle_dma
= sg_dma_address(sg
);
1094 ql_dbg(ql_dbg_io
, vha
, 0x300a,
1095 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
1096 i
, LSD(sle_dma
), MSD(sle_dma
), sg_dma_len(sg
), cmd
);
1097 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1098 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1099 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
1102 if (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_PASS
) {
1103 cp
= page_address(sg_page(sg
)) + sg
->offset
;
1104 ql_dbg(ql_dbg_io
, vha
, 0x300b,
1105 "User data buffer=%p for cmd=%p.\n", cp
, cmd
);
1108 /* Null termination */
1116 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data
*ha
, srb_t
*sp
,
1121 uint8_t avail_dsds
= 0;
1122 uint32_t dsd_list_len
;
1123 struct dsd_dma
*dsd_ptr
;
1124 struct scatterlist
*sg
;
1126 struct scsi_cmnd
*cmd
;
1127 uint32_t *cur_dsd
= dsd
;
1128 uint16_t used_dsds
= tot_dsds
;
1129 scsi_qla_host_t
*vha
= pci_get_drvdata(ha
->pdev
);
1132 cmd
= GET_CMD_SP(sp
);
1133 scsi_for_each_prot_sg(cmd
, sg
, tot_dsds
, i
) {
1136 /* Allocate additional continuation packets? */
1137 if (avail_dsds
== 0) {
1138 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
1139 QLA_DSDS_PER_IOCB
: used_dsds
;
1140 dsd_list_len
= (avail_dsds
+ 1) * 12;
1141 used_dsds
-= avail_dsds
;
1143 /* allocate tracking DS */
1144 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
1148 /* allocate new list */
1149 dsd_ptr
->dsd_addr
= next_dsd
=
1150 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
1151 &dsd_ptr
->dsd_list_dma
);
1155 * Need to cleanup only this dsd_ptr, rest
1156 * will be done by sp_free_dma()
1162 list_add_tail(&dsd_ptr
->list
,
1163 &((struct crc_context
*)sp
->u
.scmd
.ctx
)->dsd_list
);
1165 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1167 /* add new list to cmd iocb or last list */
1168 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1169 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1170 *cur_dsd
++ = dsd_list_len
;
1171 cur_dsd
= (uint32_t *)next_dsd
;
1173 sle_dma
= sg_dma_address(sg
);
1174 if (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_PASS
) {
1175 ql_dbg(ql_dbg_io
, vha
, 0x3027,
1176 "%s(): %p, sg_entry %d - "
1177 "addr=0x%x0x%x, len=%d.\n",
1178 __func__
, cur_dsd
, i
,
1179 LSD(sle_dma
), MSD(sle_dma
), sg_dma_len(sg
));
1181 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1182 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1183 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
1185 if (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_PASS
) {
1186 cp
= page_address(sg_page(sg
)) + sg
->offset
;
1187 ql_dbg(ql_dbg_io
, vha
, 0x3028,
1188 "%s(): Protection Data buffer = %p.\n", __func__
,
1193 /* Null termination */
1201 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1202 * Type 6 IOCB types.
1204 * @sp: SRB command to process
1205 * @cmd_pkt: Command type 3 IOCB
1206 * @tot_dsds: Total number of segments to transfer
1209 qla24xx_build_scsi_crc_2_iocbs(srb_t
*sp
, struct cmd_type_crc_2
*cmd_pkt
,
1210 uint16_t tot_dsds
, uint16_t tot_prot_dsds
, uint16_t fw_prot_opts
)
1212 uint32_t *cur_dsd
, *fcp_dl
;
1213 scsi_qla_host_t
*vha
;
1214 struct scsi_cmnd
*cmd
;
1215 struct scatterlist
*cur_seg
;
1217 uint32_t total_bytes
= 0;
1218 uint32_t data_bytes
;
1220 uint8_t bundling
= 1;
1223 struct crc_context
*crc_ctx_pkt
= NULL
;
1224 struct qla_hw_data
*ha
;
1225 uint8_t additional_fcpcdb_len
;
1226 uint16_t fcp_cmnd_len
;
1227 struct fcp_cmnd
*fcp_cmnd
;
1228 dma_addr_t crc_ctx_dma
;
1231 cmd
= GET_CMD_SP(sp
);
1234 /* Update entry type to indicate Command Type CRC_2 IOCB */
1235 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
1236 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2
);
1238 vha
= sp
->fcport
->vha
;
1241 /* No data transfer */
1242 data_bytes
= scsi_bufflen(cmd
);
1243 if (!data_bytes
|| cmd
->sc_data_direction
== DMA_NONE
) {
1244 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
1248 cmd_pkt
->vp_index
= sp
->fcport
->vp_idx
;
1250 /* Set transfer direction */
1251 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
1252 cmd_pkt
->control_flags
=
1253 __constant_cpu_to_le16(CF_WRITE_DATA
);
1254 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
1255 cmd_pkt
->control_flags
=
1256 __constant_cpu_to_le16(CF_READ_DATA
);
1259 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1260 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
) ||
1261 (scsi_get_prot_op(cmd
) == SCSI_PROT_READ_STRIP
) ||
1262 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_INSERT
))
1265 /* Allocate CRC context from global pool */
1266 crc_ctx_pkt
= sp
->u
.scmd
.ctx
=
1267 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
, &crc_ctx_dma
);
1270 goto crc_queuing_error
;
1272 /* Zero out CTX area. */
1273 clr_ptr
= (uint8_t *)crc_ctx_pkt
;
1274 memset(clr_ptr
, 0, sizeof(*crc_ctx_pkt
));
1276 crc_ctx_pkt
->crc_ctx_dma
= crc_ctx_dma
;
1278 sp
->flags
|= SRB_CRC_CTX_DMA_VALID
;
1281 crc_ctx_pkt
->handle
= cmd_pkt
->handle
;
1283 INIT_LIST_HEAD(&crc_ctx_pkt
->dsd_list
);
1285 qla24xx_set_t10dif_tags(sp
, (struct fw_dif_context
*)
1286 &crc_ctx_pkt
->ref_tag
, tot_prot_dsds
);
1288 cmd_pkt
->crc_context_address
[0] = cpu_to_le32(LSD(crc_ctx_dma
));
1289 cmd_pkt
->crc_context_address
[1] = cpu_to_le32(MSD(crc_ctx_dma
));
1290 cmd_pkt
->crc_context_len
= CRC_CONTEXT_LEN_FW
;
1292 /* Determine SCSI command length -- align to 4 byte boundary */
1293 if (cmd
->cmd_len
> 16) {
1294 additional_fcpcdb_len
= cmd
->cmd_len
- 16;
1295 if ((cmd
->cmd_len
% 4) != 0) {
1296 /* SCSI cmd > 16 bytes must be multiple of 4 */
1297 goto crc_queuing_error
;
1299 fcp_cmnd_len
= 12 + cmd
->cmd_len
+ 4;
1301 additional_fcpcdb_len
= 0;
1302 fcp_cmnd_len
= 12 + 16 + 4;
1305 fcp_cmnd
= &crc_ctx_pkt
->fcp_cmnd
;
1307 fcp_cmnd
->additional_cdb_len
= additional_fcpcdb_len
;
1308 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
)
1309 fcp_cmnd
->additional_cdb_len
|= 1;
1310 else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
1311 fcp_cmnd
->additional_cdb_len
|= 2;
1313 int_to_scsilun(cmd
->device
->lun
, &fcp_cmnd
->lun
);
1314 memcpy(fcp_cmnd
->cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1315 cmd_pkt
->fcp_cmnd_dseg_len
= cpu_to_le16(fcp_cmnd_len
);
1316 cmd_pkt
->fcp_cmnd_dseg_address
[0] = cpu_to_le32(
1317 LSD(crc_ctx_dma
+ CRC_CONTEXT_FCPCMND_OFF
));
1318 cmd_pkt
->fcp_cmnd_dseg_address
[1] = cpu_to_le32(
1319 MSD(crc_ctx_dma
+ CRC_CONTEXT_FCPCMND_OFF
));
1320 fcp_cmnd
->task_management
= 0;
1323 * Update tagged queuing modifier if using command tag queuing
1325 if (scsi_populate_tag_msg(cmd
, tag
)) {
1327 case HEAD_OF_QUEUE_TAG
:
1328 fcp_cmnd
->task_attribute
= TSK_HEAD_OF_QUEUE
;
1330 case ORDERED_QUEUE_TAG
:
1331 fcp_cmnd
->task_attribute
= TSK_ORDERED
;
1334 fcp_cmnd
->task_attribute
= 0;
1338 fcp_cmnd
->task_attribute
= 0;
1341 cmd_pkt
->fcp_rsp_dseg_len
= 0; /* Let response come in status iocb */
1343 /* Compute dif len and adjust data len to incude protection */
1345 blk_size
= cmd
->device
->sector_size
;
1346 dif_bytes
= (data_bytes
/ blk_size
) * 8;
1348 switch (scsi_get_prot_op(GET_CMD_SP(sp
))) {
1349 case SCSI_PROT_READ_INSERT
:
1350 case SCSI_PROT_WRITE_STRIP
:
1351 total_bytes
= data_bytes
;
1352 data_bytes
+= dif_bytes
;
1355 case SCSI_PROT_READ_STRIP
:
1356 case SCSI_PROT_WRITE_INSERT
:
1357 case SCSI_PROT_READ_PASS
:
1358 case SCSI_PROT_WRITE_PASS
:
1359 total_bytes
= data_bytes
+ dif_bytes
;
1365 if (!qla2x00_hba_err_chk_enabled(sp
))
1366 fw_prot_opts
|= 0x10; /* Disable Guard tag checking */
1369 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.nobundling
.data_address
;
1372 * Configure Bundling if we need to fetch interlaving
1373 * protection PCI accesses
1375 fw_prot_opts
|= PO_ENABLE_DIF_BUNDLING
;
1376 crc_ctx_pkt
->u
.bundling
.dif_byte_count
= cpu_to_le32(dif_bytes
);
1377 crc_ctx_pkt
->u
.bundling
.dseg_count
= cpu_to_le16(tot_dsds
-
1379 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.bundling
.data_address
;
1382 /* Finish the common fields of CRC pkt */
1383 crc_ctx_pkt
->blk_size
= cpu_to_le16(blk_size
);
1384 crc_ctx_pkt
->prot_opts
= cpu_to_le16(fw_prot_opts
);
1385 crc_ctx_pkt
->byte_count
= cpu_to_le32(data_bytes
);
1386 crc_ctx_pkt
->guard_seed
= __constant_cpu_to_le16(0);
1387 /* Fibre channel byte count */
1388 cmd_pkt
->byte_count
= cpu_to_le32(total_bytes
);
1389 fcp_dl
= (uint32_t *)(crc_ctx_pkt
->fcp_cmnd
.cdb
+ 16 +
1390 additional_fcpcdb_len
);
1391 *fcp_dl
= htonl(total_bytes
);
1393 if (!data_bytes
|| cmd
->sc_data_direction
== DMA_NONE
) {
1394 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
1397 /* Walks data segments */
1399 cmd_pkt
->control_flags
|=
1400 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE
);
1402 if (!bundling
&& tot_prot_dsds
) {
1403 if (qla24xx_walk_and_build_sglist_no_difb(ha
, sp
,
1405 goto crc_queuing_error
;
1406 } else if (qla24xx_walk_and_build_sglist(ha
, sp
, cur_dsd
,
1407 (tot_dsds
- tot_prot_dsds
)))
1408 goto crc_queuing_error
;
1410 if (bundling
&& tot_prot_dsds
) {
1411 /* Walks dif segments */
1412 cur_seg
= scsi_prot_sglist(cmd
);
1413 cmd_pkt
->control_flags
|=
1414 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE
);
1415 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.bundling
.dif_address
;
1416 if (qla24xx_walk_and_build_prot_sglist(ha
, sp
, cur_dsd
,
1418 goto crc_queuing_error
;
1423 /* Cleanup will be performed by the caller */
1425 return QLA_FUNCTION_FAILED
;
1429 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1430 * @sp: command to send to the ISP
1432 * Returns non-zero if a failure occurred, else zero.
1435 qla24xx_start_scsi(srb_t
*sp
)
1438 unsigned long flags
;
1442 struct cmd_type_7
*cmd_pkt
;
1446 struct req_que
*req
= NULL
;
1447 struct rsp_que
*rsp
= NULL
;
1448 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1449 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1450 struct qla_hw_data
*ha
= vha
->hw
;
1453 /* Setup device pointers. */
1456 qla25xx_set_que(sp
, &rsp
);
1459 /* So we know we haven't pci_map'ed anything yet */
1462 /* Send marker if required */
1463 if (vha
->marker_needed
!= 0) {
1464 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1466 return QLA_FUNCTION_FAILED
;
1467 vha
->marker_needed
= 0;
1470 /* Acquire ring specific lock */
1471 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1473 /* Check for room in outstanding command list. */
1474 handle
= req
->current_outstanding_cmd
;
1475 for (index
= 1; index
< MAX_OUTSTANDING_COMMANDS
; index
++) {
1477 if (handle
== MAX_OUTSTANDING_COMMANDS
)
1479 if (!req
->outstanding_cmds
[handle
])
1482 if (index
== MAX_OUTSTANDING_COMMANDS
) {
1486 /* Map the sg table so we have an accurate count of sg entries needed */
1487 if (scsi_sg_count(cmd
)) {
1488 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1489 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1490 if (unlikely(!nseg
))
1496 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
1497 if (req
->cnt
< (req_cnt
+ 2)) {
1498 cnt
= RD_REG_DWORD_RELAXED(req
->req_q_out
);
1500 if (req
->ring_index
< cnt
)
1501 req
->cnt
= cnt
- req
->ring_index
;
1503 req
->cnt
= req
->length
-
1504 (req
->ring_index
- cnt
);
1506 if (req
->cnt
< (req_cnt
+ 2))
1509 /* Build command packet. */
1510 req
->current_outstanding_cmd
= handle
;
1511 req
->outstanding_cmds
[handle
] = sp
;
1512 sp
->handle
= handle
;
1513 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1514 req
->cnt
-= req_cnt
;
1516 cmd_pkt
= (struct cmd_type_7
*)req
->ring_ptr
;
1517 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1519 /* Zero out remaining portion of packet. */
1520 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1521 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1522 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1523 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1525 /* Set NPORT-ID and LUN number*/
1526 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1527 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1528 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1529 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1530 cmd_pkt
->vp_index
= sp
->fcport
->vp_idx
;
1532 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
1533 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1535 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1536 if (scsi_populate_tag_msg(cmd
, tag
)) {
1538 case HEAD_OF_QUEUE_TAG
:
1539 cmd_pkt
->task
= TSK_HEAD_OF_QUEUE
;
1541 case ORDERED_QUEUE_TAG
:
1542 cmd_pkt
->task
= TSK_ORDERED
;
1547 /* Load SCSI command packet. */
1548 memcpy(cmd_pkt
->fcp_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1549 host_to_fcp_swap(cmd_pkt
->fcp_cdb
, sizeof(cmd_pkt
->fcp_cdb
));
1551 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
1553 /* Build IOCB segments */
1554 qla24xx_build_scsi_iocbs(sp
, cmd_pkt
, tot_dsds
);
1556 /* Set total data segment count. */
1557 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1558 /* Specify response queue number where completion should happen */
1559 cmd_pkt
->entry_status
= (uint8_t) rsp
->id
;
1561 /* Adjust ring index. */
1563 if (req
->ring_index
== req
->length
) {
1564 req
->ring_index
= 0;
1565 req
->ring_ptr
= req
->ring
;
1569 sp
->flags
|= SRB_DMA_VALID
;
1571 /* Set chip new ring index. */
1572 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1573 RD_REG_DWORD_RELAXED(&ha
->iobase
->isp24
.hccr
);
1575 /* Manage unprocessed RIO/ZIO commands in response queue. */
1576 if (vha
->flags
.process_response_queue
&&
1577 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
1578 qla24xx_process_response_queue(vha
, rsp
);
1580 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1585 scsi_dma_unmap(cmd
);
1587 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1589 return QLA_FUNCTION_FAILED
;
1594 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1595 * @sp: command to send to the ISP
1597 * Returns non-zero if a failure occurred, else zero.
1600 qla24xx_dif_start_scsi(srb_t
*sp
)
1603 unsigned long flags
;
1608 uint16_t req_cnt
= 0;
1610 uint16_t tot_prot_dsds
;
1611 uint16_t fw_prot_opts
= 0;
1612 struct req_que
*req
= NULL
;
1613 struct rsp_que
*rsp
= NULL
;
1614 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1615 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1616 struct qla_hw_data
*ha
= vha
->hw
;
1617 struct cmd_type_crc_2
*cmd_pkt
;
1618 uint32_t status
= 0;
1620 #define QDSS_GOT_Q_SPACE BIT_0
1622 /* Only process protection or >16 cdb in this routine */
1623 if (scsi_get_prot_op(cmd
) == SCSI_PROT_NORMAL
) {
1624 if (cmd
->cmd_len
<= 16)
1625 return qla24xx_start_scsi(sp
);
1628 /* Setup device pointers. */
1630 qla25xx_set_que(sp
, &rsp
);
1633 /* So we know we haven't pci_map'ed anything yet */
1636 /* Send marker if required */
1637 if (vha
->marker_needed
!= 0) {
1638 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1640 return QLA_FUNCTION_FAILED
;
1641 vha
->marker_needed
= 0;
1644 /* Acquire ring specific lock */
1645 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1647 /* Check for room in outstanding command list. */
1648 handle
= req
->current_outstanding_cmd
;
1649 for (index
= 1; index
< MAX_OUTSTANDING_COMMANDS
; index
++) {
1651 if (handle
== MAX_OUTSTANDING_COMMANDS
)
1653 if (!req
->outstanding_cmds
[handle
])
1657 if (index
== MAX_OUTSTANDING_COMMANDS
)
1660 /* Compute number of required data segments */
1661 /* Map the sg table so we have an accurate count of sg entries needed */
1662 if (scsi_sg_count(cmd
)) {
1663 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1664 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1665 if (unlikely(!nseg
))
1668 sp
->flags
|= SRB_DMA_VALID
;
1670 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1671 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1672 struct qla2_sgx sgx
;
1675 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
1676 sgx
.tot_bytes
= scsi_bufflen(cmd
);
1677 sgx
.cur_sg
= scsi_sglist(cmd
);
1681 while (qla24xx_get_one_block_sg(
1682 cmd
->device
->sector_size
, &sgx
, &partial
))
1688 /* number of required data segments */
1691 /* Compute number of required protection segments */
1692 if (qla24xx_configure_prot_mode(sp
, &fw_prot_opts
)) {
1693 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_prot_sglist(cmd
),
1694 scsi_prot_sg_count(cmd
), cmd
->sc_data_direction
);
1695 if (unlikely(!nseg
))
1698 sp
->flags
|= SRB_CRC_PROT_DMA_VALID
;
1700 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1701 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1702 nseg
= scsi_bufflen(cmd
) / cmd
->device
->sector_size
;
1709 /* Total Data and protection sg segment(s) */
1710 tot_prot_dsds
= nseg
;
1712 if (req
->cnt
< (req_cnt
+ 2)) {
1713 cnt
= RD_REG_DWORD_RELAXED(req
->req_q_out
);
1715 if (req
->ring_index
< cnt
)
1716 req
->cnt
= cnt
- req
->ring_index
;
1718 req
->cnt
= req
->length
-
1719 (req
->ring_index
- cnt
);
1722 if (req
->cnt
< (req_cnt
+ 2))
1725 status
|= QDSS_GOT_Q_SPACE
;
1727 /* Build header part of command packet (excluding the OPCODE). */
1728 req
->current_outstanding_cmd
= handle
;
1729 req
->outstanding_cmds
[handle
] = sp
;
1730 sp
->handle
= handle
;
1731 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1732 req
->cnt
-= req_cnt
;
1734 /* Fill-in common area */
1735 cmd_pkt
= (struct cmd_type_crc_2
*)req
->ring_ptr
;
1736 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1738 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1739 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1741 /* Set NPORT-ID and LUN number*/
1742 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1743 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1744 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1745 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1747 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
1748 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1750 /* Total Data and protection segment(s) */
1751 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1753 /* Build IOCB segments and adjust for data protection segments */
1754 if (qla24xx_build_scsi_crc_2_iocbs(sp
, (struct cmd_type_crc_2
*)
1755 req
->ring_ptr
, tot_dsds
, tot_prot_dsds
, fw_prot_opts
) !=
1759 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1760 /* Specify response queue number where completion should happen */
1761 cmd_pkt
->entry_status
= (uint8_t) rsp
->id
;
1762 cmd_pkt
->timeout
= __constant_cpu_to_le16(0);
1765 /* Adjust ring index. */
1767 if (req
->ring_index
== req
->length
) {
1768 req
->ring_index
= 0;
1769 req
->ring_ptr
= req
->ring
;
1773 /* Set chip new ring index. */
1774 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1775 RD_REG_DWORD_RELAXED(&ha
->iobase
->isp24
.hccr
);
1777 /* Manage unprocessed RIO/ZIO commands in response queue. */
1778 if (vha
->flags
.process_response_queue
&&
1779 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
1780 qla24xx_process_response_queue(vha
, rsp
);
1782 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1787 if (status
& QDSS_GOT_Q_SPACE
) {
1788 req
->outstanding_cmds
[handle
] = NULL
;
1789 req
->cnt
+= req_cnt
;
1791 /* Cleanup will be performed by the caller (queuecommand) */
1793 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1794 return QLA_FUNCTION_FAILED
;
1798 static void qla25xx_set_que(srb_t
*sp
, struct rsp_que
**rsp
)
1800 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1801 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
1802 int affinity
= cmd
->request
->cpu
;
1804 if (ha
->flags
.cpu_affinity_enabled
&& affinity
>= 0 &&
1805 affinity
< ha
->max_rsp_queues
- 1)
1806 *rsp
= ha
->rsp_q_map
[affinity
+ 1];
1808 *rsp
= ha
->rsp_q_map
[0];
1811 /* Generic Control-SRB manipulation functions. */
1813 qla2x00_alloc_iocbs(scsi_qla_host_t
*vha
, srb_t
*sp
)
1815 struct qla_hw_data
*ha
= vha
->hw
;
1816 struct req_que
*req
= ha
->req_q_map
[0];
1817 device_reg_t __iomem
*reg
= ISP_QUE_REG(ha
, req
->id
);
1818 uint32_t index
, handle
;
1820 uint16_t cnt
, req_cnt
;
1827 goto skip_cmd_array
;
1829 /* Check for room in outstanding command list. */
1830 handle
= req
->current_outstanding_cmd
;
1831 for (index
= 1; index
< MAX_OUTSTANDING_COMMANDS
; index
++) {
1833 if (handle
== MAX_OUTSTANDING_COMMANDS
)
1835 if (!req
->outstanding_cmds
[handle
])
1838 if (index
== MAX_OUTSTANDING_COMMANDS
) {
1839 ql_log(ql_log_warn
, vha
, 0x700b,
1840 "No room on oustanding cmd array.\n");
1844 /* Prep command array. */
1845 req
->current_outstanding_cmd
= handle
;
1846 req
->outstanding_cmds
[handle
] = sp
;
1847 sp
->handle
= handle
;
1849 /* Adjust entry-counts as needed. */
1850 if (sp
->type
!= SRB_SCSI_CMD
)
1851 req_cnt
= sp
->iocbs
;
1854 /* Check for room on request queue. */
1855 if (req
->cnt
< req_cnt
) {
1856 if (ha
->mqenable
|| IS_QLA83XX(ha
))
1857 cnt
= RD_REG_DWORD(®
->isp25mq
.req_q_out
);
1858 else if (IS_QLA82XX(ha
))
1859 cnt
= RD_REG_DWORD(®
->isp82
.req_q_out
);
1860 else if (IS_FWI2_CAPABLE(ha
))
1861 cnt
= RD_REG_DWORD(®
->isp24
.req_q_out
);
1863 cnt
= qla2x00_debounce_register(
1864 ISP_REQ_Q_OUT(ha
, ®
->isp
));
1866 if (req
->ring_index
< cnt
)
1867 req
->cnt
= cnt
- req
->ring_index
;
1869 req
->cnt
= req
->length
-
1870 (req
->ring_index
- cnt
);
1872 if (req
->cnt
< req_cnt
)
1876 req
->cnt
-= req_cnt
;
1877 pkt
= req
->ring_ptr
;
1878 memset(pkt
, 0, REQUEST_ENTRY_SIZE
);
1879 pkt
->entry_count
= req_cnt
;
1880 pkt
->handle
= handle
;
1887 qla24xx_login_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
1889 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
1891 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
1892 logio
->control_flags
= cpu_to_le16(LCF_COMMAND_PLOGI
);
1893 if (lio
->u
.logio
.flags
& SRB_LOGIN_COND_PLOGI
)
1894 logio
->control_flags
|= cpu_to_le16(LCF_COND_PLOGI
);
1895 if (lio
->u
.logio
.flags
& SRB_LOGIN_SKIP_PRLI
)
1896 logio
->control_flags
|= cpu_to_le16(LCF_SKIP_PRLI
);
1897 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1898 logio
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1899 logio
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1900 logio
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1901 logio
->vp_index
= sp
->fcport
->vp_idx
;
1905 qla2x00_login_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
1907 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
1908 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
1911 mbx
->entry_type
= MBX_IOCB_TYPE
;
1912 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
1913 mbx
->mb0
= cpu_to_le16(MBC_LOGIN_FABRIC_PORT
);
1914 opts
= lio
->u
.logio
.flags
& SRB_LOGIN_COND_PLOGI
? BIT_0
: 0;
1915 opts
|= lio
->u
.logio
.flags
& SRB_LOGIN_SKIP_PRLI
? BIT_1
: 0;
1916 if (HAS_EXTENDED_IDS(ha
)) {
1917 mbx
->mb1
= cpu_to_le16(sp
->fcport
->loop_id
);
1918 mbx
->mb10
= cpu_to_le16(opts
);
1920 mbx
->mb1
= cpu_to_le16((sp
->fcport
->loop_id
<< 8) | opts
);
1922 mbx
->mb2
= cpu_to_le16(sp
->fcport
->d_id
.b
.domain
);
1923 mbx
->mb3
= cpu_to_le16(sp
->fcport
->d_id
.b
.area
<< 8 |
1924 sp
->fcport
->d_id
.b
.al_pa
);
1925 mbx
->mb9
= cpu_to_le16(sp
->fcport
->vp_idx
);
1929 qla24xx_logout_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
1931 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
1932 logio
->control_flags
=
1933 cpu_to_le16(LCF_COMMAND_LOGO
|LCF_IMPL_LOGO
);
1934 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1935 logio
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1936 logio
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1937 logio
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1938 logio
->vp_index
= sp
->fcport
->vp_idx
;
1942 qla2x00_logout_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
1944 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
1946 mbx
->entry_type
= MBX_IOCB_TYPE
;
1947 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
1948 mbx
->mb0
= cpu_to_le16(MBC_LOGOUT_FABRIC_PORT
);
1949 mbx
->mb1
= HAS_EXTENDED_IDS(ha
) ?
1950 cpu_to_le16(sp
->fcport
->loop_id
):
1951 cpu_to_le16(sp
->fcport
->loop_id
<< 8);
1952 mbx
->mb2
= cpu_to_le16(sp
->fcport
->d_id
.b
.domain
);
1953 mbx
->mb3
= cpu_to_le16(sp
->fcport
->d_id
.b
.area
<< 8 |
1954 sp
->fcport
->d_id
.b
.al_pa
);
1955 mbx
->mb9
= cpu_to_le16(sp
->fcport
->vp_idx
);
1956 /* Implicit: mbx->mbx10 = 0. */
1960 qla24xx_adisc_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
1962 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
1963 logio
->control_flags
= cpu_to_le16(LCF_COMMAND_ADISC
);
1964 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1965 logio
->vp_index
= sp
->fcport
->vp_idx
;
1969 qla2x00_adisc_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
1971 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
1973 mbx
->entry_type
= MBX_IOCB_TYPE
;
1974 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
1975 mbx
->mb0
= cpu_to_le16(MBC_GET_PORT_DATABASE
);
1976 if (HAS_EXTENDED_IDS(ha
)) {
1977 mbx
->mb1
= cpu_to_le16(sp
->fcport
->loop_id
);
1978 mbx
->mb10
= cpu_to_le16(BIT_0
);
1980 mbx
->mb1
= cpu_to_le16((sp
->fcport
->loop_id
<< 8) | BIT_0
);
1982 mbx
->mb2
= cpu_to_le16(MSW(ha
->async_pd_dma
));
1983 mbx
->mb3
= cpu_to_le16(LSW(ha
->async_pd_dma
));
1984 mbx
->mb6
= cpu_to_le16(MSW(MSD(ha
->async_pd_dma
)));
1985 mbx
->mb7
= cpu_to_le16(LSW(MSD(ha
->async_pd_dma
)));
1986 mbx
->mb9
= cpu_to_le16(sp
->fcport
->vp_idx
);
1990 qla24xx_tm_iocb(srb_t
*sp
, struct tsk_mgmt_entry
*tsk
)
1994 struct fc_port
*fcport
= sp
->fcport
;
1995 scsi_qla_host_t
*vha
= fcport
->vha
;
1996 struct qla_hw_data
*ha
= vha
->hw
;
1997 struct srb_iocb
*iocb
= &sp
->u
.iocb_cmd
;
1998 struct req_que
*req
= vha
->req
;
2000 flags
= iocb
->u
.tmf
.flags
;
2001 lun
= iocb
->u
.tmf
.lun
;
2003 tsk
->entry_type
= TSK_MGMT_IOCB_TYPE
;
2004 tsk
->entry_count
= 1;
2005 tsk
->handle
= MAKE_HANDLE(req
->id
, tsk
->handle
);
2006 tsk
->nport_handle
= cpu_to_le16(fcport
->loop_id
);
2007 tsk
->timeout
= cpu_to_le16(ha
->r_a_tov
/ 10 * 2);
2008 tsk
->control_flags
= cpu_to_le32(flags
);
2009 tsk
->port_id
[0] = fcport
->d_id
.b
.al_pa
;
2010 tsk
->port_id
[1] = fcport
->d_id
.b
.area
;
2011 tsk
->port_id
[2] = fcport
->d_id
.b
.domain
;
2012 tsk
->vp_index
= fcport
->vp_idx
;
2014 if (flags
== TCF_LUN_RESET
) {
2015 int_to_scsilun(lun
, &tsk
->lun
);
2016 host_to_fcp_swap((uint8_t *)&tsk
->lun
,
2022 qla24xx_els_iocb(srb_t
*sp
, struct els_entry_24xx
*els_iocb
)
2024 struct fc_bsg_job
*bsg_job
= sp
->u
.bsg_job
;
2026 els_iocb
->entry_type
= ELS_IOCB_TYPE
;
2027 els_iocb
->entry_count
= 1;
2028 els_iocb
->sys_define
= 0;
2029 els_iocb
->entry_status
= 0;
2030 els_iocb
->handle
= sp
->handle
;
2031 els_iocb
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2032 els_iocb
->tx_dsd_count
= __constant_cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2033 els_iocb
->vp_index
= sp
->fcport
->vp_idx
;
2034 els_iocb
->sof_type
= EST_SOFI3
;
2035 els_iocb
->rx_dsd_count
= __constant_cpu_to_le16(bsg_job
->reply_payload
.sg_cnt
);
2038 sp
->type
== SRB_ELS_CMD_RPT
?
2039 bsg_job
->request
->rqst_data
.r_els
.els_code
:
2040 bsg_job
->request
->rqst_data
.h_els
.command_code
;
2041 els_iocb
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2042 els_iocb
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2043 els_iocb
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2044 els_iocb
->control_flags
= 0;
2045 els_iocb
->rx_byte_count
=
2046 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2047 els_iocb
->tx_byte_count
=
2048 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2050 els_iocb
->tx_address
[0] = cpu_to_le32(LSD(sg_dma_address
2051 (bsg_job
->request_payload
.sg_list
)));
2052 els_iocb
->tx_address
[1] = cpu_to_le32(MSD(sg_dma_address
2053 (bsg_job
->request_payload
.sg_list
)));
2054 els_iocb
->tx_len
= cpu_to_le32(sg_dma_len
2055 (bsg_job
->request_payload
.sg_list
));
2057 els_iocb
->rx_address
[0] = cpu_to_le32(LSD(sg_dma_address
2058 (bsg_job
->reply_payload
.sg_list
)));
2059 els_iocb
->rx_address
[1] = cpu_to_le32(MSD(sg_dma_address
2060 (bsg_job
->reply_payload
.sg_list
)));
2061 els_iocb
->rx_len
= cpu_to_le32(sg_dma_len
2062 (bsg_job
->reply_payload
.sg_list
));
2066 qla2x00_ct_iocb(srb_t
*sp
, ms_iocb_entry_t
*ct_iocb
)
2068 uint16_t avail_dsds
;
2070 struct scatterlist
*sg
;
2073 scsi_qla_host_t
*vha
= sp
->fcport
->vha
;
2074 struct qla_hw_data
*ha
= vha
->hw
;
2075 struct fc_bsg_job
*bsg_job
= sp
->u
.bsg_job
;
2076 int loop_iterartion
= 0;
2077 int cont_iocb_prsnt
= 0;
2078 int entry_count
= 1;
2080 memset(ct_iocb
, 0, sizeof(ms_iocb_entry_t
));
2081 ct_iocb
->entry_type
= CT_IOCB_TYPE
;
2082 ct_iocb
->entry_status
= 0;
2083 ct_iocb
->handle1
= sp
->handle
;
2084 SET_TARGET_ID(ha
, ct_iocb
->loop_id
, sp
->fcport
->loop_id
);
2085 ct_iocb
->status
= __constant_cpu_to_le16(0);
2086 ct_iocb
->control_flags
= __constant_cpu_to_le16(0);
2087 ct_iocb
->timeout
= 0;
2088 ct_iocb
->cmd_dsd_count
=
2089 __constant_cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2090 ct_iocb
->total_dsd_count
=
2091 __constant_cpu_to_le16(bsg_job
->request_payload
.sg_cnt
+ 1);
2092 ct_iocb
->req_bytecount
=
2093 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2094 ct_iocb
->rsp_bytecount
=
2095 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2097 ct_iocb
->dseg_req_address
[0] = cpu_to_le32(LSD(sg_dma_address
2098 (bsg_job
->request_payload
.sg_list
)));
2099 ct_iocb
->dseg_req_address
[1] = cpu_to_le32(MSD(sg_dma_address
2100 (bsg_job
->request_payload
.sg_list
)));
2101 ct_iocb
->dseg_req_length
= ct_iocb
->req_bytecount
;
2103 ct_iocb
->dseg_rsp_address
[0] = cpu_to_le32(LSD(sg_dma_address
2104 (bsg_job
->reply_payload
.sg_list
)));
2105 ct_iocb
->dseg_rsp_address
[1] = cpu_to_le32(MSD(sg_dma_address
2106 (bsg_job
->reply_payload
.sg_list
)));
2107 ct_iocb
->dseg_rsp_length
= ct_iocb
->rsp_bytecount
;
2110 cur_dsd
= (uint32_t *)ct_iocb
->dseg_rsp_address
;
2112 tot_dsds
= bsg_job
->reply_payload
.sg_cnt
;
2114 for_each_sg(bsg_job
->reply_payload
.sg_list
, sg
, tot_dsds
, index
) {
2116 cont_a64_entry_t
*cont_pkt
;
2118 /* Allocate additional continuation packets? */
2119 if (avail_dsds
== 0) {
2121 * Five DSDs are available in the Cont.
2124 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
,
2125 vha
->hw
->req_q_map
[0]);
2126 cur_dsd
= (uint32_t *) cont_pkt
->dseg_0_address
;
2128 cont_iocb_prsnt
= 1;
2132 sle_dma
= sg_dma_address(sg
);
2133 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
2134 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
2135 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
2139 ct_iocb
->entry_count
= entry_count
;
2143 qla24xx_ct_iocb(srb_t
*sp
, struct ct_entry_24xx
*ct_iocb
)
2145 uint16_t avail_dsds
;
2147 struct scatterlist
*sg
;
2150 scsi_qla_host_t
*vha
= sp
->fcport
->vha
;
2151 struct qla_hw_data
*ha
= vha
->hw
;
2152 struct fc_bsg_job
*bsg_job
= sp
->u
.bsg_job
;
2153 int loop_iterartion
= 0;
2154 int cont_iocb_prsnt
= 0;
2155 int entry_count
= 1;
2157 ct_iocb
->entry_type
= CT_IOCB_TYPE
;
2158 ct_iocb
->entry_status
= 0;
2159 ct_iocb
->sys_define
= 0;
2160 ct_iocb
->handle
= sp
->handle
;
2162 ct_iocb
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2163 ct_iocb
->vp_index
= sp
->fcport
->vp_idx
;
2164 ct_iocb
->comp_status
= __constant_cpu_to_le16(0);
2166 ct_iocb
->cmd_dsd_count
=
2167 __constant_cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2168 ct_iocb
->timeout
= 0;
2169 ct_iocb
->rsp_dsd_count
=
2170 __constant_cpu_to_le16(bsg_job
->reply_payload
.sg_cnt
);
2171 ct_iocb
->rsp_byte_count
=
2172 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2173 ct_iocb
->cmd_byte_count
=
2174 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2175 ct_iocb
->dseg_0_address
[0] = cpu_to_le32(LSD(sg_dma_address
2176 (bsg_job
->request_payload
.sg_list
)));
2177 ct_iocb
->dseg_0_address
[1] = cpu_to_le32(MSD(sg_dma_address
2178 (bsg_job
->request_payload
.sg_list
)));
2179 ct_iocb
->dseg_0_len
= cpu_to_le32(sg_dma_len
2180 (bsg_job
->request_payload
.sg_list
));
2183 cur_dsd
= (uint32_t *)ct_iocb
->dseg_1_address
;
2185 tot_dsds
= bsg_job
->reply_payload
.sg_cnt
;
2187 for_each_sg(bsg_job
->reply_payload
.sg_list
, sg
, tot_dsds
, index
) {
2189 cont_a64_entry_t
*cont_pkt
;
2191 /* Allocate additional continuation packets? */
2192 if (avail_dsds
== 0) {
2194 * Five DSDs are available in the Cont.
2197 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
,
2199 cur_dsd
= (uint32_t *) cont_pkt
->dseg_0_address
;
2201 cont_iocb_prsnt
= 1;
2205 sle_dma
= sg_dma_address(sg
);
2206 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
2207 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
2208 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
2212 ct_iocb
->entry_count
= entry_count
;
/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla82xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t	dbval;
	uint32_t	*fcp_dl;
	uint8_t		additional_cdb_len;
	struct ct6_dsd	*ctx;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que	*req = NULL;
	struct rsp_que	*rsp = NULL;
	char		tag[2];

	/* Setup device pointers. */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}

		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error_fcp_cmnd;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/*
				 * A SCSI command bigger than 16 bytes must be
				 * a multiple of 4.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* Build FCP_CMND IU. */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
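
/*
 * Editorial sketch, not part of the driver: the Command Type 6 path above
 * sizes the FCP_CMND IU as 12 bytes of fixed header, a CDB of at least 16
 * bytes (extended CDBs must be a multiple of 4), and a trailing 4-byte
 * FCP_DL field holding the transfer length.  Assuming that layout, the IU
 * length follows from the CDB length alone (helper name hypothetical).
 */
static inline uint16_t __maybe_unused
qla_fcp_cmnd_len_sketch(unsigned int cdb_len)
{
	unsigned int cdb_bytes = (cdb_len > 16) ? cdb_len : 16;

	return 12 + cdb_bytes + 4;	/* header + (extended) CDB + FCP_DL */
}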
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		qla24xx_tm_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);