/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
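
/*
 * Worked example (illustrative, not part of the original source): with
 * dsds = 10, the first three descriptors fit in the Command Type 2 IOCB
 * itself and the remaining seven fill exactly one Continuation Type 0
 * IOCB (7 DSDs each), so the function returns 2 IOCB entries.
 */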
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
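
/*
 * Illustrative example: with 12 DSDs, two fit in the Command Type 3 IOCB
 * and the remaining ten need two Continuation Type 1 IOCBs (5 DSDs each),
 * so qla2x00_calc_iocbs_64() returns 3.
 */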
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t	guard = scsi_host_get_guard(cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
		return 0;
	}

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
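
/*
 * Usage note (illustrative): for a SCSI_PROT_WRITE_PASS command the HBA
 * passes the host-supplied DIF through unchanged, so *fw_prot_opts gets
 * PO_MODE_DIF_PASS and the returned scsi_prot_sg_count() tells the caller
 * how many protection scatter-gather entries still have to be mapped.
 */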
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	cmd_entry_t *cmd_pkt;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
	}
	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
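
/*
 * Ring-space arithmetic above (illustrative numbers): with a 128-entry
 * request ring, ring_index == 120 and the chip's out pointer cnt == 8,
 * the free count is req->length - (ring_index - cnt) = 128 - 112 = 16
 * entries; when ring_index < cnt the simpler cnt - ring_index form applies.
 */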
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
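
/*
 * Illustrative example: 11 DSDs leave one in the Command Type 3 IOCB and
 * ten for Continuation Type 1 IOCBs at five apiece, so 1 + 10/5 = 3 IOCB
 * entries are reported.
 */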
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of dsd list needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
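
/*
 * Illustrative example: with QLA_DSDS_PER_IOCB descriptors per list,
 * 2 * QLA_DSDS_PER_IOCB + 1 total descriptors round up to three DSD
 * lists (two full lists plus one for the remainder).
 */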
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
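
/*
 * Mask semantics (as used below): a 0xff in every ref_tag_mask byte tells
 * the firmware to check/replace all four bytes of the 32-bit reference
 * tag, while zeroed app_tag_mask bytes leave the application tag alone.
 */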
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	ql_dbg(ql_dbg_io, vha, 0x3009,
	    "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
	    "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
	    pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
	    scsi_get_prot_type(cmd), cmd);
}
struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT */
	uint32_t		dma_len;	/* OUT */

	uint32_t		tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t		bytes_consumed;
	uint32_t		num_bytes;
	uint32_t		tot_partial;
};
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
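
/*
 * Illustrative walk-through: with 512-byte protection intervals, a
 * 1280-byte scatter-gather element is returned as one 512-byte chunk, a
 * second 512-byte chunk, and a 256-byte partial that is carried in
 * tot_partial and completed from the next SG element on a later call.
 */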
static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);

	sg_prot = scsi_prot_sglist(cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);
			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);
			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x300b,
			    "User data buffer=%p for cmd=%p.\n", cp, cmd);
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	cmd = GET_CMD_SP(sp);
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);
			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			ql_dbg(ql_dbg_io, vha, 0x3027,
			    "%s(): %p, sg_entry %d - "
			    "addr=0x%x0x%x, len=%d.\n",
			    __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x3028,
			    "%s(): Protection Data buffer = %p.\n", __func__,
			    cp);
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint8_t bundling = 1;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = 0;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
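
/*
 * DIF sizing example (illustrative): each 512-byte sector carries an
 * 8-byte DIF tuple, so a 4096-byte transfer adds dif_bytes =
 * (4096 / 512) * 8 = 64 bytes; for the READ_STRIP/WRITE_INSERT and *_PASS
 * cases above, total_bytes becomes data_bytes + dif_bytes = 4160.
 */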
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	unsigned long flags;
	struct cmd_type_7 *cmd_pkt;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);

	/* So we know we haven't pci_map'ed anything yet */

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS) {
		goto queuing_error;
	}

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	}
	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	unsigned long flags;
	uint16_t req_cnt = 0;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);

	/* So we know we haven't pci_map'ed anything yet */

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;

		sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);

			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	}

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;

		sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	}

	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;

	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
		affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
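
/*
 * Illustrative mapping: with CPU affinity enabled and the command issued
 * on CPU 2 of an HBA exposing four response queues, the reply is steered
 * to rsp_q_map[3]; anything outside the valid range falls back to the
 * default queue rsp_q_map[0].
 */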
/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	uint16_t cnt, req_cnt;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable || IS_QLA83XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_QLA82XX(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < req_cnt)
			goto queuing_error;
	}

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;

queuing_error:
	return pkt;
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id):
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	uint16_t lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t	dbval;
	uint32_t	*fcp_dl;
	uint8_t		additional_cdb_len;
	struct ct6_dsd	*ctx;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que	*req = NULL;
	struct rsp_que	*rsp = NULL;
	char		tag[2];

	/* Setup device pointers. */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * a multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* Build FCP_CMND IU. */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		qla24xx_tm_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);