/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
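/*
 * For example, a request with dsds = 24 places the first three data
 * segment descriptors in the Command Type 2 IOCB and the remaining 21
 * in (24 - 3) / 7 = 3 Continuation Type 0 IOCBs, for 4 IOCB entries in
 * total; a non-zero remainder would add one more continuation entry.
 */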
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
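/*
 * For example, dsds = 12 needs 1 + (12 - 2) / 5 = 3 IOCB entries: two
 * descriptors ride in the Command Type 3 IOCB and each Continuation
 * Type 1 IOCB carries five more.
 */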
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
		return 0;
	}

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(sp->cmd);
}
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
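/*
 * Layout example for the 32-bit builder above: a ten-segment transfer
 * puts segments 0-2 in the Command Type 2 IOCB itself and segments 3-9
 * in a single Continuation Type 0 IOCB (seven DSDs), so only one
 * continuation entry is consumed.
 */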
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
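/*
 * Layout example for the 64-bit builder above: a twelve-segment
 * transfer uses the two DSDs in the Command Type 3 IOCB plus two
 * Continuation Type 1 IOCBs of five DSDs each; every DSD here is three
 * words (address low, address high, length) rather than two.
 */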
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char tag[2];

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
static void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable) {
			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
			RD_REG_DWORD(&ioreg->hccr);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
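/*
 * Usage note: the submission paths in this file (see qla2x00_start_scsi()
 * above) call qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) whenever
 * vha->marker_needed is set, and clear marker_needed only when the marker
 * was queued successfully; __qla2x00_marker() is the variant used when the
 * hardware lock is already held.
 */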
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
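/*
 * For example, dsds = 11 needs 1 + (11 - 1) / 5 = 3 IOCB entries: one
 * descriptor in the command IOCB itself and five in each of the two
 * Continuation Type 1 IOCBs.
 */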
596 qla24xx_build_scsi_type_6_iocbs(srb_t
*sp
, struct cmd_type_6
*cmd_pkt
,
599 uint32_t *cur_dsd
= NULL
;
600 scsi_qla_host_t
*vha
;
601 struct qla_hw_data
*ha
;
602 struct scsi_cmnd
*cmd
;
603 struct scatterlist
*cur_seg
;
607 uint8_t first_iocb
= 1;
608 uint32_t dsd_list_len
;
609 struct dsd_dma
*dsd_ptr
;
614 /* Update entry type to indicate Command Type 3 IOCB */
615 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
616 __constant_cpu_to_le32(COMMAND_TYPE_6
);
618 /* No data transfer */
619 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
620 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
624 vha
= sp
->fcport
->vha
;
627 /* Set transfer direction */
628 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
629 cmd_pkt
->control_flags
=
630 __constant_cpu_to_le16(CF_WRITE_DATA
);
631 ha
->qla_stats
.output_bytes
+= scsi_bufflen(cmd
);
632 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
633 cmd_pkt
->control_flags
=
634 __constant_cpu_to_le16(CF_READ_DATA
);
635 ha
->qla_stats
.input_bytes
+= scsi_bufflen(cmd
);
638 cur_seg
= scsi_sglist(cmd
);
642 avail_dsds
= (tot_dsds
> QLA_DSDS_PER_IOCB
) ?
643 QLA_DSDS_PER_IOCB
: tot_dsds
;
644 tot_dsds
-= avail_dsds
;
645 dsd_list_len
= (avail_dsds
+ 1) * QLA_DSD_SIZE
;
647 dsd_ptr
= list_first_entry(&ha
->gbl_dsd_list
,
648 struct dsd_dma
, list
);
649 next_dsd
= dsd_ptr
->dsd_addr
;
650 list_del(&dsd_ptr
->list
);
652 list_add_tail(&dsd_ptr
->list
, &ctx
->dsd_list
);
658 dsd_seg
= (uint32_t *)&cmd_pkt
->fcp_data_dseg_address
;
659 *dsd_seg
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
660 *dsd_seg
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
661 cmd_pkt
->fcp_data_dseg_len
= cpu_to_le32(dsd_list_len
);
663 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
664 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
665 *cur_dsd
++ = cpu_to_le32(dsd_list_len
);
667 cur_dsd
= (uint32_t *)next_dsd
;
671 sle_dma
= sg_dma_address(cur_seg
);
672 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
673 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
674 *cur_dsd
++ = cpu_to_le32(sg_dma_len(cur_seg
));
675 cur_seg
= sg_next(cur_seg
);
680 /* Null termination */
684 cmd_pkt
->control_flags
|= CF_DATA_SEG_DESCR_ENABLE
;
/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of dsd list needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
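/*
 * For example, with dsds equal to twice QLA_DSDS_PER_IOCB plus one, the
 * division yields two full DSD lists and the remainder check adds a
 * third, partially filled list.
 */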
709 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
712 * @sp: SRB command to process
713 * @cmd_pkt: Command type 3 IOCB
714 * @tot_dsds: Total number of segments to transfer
717 qla24xx_build_scsi_iocbs(srb_t
*sp
, struct cmd_type_7
*cmd_pkt
,
722 scsi_qla_host_t
*vha
;
723 struct scsi_cmnd
*cmd
;
724 struct scatterlist
*sg
;
730 /* Update entry type to indicate Command Type 3 IOCB */
731 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
732 __constant_cpu_to_le32(COMMAND_TYPE_7
);
734 /* No data transfer */
735 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
736 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
740 vha
= sp
->fcport
->vha
;
743 /* Set transfer direction */
744 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
745 cmd_pkt
->task_mgmt_flags
=
746 __constant_cpu_to_le16(TMF_WRITE_DATA
);
747 sp
->fcport
->vha
->hw
->qla_stats
.output_bytes
+=
748 scsi_bufflen(sp
->cmd
);
749 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
750 cmd_pkt
->task_mgmt_flags
=
751 __constant_cpu_to_le16(TMF_READ_DATA
);
752 sp
->fcport
->vha
->hw
->qla_stats
.input_bytes
+=
753 scsi_bufflen(sp
->cmd
);
756 /* One DSD is available in the Command Type 3 IOCB */
758 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
760 /* Load data segments */
762 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
764 cont_a64_entry_t
*cont_pkt
;
766 /* Allocate additional continuation packets? */
767 if (avail_dsds
== 0) {
769 * Five DSDs are available in the Continuation
772 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, vha
->req
);
773 cur_dsd
= (uint32_t *)cont_pkt
->dseg_0_address
;
777 sle_dma
= sg_dma_address(sg
);
778 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
779 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
780 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
785 struct fw_dif_context
{
788 uint8_t ref_tag_mask
[4]; /* Validation/Replacement Mask*/
789 uint8_t app_tag_mask
[2]; /* Validation/Replacement Mask*/
793 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
797 qla24xx_set_t10dif_tags(srb_t
*sp
, struct fw_dif_context
*pkt
,
798 unsigned int protcnt
)
800 struct scsi_cmnd
*cmd
= sp
->cmd
;
801 scsi_qla_host_t
*vha
= shost_priv(cmd
->device
->host
);
803 switch (scsi_get_prot_type(cmd
)) {
804 case SCSI_PROT_DIF_TYPE0
:
806 * No check for ql2xenablehba_err_chk, as it would be an
807 * I/O error if hba tag generation is not done.
809 pkt
->ref_tag
= cpu_to_le32((uint32_t)
810 (0xffffffff & scsi_get_lba(cmd
)));
812 if (!qla2x00_hba_err_chk_enabled(sp
))
815 pkt
->ref_tag_mask
[0] = 0xff;
816 pkt
->ref_tag_mask
[1] = 0xff;
817 pkt
->ref_tag_mask
[2] = 0xff;
818 pkt
->ref_tag_mask
[3] = 0xff;
822 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
823 * match LBA in CDB + N
825 case SCSI_PROT_DIF_TYPE2
:
826 pkt
->app_tag
= __constant_cpu_to_le16(0);
827 pkt
->app_tag_mask
[0] = 0x0;
828 pkt
->app_tag_mask
[1] = 0x0;
830 pkt
->ref_tag
= cpu_to_le32((uint32_t)
831 (0xffffffff & scsi_get_lba(cmd
)));
833 if (!qla2x00_hba_err_chk_enabled(sp
))
836 /* enable ALL bytes of the ref tag */
837 pkt
->ref_tag_mask
[0] = 0xff;
838 pkt
->ref_tag_mask
[1] = 0xff;
839 pkt
->ref_tag_mask
[2] = 0xff;
840 pkt
->ref_tag_mask
[3] = 0xff;
843 /* For Type 3 protection: 16 bit GUARD only */
844 case SCSI_PROT_DIF_TYPE3
:
845 pkt
->ref_tag_mask
[0] = pkt
->ref_tag_mask
[1] =
846 pkt
->ref_tag_mask
[2] = pkt
->ref_tag_mask
[3] =
 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
854 case SCSI_PROT_DIF_TYPE1
:
855 pkt
->ref_tag
= cpu_to_le32((uint32_t)
856 (0xffffffff & scsi_get_lba(cmd
)));
857 pkt
->app_tag
= __constant_cpu_to_le16(0);
858 pkt
->app_tag_mask
[0] = 0x0;
859 pkt
->app_tag_mask
[1] = 0x0;
861 if (!qla2x00_hba_err_chk_enabled(sp
))
864 /* enable ALL bytes of the ref tag */
865 pkt
->ref_tag_mask
[0] = 0xff;
866 pkt
->ref_tag_mask
[1] = 0xff;
867 pkt
->ref_tag_mask
[2] = 0xff;
868 pkt
->ref_tag_mask
[3] = 0xff;
872 ql_dbg(ql_dbg_io
, vha
, 0x3009,
873 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
874 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
875 pkt
->ref_tag
, pkt
->app_tag
, protcnt
, (int)scsi_get_lba(cmd
),
876 scsi_get_prot_type(cmd
), cmd
);
880 dma_addr_t dma_addr
; /* OUT */
881 uint32_t dma_len
; /* OUT */
883 uint32_t tot_bytes
; /* IN */
884 struct scatterlist
*cur_sg
; /* IN */
886 /* for book keeping, bzero on initial invocation */
887 uint32_t bytes_consumed
;
889 uint32_t tot_partial
;
897 qla24xx_get_one_block_sg(uint32_t blk_sz
, struct qla2_sgx
*sgx
,
900 struct scatterlist
*sg
;
901 uint32_t cumulative_partial
, sg_len
;
902 dma_addr_t sg_dma_addr
;
904 if (sgx
->num_bytes
== sgx
->tot_bytes
)
908 cumulative_partial
= sgx
->tot_partial
;
910 sg_dma_addr
= sg_dma_address(sg
);
911 sg_len
= sg_dma_len(sg
);
913 sgx
->dma_addr
= sg_dma_addr
+ sgx
->bytes_consumed
;
915 if ((cumulative_partial
+ (sg_len
- sgx
->bytes_consumed
)) >= blk_sz
) {
916 sgx
->dma_len
= (blk_sz
- cumulative_partial
);
917 sgx
->tot_partial
= 0;
918 sgx
->num_bytes
+= blk_sz
;
921 sgx
->dma_len
= sg_len
- sgx
->bytes_consumed
;
922 sgx
->tot_partial
+= sgx
->dma_len
;
926 sgx
->bytes_consumed
+= sgx
->dma_len
;
928 if (sg_len
== sgx
->bytes_consumed
) {
932 sgx
->bytes_consumed
= 0;
939 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data
*ha
, srb_t
*sp
,
940 uint32_t *dsd
, uint16_t tot_dsds
)
943 uint8_t avail_dsds
= 0;
944 uint32_t dsd_list_len
;
945 struct dsd_dma
*dsd_ptr
;
946 struct scatterlist
*sg_prot
;
947 uint32_t *cur_dsd
= dsd
;
948 uint16_t used_dsds
= tot_dsds
;
954 uint32_t sle_dma_len
, tot_prot_dma_len
= 0;
955 struct scsi_cmnd
*cmd
= sp
->cmd
;
957 prot_int
= cmd
->device
->sector_size
;
959 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
960 sgx
.tot_bytes
= scsi_bufflen(sp
->cmd
);
961 sgx
.cur_sg
= scsi_sglist(sp
->cmd
);
964 sg_prot
= scsi_prot_sglist(sp
->cmd
);
966 while (qla24xx_get_one_block_sg(prot_int
, &sgx
, &partial
)) {
968 sle_dma
= sgx
.dma_addr
;
969 sle_dma_len
= sgx
.dma_len
;
971 /* Allocate additional continuation packets? */
972 if (avail_dsds
== 0) {
973 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
974 QLA_DSDS_PER_IOCB
: used_dsds
;
975 dsd_list_len
= (avail_dsds
+ 1) * 12;
976 used_dsds
-= avail_dsds
;
978 /* allocate tracking DS */
979 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
983 /* allocate new list */
984 dsd_ptr
->dsd_addr
= next_dsd
=
985 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
986 &dsd_ptr
->dsd_list_dma
);
990 * Need to cleanup only this dsd_ptr, rest
991 * will be done by sp_free_dma()
997 list_add_tail(&dsd_ptr
->list
,
998 &((struct crc_context
*)sp
->ctx
)->dsd_list
);
1000 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1002 /* add new list to cmd iocb or last list */
1003 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1004 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1005 *cur_dsd
++ = dsd_list_len
;
1006 cur_dsd
= (uint32_t *)next_dsd
;
1008 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1009 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1010 *cur_dsd
++ = cpu_to_le32(sle_dma_len
);
1014 /* Got a full protection interval */
1015 sle_dma
= sg_dma_address(sg_prot
) + tot_prot_dma_len
;
1018 tot_prot_dma_len
+= sle_dma_len
;
1019 if (tot_prot_dma_len
== sg_dma_len(sg_prot
)) {
1020 tot_prot_dma_len
= 0;
1021 sg_prot
= sg_next(sg_prot
);
1024 partial
= 1; /* So as to not re-enter this block */
1025 goto alloc_and_fill
;
1028 /* Null termination */
1036 qla24xx_walk_and_build_sglist(struct qla_hw_data
*ha
, srb_t
*sp
, uint32_t *dsd
,
1040 uint8_t avail_dsds
= 0;
1041 uint32_t dsd_list_len
;
1042 struct dsd_dma
*dsd_ptr
;
1043 struct scatterlist
*sg
;
1044 uint32_t *cur_dsd
= dsd
;
1046 uint16_t used_dsds
= tot_dsds
;
1047 scsi_qla_host_t
*vha
= shost_priv(sp
->cmd
->device
->host
);
1051 scsi_for_each_sg(sp
->cmd
, sg
, tot_dsds
, i
) {
1054 /* Allocate additional continuation packets? */
1055 if (avail_dsds
== 0) {
1056 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
1057 QLA_DSDS_PER_IOCB
: used_dsds
;
1058 dsd_list_len
= (avail_dsds
+ 1) * 12;
1059 used_dsds
-= avail_dsds
;
1061 /* allocate tracking DS */
1062 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
1066 /* allocate new list */
1067 dsd_ptr
->dsd_addr
= next_dsd
=
1068 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
1069 &dsd_ptr
->dsd_list_dma
);
1073 * Need to cleanup only this dsd_ptr, rest
1074 * will be done by sp_free_dma()
1080 list_add_tail(&dsd_ptr
->list
,
1081 &((struct crc_context
*)sp
->ctx
)->dsd_list
);
1083 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1085 /* add new list to cmd iocb or last list */
1086 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1087 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1088 *cur_dsd
++ = dsd_list_len
;
1089 cur_dsd
= (uint32_t *)next_dsd
;
1091 sle_dma
= sg_dma_address(sg
);
1092 ql_dbg(ql_dbg_io
, vha
, 0x300a,
1093 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
1094 i
, LSD(sle_dma
), MSD(sle_dma
), sg_dma_len(sg
),
1096 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1097 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1098 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
1101 if (scsi_get_prot_op(sp
->cmd
) == SCSI_PROT_WRITE_PASS
) {
1102 cp
= page_address(sg_page(sg
)) + sg
->offset
;
1103 ql_dbg(ql_dbg_io
, vha
, 0x300b,
1104 "User data buffer=%p for cmd=%p.\n", cp
, sp
->cmd
);
1107 /* Null termination */
1115 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data
*ha
, srb_t
*sp
,
1120 uint8_t avail_dsds
= 0;
1121 uint32_t dsd_list_len
;
1122 struct dsd_dma
*dsd_ptr
;
1123 struct scatterlist
*sg
;
1125 struct scsi_cmnd
*cmd
;
1126 uint32_t *cur_dsd
= dsd
;
1127 uint16_t used_dsds
= tot_dsds
;
1128 scsi_qla_host_t
*vha
= pci_get_drvdata(ha
->pdev
);
1133 scsi_for_each_prot_sg(cmd
, sg
, tot_dsds
, i
) {
1136 /* Allocate additional continuation packets? */
1137 if (avail_dsds
== 0) {
1138 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
1139 QLA_DSDS_PER_IOCB
: used_dsds
;
1140 dsd_list_len
= (avail_dsds
+ 1) * 12;
1141 used_dsds
-= avail_dsds
;
1143 /* allocate tracking DS */
1144 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
1148 /* allocate new list */
1149 dsd_ptr
->dsd_addr
= next_dsd
=
1150 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
1151 &dsd_ptr
->dsd_list_dma
);
1155 * Need to cleanup only this dsd_ptr, rest
1156 * will be done by sp_free_dma()
1162 list_add_tail(&dsd_ptr
->list
,
1163 &((struct crc_context
*)sp
->ctx
)->dsd_list
);
1165 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1167 /* add new list to cmd iocb or last list */
1168 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1169 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1170 *cur_dsd
++ = dsd_list_len
;
1171 cur_dsd
= (uint32_t *)next_dsd
;
1173 sle_dma
= sg_dma_address(sg
);
1174 if (scsi_get_prot_op(sp
->cmd
) == SCSI_PROT_WRITE_PASS
) {
1175 ql_dbg(ql_dbg_io
, vha
, 0x3027,
1176 "%s(): %p, sg_entry %d - "
1177 "addr=0x%x0x%x, len=%d.\n",
1178 __func__
, cur_dsd
, i
,
1179 LSD(sle_dma
), MSD(sle_dma
), sg_dma_len(sg
));
1181 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1182 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1183 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
1185 if (scsi_get_prot_op(sp
->cmd
) == SCSI_PROT_WRITE_PASS
) {
1186 cp
= page_address(sg_page(sg
)) + sg
->offset
;
1187 ql_dbg(ql_dbg_io
, vha
, 0x3028,
1188 "%s(): Protection Data buffer = %p.\n", __func__
,
1193 /* Null termination */
1201 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1202 * Type 6 IOCB types.
1204 * @sp: SRB command to process
1205 * @cmd_pkt: Command type 3 IOCB
1206 * @tot_dsds: Total number of segments to transfer
1209 qla24xx_build_scsi_crc_2_iocbs(srb_t
*sp
, struct cmd_type_crc_2
*cmd_pkt
,
1210 uint16_t tot_dsds
, uint16_t tot_prot_dsds
, uint16_t fw_prot_opts
)
1212 uint32_t *cur_dsd
, *fcp_dl
;
1213 scsi_qla_host_t
*vha
;
1214 struct scsi_cmnd
*cmd
;
1215 struct scatterlist
*cur_seg
;
1217 uint32_t total_bytes
= 0;
1218 uint32_t data_bytes
;
1220 uint8_t bundling
= 1;
1223 struct crc_context
*crc_ctx_pkt
= NULL
;
1224 struct qla_hw_data
*ha
;
1225 uint8_t additional_fcpcdb_len
;
1226 uint16_t fcp_cmnd_len
;
1227 struct fcp_cmnd
*fcp_cmnd
;
1228 dma_addr_t crc_ctx_dma
;
1234 /* Update entry type to indicate Command Type CRC_2 IOCB */
1235 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
1236 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2
);
1238 vha
= sp
->fcport
->vha
;
1241 /* No data transfer */
1242 data_bytes
= scsi_bufflen(cmd
);
1243 if (!data_bytes
|| cmd
->sc_data_direction
== DMA_NONE
) {
1244 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
1248 cmd_pkt
->vp_index
= sp
->fcport
->vp_idx
;
1250 /* Set transfer direction */
1251 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
1252 cmd_pkt
->control_flags
=
1253 __constant_cpu_to_le16(CF_WRITE_DATA
);
1254 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
1255 cmd_pkt
->control_flags
=
1256 __constant_cpu_to_le16(CF_READ_DATA
);
1259 if ((scsi_get_prot_op(sp
->cmd
) == SCSI_PROT_READ_INSERT
) ||
1260 (scsi_get_prot_op(sp
->cmd
) == SCSI_PROT_WRITE_STRIP
) ||
1261 (scsi_get_prot_op(sp
->cmd
) == SCSI_PROT_READ_STRIP
) ||
1262 (scsi_get_prot_op(sp
->cmd
) == SCSI_PROT_WRITE_INSERT
))
1265 /* Allocate CRC context from global pool */
1266 crc_ctx_pkt
= sp
->ctx
= dma_pool_alloc(ha
->dl_dma_pool
,
1267 GFP_ATOMIC
, &crc_ctx_dma
);
1270 goto crc_queuing_error
;
1272 /* Zero out CTX area. */
1273 clr_ptr
= (uint8_t *)crc_ctx_pkt
;
1274 memset(clr_ptr
, 0, sizeof(*crc_ctx_pkt
));
1276 crc_ctx_pkt
->crc_ctx_dma
= crc_ctx_dma
;
1278 sp
->flags
|= SRB_CRC_CTX_DMA_VALID
;
1281 crc_ctx_pkt
->handle
= cmd_pkt
->handle
;
1283 INIT_LIST_HEAD(&crc_ctx_pkt
->dsd_list
);
1285 qla24xx_set_t10dif_tags(sp
, (struct fw_dif_context
*)
1286 &crc_ctx_pkt
->ref_tag
, tot_prot_dsds
);
1288 cmd_pkt
->crc_context_address
[0] = cpu_to_le32(LSD(crc_ctx_dma
));
1289 cmd_pkt
->crc_context_address
[1] = cpu_to_le32(MSD(crc_ctx_dma
));
1290 cmd_pkt
->crc_context_len
= CRC_CONTEXT_LEN_FW
;
1292 /* Determine SCSI command length -- align to 4 byte boundary */
1293 if (cmd
->cmd_len
> 16) {
1294 additional_fcpcdb_len
= cmd
->cmd_len
- 16;
1295 if ((cmd
->cmd_len
% 4) != 0) {
1296 /* SCSI cmd > 16 bytes must be multiple of 4 */
1297 goto crc_queuing_error
;
1299 fcp_cmnd_len
= 12 + cmd
->cmd_len
+ 4;
1301 additional_fcpcdb_len
= 0;
1302 fcp_cmnd_len
= 12 + 16 + 4;
1305 fcp_cmnd
= &crc_ctx_pkt
->fcp_cmnd
;
1307 fcp_cmnd
->additional_cdb_len
= additional_fcpcdb_len
;
1308 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
)
1309 fcp_cmnd
->additional_cdb_len
|= 1;
1310 else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
1311 fcp_cmnd
->additional_cdb_len
|= 2;
1313 int_to_scsilun(sp
->cmd
->device
->lun
, &fcp_cmnd
->lun
);
1314 memcpy(fcp_cmnd
->cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1315 cmd_pkt
->fcp_cmnd_dseg_len
= cpu_to_le16(fcp_cmnd_len
);
1316 cmd_pkt
->fcp_cmnd_dseg_address
[0] = cpu_to_le32(
1317 LSD(crc_ctx_dma
+ CRC_CONTEXT_FCPCMND_OFF
));
1318 cmd_pkt
->fcp_cmnd_dseg_address
[1] = cpu_to_le32(
1319 MSD(crc_ctx_dma
+ CRC_CONTEXT_FCPCMND_OFF
));
1320 fcp_cmnd
->task_management
= 0;
1323 * Update tagged queuing modifier if using command tag queuing
1325 if (scsi_populate_tag_msg(cmd
, tag
)) {
1327 case HEAD_OF_QUEUE_TAG
:
1328 fcp_cmnd
->task_attribute
= TSK_HEAD_OF_QUEUE
;
1330 case ORDERED_QUEUE_TAG
:
1331 fcp_cmnd
->task_attribute
= TSK_ORDERED
;
1334 fcp_cmnd
->task_attribute
= 0;
1338 fcp_cmnd
->task_attribute
= 0;
1341 cmd_pkt
->fcp_rsp_dseg_len
= 0; /* Let response come in status iocb */
1343 /* Compute dif len and adjust data len to incude protection */
1345 blk_size
= cmd
->device
->sector_size
;
1346 dif_bytes
= (data_bytes
/ blk_size
) * 8;
1348 switch (scsi_get_prot_op(sp
->cmd
)) {
1349 case SCSI_PROT_READ_INSERT
:
1350 case SCSI_PROT_WRITE_STRIP
:
1351 total_bytes
= data_bytes
;
1352 data_bytes
+= dif_bytes
;
1355 case SCSI_PROT_READ_STRIP
:
1356 case SCSI_PROT_WRITE_INSERT
:
1357 case SCSI_PROT_READ_PASS
:
1358 case SCSI_PROT_WRITE_PASS
:
1359 total_bytes
= data_bytes
+ dif_bytes
;
1365 if (!qla2x00_hba_err_chk_enabled(sp
))
1366 fw_prot_opts
|= 0x10; /* Disable Guard tag checking */
1369 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.nobundling
.data_address
;
1372 * Configure Bundling if we need to fetch interlaving
1373 * protection PCI accesses
1375 fw_prot_opts
|= PO_ENABLE_DIF_BUNDLING
;
1376 crc_ctx_pkt
->u
.bundling
.dif_byte_count
= cpu_to_le32(dif_bytes
);
1377 crc_ctx_pkt
->u
.bundling
.dseg_count
= cpu_to_le16(tot_dsds
-
1379 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.bundling
.data_address
;
1382 /* Finish the common fields of CRC pkt */
1383 crc_ctx_pkt
->blk_size
= cpu_to_le16(blk_size
);
1384 crc_ctx_pkt
->prot_opts
= cpu_to_le16(fw_prot_opts
);
1385 crc_ctx_pkt
->byte_count
= cpu_to_le32(data_bytes
);
1386 crc_ctx_pkt
->guard_seed
= __constant_cpu_to_le16(0);
1387 /* Fibre channel byte count */
1388 cmd_pkt
->byte_count
= cpu_to_le32(total_bytes
);
1389 fcp_dl
= (uint32_t *)(crc_ctx_pkt
->fcp_cmnd
.cdb
+ 16 +
1390 additional_fcpcdb_len
);
1391 *fcp_dl
= htonl(total_bytes
);
1393 if (!data_bytes
|| cmd
->sc_data_direction
== DMA_NONE
) {
1394 cmd_pkt
->byte_count
= __constant_cpu_to_le32(0);
1397 /* Walks data segments */
1399 cmd_pkt
->control_flags
|=
1400 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE
);
1402 if (!bundling
&& tot_prot_dsds
) {
1403 if (qla24xx_walk_and_build_sglist_no_difb(ha
, sp
,
1405 goto crc_queuing_error
;
1406 } else if (qla24xx_walk_and_build_sglist(ha
, sp
, cur_dsd
,
1407 (tot_dsds
- tot_prot_dsds
)))
1408 goto crc_queuing_error
;
1410 if (bundling
&& tot_prot_dsds
) {
1411 /* Walks dif segments */
1412 cur_seg
= scsi_prot_sglist(cmd
);
1413 cmd_pkt
->control_flags
|=
1414 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE
);
1415 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.bundling
.dif_address
;
1416 if (qla24xx_walk_and_build_prot_sglist(ha
, sp
, cur_dsd
,
1418 goto crc_queuing_error
;
1423 /* Cleanup will be performed by the caller */
1425 return QLA_FUNCTION_FAILED
;
1429 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1430 * @sp: command to send to the ISP
1432 * Returns non-zero if a failure occurred, else zero.
1435 qla24xx_start_scsi(srb_t
*sp
)
1438 unsigned long flags
;
1442 struct cmd_type_7
*cmd_pkt
;
1446 struct req_que
*req
= NULL
;
1447 struct rsp_que
*rsp
= NULL
;
1448 struct scsi_cmnd
*cmd
= sp
->cmd
;
1449 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1450 struct qla_hw_data
*ha
= vha
->hw
;
1453 /* Setup device pointers. */
1456 qla25xx_set_que(sp
, &rsp
);
1459 /* So we know we haven't pci_map'ed anything yet */
1462 /* Send marker if required */
1463 if (vha
->marker_needed
!= 0) {
1464 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1466 return QLA_FUNCTION_FAILED
;
1467 vha
->marker_needed
= 0;
1470 /* Acquire ring specific lock */
1471 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1473 /* Check for room in outstanding command list. */
1474 handle
= req
->current_outstanding_cmd
;
1475 for (index
= 1; index
< MAX_OUTSTANDING_COMMANDS
; index
++) {
1477 if (handle
== MAX_OUTSTANDING_COMMANDS
)
1479 if (!req
->outstanding_cmds
[handle
])
1482 if (index
== MAX_OUTSTANDING_COMMANDS
) {
1486 /* Map the sg table so we have an accurate count of sg entries needed */
1487 if (scsi_sg_count(cmd
)) {
1488 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1489 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1490 if (unlikely(!nseg
))
1496 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
1497 if (req
->cnt
< (req_cnt
+ 2)) {
1498 cnt
= RD_REG_DWORD_RELAXED(req
->req_q_out
);
1500 if (req
->ring_index
< cnt
)
1501 req
->cnt
= cnt
- req
->ring_index
;
1503 req
->cnt
= req
->length
-
1504 (req
->ring_index
- cnt
);
1506 if (req
->cnt
< (req_cnt
+ 2))
1509 /* Build command packet. */
1510 req
->current_outstanding_cmd
= handle
;
1511 req
->outstanding_cmds
[handle
] = sp
;
1512 sp
->handle
= handle
;
1513 sp
->cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1514 req
->cnt
-= req_cnt
;
1516 cmd_pkt
= (struct cmd_type_7
*)req
->ring_ptr
;
1517 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1519 /* Zero out remaining portion of packet. */
1520 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1521 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1522 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1523 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1525 /* Set NPORT-ID and LUN number*/
1526 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1527 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1528 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1529 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1530 cmd_pkt
->vp_index
= sp
->fcport
->vp_idx
;
1532 int_to_scsilun(sp
->cmd
->device
->lun
, &cmd_pkt
->lun
);
1533 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1535 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1536 if (scsi_populate_tag_msg(cmd
, tag
)) {
1538 case HEAD_OF_QUEUE_TAG
:
1539 cmd_pkt
->task
= TSK_HEAD_OF_QUEUE
;
1541 case ORDERED_QUEUE_TAG
:
1542 cmd_pkt
->task
= TSK_ORDERED
;
1547 /* Load SCSI command packet. */
1548 memcpy(cmd_pkt
->fcp_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1549 host_to_fcp_swap(cmd_pkt
->fcp_cdb
, sizeof(cmd_pkt
->fcp_cdb
));
1551 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
1553 /* Build IOCB segments */
1554 qla24xx_build_scsi_iocbs(sp
, cmd_pkt
, tot_dsds
);
1556 /* Set total data segment count. */
1557 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1558 /* Specify response queue number where completion should happen */
1559 cmd_pkt
->entry_status
= (uint8_t) rsp
->id
;
1561 /* Adjust ring index. */
1563 if (req
->ring_index
== req
->length
) {
1564 req
->ring_index
= 0;
1565 req
->ring_ptr
= req
->ring
;
1569 sp
->flags
|= SRB_DMA_VALID
;
1571 /* Set chip new ring index. */
1572 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1573 RD_REG_DWORD_RELAXED(&ha
->iobase
->isp24
.hccr
);
1575 /* Manage unprocessed RIO/ZIO commands in response queue. */
1576 if (vha
->flags
.process_response_queue
&&
1577 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
1578 qla24xx_process_response_queue(vha
, rsp
);
1580 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1585 scsi_dma_unmap(cmd
);
1587 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1589 return QLA_FUNCTION_FAILED
;
1594 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1595 * @sp: command to send to the ISP
1597 * Returns non-zero if a failure occurred, else zero.
1600 qla24xx_dif_start_scsi(srb_t
*sp
)
1603 unsigned long flags
;
1608 uint16_t req_cnt
= 0;
1610 uint16_t tot_prot_dsds
;
1611 uint16_t fw_prot_opts
= 0;
1612 struct req_que
*req
= NULL
;
1613 struct rsp_que
*rsp
= NULL
;
1614 struct scsi_cmnd
*cmd
= sp
->cmd
;
1615 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1616 struct qla_hw_data
*ha
= vha
->hw
;
1617 struct cmd_type_crc_2
*cmd_pkt
;
1618 uint32_t status
= 0;
1620 #define QDSS_GOT_Q_SPACE BIT_0
1622 /* Only process protection or >16 cdb in this routine */
1623 if (scsi_get_prot_op(cmd
) == SCSI_PROT_NORMAL
) {
1624 if (cmd
->cmd_len
<= 16)
1625 return qla24xx_start_scsi(sp
);
1628 /* Setup device pointers. */
1630 qla25xx_set_que(sp
, &rsp
);
1633 /* So we know we haven't pci_map'ed anything yet */
1636 /* Send marker if required */
1637 if (vha
->marker_needed
!= 0) {
1638 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1640 return QLA_FUNCTION_FAILED
;
1641 vha
->marker_needed
= 0;
1644 /* Acquire ring specific lock */
1645 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1647 /* Check for room in outstanding command list. */
1648 handle
= req
->current_outstanding_cmd
;
1649 for (index
= 1; index
< MAX_OUTSTANDING_COMMANDS
; index
++) {
1651 if (handle
== MAX_OUTSTANDING_COMMANDS
)
1653 if (!req
->outstanding_cmds
[handle
])
1657 if (index
== MAX_OUTSTANDING_COMMANDS
)
1660 /* Compute number of required data segments */
1661 /* Map the sg table so we have an accurate count of sg entries needed */
1662 if (scsi_sg_count(cmd
)) {
1663 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1664 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1665 if (unlikely(!nseg
))
1668 sp
->flags
|= SRB_DMA_VALID
;
1670 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1671 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1672 struct qla2_sgx sgx
;
1675 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
1676 sgx
.tot_bytes
= scsi_bufflen(cmd
);
1677 sgx
.cur_sg
= scsi_sglist(cmd
);
1681 while (qla24xx_get_one_block_sg(
1682 cmd
->device
->sector_size
, &sgx
, &partial
))
1688 /* number of required data segments */
1691 /* Compute number of required protection segments */
1692 if (qla24xx_configure_prot_mode(sp
, &fw_prot_opts
)) {
1693 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_prot_sglist(cmd
),
1694 scsi_prot_sg_count(cmd
), cmd
->sc_data_direction
);
1695 if (unlikely(!nseg
))
1698 sp
->flags
|= SRB_CRC_PROT_DMA_VALID
;
1700 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1701 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1702 nseg
= scsi_bufflen(cmd
) / cmd
->device
->sector_size
;
1709 /* Total Data and protection sg segment(s) */
1710 tot_prot_dsds
= nseg
;
1712 if (req
->cnt
< (req_cnt
+ 2)) {
1713 cnt
= RD_REG_DWORD_RELAXED(req
->req_q_out
);
1715 if (req
->ring_index
< cnt
)
1716 req
->cnt
= cnt
- req
->ring_index
;
1718 req
->cnt
= req
->length
-
1719 (req
->ring_index
- cnt
);
1722 if (req
->cnt
< (req_cnt
+ 2))
1725 status
|= QDSS_GOT_Q_SPACE
;
1727 /* Build header part of command packet (excluding the OPCODE). */
1728 req
->current_outstanding_cmd
= handle
;
1729 req
->outstanding_cmds
[handle
] = sp
;
1730 sp
->handle
= handle
;
1731 sp
->cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1732 req
->cnt
-= req_cnt
;
1734 /* Fill-in common area */
1735 cmd_pkt
= (struct cmd_type_crc_2
*)req
->ring_ptr
;
1736 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1738 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1739 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1741 /* Set NPORT-ID and LUN number*/
1742 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1743 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1744 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1745 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1747 int_to_scsilun(sp
->cmd
->device
->lun
, &cmd_pkt
->lun
);
1748 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1750 /* Total Data and protection segment(s) */
1751 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1753 /* Build IOCB segments and adjust for data protection segments */
1754 if (qla24xx_build_scsi_crc_2_iocbs(sp
, (struct cmd_type_crc_2
*)
1755 req
->ring_ptr
, tot_dsds
, tot_prot_dsds
, fw_prot_opts
) !=
1759 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1760 /* Specify response queue number where completion should happen */
1761 cmd_pkt
->entry_status
= (uint8_t) rsp
->id
;
1762 cmd_pkt
->timeout
= __constant_cpu_to_le16(0);
1765 /* Adjust ring index. */
1767 if (req
->ring_index
== req
->length
) {
1768 req
->ring_index
= 0;
1769 req
->ring_ptr
= req
->ring
;
1773 /* Set chip new ring index. */
1774 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1775 RD_REG_DWORD_RELAXED(&ha
->iobase
->isp24
.hccr
);
1777 /* Manage unprocessed RIO/ZIO commands in response queue. */
1778 if (vha
->flags
.process_response_queue
&&
1779 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
1780 qla24xx_process_response_queue(vha
, rsp
);
1782 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1787 if (status
& QDSS_GOT_Q_SPACE
) {
1788 req
->outstanding_cmds
[handle
] = NULL
;
1789 req
->cnt
+= req_cnt
;
1791 /* Cleanup will be performed by the caller (queuecommand) */
1793 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1794 return QLA_FUNCTION_FAILED
;
1798 static void qla25xx_set_que(srb_t
*sp
, struct rsp_que
**rsp
)
1800 struct scsi_cmnd
*cmd
= sp
->cmd
;
1801 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
1802 int affinity
= cmd
->request
->cpu
;
1804 if (ha
->flags
.cpu_affinity_enabled
&& affinity
>= 0 &&
1805 affinity
< ha
->max_rsp_queues
- 1)
1806 *rsp
= ha
->rsp_q_map
[affinity
+ 1];
1808 *rsp
= ha
->rsp_q_map
[0];
1811 /* Generic Control-SRB manipulation functions. */
1813 qla2x00_alloc_iocbs(scsi_qla_host_t
*vha
, srb_t
*sp
)
1815 struct qla_hw_data
*ha
= vha
->hw
;
1816 struct req_que
*req
= ha
->req_q_map
[0];
1817 device_reg_t __iomem
*reg
= ISP_QUE_REG(ha
, req
->id
);
1818 uint32_t index
, handle
;
1820 uint16_t cnt
, req_cnt
;
1821 struct srb_ctx
*ctx
;
1828 goto skip_cmd_array
;
1830 /* Check for room in outstanding command list. */
1831 handle
= req
->current_outstanding_cmd
;
1832 for (index
= 1; index
< MAX_OUTSTANDING_COMMANDS
; index
++) {
1834 if (handle
== MAX_OUTSTANDING_COMMANDS
)
1836 if (!req
->outstanding_cmds
[handle
])
1839 if (index
== MAX_OUTSTANDING_COMMANDS
) {
1840 ql_log(ql_log_warn
, vha
, 0x700b,
1841 "No room on oustanding cmd array.\n");
1845 /* Prep command array. */
1846 req
->current_outstanding_cmd
= handle
;
1847 req
->outstanding_cmds
[handle
] = sp
;
1848 sp
->handle
= handle
;
1850 /* Adjust entry-counts as needed. */
1853 req_cnt
= ctx
->iocbs
;
1857 /* Check for room on request queue. */
1858 if (req
->cnt
< req_cnt
) {
1860 cnt
= RD_REG_DWORD(®
->isp25mq
.req_q_out
);
1861 else if (IS_QLA82XX(ha
))
1862 cnt
= RD_REG_DWORD(®
->isp82
.req_q_out
);
1863 else if (IS_FWI2_CAPABLE(ha
))
1864 cnt
= RD_REG_DWORD(®
->isp24
.req_q_out
);
1866 cnt
= qla2x00_debounce_register(
1867 ISP_REQ_Q_OUT(ha
, ®
->isp
));
1869 if (req
->ring_index
< cnt
)
1870 req
->cnt
= cnt
- req
->ring_index
;
1872 req
->cnt
= req
->length
-
1873 (req
->ring_index
- cnt
);
1875 if (req
->cnt
< req_cnt
)
1879 req
->cnt
-= req_cnt
;
1880 pkt
= req
->ring_ptr
;
1881 memset(pkt
, 0, REQUEST_ENTRY_SIZE
);
1882 pkt
->entry_count
= req_cnt
;
1883 pkt
->handle
= handle
;
1890 qla24xx_login_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
1892 struct srb_ctx
*ctx
= sp
->ctx
;
1893 struct srb_iocb
*lio
= ctx
->u
.iocb_cmd
;
1895 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
1896 logio
->control_flags
= cpu_to_le16(LCF_COMMAND_PLOGI
);
1897 if (lio
->u
.logio
.flags
& SRB_LOGIN_COND_PLOGI
)
1898 logio
->control_flags
|= cpu_to_le16(LCF_COND_PLOGI
);
1899 if (lio
->u
.logio
.flags
& SRB_LOGIN_SKIP_PRLI
)
1900 logio
->control_flags
|= cpu_to_le16(LCF_SKIP_PRLI
);
1901 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1902 logio
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1903 logio
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1904 logio
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1905 logio
->vp_index
= sp
->fcport
->vp_idx
;
1909 qla2x00_login_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
1911 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
1912 struct srb_ctx
*ctx
= sp
->ctx
;
1913 struct srb_iocb
*lio
= ctx
->u
.iocb_cmd
;
1916 mbx
->entry_type
= MBX_IOCB_TYPE
;
1917 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
1918 mbx
->mb0
= cpu_to_le16(MBC_LOGIN_FABRIC_PORT
);
1919 opts
= lio
->u
.logio
.flags
& SRB_LOGIN_COND_PLOGI
? BIT_0
: 0;
1920 opts
|= lio
->u
.logio
.flags
& SRB_LOGIN_SKIP_PRLI
? BIT_1
: 0;
1921 if (HAS_EXTENDED_IDS(ha
)) {
1922 mbx
->mb1
= cpu_to_le16(sp
->fcport
->loop_id
);
1923 mbx
->mb10
= cpu_to_le16(opts
);
1925 mbx
->mb1
= cpu_to_le16((sp
->fcport
->loop_id
<< 8) | opts
);
1927 mbx
->mb2
= cpu_to_le16(sp
->fcport
->d_id
.b
.domain
);
1928 mbx
->mb3
= cpu_to_le16(sp
->fcport
->d_id
.b
.area
<< 8 |
1929 sp
->fcport
->d_id
.b
.al_pa
);
1930 mbx
->mb9
= cpu_to_le16(sp
->fcport
->vp_idx
);
1934 qla24xx_logout_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
1936 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
1937 logio
->control_flags
=
1938 cpu_to_le16(LCF_COMMAND_LOGO
|LCF_IMPL_LOGO
);
1939 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1940 logio
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1941 logio
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1942 logio
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1943 logio
->vp_index
= sp
->fcport
->vp_idx
;
1947 qla2x00_logout_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
1949 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
1951 mbx
->entry_type
= MBX_IOCB_TYPE
;
1952 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
1953 mbx
->mb0
= cpu_to_le16(MBC_LOGOUT_FABRIC_PORT
);
1954 mbx
->mb1
= HAS_EXTENDED_IDS(ha
) ?
1955 cpu_to_le16(sp
->fcport
->loop_id
):
1956 cpu_to_le16(sp
->fcport
->loop_id
<< 8);
1957 mbx
->mb2
= cpu_to_le16(sp
->fcport
->d_id
.b
.domain
);
1958 mbx
->mb3
= cpu_to_le16(sp
->fcport
->d_id
.b
.area
<< 8 |
1959 sp
->fcport
->d_id
.b
.al_pa
);
1960 mbx
->mb9
= cpu_to_le16(sp
->fcport
->vp_idx
);
1961 /* Implicit: mbx->mbx10 = 0. */
1965 qla24xx_adisc_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
1967 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
1968 logio
->control_flags
= cpu_to_le16(LCF_COMMAND_ADISC
);
1969 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1970 logio
->vp_index
= sp
->fcport
->vp_idx
;
1974 qla2x00_adisc_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
1976 struct qla_hw_data
*ha
= sp
->fcport
->vha
->hw
;
1978 mbx
->entry_type
= MBX_IOCB_TYPE
;
1979 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
1980 mbx
->mb0
= cpu_to_le16(MBC_GET_PORT_DATABASE
);
1981 if (HAS_EXTENDED_IDS(ha
)) {
1982 mbx
->mb1
= cpu_to_le16(sp
->fcport
->loop_id
);
1983 mbx
->mb10
= cpu_to_le16(BIT_0
);
1985 mbx
->mb1
= cpu_to_le16((sp
->fcport
->loop_id
<< 8) | BIT_0
);
1987 mbx
->mb2
= cpu_to_le16(MSW(ha
->async_pd_dma
));
1988 mbx
->mb3
= cpu_to_le16(LSW(ha
->async_pd_dma
));
1989 mbx
->mb6
= cpu_to_le16(MSW(MSD(ha
->async_pd_dma
)));
1990 mbx
->mb7
= cpu_to_le16(LSW(MSD(ha
->async_pd_dma
)));
1991 mbx
->mb9
= cpu_to_le16(sp
->fcport
->vp_idx
);
1995 qla24xx_tm_iocb(srb_t
*sp
, struct tsk_mgmt_entry
*tsk
)
1999 struct fc_port
*fcport
= sp
->fcport
;
2000 scsi_qla_host_t
*vha
= fcport
->vha
;
2001 struct qla_hw_data
*ha
= vha
->hw
;
2002 struct srb_ctx
*ctx
= sp
->ctx
;
2003 struct srb_iocb
*iocb
= ctx
->u
.iocb_cmd
;
2004 struct req_que
*req
= vha
->req
;
2006 flags
= iocb
->u
.tmf
.flags
;
2007 lun
= iocb
->u
.tmf
.lun
;
2009 tsk
->entry_type
= TSK_MGMT_IOCB_TYPE
;
2010 tsk
->entry_count
= 1;
2011 tsk
->handle
= MAKE_HANDLE(req
->id
, tsk
->handle
);
2012 tsk
->nport_handle
= cpu_to_le16(fcport
->loop_id
);
2013 tsk
->timeout
= cpu_to_le16(ha
->r_a_tov
/ 10 * 2);
2014 tsk
->control_flags
= cpu_to_le32(flags
);
2015 tsk
->port_id
[0] = fcport
->d_id
.b
.al_pa
;
2016 tsk
->port_id
[1] = fcport
->d_id
.b
.area
;
2017 tsk
->port_id
[2] = fcport
->d_id
.b
.domain
;
2018 tsk
->vp_index
= fcport
->vp_idx
;
2020 if (flags
== TCF_LUN_RESET
) {
2021 int_to_scsilun(lun
, &tsk
->lun
);
2022 host_to_fcp_swap((uint8_t *)&tsk
->lun
,
2028 qla24xx_els_iocb(srb_t
*sp
, struct els_entry_24xx
*els_iocb
)
2030 struct fc_bsg_job
*bsg_job
= ((struct srb_ctx
*)sp
->ctx
)->u
.bsg_job
;
2032 els_iocb
->entry_type
= ELS_IOCB_TYPE
;
2033 els_iocb
->entry_count
= 1;
2034 els_iocb
->sys_define
= 0;
2035 els_iocb
->entry_status
= 0;
2036 els_iocb
->handle
= sp
->handle
;
2037 els_iocb
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2038 els_iocb
->tx_dsd_count
= __constant_cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2039 els_iocb
->vp_index
= sp
->fcport
->vp_idx
;
2040 els_iocb
->sof_type
= EST_SOFI3
;
2041 els_iocb
->rx_dsd_count
= __constant_cpu_to_le16(bsg_job
->reply_payload
.sg_cnt
);
2044 (((struct srb_ctx
*)sp
->ctx
)->type
== SRB_ELS_CMD_RPT
) ?
2045 bsg_job
->request
->rqst_data
.r_els
.els_code
:
2046 bsg_job
->request
->rqst_data
.h_els
.command_code
;
2047 els_iocb
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2048 els_iocb
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2049 els_iocb
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2050 els_iocb
->control_flags
= 0;
2051 els_iocb
->rx_byte_count
=
2052 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2053 els_iocb
->tx_byte_count
=
2054 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2056 els_iocb
->tx_address
[0] = cpu_to_le32(LSD(sg_dma_address
2057 (bsg_job
->request_payload
.sg_list
)));
2058 els_iocb
->tx_address
[1] = cpu_to_le32(MSD(sg_dma_address
2059 (bsg_job
->request_payload
.sg_list
)));
2060 els_iocb
->tx_len
= cpu_to_le32(sg_dma_len
2061 (bsg_job
->request_payload
.sg_list
));
2063 els_iocb
->rx_address
[0] = cpu_to_le32(LSD(sg_dma_address
2064 (bsg_job
->reply_payload
.sg_list
)));
2065 els_iocb
->rx_address
[1] = cpu_to_le32(MSD(sg_dma_address
2066 (bsg_job
->reply_payload
.sg_list
)));
2067 els_iocb
->rx_len
= cpu_to_le32(sg_dma_len
2068 (bsg_job
->reply_payload
.sg_list
));
2072 qla2x00_ct_iocb(srb_t
*sp
, ms_iocb_entry_t
*ct_iocb
)
2074 uint16_t avail_dsds
;
2076 struct scatterlist
*sg
;
2079 scsi_qla_host_t
*vha
= sp
->fcport
->vha
;
2080 struct qla_hw_data
*ha
= vha
->hw
;
2081 struct fc_bsg_job
*bsg_job
= ((struct srb_ctx
*)sp
->ctx
)->u
.bsg_job
;
2082 int loop_iterartion
= 0;
2083 int cont_iocb_prsnt
= 0;
2084 int entry_count
= 1;
2086 memset(ct_iocb
, 0, sizeof(ms_iocb_entry_t
));
2087 ct_iocb
->entry_type
= CT_IOCB_TYPE
;
2088 ct_iocb
->entry_status
= 0;
2089 ct_iocb
->handle1
= sp
->handle
;
2090 SET_TARGET_ID(ha
, ct_iocb
->loop_id
, sp
->fcport
->loop_id
);
2091 ct_iocb
->status
= __constant_cpu_to_le16(0);
2092 ct_iocb
->control_flags
= __constant_cpu_to_le16(0);
2093 ct_iocb
->timeout
= 0;
2094 ct_iocb
->cmd_dsd_count
=
2095 __constant_cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2096 ct_iocb
->total_dsd_count
=
2097 __constant_cpu_to_le16(bsg_job
->request_payload
.sg_cnt
+ 1);
2098 ct_iocb
->req_bytecount
=
2099 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2100 ct_iocb
->rsp_bytecount
=
2101 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2103 ct_iocb
->dseg_req_address
[0] = cpu_to_le32(LSD(sg_dma_address
2104 (bsg_job
->request_payload
.sg_list
)));
2105 ct_iocb
->dseg_req_address
[1] = cpu_to_le32(MSD(sg_dma_address
2106 (bsg_job
->request_payload
.sg_list
)));
2107 ct_iocb
->dseg_req_length
= ct_iocb
->req_bytecount
;
2109 ct_iocb
->dseg_rsp_address
[0] = cpu_to_le32(LSD(sg_dma_address
2110 (bsg_job
->reply_payload
.sg_list
)));
2111 ct_iocb
->dseg_rsp_address
[1] = cpu_to_le32(MSD(sg_dma_address
2112 (bsg_job
->reply_payload
.sg_list
)));
2113 ct_iocb
->dseg_rsp_length
= ct_iocb
->rsp_bytecount
;
2116 cur_dsd
= (uint32_t *)ct_iocb
->dseg_rsp_address
;
2118 tot_dsds
= bsg_job
->reply_payload
.sg_cnt
;
2120 for_each_sg(bsg_job
->reply_payload
.sg_list
, sg
, tot_dsds
, index
) {
2122 cont_a64_entry_t
*cont_pkt
;
2124 /* Allocate additional continuation packets? */
2125 if (avail_dsds
== 0) {
2127 * Five DSDs are available in the Cont.
2130 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
,
2131 vha
->hw
->req_q_map
[0]);
2132 cur_dsd
= (uint32_t *) cont_pkt
->dseg_0_address
;
2134 cont_iocb_prsnt
= 1;
2138 sle_dma
= sg_dma_address(sg
);
2139 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
2140 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
2141 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
2145 ct_iocb
->entry_count
= entry_count
;
2149 qla24xx_ct_iocb(srb_t
*sp
, struct ct_entry_24xx
*ct_iocb
)
2151 uint16_t avail_dsds
;
2153 struct scatterlist
*sg
;
2156 scsi_qla_host_t
*vha
= sp
->fcport
->vha
;
2157 struct qla_hw_data
*ha
= vha
->hw
;
2158 struct fc_bsg_job
*bsg_job
= ((struct srb_ctx
*)sp
->ctx
)->u
.bsg_job
;
2159 int loop_iterartion
= 0;
2160 int cont_iocb_prsnt
= 0;
2161 int entry_count
= 1;
2163 ct_iocb
->entry_type
= CT_IOCB_TYPE
;
2164 ct_iocb
->entry_status
= 0;
2165 ct_iocb
->sys_define
= 0;
2166 ct_iocb
->handle
= sp
->handle
;
2168 ct_iocb
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2169 ct_iocb
->vp_index
= sp
->fcport
->vp_idx
;
2170 ct_iocb
->comp_status
= __constant_cpu_to_le16(0);
2172 ct_iocb
->cmd_dsd_count
=
2173 __constant_cpu_to_le16(bsg_job
->request_payload
.sg_cnt
);
2174 ct_iocb
->timeout
= 0;
2175 ct_iocb
->rsp_dsd_count
=
2176 __constant_cpu_to_le16(bsg_job
->reply_payload
.sg_cnt
);
2177 ct_iocb
->rsp_byte_count
=
2178 cpu_to_le32(bsg_job
->reply_payload
.payload_len
);
2179 ct_iocb
->cmd_byte_count
=
2180 cpu_to_le32(bsg_job
->request_payload
.payload_len
);
2181 ct_iocb
->dseg_0_address
[0] = cpu_to_le32(LSD(sg_dma_address
2182 (bsg_job
->request_payload
.sg_list
)));
2183 ct_iocb
->dseg_0_address
[1] = cpu_to_le32(MSD(sg_dma_address
2184 (bsg_job
->request_payload
.sg_list
)));
2185 ct_iocb
->dseg_0_len
= cpu_to_le32(sg_dma_len
2186 (bsg_job
->request_payload
.sg_list
));
2189 cur_dsd
= (uint32_t *)ct_iocb
->dseg_1_address
;
2191 tot_dsds
= bsg_job
->reply_payload
.sg_cnt
;
2193 for_each_sg(bsg_job
->reply_payload
.sg_list
, sg
, tot_dsds
, index
) {
2195 cont_a64_entry_t
*cont_pkt
;
2197 /* Allocate additional continuation packets? */
2198 if (avail_dsds
== 0) {
2200 * Five DSDs are available in the Cont.
2203 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
,
2205 cur_dsd
= (uint32_t *) cont_pkt
->dseg_0_address
;
2207 cont_iocb_prsnt
= 1;
2211 sle_dma
= sg_dma_address(sg
);
2212 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
2213 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
2214 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
2218 ct_iocb
->entry_count
= entry_count
;
/*
 * qla82xx_start_scsi() - Send a SCSI command to the ISP.
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
    int ret, nseg;
    unsigned long flags;
    struct scsi_cmnd *cmd;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct device_reg_82xx __iomem *reg;
    uint32_t dbval;
    uint32_t *fcp_dl;
    uint8_t additional_cdb_len;
    struct ct6_dsd *ctx;
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    char tag[2];

    /* Setup device pointers. */
    ret = 0;
    reg = &ha->iobase->isp82;
    cmd = sp->cmd;
    req = vha->req;
    rsp = ha->rsp_q_map[0];

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    dbval = 0x04 | (ha->portnum << 5);

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req,
            rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x300c,
                "qla2x00_marker failed for cmd=%p.\n", cmd);
            return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
        handle++;
        if (handle == MAX_OUTSTANDING_COMMANDS)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }
    if (index == MAX_OUTSTANDING_COMMANDS)
        goto queuing_error;
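
    /*
     * The handle search above walks the outstanding command array as a
     * ring, starting just past the last handle issued; handle 0 is never
     * used, so a wrap restarts at 1. Running out of handles bails out
     * through the queuing_error path.
     */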
    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;

    if (tot_dsds > ql2xshiftctondsd) {
        struct cmd_type_6 *cmd_pkt;
        uint16_t more_dsd_lists = 0;
        struct dsd_dma *dsd_ptr;
        uint16_t i;

        more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
        if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
            ql_dbg(ql_dbg_io, vha, 0x300d,
                "Num of DSD list %d is more than %d for cmd=%p.\n",
                more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
                cmd);
            goto queuing_error;
        }

        if (more_dsd_lists <= ha->gbl_dsd_avail)
            goto sufficient_dsds;
        else
            more_dsd_lists -= ha->gbl_dsd_avail;

        for (i = 0; i < more_dsd_lists; i++) {
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr) {
                ql_log(ql_log_fatal, vha, 0x300e,
                    "Failed to allocate memory for dsd_dma "
                    "for cmd=%p.\n", cmd);
                goto queuing_error;
            }

            dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
            if (!dsd_ptr->dsd_addr) {
                kfree(dsd_ptr);
                ql_log(ql_log_fatal, vha, 0x300f,
                    "Failed to allocate memory for dsd_addr "
                    "for cmd=%p.\n", cmd);
                goto queuing_error;
            }
            list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
            ha->gbl_dsd_avail++;
        }
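
        /*
         * Commands with more segments than ql2xshiftctondsd are sent as
         * Command Type 6 IOCBs, whose data segments live in external DSD
         * lists rather than in the request ring. The loop above tops up
         * the adapter-wide pool (ha->gbl_dsd_list) so enough
         * pre-allocated DSD lists are on hand before the command is built.
         */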
sufficient_dsds:
        req_cnt = 1;

        if (req->cnt < (req_cnt + 2)) {
            cnt = (uint16_t)RD_REG_DWORD_RELAXED(
                &reg->req_q_out[0]);
            if (req->ring_index < cnt)
                req->cnt = cnt - req->ring_index;
            else
                req->cnt = req->length -
                    (req->ring_index - cnt);
        }

        if (req->cnt < (req_cnt + 2))
            goto queuing_error;

        ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
        if (!sp->ctx) {
            ql_log(ql_log_fatal, vha, 0x3010,
                "Failed to allocate ctx for cmd=%p.\n", cmd);
            goto queuing_error;
        }
        memset(ctx, 0, sizeof(struct ct6_dsd));
        ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
            GFP_ATOMIC, &ctx->fcp_cmnd_dma);
        if (!ctx->fcp_cmnd) {
            ql_log(ql_log_fatal, vha, 0x3011,
                "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
            goto queuing_error_fcp_cmnd;
        }

        /* Initialize the DSD list and dma handle */
        INIT_LIST_HEAD(&ctx->dsd_list);
        ctx->dsd_use_cnt = 0;

        if (cmd->cmd_len > 16) {
            additional_cdb_len = cmd->cmd_len - 16;
            if ((cmd->cmd_len % 4) != 0) {
                /* A SCSI command bigger than 16 bytes must be
                 * a multiple of 4 bytes in length.
                 */
                ql_log(ql_log_warn, vha, 0x3012,
                    "scsi cmd len %d not multiple of 4 "
                    "for cmd=%p.\n", cmd->cmd_len, cmd);
                goto queuing_error_fcp_cmnd;
            }
            ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
            additional_cdb_len = 0;
            ctx->fcp_cmnd_len = 12 + 16 + 4;
        }
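
        /*
         * fcp_cmnd_len accounting: the driver's FCP_CMND IU has a fixed
         * 12-byte prefix (8-byte LUN, CRN, task attribute, task management
         * and additional-CDB-length/data-direction byte) followed by the
         * CDB (16 bytes, or cmd_len for extended CDBs) and a 4-byte FCP_DL
         * field that is filled in below once the transfer length is known.
         */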
        cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number. */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vp_idx;

        /* Build IOCB segments */
        if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
            goto queuing_error_fcp_cmnd;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
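
        /*
         * For Command Type 6 the FCP_CMND IU is not embedded in the ring
         * entry; it is built in the dma_pool buffer allocated above and
         * handed to the firmware by address through
         * fcp_cmnd_dseg_address/fcp_cmnd_dseg_len, which is why
         * SRB_FCP_CMND_DMA_VALID is set once the packet is complete.
         */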
        /* Build FCP_CMND IU. */
        memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
        int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
        ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

        if (cmd->sc_data_direction == DMA_TO_DEVICE)
            ctx->fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
            ctx->fcp_cmnd->additional_cdb_len |= 2;

        /*
         * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
            switch (tag[0]) {
            case HEAD_OF_QUEUE_TAG:
                ctx->fcp_cmnd->task_attribute =
                    TSK_HEAD_OF_QUEUE;
                break;
            case ORDERED_QUEUE_TAG:
                ctx->fcp_cmnd->task_attribute =
                    TSK_ORDERED;
                break;
            }
        }

        /* Populate the FCP_PRIO. */
        if (ha->flags.fcp_prio_enabled)
            ctx->fcp_cmnd->task_attribute |=
                sp->fcport->fcp_prio << 3;

        memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

        fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
            additional_cdb_len);
        *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] =
            cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
        cmd_pkt->fcp_cmnd_dseg_address[1] =
            cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

        sp->flags |= SRB_FCP_CMND_DMA_VALID;
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify the response queue number where completion should happen. */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
    } else {
        struct cmd_type_7 *cmd_pkt;

        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
            cnt = (uint16_t)RD_REG_DWORD_RELAXED(
                &reg->req_q_out[0]);
            if (req->ring_index < cnt)
                req->cnt = cnt - req->ring_index;
            else
                req->cnt = req->length -
                    (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number. */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vp_idx;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
            sizeof(cmd_pkt->lun));

        /*
         * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
            switch (tag[0]) {
            case HEAD_OF_QUEUE_TAG:
                cmd_pkt->task = TSK_HEAD_OF_QUEUE;
                break;
            case ORDERED_QUEUE_TAG:
                cmd_pkt->task = TSK_ORDERED;
                break;
            }
        }

        /* Populate the FCP_PRIO. */
        if (ha->flags.fcp_prio_enabled)
            cmd_pkt->task |= sp->fcport->fcp_prio << 3;

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify the response queue number where completion should happen. */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
    }
    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    /* write, read and verify logic */
    dbval = dbval | (req->id << 8) | (req->ring_index << 16);
    if (ql2xdbwr)
        qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
    else {
        WRT_REG_DWORD(
            (unsigned long __iomem *)ha->nxdb_wr_ptr,
            dbval);
        wmb();
        while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
            WRT_REG_DWORD(
                (unsigned long __iomem *)ha->nxdb_wr_ptr,
                dbval);
            wmb();
        }
    }
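
    /*
     * Depending on the ql2xdbwr module parameter, the ISP82xx request-queue
     * doorbell is posted either through qla82xx_wr_32() or by writing the
     * value directly and spinning until a read of nxdb_rd_ptr confirms the
     * hardware latched it -- hence the "write, read and verify" note above.
     */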
    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_SUCCESS;

queuing_error_fcp_cmnd:
    dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    if (sp->ctx) {
        mempool_free(sp->ctx, ha->ctx_mempool);
        sp->ctx = NULL;
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}
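
/*
 * qla2x00_start_sp() - Dispatch a non-SCSI SRB (login/logout, ELS, CT
 * pass-through, ADISC, task management) to the appropriate IOCB builder.
 *
 * Under hardware_lock it reserves ring space with qla2x00_alloc_iocbs(),
 * fills the entry according to ctx->type (using the FWI2 builders on
 * ISP24xx-and-later parts), and finally rings the request queue with
 * qla2x00_start_iocbs().
 */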
int
qla2x00_start_sp(srb_t *sp)
{
    int rval;
    struct qla_hw_data *ha = sp->fcport->vha->hw;
    void *pkt;
    struct srb_ctx *ctx = sp->ctx;
    unsigned long flags;

    rval = QLA_FUNCTION_FAILED;
    spin_lock_irqsave(&ha->hardware_lock, flags);
    pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
    if (!pkt) {
        ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
            "qla2x00_alloc_iocbs failed.\n");
        goto done;
    }

    rval = QLA_SUCCESS;
    switch (ctx->type) {
    case SRB_LOGIN_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_login_iocb(sp, pkt) :
            qla2x00_login_iocb(sp, pkt);
        break;
    case SRB_LOGOUT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_logout_iocb(sp, pkt) :
            qla2x00_logout_iocb(sp, pkt);
        break;
    case SRB_ELS_CMD_RPT:
    case SRB_ELS_CMD_HST:
        qla24xx_els_iocb(sp, pkt);
        break;
    case SRB_CT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_ct_iocb(sp, pkt) :
            qla2x00_ct_iocb(sp, pkt);
        break;
    case SRB_ADISC_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_adisc_iocb(sp, pkt) :
            qla2x00_adisc_iocb(sp, pkt);
        break;
    case SRB_TM_CMD:
        qla24xx_tm_iocb(sp, pkt);
        break;
    default:
        break;
    }

    wmb();
    qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
    spin_unlock_irqrestore(&ha->hardware_lock, flags);