/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
    uint16_t cflags;

    cflags = 0;

    /* Set transfer direction */
    if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
        cflags = CF_WRITE;
        sp->fcport->vha->hw->qla_stats.output_bytes +=
            scsi_bufflen(sp->cmd);
    } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cflags = CF_READ;
        sp->fcport->vha->hw->qla_stats.input_bytes +=
            scsi_bufflen(sp->cmd);
    }
    return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 3) {
        iocbs += (dsds - 3) / 7;
        if ((dsds - 3) % 7)
            iocbs++;
    }
    return (iocbs);
}
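/*
 * Note on the arithmetic above: the first Command Type 2 IOCB carries up
 * to three data segment descriptors; every additional Continuation Type 0
 * IOCB carries up to seven more, hence the (dsds - 3) / 7 term plus the
 * round-up for a partially filled final continuation entry.
 */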
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 2) {
        iocbs += (dsds - 2) / 5;
        if ((dsds - 2) % 5)
            iocbs++;
    }
    return (iocbs);
}
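/*
 * Note: the 64-bit variant packs only two data segment descriptors into
 * the Command Type 3 IOCB and five into each Continuation Type 1 IOCB,
 * since each descriptor now carries a 64-bit DMA address.
 */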
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
    cont_entry_t *cont_pkt;
    struct req_que *req = vha->req;
    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }

    cont_pkt = (cont_entry_t *)req->ring_ptr;

    /* Load packet defaults. */
    *((uint32_t *)(&cont_pkt->entry_type)) =
        __constant_cpu_to_le32(CONTINUE_TYPE);

    return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
    cont_a64_entry_t *cont_pkt;
    struct req_que *req = vha->req;
    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else {
        req->ring_ptr++;
    }

    cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

    /* Load packet defaults. */
    *((uint32_t *)(&cont_pkt->entry_type)) =
        __constant_cpu_to_le32(CONTINUE_A64_TYPE);

    return (cont_pkt);
}
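/*
 * qla24xx_configure_prot_mode() maps the SCSI midlayer protection
 * operation for this command onto the firmware's PO_MODE_DIF_* protection
 * opcode bits and returns the number of protection scatter/gather entries
 * the command carries (0 when no T10 DIF handling is needed).
 */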
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
    uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

    /* We only support T10 DIF right now */
    if (guard != SHOST_DIX_GUARD_CRC) {
        DEBUG2(printk(KERN_ERR
            "Unsupported guard: %d\n", guard));
        return 0;
    }

    /* We always use DIFF Bundling for best performance */
    *fw_prot_opts = 0;

    /* Translate SCSI opcode to a protection opcode */
    switch (scsi_get_prot_op(sp->cmd)) {
    case SCSI_PROT_READ_STRIP:
        *fw_prot_opts |= PO_MODE_DIF_REMOVE;
        break;
    case SCSI_PROT_WRITE_INSERT:
        *fw_prot_opts |= PO_MODE_DIF_INSERT;
        break;
    case SCSI_PROT_READ_INSERT:
        *fw_prot_opts |= PO_MODE_DIF_INSERT;
        break;
    case SCSI_PROT_WRITE_STRIP:
        *fw_prot_opts |= PO_MODE_DIF_REMOVE;
        break;
    case SCSI_PROT_READ_PASS:
        *fw_prot_opts |= PO_MODE_DIF_PASS;
        break;
    case SCSI_PROT_WRITE_PASS:
        *fw_prot_opts |= PO_MODE_DIF_PASS;
        break;
    default:    /* Normal Request */
        *fw_prot_opts |= PO_MODE_DIF_PASS;
        break;
    }

    return scsi_prot_sg_count(sp->cmd);
}
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *sg;
    int i;

    cmd = sp->cmd;

    /* Update entry type to indicate Command Type 2 IOCB */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        __constant_cpu_to_le32(COMMAND_TYPE);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return;
    }

    vha = sp->fcport->vha;
    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

    /* Three DSDs are available in the Command Type 2 IOCB */
    avail_dsds = 3;
    cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

    /* Load data segments */
    scsi_for_each_sg(cmd, sg, tot_dsds, i) {
        cont_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Seven DSDs are available in the Continuation
             * Type 0 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
            cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
            avail_dsds = 7;
        }

        *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
}
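/*
 * Note: the 32-bit routine above stores each data segment as a 32-bit DMA
 * address plus length, while the 64-bit routine below splits the DMA
 * address into LSD/MSD halves before the length, matching the larger
 * Command Type 3 / Continuation Type 1 descriptor layout.
 */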
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *sg;
    int i;

    cmd = sp->cmd;

    /* Update entry type to indicate Command Type 3 IOCB */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        __constant_cpu_to_le32(COMMAND_A64_TYPE);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return;
    }

    vha = sp->fcport->vha;
    cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

    /* Two DSDs are available in the Command Type 3 IOCB */
    avail_dsds = 2;
    cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

    /* Load data segments */
    scsi_for_each_sg(cmd, sg, tot_dsds, i) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
            cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
            avail_dsds = 5;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
}
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    cmd_entry_t *cmd_pkt;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct device_reg_2xxx __iomem *reg;
    struct qla_hw_data *ha;
    struct req_que *req;
    struct rsp_que *rsp;
    char tag[2];

    /* Setup device pointers. */
    vha = sp->fcport->vha;
    ha = vha->hw;
    reg = &ha->iobase->isp;
    cmd = sp->cmd;
    req = ha->req_q_map[0];
    rsp = ha->rsp_q_map[0];
    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
            != QLA_SUCCESS)
            return (QLA_FUNCTION_FAILED);
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);
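    /*
     * The handle search below walks req->outstanding_cmds[] as a circular
     * array, starting just past the last handle issued; exhausting
     * MAX_OUTSTANDING_COMMANDS iterations means no free slot is available
     * and the command cannot be queued.
     */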
    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
        handle++;
        if (handle == MAX_OUTSTANDING_COMMANDS)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }
    if (index == MAX_OUTSTANDING_COMMANDS)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;
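    /*
     * Request-queue free space is tracked in req->cnt; when it looks too
     * small, re-read the firmware's out pointer and recompute the distance
     * between ring_index and that out pointer, accounting for wrap-around
     * of the circular request ring.
     */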
    /* Calculate the number of request entries needed. */
    req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
    if (req->cnt < (req_cnt + 2)) {
        cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
    }
    if (req->cnt < (req_cnt + 2))
        goto queuing_error;

    /* Build command packet */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    cmd_pkt = (cmd_entry_t *)req->ring_ptr;
    cmd_pkt->handle = handle;
    /* Zero out remaining portion of packet. */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set target ID and LUN number*/
    SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
    cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

    /* Update tagged queuing modifier */
    if (scsi_populate_tag_msg(cmd, tag)) {
        switch (tag[0]) {
        case HEAD_OF_QUEUE_TAG:
            cmd_pkt->control_flags =
                __constant_cpu_to_le16(CF_HEAD_TAG);
            break;
        case ORDERED_QUEUE_TAG:
            cmd_pkt->control_flags =
                __constant_cpu_to_le16(CF_ORDERED_TAG);
            break;
        default:
            cmd_pkt->control_flags =
                __constant_cpu_to_le16(CF_SIMPLE_TAG);
            break;
        }
    }

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
    cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

    /* Build IOCB segments */
    ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
    RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla2x00_process_response_queue(rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return (QLA_SUCCESS);

queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
    mrk_entry_t *mrk;
    struct mrk_entry_24xx *mrk24;
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    mrk24 = NULL;
    mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
    if (mrk == NULL) {
        DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
            __func__, base_vha->host_no));

        return (QLA_FUNCTION_FAILED);
    }

    mrk->entry_type = MARKER_TYPE;
    mrk->modifier = type;
    if (type != MK_SYNC_ALL) {
        if (IS_FWI2_CAPABLE(ha)) {
            mrk24 = (struct mrk_entry_24xx *) mrk;
            mrk24->nport_handle = cpu_to_le16(loop_id);
            mrk24->lun[1] = LSB(lun);
            mrk24->lun[2] = MSB(lun);
            host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
            mrk24->vp_index = vha->vp_idx;
            mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
        } else {
            SET_TARGET_ID(ha, mrk->target, loop_id);
            mrk->lun = cpu_to_le16(lun);
        }
    }
    wmb();

    qla2x00_isp_cmd(vha, req);

    return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
    int ret;
    unsigned long flags = 0;

    spin_lock_irqsave(&vha->hw->hardware_lock, flags);
    ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
    spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

    return (ret);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
    struct qla_hw_data *ha = vha->hw;
    device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
    struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

    DEBUG5(printk("%s(): IOCB data:\n", __func__));
    DEBUG5(qla2x00_dump_buffer(
        (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    /* Set chip new ring index. */
    if (IS_QLA82XX(ha)) {
        uint32_t dbval = 0x04 | (ha->portnum << 5);

        /* write, read and verify logic */
        dbval = dbval | (req->id << 8) | (req->ring_index << 16);
        if (ql2xdbwr)
            qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
        else {
            WRT_REG_DWORD(
                (unsigned long __iomem *)ha->nxdb_wr_ptr,
                dbval);
            wmb();
            while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
                WRT_REG_DWORD((unsigned long __iomem *)
                    ha->nxdb_wr_ptr, dbval);
                wmb();
            }
        }
    } else if (ha->mqenable) {
        /* Set chip new ring index. */
        WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
        RD_REG_DWORD(&ioreg->hccr);
    } else {
        if (IS_FWI2_CAPABLE(ha)) {
            WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
            RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
            WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                req->ring_index);
            RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
    }
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 1) {
        iocbs += (dsds - 1) / 5;
        if ((dsds - 1) % 5)
            iocbs++;
    }
    DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
        __func__, iocbs));
    return iocbs;
}
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *sg;
    int i;

    cmd = sp->cmd;

    /* Update entry type to indicate Command Type 3 IOCB */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        __constant_cpu_to_le32(COMMAND_TYPE_7);

    /* No data transfer */
    if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return;
    }

    vha = sp->fcport->vha;

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cmd_pkt->task_mgmt_flags =
            __constant_cpu_to_le16(TMF_WRITE_DATA);
        sp->fcport->vha->hw->qla_stats.output_bytes +=
            scsi_bufflen(sp->cmd);
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cmd_pkt->task_mgmt_flags =
            __constant_cpu_to_le16(TMF_READ_DATA);
        sp->fcport->vha->hw->qla_stats.input_bytes +=
            scsi_bufflen(sp->cmd);
    }

    /* One DSD is available in the Command Type 3 IOCB */
    avail_dsds = 1;
    cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

    /* Load data segments */

    scsi_for_each_sg(cmd, sg, tot_dsds, i) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
            cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
            avail_dsds = 5;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
}
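/*
 * struct fw_dif_context mirrors the protection-information block handed to
 * the firmware: the 32-bit reference tag, the 16-bit application tag, and
 * the per-byte validation/replacement masks for each.
 */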
struct fw_dif_context {
    uint32_t ref_tag;
    uint16_t app_tag;
    uint8_t ref_tag_mask[4];    /* Validation/Replacement Mask*/
    uint8_t app_tag_mask[2];    /* Validation/Replacement Mask*/
};
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
    struct sd_dif_tuple *spt;
    unsigned char op = scsi_get_prot_op(cmd);

    switch (scsi_get_prot_type(cmd)) {
    /* For TYPE 0 protection: no checking */
    case SCSI_PROT_DIF_TYPE0:
        pkt->ref_tag_mask[0] = 0x00;
        pkt->ref_tag_mask[1] = 0x00;
        pkt->ref_tag_mask[2] = 0x00;
        pkt->ref_tag_mask[3] = 0x00;
        break;

    /*
     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
     * match LBA in CDB + N
     */
    case SCSI_PROT_DIF_TYPE2:
        if (!ql2xenablehba_err_chk)
            break;

        if (scsi_prot_sg_count(cmd)) {
            spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
                scsi_prot_sglist(cmd)[0].offset;
            pkt->app_tag = swab32(spt->app_tag);
            pkt->app_tag_mask[0] = 0xff;
            pkt->app_tag_mask[1] = 0xff;
        }

        pkt->ref_tag = cpu_to_le32((uint32_t)
            (0xffffffff & scsi_get_lba(cmd)));

        /* enable ALL bytes of the ref tag */
        pkt->ref_tag_mask[0] = 0xff;
        pkt->ref_tag_mask[1] = 0xff;
        pkt->ref_tag_mask[2] = 0xff;
        pkt->ref_tag_mask[3] = 0xff;
        break;

    /* For Type 3 protection: 16 bit GUARD only */
    case SCSI_PROT_DIF_TYPE3:
        pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
            pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = 0x00;
        break;

    /*
     * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
     * 16 bit app tag.
     */
    case SCSI_PROT_DIF_TYPE1:
        if (!ql2xenablehba_err_chk)
            break;

        if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
            op == SCSI_PROT_WRITE_PASS)) {
            spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
                scsi_prot_sglist(cmd)[0].offset;
            DEBUG18(printk(KERN_DEBUG
                "%s(): LBA from user %p, lba = 0x%x\n",
                __func__, spt, (int)spt->ref_tag));
            pkt->ref_tag = swab32(spt->ref_tag);
            pkt->app_tag_mask[0] = 0x0;
            pkt->app_tag_mask[1] = 0x0;
        } else {
            pkt->ref_tag = cpu_to_le32((uint32_t)
                (0xffffffff & scsi_get_lba(cmd)));
            pkt->app_tag = __constant_cpu_to_le16(0);
            pkt->app_tag_mask[0] = 0x0;
            pkt->app_tag_mask[1] = 0x0;
        }
        /* enable ALL bytes of the ref tag */
        pkt->ref_tag_mask[0] = 0xff;
        pkt->ref_tag_mask[1] = 0xff;
        pkt->ref_tag_mask[2] = 0xff;
        pkt->ref_tag_mask[3] = 0xff;
        break;
    }

    DEBUG18(printk(KERN_DEBUG
        "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
        " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
        " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
        (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
}
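/*
 * qla24xx_walk_and_build_sglist() walks the command's data scatter/gather
 * list and emits firmware DSD entries for it.  DSD lists are carved from
 * the dl_dma_pool in chunks of QLA_DSDS_PER_IOCB entries; each chunk is
 * tracked by a dsd_dma node chained onto the CRC context's dsd_list so it
 * can be released when the command is torn down.
 */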
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds)
{
    void *next_dsd;
    uint8_t avail_dsds = 0;
    uint32_t dsd_list_len;
    struct dsd_dma *dsd_ptr;
    struct scatterlist *sg;
    uint32_t *cur_dsd = dsd;
    int i;
    uint16_t used_dsds = tot_dsds;
    uint8_t *cp;

    scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
        dma_addr_t sle_dma;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                QLA_DSDS_PER_IOCB : used_dsds;
            dsd_list_len = (avail_dsds + 1) * 12;
            used_dsds -= avail_dsds;

            /* allocate tracking DS */
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr)
                return 1;

            /* allocate new list */
            dsd_ptr->dsd_addr = next_dsd =
                dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                    &dsd_ptr->dsd_list_dma);
            if (!next_dsd) {
                /*
                 * Need to cleanup only this dsd_ptr, rest
                 * will be done by sp_free_dma()
                 */
                kfree(dsd_ptr);
                return 1;
            }

            list_add_tail(&dsd_ptr->list,
                &((struct crc_context *)sp->ctx)->dsd_list);

            sp->flags |= SRB_CRC_CTX_DSD_VALID;

            /* add new list to cmd iocb or last list */
            *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = dsd_list_len;
            cur_dsd = (uint32_t *)next_dsd;
        }
        sle_dma = sg_dma_address(sg);
        DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
            " len =%d\n", __func__, cur_dsd, i, LSD(sle_dma),
            MSD(sle_dma), sg_dma_len(sg)));
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;

        if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
            cp = page_address(sg_page(sg)) + sg->offset;
            DEBUG18(printk("%s(): User Data buffer= %p:\n",
                __func__, cp));
        }
    }
    /* Null termination */
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    return 0;
}
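/*
 * qla24xx_walk_and_build_prot_sglist() performs the same DSD-list
 * construction for the protection (DIF) scatter/gather list, using
 * scsi_for_each_prot_sg() instead of the data list.
 */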
static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds)
{
    void *next_dsd;
    uint8_t avail_dsds = 0;
    uint32_t dsd_list_len;
    struct dsd_dma *dsd_ptr;
    struct scatterlist *sg;
    int i;
    struct scsi_cmnd *cmd;
    uint32_t *cur_dsd = dsd;
    uint16_t used_dsds = tot_dsds;
    uint8_t *cp;

    cmd = sp->cmd;
    scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
        dma_addr_t sle_dma;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                QLA_DSDS_PER_IOCB : used_dsds;
            dsd_list_len = (avail_dsds + 1) * 12;
            used_dsds -= avail_dsds;

            /* allocate tracking DS */
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr)
                return 1;

            /* allocate new list */
            dsd_ptr->dsd_addr = next_dsd =
                dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                    &dsd_ptr->dsd_list_dma);
            if (!next_dsd) {
                /*
                 * Need to cleanup only this dsd_ptr, rest
                 * will be done by sp_free_dma()
                 */
                kfree(dsd_ptr);
                return 1;
            }

            list_add_tail(&dsd_ptr->list,
                &((struct crc_context *)sp->ctx)->dsd_list);

            sp->flags |= SRB_CRC_CTX_DSD_VALID;

            /* add new list to cmd iocb or last list */
            *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
            *cur_dsd++ = dsd_list_len;
            cur_dsd = (uint32_t *)next_dsd;
        }
        sle_dma = sg_dma_address(sg);
        if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
            DEBUG18(printk(KERN_DEBUG
                "%s(): %p, sg entry %d - addr =0x%x"
                "0x%x, len =%d\n", __func__, cur_dsd, i,
                LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
        }
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

        if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
            cp = page_address(sg_page(sg)) + sg->offset;
            DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
                __func__, cp));
        }
        avail_dsds--;
    }
    /* Null termination */
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    *cur_dsd++ = 0;
    return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
    uint32_t *cur_dsd, *fcp_dl;
    scsi_qla_host_t *vha;
    struct scsi_cmnd *cmd;
    struct scatterlist *cur_seg;
    uint32_t total_bytes;
    uint32_t data_bytes;
    uint32_t dif_bytes;
    uint8_t bundling = 1;
    uint16_t blk_size;
    uint8_t *clr_ptr;
    struct crc_context *crc_ctx_pkt = NULL;
    struct qla_hw_data *ha;
    uint8_t additional_fcpcdb_len;
    uint16_t fcp_cmnd_len;
    struct fcp_cmnd *fcp_cmnd;
    dma_addr_t crc_ctx_dma;
    char tag[2];

    cmd = sp->cmd;

    /* Update entry type to indicate Command Type CRC_2 IOCB */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

    /* No data transfer */
    data_bytes = scsi_bufflen(cmd);
    if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
        DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
            __func__, data_bytes));
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return QLA_SUCCESS;
    }

    vha = sp->fcport->vha;
    ha = vha->hw;

    DEBUG18(printk(KERN_DEBUG
        "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
        vha->host_no, sp, scsi_get_prot_op(sp->cmd)));

    cmd_pkt->vp_index = sp->fcport->vp_idx;

    /* Set transfer direction */
    if (cmd->sc_data_direction == DMA_TO_DEVICE) {
        cmd_pkt->control_flags =
            __constant_cpu_to_le16(CF_WRITE_DATA);
    } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
        cmd_pkt->control_flags =
            __constant_cpu_to_le16(CF_READ_DATA);
    }

    tot_prot_dsds = scsi_prot_sg_count(cmd);
    if (!tot_prot_dsds)
        bundling = 0;

    /* Allocate CRC context from global pool */
    crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
        GFP_ATOMIC, &crc_ctx_dma);

    if (!crc_ctx_pkt)
        goto crc_queuing_error;

    /* Zero out CTX area. */
    clr_ptr = (uint8_t *)crc_ctx_pkt;
    memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

    crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

    sp->flags |= SRB_CRC_CTX_DMA_VALID;

    /* Set handle */
    crc_ctx_pkt->handle = cmd_pkt->handle;

    INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

    qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
        &crc_ctx_pkt->ref_tag, tot_prot_dsds);

    cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
    cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
    cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

    /* Determine SCSI command length -- align to 4 byte boundary */
    if (cmd->cmd_len > 16) {
        DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
            __func__));
        additional_fcpcdb_len = cmd->cmd_len - 16;
        if ((cmd->cmd_len % 4) != 0) {
            /* SCSI cmd > 16 bytes must be multiple of 4 */
            goto crc_queuing_error;
        }
        fcp_cmnd_len = 12 + cmd->cmd_len + 4;
    } else {
        additional_fcpcdb_len = 0;
        fcp_cmnd_len = 12 + 16 + 4;
    }
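    /*
     * FCP_CMND length: 12 bytes of fixed header (LUN and task codes), the
     * CDB itself (16 bytes, or more for an extended CDB padded to a 4-byte
     * multiple), and the trailing 4-byte FCP_DL field that is filled in
     * with the total transfer length further below.
     */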
    fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

    fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
    if (cmd->sc_data_direction == DMA_TO_DEVICE)
        fcp_cmnd->additional_cdb_len |= 1;
    else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
        fcp_cmnd->additional_cdb_len |= 2;

    int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
    host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
    memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
    cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
    cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
        LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
    cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
        MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
    fcp_cmnd->task_management = 0;

    /*
     * Update tagged queuing modifier if using command tag queuing
     */
    if (scsi_populate_tag_msg(cmd, tag)) {
        switch (tag[0]) {
        case HEAD_OF_QUEUE_TAG:
            fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
            break;
        case ORDERED_QUEUE_TAG:
            fcp_cmnd->task_attribute = TSK_ORDERED;
            break;
        default:
            fcp_cmnd->task_attribute = 0;
            break;
        }
    } else {
        fcp_cmnd->task_attribute = 0;
    }

    cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

    DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
        "entries %d, data bytes %d, Protection entries %d\n",
        __func__, vha->host_no, tot_dsds, (tot_dsds - tot_prot_dsds),
        data_bytes, tot_prot_dsds));

    /* Compute dif len and adjust data len to include protection */
    total_bytes = data_bytes;
    dif_bytes = 0;
    blk_size = cmd->device->sector_size;
    if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
        dif_bytes = (data_bytes / blk_size) * 8;
        total_bytes += dif_bytes;
    }
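    /*
     * Each logical block carries one 8-byte DIF tuple on the wire, so the
     * protection byte count is 8 bytes per block of the data transfer; it
     * is folded into total_bytes, which becomes the Fibre Channel byte
     * count and the FCP_DL value.
     */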
    if (!ql2xenablehba_err_chk)
        fw_prot_opts |= 0x10; /* Disable Guard tag checking */

    if (!bundling) {
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
    } else {
        /*
         * Configure Bundling if we need to fetch interleaving
         * protection PCI accesses
         */
        fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
        crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
        crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
            tot_prot_dsds);
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
    }

    /* Finish the common fields of CRC pkt */
    crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
    crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
    crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
    crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
    /* Fibre channel byte count */
    cmd_pkt->byte_count = cpu_to_le32(total_bytes);
    fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
        additional_fcpcdb_len);
    *fcp_dl = htonl(total_bytes);

    DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
        " = 0x%x (%d), data block size =0x%x (%d)\n", __func__,
        vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
        crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));

    if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
        DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
            __func__, data_bytes));
        cmd_pkt->byte_count = __constant_cpu_to_le32(0);
        return QLA_SUCCESS;
    }
    /* Walks data segments */

    cmd_pkt->control_flags |=
        __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
    if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
        (tot_dsds - tot_prot_dsds)))
        goto crc_queuing_error;

    if (bundling && tot_prot_dsds) {
        /* Walks dif segments */
        cur_seg = scsi_prot_sglist(cmd);
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
        if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
            tot_prot_dsds))
            goto crc_queuing_error;
    }
    return QLA_SUCCESS;

crc_queuing_error:
    DEBUG18(qla_printk(KERN_INFO, ha,
        "CMD sent FAILED crc_q error:sp = %p\n", sp));
    /* Cleanup will be performed by the caller */

    return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    struct cmd_type_7 *cmd_pkt;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    struct scsi_cmnd *cmd = sp->cmd;
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    char tag[2];

    /* Setup device pointers. */
    qla25xx_set_que(sp, &rsp);
    req = vha->req;

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
            != QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
        handle++;
        if (handle == MAX_OUTSTANDING_COMMANDS)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }
    if (index == MAX_OUTSTANDING_COMMANDS)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;

    req_cnt = qla24xx_calc_iocbs(tot_dsds);
    if (req->cnt < (req_cnt + 2)) {
        cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
    }
    if (req->cnt < (req_cnt + 2))
        goto queuing_error;

    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
    cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

    /* Zero out remaining portion of packet. */
    /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
    cmd_pkt->vp_index = sp->fcport->vp_idx;

    int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
    if (scsi_populate_tag_msg(cmd, tag)) {
        switch (tag[0]) {
        case HEAD_OF_QUEUE_TAG:
            cmd_pkt->task = TSK_HEAD_OF_QUEUE;
            break;
        case ORDERED_QUEUE_TAG:
            cmd_pkt->task = TSK_ORDERED;
            break;
        }
    }

    /* Load SCSI command packet. */
    memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
    host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

    cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

    /* Build IOCB segments */
    qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

    /* Set total data segment count. */
    cmd_pkt->entry_count = (uint8_t)req_cnt;
    /* Specify response queue number where completion should happen */
    cmd_pkt->entry_status = (uint8_t) rsp->id;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    WRT_REG_DWORD(req->req_q_in, req->ring_index);
    RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_SUCCESS;

queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    uint16_t cnt;
    uint16_t req_cnt = 0;
    uint16_t tot_dsds;
    uint16_t tot_prot_dsds;
    uint16_t fw_prot_opts = 0;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;
    struct scsi_cmnd *cmd = sp->cmd;
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct cmd_type_crc_2 *cmd_pkt;
    uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

    /* Only process protection or >16 cdb in this routine */
    if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
        if (cmd->cmd_len <= 16)
            return qla24xx_start_scsi(sp);
    }

    /* Setup device pointers. */
    qla25xx_set_que(sp, &rsp);
    req = vha->req;

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
            QLA_SUCCESS)
            return QLA_FUNCTION_FAILED;
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
        handle++;
        if (handle == MAX_OUTSTANDING_COMMANDS)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }

    if (index == MAX_OUTSTANDING_COMMANDS)
        goto queuing_error;

    /* Compute number of required data segments */
    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
        else
            sp->flags |= SRB_DMA_VALID;
    } else
        nseg = 0;

    /* number of required data segments */
    tot_dsds = nseg;

    /* Compute number of required protection segments */
    if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
            scsi_prot_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
        else
            sp->flags |= SRB_CRC_PROT_DMA_VALID;
    } else {
        nseg = 0;
    }

    req_cnt = 1;
    /* Total Data and protection sg segment(s) */
    tot_prot_dsds = nseg;
    tot_dsds += nseg;
    if (req->cnt < (req_cnt + 2)) {
        cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
    }

    if (req->cnt < (req_cnt + 2))
        goto queuing_error;

    status |= QDSS_GOT_Q_SPACE;

    /* Build header part of command packet (excluding the OPCODE). */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;

    /* Fill-in common area */
    cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
    cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    /* Set NPORT-ID and LUN number*/
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
    cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
    cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

    int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
    host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

    /* Total Data and protection segment(s) */
    cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

    /* Build IOCB segments and adjust for data protection segments */
    if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
        req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
        QLA_SUCCESS)
        goto queuing_error;

    cmd_pkt->entry_count = (uint8_t)req_cnt;
    /* Specify response queue number where completion should happen */
    cmd_pkt->entry_status = (uint8_t) rsp->id;
    cmd_pkt->timeout = __constant_cpu_to_le16(0);
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    /* Set chip new ring index. */
    WRT_REG_DWORD(req->req_q_in, req->ring_index);
    RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_SUCCESS;

queuing_error:
    if (status & QDSS_GOT_Q_SPACE) {
        req->outstanding_cmds[handle] = NULL;
        req->cnt += req_cnt;
    }
    /* Cleanup will be performed by the caller (queuecommand) */

    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    DEBUG18(qla_printk(KERN_INFO, ha,
        "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
    return QLA_FUNCTION_FAILED;
}
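/*
 * qla25xx_set_que() picks the response queue a command should complete on:
 * when CPU affinity is enabled, the queue matching the submitting CPU is
 * used, otherwise everything funnels through response queue 0.
 */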
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
    struct scsi_cmnd *cmd = sp->cmd;
    struct qla_hw_data *ha = sp->fcport->vha->hw;
    int affinity = cmd->request->cpu;

    if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
        affinity < ha->max_rsp_queues - 1)
        *rsp = ha->rsp_q_map[affinity + 1];
    else
        *rsp = ha->rsp_q_map[0];
}
/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];
    device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
    uint32_t index, handle;
    request_t *pkt;
    uint16_t cnt, req_cnt;

    pkt = NULL;
    req_cnt = 1;
    handle = 0;

    if (!sp)
        goto skip_cmd_array;

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
        handle++;
        if (handle == MAX_OUTSTANDING_COMMANDS)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }
    if (index == MAX_OUTSTANDING_COMMANDS)
        goto queuing_error;

    /* Prep command array. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;

skip_cmd_array:
    /* Check for room on request queue. */
    if (req->cnt < req_cnt) {
        if (ha->mqenable)
            cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
        else if (IS_QLA82XX(ha))
            cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
        else if (IS_FWI2_CAPABLE(ha))
            cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
        else
            cnt = qla2x00_debounce_register(
                ISP_REQ_Q_OUT(ha, &reg->isp));

        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
    }
    if (req->cnt < req_cnt)
        goto queuing_error;

    /* Prep packet */
    req->cnt -= req_cnt;
    pkt = req->ring_ptr;
    memset(pkt, 0, REQUEST_ENTRY_SIZE);
    pkt->entry_count = req_cnt;
    pkt->handle = handle;

queuing_error:
    return pkt;
}
static void
qla2x00_start_iocbs(srb_t *sp)
{
    struct qla_hw_data *ha = sp->fcport->vha->hw;
    struct req_que *req = ha->req_q_map[0];
    device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
    struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

    if (IS_QLA82XX(ha)) {
        qla82xx_start_iocbs(sp);
    } else {
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
            req->ring_index = 0;
            req->ring_ptr = req->ring;
        } else
            req->ring_ptr++;

        /* Set chip new ring index. */
        if (ha->mqenable) {
            WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
            RD_REG_DWORD(&ioreg->hccr);
        } else if (IS_QLA82XX(ha)) {
            qla82xx_start_iocbs(sp);
        } else if (IS_FWI2_CAPABLE(ha)) {
            WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
            RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
            WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                req->ring_index);
            RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
    }
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    struct srb_ctx *ctx = sp->ctx;
    struct srb_iocb *lio = ctx->u.iocb_cmd;

    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
    if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
        logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
    if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
        logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->fcport->vha->hw;
    struct srb_ctx *ctx = sp->ctx;
    struct srb_iocb *lio = ctx->u.iocb_cmd;
    uint16_t opts;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
    opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
    opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
    if (HAS_EXTENDED_IDS(ha)) {
        mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
        mbx->mb10 = cpu_to_le16(opts);
    } else {
        mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
    }
    mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
    mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
        sp->fcport->d_id.b.al_pa);
    mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags =
        cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->fcport->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
    mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
        cpu_to_le16(sp->fcport->loop_id):
        cpu_to_le16(sp->fcport->loop_id << 8);
    mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
    mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
        sp->fcport->d_id.b.al_pa);
    mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
    /* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->fcport->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
    if (HAS_EXTENDED_IDS(ha)) {
        mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
        mbx->mb10 = cpu_to_le16(BIT_0);
    } else {
        mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
    }
    mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
    mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
    mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
    mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
    mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
    uint32_t flags;
    uint16_t lun;
    struct fc_port *fcport = sp->fcport;
    scsi_qla_host_t *vha = fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct srb_ctx *ctx = sp->ctx;
    struct srb_iocb *iocb = ctx->u.iocb_cmd;
    struct req_que *req = vha->req;

    flags = iocb->u.tmf.flags;
    lun = iocb->u.tmf.lun;

    tsk->entry_type = TSK_MGMT_IOCB_TYPE;
    tsk->entry_count = 1;
    tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
    tsk->nport_handle = cpu_to_le16(fcport->loop_id);
    tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
    tsk->control_flags = cpu_to_le32(flags);
    tsk->port_id[0] = fcport->d_id.b.al_pa;
    tsk->port_id[1] = fcport->d_id.b.area;
    tsk->port_id[2] = fcport->d_id.b.domain;
    tsk->vp_index = fcport->vp_idx;

    if (flags == TCF_LUN_RESET) {
        int_to_scsilun(lun, &tsk->lun);
        host_to_fcp_swap((uint8_t *)&tsk->lun,
            sizeof(tsk->lun));
    }
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
    struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;

    els_iocb->entry_type = ELS_IOCB_TYPE;
    els_iocb->entry_count = 1;
    els_iocb->sys_define = 0;
    els_iocb->entry_status = 0;
    els_iocb->handle = sp->handle;
    els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
    els_iocb->vp_index = sp->fcport->vp_idx;
    els_iocb->sof_type = EST_SOFI3;
    els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

    els_iocb->opcode =
        (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
        bsg_job->request->rqst_data.r_els.els_code :
        bsg_job->request->rqst_data.h_els.command_code;
    els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
    els_iocb->port_id[1] = sp->fcport->d_id.b.area;
    els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
    els_iocb->control_flags = 0;
    els_iocb->rx_byte_count =
        cpu_to_le32(bsg_job->reply_payload.payload_len);
    els_iocb->tx_byte_count =
        cpu_to_le32(bsg_job->request_payload.payload_len);

    els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    els_iocb->tx_len = cpu_to_le32(sg_dma_len
        (bsg_job->request_payload.sg_list));

    els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    els_iocb->rx_len = cpu_to_le32(sg_dma_len
        (bsg_job->reply_payload.sg_list));
}
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    struct scatterlist *sg;
    int index;
    uint16_t tot_dsds;
    scsi_qla_host_t *vha = sp->fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
    int loop_iterartion = 0;
    int cont_iocb_prsnt = 0;
    int entry_count = 1;

    memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
    ct_iocb->entry_type = CT_IOCB_TYPE;
    ct_iocb->entry_status = 0;
    ct_iocb->handle1 = sp->handle;
    SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
    ct_iocb->status = __constant_cpu_to_le16(0);
    ct_iocb->control_flags = __constant_cpu_to_le16(0);
    ct_iocb->timeout = 0;
    ct_iocb->cmd_dsd_count =
        __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
    ct_iocb->total_dsd_count =
        __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
    ct_iocb->req_bytecount =
        cpu_to_le32(bsg_job->request_payload.payload_len);
    ct_iocb->rsp_bytecount =
        cpu_to_le32(bsg_job->reply_payload.payload_len);

    ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

    ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

    avail_dsds = 1;
    cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
    index = 0;
    tot_dsds = bsg_job->reply_payload.sg_cnt;

    for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Cont.
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            cont_iocb_prsnt = 1;
            entry_count++;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        loop_iterartion++;
        avail_dsds--;
    }
    ct_iocb->entry_count = entry_count;
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    struct scatterlist *sg;
    int index;
    uint16_t tot_dsds;
    scsi_qla_host_t *vha = sp->fcport->vha;
    struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
    int loop_iterartion = 0;
    int cont_iocb_prsnt = 0;
    int entry_count = 1;

    ct_iocb->entry_type = CT_IOCB_TYPE;
    ct_iocb->entry_status = 0;
    ct_iocb->sys_define = 0;
    ct_iocb->handle = sp->handle;

    ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    ct_iocb->vp_index = sp->fcport->vp_idx;
    ct_iocb->comp_status = __constant_cpu_to_le16(0);

    ct_iocb->cmd_dsd_count =
        __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
    ct_iocb->timeout = 0;
    ct_iocb->rsp_dsd_count =
        __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
    ct_iocb->rsp_byte_count =
        cpu_to_le32(bsg_job->reply_payload.payload_len);
    ct_iocb->cmd_byte_count =
        cpu_to_le32(bsg_job->request_payload.payload_len);
    ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
        (bsg_job->request_payload.sg_list));

    avail_dsds = 1;
    cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
    index = 0;
    tot_dsds = bsg_job->reply_payload.sg_cnt;

    for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Cont.
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            cont_iocb_prsnt = 1;
            entry_count++;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        loop_iterartion++;
        avail_dsds--;
    }
    ct_iocb->entry_count = entry_count;
}
int
qla2x00_start_sp(srb_t *sp)
{
    int rval;
    struct qla_hw_data *ha = sp->fcport->vha->hw;
    void *pkt;
    struct srb_ctx *ctx = sp->ctx;
    unsigned long flags;

    rval = QLA_FUNCTION_FAILED;
    spin_lock_irqsave(&ha->hardware_lock, flags);
    pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
    if (!pkt)
        goto done;

    rval = QLA_SUCCESS;
    switch (ctx->type) {
    case SRB_LOGIN_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_login_iocb(sp, pkt) :
            qla2x00_login_iocb(sp, pkt);
        break;
    case SRB_LOGOUT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_logout_iocb(sp, pkt) :
            qla2x00_logout_iocb(sp, pkt);
        break;
    case SRB_ELS_CMD_RPT:
    case SRB_ELS_CMD_HST:
        qla24xx_els_iocb(sp, pkt);
        break;
    case SRB_CT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_ct_iocb(sp, pkt) :
            qla2x00_ct_iocb(sp, pkt);
        break;
    case SRB_ADISC_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_adisc_iocb(sp, pkt) :
            qla2x00_adisc_iocb(sp, pkt);
        break;
    case SRB_TM_CMD:
        qla24xx_tm_iocb(sp, pkt);
        break;
    default:
        break;
    }

    wmb();
    qla2x00_start_iocbs(sp);
done:
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return rval;
}