/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
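/*
 * Illustrative example (not from the original source): a Command Type 2
 * IOCB carries 3 DSDs and each Continuation Type 0 IOCB carries 7 more,
 * so dsds = 12 gives 1 + (12 - 3) / 7 = 2 entries plus one extra IOCB for
 * the remaining (12 - 3) % 7 = 2 descriptors, i.e. 3 entries in total.
 */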
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
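/*
 * Illustrative example (not from the original source): a Command Type 3
 * IOCB carries 2 DSDs and each Continuation Type 1 IOCB carries 5 more,
 * so dsds = 12 needs 1 + (12 - 2) / 5 = 3 entries exactly, with no
 * remainder IOCB.
 */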
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
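/*
 * Editorial note: both helpers above advance the request ring by one slot,
 * so every continuation packet consumes the same ring space as a command
 * IOCB; the qla2x00_calc_iocbs_*() results are what callers use to reserve
 * that space up front.
 */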
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	uint8_t	guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
		return 0;
	}

	/* We always use DIF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(sp->cmd);
}
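/*
 * Editorial note: the protection opcodes above are named from the HBA's
 * point of view -- READ_STRIP/WRITE_INSERT mean the firmware removes or
 * adds the 8-byte DIF tuple while the host buffer stays unprotected, and
 * the *_PASS modes carry protection data through unchanged in both
 * directions.
 */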
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
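/*
 * Editorial note: LSD()/MSD() split a 64-bit DMA address into its low and
 * high 32-bit halves, which is why each 64-bit DSD occupies three words
 * (address low, address high, length) versus two in the 32-bit format.
 */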
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char		tag[2];

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
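/*
 * Editorial note on the free-slot math above: with a ring of
 * req->length = 2048 entries, producer ring_index = 2000 and hardware
 * out pointer cnt = 100, the free count is 2048 - (2000 - 100) = 148
 * entries; when the producer trails the consumer, the simpler
 * cnt - ring_index form applies instead.
 */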
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");
		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
	    "IOCB data:\n");
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		if (ql2xdbwr)
			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		else {
			WRT_REG_DWORD(
				(unsigned long __iomem *)ha->nxdb_wr_ptr,
				dbval);
			wmb();
			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
				WRT_REG_DWORD((unsigned long __iomem *)
					ha->nxdb_wr_ptr, dbval);
				wmb();
			}
		}
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
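/*
 * Editorial note: the ISP82xx path above posts the ring index through a
 * memory-mapped doorbell and re-reads it until the value sticks, since
 * that chip gives no other confirmation that the write was accepted; the
 * other paths simply write the request-queue "in" pointer register.
 */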
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
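/*
 * Illustrative example (not from the original source): the Type 7 command
 * IOCB holds a single DSD, so dsds = 11 gives 1 + (11 - 1) / 5 = 3 entries
 * with no remainder.
 */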
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = sp->cmd;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	ql_dbg(ql_dbg_io, vha, 0x3009,
	    "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
	    "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
	    pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
	    scsi_get_prot_type(cmd), cmd);
}
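/*
 * Editorial note: each 8-byte DIF tuple carries a 16-bit guard (CRC), a
 * 16-bit application tag and a 32-bit reference tag; the masks written
 * above select which of those bytes the firmware actually verifies or
 * replaces, which is why Type 3 clears the ref-tag mask entirely.
 */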
struct qla2_sgx {
	dma_addr_t	dma_addr;	/* OUT */
	uint32_t	dma_len;	/* OUT */

	uint32_t	tot_bytes;	/* IN */
	struct scatterlist *cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t	bytes_consumed;
	uint32_t	num_bytes;
	uint32_t	tot_partial;
};
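/*
 * Editorial sketch of the walker below: the caller zeroes a struct
 * qla2_sgx, points cur_sg at the data scatterlist and sets tot_bytes,
 * then calls qla24xx_get_one_block_sg() repeatedly; each call emits at
 * most one DMA run (dma_addr/dma_len) and reports via *partial whether
 * that run ended short of a full protection-interval block.
 */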
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sgx->cur_sg = sg_next(sg);
		sgx->bytes_consumed = 0;
	}

	return 1;
}
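/*
 * Illustrative example (not from the original source): with 512-byte
 * blocks, an sg element of 300 bytes followed by one of 1236 bytes is
 * emitted as 300 (partial), then 212 to finish block 1, then 512, then
 * 512, consuming both elements on protection-interval boundaries.
 */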
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	uint32_t	prot_int;
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = sp->cmd;

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(sp->cmd);
	sgx.cur_sg = scsi_sglist(sp->cmd);

	sg_prot = scsi_prot_sglist(sp->cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
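/*
 * Editorial note: each DSD is 12 bytes (address low/high plus length), and
 * the "+ 1" in dsd_list_len = (avail_dsds + 1) * 12 appears to reserve
 * room for the trailing entry that either chains to the next allocated
 * list or carries the null termination written above.
 */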
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
	uint8_t		*cp;

	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
		    sp->cmd);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x300b,
			    "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	uint8_t		*cp;

	cmd = sp->cmd;
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			ql_dbg(ql_dbg_io, vha, 0x3027,
			    "%s(): %p, sg_entry %d - "
			    "addr=0x%x0x%x, len=%d.\n",
			    __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x3028,
			    "%s(): Protection Data buffer = %p.\n", __func__,
			    cp);
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	struct scatterlist	*cur_seg;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;
	char			tag[2];

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
	    GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = 0;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		break;
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
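/*
 * Illustrative example (not from the original source): a 64 KB transfer on
 * a 512-byte-sector device has dif_bytes = (65536 / 512) * 8 = 1024.  For
 * the *_PASS, READ_STRIP and WRITE_INSERT cases the wire count becomes
 * total_bytes = 65536 + 1024, while READ_INSERT/WRITE_STRIP keep
 * total_bytes at 65536 and only grow the host-side data_bytes.
 */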
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char		tag[2];

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS) {
		goto queuing_error;
	}

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = sp->cmd;
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}

	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
		affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
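/*
 * Editorial note: rsp_q_map[0] is the default response queue; with CPU
 * affinity enabled the queue becomes rsp_q_map[cpu + 1], so a request
 * completes on a queue tied to the submitting CPU whenever one exists.
 */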
/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_QLA82XX(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;

queuing_error:
	return pkt;
}
static void
qla2x00_start_iocbs(srb_t *sp)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(sp);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable) {
			WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
			RD_REG_DWORD(&ioreg->hccr);
		} else if (IS_QLA82XX(ha)) {
			qla82xx_start_iocbs(sp);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *lio = ctx->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id):
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	unsigned int lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx = sp->ctx;
	struct srb_iocb *iocb = ctx->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
			sizeof(tsk->lun));
	}
}
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (ctx->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		qla24xx_tm_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}