/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return iocbs;
}
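/*
 * Worked example (illustrative): a request with 17 DSDs uses the three
 * slots in the Command Type 2 IOCB plus ceil((17 - 3) / 7) = 2
 * Continuation Type 0 IOCBs, i.e. 3 request-ring entries in total.
 */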
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}
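/*
 * Worked example (illustrative): with the 64-bit layout, 17 DSDs use the
 * two slots in the Command Type 3 IOCB plus ceil((17 - 2) / 5) = 3
 * Continuation Type 1 IOCBs, i.e. 4 request-ring entries in total.
 */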
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
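/*
 * Note: both continuation helpers above advance the request-queue producer
 * position and wrap ring_ptr back to the start of the ring once ring_index
 * reaches the ring length, so callers can keep appending DSDs without
 * tracking the wrap themselves.
 */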
int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
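/*
 * Summary (descriptive): the strip operations map to PO_MODE_DIF_REMOVE,
 * the insert operations map to PO_MODE_DIF_INSERT, and the pass-through
 * operations map to PO_MODE_DIF_PASS -- or PO_MODE_DIF_TCP_CKSUM when the
 * host only supports an IP-style (SHOST_DIX_GUARD_IP) guard tag.
 */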
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
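/*
 * Note: each 32-bit DSD written above is a pair of little-endian words --
 * the DMA address of the segment followed by its length in bytes.
 */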
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
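/*
 * Note: the 64-bit DSD format uses three little-endian words per segment:
 * the low 32 bits of the DMA address (LSD), the high 32 bits (MSD), and
 * the segment length in bytes.
 */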
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
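/*
 * Ring accounting example (illustrative): with a 128-entry request ring,
 * ring_index == 120 and a chip out-pointer of 8, the free count becomes
 * req->length - (ring_index - cnt) = 128 - 112 = 16 entries; the "+ 2"
 * headroom in the checks above keeps the ring from being filled
 * completely.
 */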
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @req: request queue
 * @rsp: response queue
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
static int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of dsd list needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
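/*
 * Worked example (illustrative): dsds = 2 * QLA_DSDS_PER_IOCB + 1 needs
 * three DSD lists -- two full lists plus one more for the remainder.
 */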
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * and Continuation Type 1 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = 0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
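/*
 * Note: a 0xff mask byte asks the firmware to validate/replace that byte
 * of the reference tag, while the zeroed application-tag masks leave the
 * app tag unchecked; the ref-tag masks are only filled in when HBA error
 * checking is enabled for the command.
 */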
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
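/*
 * Example (illustrative): with a 512-byte protection interval and a
 * 1280-byte scatter/gather element, successive calls return chunks of
 * 512, 512 and 256 bytes; the last call reports *partial = 1 and the
 * leftover 256 bytes are carried into the next SG element via
 * tot_partial.
 */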
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	uint32_t	prot_int; /* protection interval */
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int      = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg    = tc->sg;
		sg_prot	      = tc->prot_sg;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
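/*
 * Note: each DSD in a pool-allocated list is 12 bytes (address low,
 * address high, length); dsd_list_len reserves one extra 12-byte slot
 * beyond avail_dsds, which holds the chain entry to the next list or the
 * null terminator.
 */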
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
    uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
	    "%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
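/*
 * Note on the arithmetic above: each protection interval (one logical
 * block of blk_size bytes) carries 8 bytes of DIF, so
 * dif_bytes = (data_bytes / blk_size) * 8. total_bytes becomes the FCP_DL
 * value (wire byte count) stored big-endian via htonl() behind the CDB,
 * while crc_ctx_pkt->byte_count carries the host-side data length.
 */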
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;
	struct qla_qpair	*qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}

	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_FUNCTION_FAILED;
}
/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */

void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (qpair->use_shadow_reg)
			cnt = *req->out_ptr;
		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}
void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}
void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}
static void
qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
		logio->control_flags |= LCF_NVME_PRLI;

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);

	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}
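
/**
 * qla24xx_logout_iocb() - Build an implicit LOGO login/out port IOCB,
 *	releasing the N_Port handle unless the session asks to keep it.
 * @sp: SRB carrying the logout parameters
 * @logio: IOCB packet to populate
 */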
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	if (!sp->fcport->se_sess ||
	    !sp->fcport->keep_nport_handle)
		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}
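
/**
 * qla24xx_tm_iocb() - Build a task management IOCB.
 * @sp: SRB carrying the TM flags and LUN
 * @tsk: IOCB packet to populate
 *
 * The firmware timeout is derived from R_A_TOV; the LUN field is only
 * filled in for a LUN reset.
 */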
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	uint64_t lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}
static void
qla2x00_els_dcmd_sp_free(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;

	if (elsio->u.els_logo.els_logo_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
		    elsio->u.els_logo.els_logo_pyld,
		    elsio->u.els_logo.els_logo_pyld_dma);

	del_timer(&elsio->timer);
}
static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_io, vha, 0x3069,
	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	complete(&lio->u.els_logo.comp);
}
static void
qla2x00_els_dcmd_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	ql_dbg(ql_dbg_io, vha, 0x3072,
	    "%s hdl=%x, portid=%02x%02x%02x done\n",
	    sp->name, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	complete(&lio->u.els_logo.comp);
}
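
/**
 * qla24xx_els_dcmd_iocb() - Issue a driver-generated ELS direct command
 *	(LOGO) to a remote port and wait for it to complete.
 * @vha: host adapter
 * @els_opcode: ELS opcode to place in the payload
 * @remote_did: destination port ID
 *
 * Allocates a temporary fcport and SRB, builds the LOGO payload in
 * DMA-coherent memory, starts the SRB and blocks on its completion.
 */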
int
qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
    port_id_t remote_did)
{
	srb_t *sp;
	fc_port_t *fcport = NULL;
	struct srb_iocb *elsio = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct els_logo_payload logo_pyld;
	int rval = QLA_SUCCESS;

	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
		return -ENOMEM;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		kfree(fcport);
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
		return -ENOMEM;
	}

	elsio = &sp->u.iocb_cmd;
	fcport->loop_id = 0xFFFF;
	fcport->d_id.b.domain = remote_did.b.domain;
	fcport->d_id.b.area = remote_did.b.area;
	fcport->d_id.b.al_pa = remote_did.b.al_pa;

	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	sp->type = SRB_ELS_DCMD;
	sp->name = "ELS_DCMD";
	sp->fcport = fcport;
	elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
	sp->done = qla2x00_els_dcmd_sp_done;
	sp->free = qla2x00_els_dcmd_sp_free;

	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
	    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
	    GFP_KERNEL);

	if (!elsio->u.els_logo.els_logo_pyld) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}

	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));

	elsio->u.els_logo.els_cmd = els_opcode;
	logo_pyld.opcode = els_opcode;
	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
	logo_pyld.s_id[1] = vha->d_id.b.area;
	logo_pyld.s_id[2] = vha->d_id.b.domain;
	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);

	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
	    sizeof(struct els_logo_payload));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_io, vha, 0x3074,
	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	wait_for_completion(&elsio->u.els_logo.comp);

	sp->free(sp);
	return rval;
}
static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	scsi_qla_host_t *vha = sp->vha;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;
	uint32_t dsd_len = 24;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = 1;
	els_iocb->vp_index = vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = 0;
	els_iocb->opcode = elsio->u.els_logo.els_cmd;

	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->s_id[0] = vha->d_id.b.al_pa;
	els_iocb->s_id[1] = vha->d_id.b.area;
	els_iocb->s_id[2] = vha->d_id.b.domain;
	els_iocb->control_flags = 0;

	if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
		els_iocb->tx_byte_count = sizeof(struct els_plogi_payload);
		els_iocb->tx_address[0] =
		    cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
		els_iocb->tx_address[1] =
		    cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
		els_iocb->tx_len = dsd_len;

		els_iocb->rx_dsd_count = 1;
		els_iocb->rx_byte_count = sizeof(struct els_plogi_payload);
		els_iocb->rx_address[0] =
		    cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
		els_iocb->rx_address[1] =
		    cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
		els_iocb->rx_len = dsd_len;
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
		    "PLOGI ELS IOCB:\n");
		ql_dump_buffer(ql_log_info, vha, 0x0109,
		    (uint8_t *)els_iocb, 0x70);
	} else {
		els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
		els_iocb->tx_address[0] =
		    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
		els_iocb->tx_address[1] =
		    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
		els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));

		els_iocb->rx_byte_count = 0;
		els_iocb->rx_address[0] = 0;
		els_iocb->rx_address[1] = 0;
		els_iocb->rx_len = 0;
	}

	sp->vha->qla_stats.control_requests++;
}
static void
qla2x00_els_dcmd2_sp_free(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;

	if (elsio->u.els_plogi.els_plogi_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
		    elsio->u.els_plogi.els_plogi_pyld,
		    elsio->u.els_plogi.els_plogi_pyld_dma);

	if (elsio->u.els_plogi.els_resp_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
		    elsio->u.els_plogi.els_resp_pyld,
		    elsio->u.els_plogi.els_resp_pyld_dma);

	del_timer(&elsio->timer);
}
static void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	unsigned long flags = 0;
	int res;

	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
	    "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
	    sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);

	/* Abort the exchange */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	res = ha->isp_ops->abort_command(sp);
	ql_dbg(ql_dbg_io, vha, 0x3070,
	    "mbx abort_command %s\n",
	    (res == QLA_SUCCESS) ? "successful" : "failed");
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	complete(&lio->u.els_plogi.comp);
}
static void
qla2x00_els_dcmd2_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
	    "%s ELS hdl=%x, portid=%06x done %8phC\n",
	    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

	complete(&lio->u.els_plogi.comp);
}
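
/**
 * qla24xx_els_dcmd2_iocb() - Issue a driver-generated ELS PLOGI to a
 *	remote port and wait for it to complete.
 * @vha: host adapter
 * @els_opcode: ELS opcode to place in the payload
 * @fcport: port the PLOGI is addressed to
 * @remote_did: destination port ID copied into @fcport
 *
 * The PLOGI payload is seeded from the firmware port-login template and
 * the routine blocks until the completion handler fires.
 */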
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
    fc_port_t *fcport, port_id_t remote_did)
{
	srb_t *sp;
	struct srb_iocb *elsio = NULL;
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_SUCCESS;
	void *ptr, *resp_ptr;
	dma_addr_t ptr_dma;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
		return -ENOMEM;
	}

	elsio = &sp->u.iocb_cmd;
	fcport->d_id.b.domain = remote_did.b.domain;
	fcport->d_id.b.area = remote_did.b.area;
	fcport->d_id.b.al_pa = remote_did.b.al_pa;

	ql_dbg(ql_dbg_io, vha, 0x3073,
	    "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);

	sp->type = SRB_ELS_DCMD;
	sp->name = "ELS_DCMD";
	sp->fcport = fcport;

	elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
	init_completion(&elsio->u.els_plogi.comp);
	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);

	sp->done = qla2x00_els_dcmd2_sp_done;
	sp->free = qla2x00_els_dcmd2_sp_free;

	ptr = elsio->u.els_plogi.els_plogi_pyld =
	    dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
		&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
	ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;

	if (!elsio->u.els_plogi.els_plogi_pyld) {
		rval = QLA_FUNCTION_FAILED;
		goto out;
	}

	resp_ptr = elsio->u.els_plogi.els_resp_pyld =
	    dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
		&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);

	if (!elsio->u.els_plogi.els_resp_pyld) {
		rval = QLA_FUNCTION_FAILED;
		goto out;
	}

	ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);

	memset(ptr, 0, sizeof(struct els_plogi_payload));
	memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
	elsio->u.els_plogi.els_cmd = els_opcode;
	elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
	qla24xx_get_port_login_templ(vha, ptr_dma + 4,
	    &elsio->u.els_plogi.els_plogi_pyld->data[0],
	    sizeof(struct els_plogi_payload));

	ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
	    (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		rval = QLA_FUNCTION_FAILED;
		goto out;
	}

	ql_dbg(ql_dbg_io, vha, 0x3074,
	    "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);

	wait_for_completion(&elsio->u.els_plogi.comp);

	if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
		rval = QLA_FUNCTION_FAILED;

out:
	sp->free(sp);
	return rval;
}
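
/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB for a bsg request.
 * @sp: SRB wrapping the bsg_job
 * @els_iocb: IOCB packet to populate
 *
 * Transmit and receive addresses come straight from the bsg request and
 * reply scatter-gather lists.
 */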
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_request->rqst_data.r_els.els_code :
	    bsg_request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));

	sp->vha->qla_stats.control_requests++;
}
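
/**
 * qla2x00_ct_iocb() - Build a CT pass-through IOCB for legacy ISPs.
 * @sp: SRB wrapping the bsg_job
 * @ct_iocb: MS IOCB packet to populate
 *
 * Reply-side data segments beyond the first spill into Continuation
 * Type 1 IOCBs, five DSDs per continuation.
 */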
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = cpu_to_le16(0);
	ct_iocb->control_flags = cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;

	sp->vha->qla_stats.control_requests++;
}
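
/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB for FWI2-capable ISPs.
 * @sp: SRB wrapping the bsg_job
 * @ct_iocb: IOCB packet to populate
 *
 * Both command and response scatter-gather lists are walked; extra data
 * segments spill into Continuation Type 1 IOCBs.
 */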
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t cmd_dsds, rsp_dsds;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	int entry_count = 1;
	cont_a64_entry_t *cont_pkt = NULL;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->vha->vp_idx;
	ct_iocb->comp_status = cpu_to_le16(0);

	cmd_dsds = bsg_job->request_payload.sg_cnt;
	rsp_dsds = bsg_job->reply_payload.sg_cnt;

	ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	avail_dsds = 2;
	cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(
			    vha, ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	index = 0;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;

	/* Setup device pointers. */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* build FCP_CMND IU */
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
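
/**
 * qla24xx_abort_iocb() - Build an abort IOCB for a previously issued command.
 * @sp: SRB for the abort request itself
 * @abt_iocb: IOCB packet to populate
 *
 * The handle of the command being aborted is taken from the SRB's
 * abort sub-structure.
 */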
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req = vha->req;

	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
		aio->u.abt.cmd_hndl));
	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
	/* Send the command to the firmware */
	wmb();
}
static void
qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
	int i, sz;

	mbx->entry_type = MBX_IOCB_TYPE;
	mbx->handle = sp->handle;
	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));

	for (i = 0; i < sz; i++)
		mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
}
static void
qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
{
	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
	ct_pkt->handle = sp->handle;
}
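
/**
 * qla2x00_send_notify_ack_iocb() - Build a Notify Acknowledge IOCB from a
 *	previously received immediate notify.
 * @sp: SRB carrying the original notification
 * @nack: IOCB packet to populate
 */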
static void qla2x00_send_notify_ack_iocb(srb_t *sp,
	struct nack_to_isp *nack)
{
	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;

	nack->entry_type = NOTIFY_ACK_TYPE;
	nack->entry_count = 1;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = sp->handle;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
		    cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = 0;
	nack->u.isp24.srr_reject_code = 0;
	nack->u.isp24.srr_reject_code_expl = 0;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}
/*
 * Build NVME LS request
 */
static int
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
	struct srb_iocb *nvme;
	int rval = QLA_SUCCESS;

	nvme = &sp->u.iocb_cmd;
	cmd_pkt->entry_type = PT_LS4_REQUEST;
	cmd_pkt->entry_count = 1;
	cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;

	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	cmd_pkt->tx_dseg_count = 1;
	cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
	cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
	cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
	cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));

	cmd_pkt->rx_dseg_count = 1;
	cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
	cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
	cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
	cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));

	return rval;
}
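
/**
 * qla25xx_ctrlvp_iocb() - Build a VP control IOCB.
 * @sp: SRB carrying the VP control command and index
 * @vce: IOCB packet to populate
 */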
static void
qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
{
	int map, pos;

	vce->entry_type = VP_CTRL_IOCB_TYPE;
	vce->handle = sp->handle;
	vce->entry_count = 1;
	vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
	vce->vp_count = cpu_to_le16(1);

	/*
	 * index map in firmware starts with 1; decrement index
	 * this is ok as we never use index 0
	 */
	map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
	pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
	vce->vp_idx_map[map] |= 1 << pos;
}
static void
qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}
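
/**
 * qla2x00_start_sp() - Allocate IOCB space for an SRB, build the matching
 *	IOCB by SRB type and kick the request queue.
 * @sp: SRB to start
 *
 * Returns QLA_SUCCESS on success, QLA_FUNCTION_FAILED if no IOCB space
 * could be allocated.
 */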
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_PRLI_CMD:
		qla24xx_prli_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_NVME_LS:
		qla_nvme_ls(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	case SRB_CTRL_VP:
		qla25xx_ctrlvp_iocb(sp, pkt);
		break;
	case SRB_PRLO_CMD:
		qla24xx_prlo_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
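
/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB for a
 *	bsg loopback request.
 * @sp: SRB wrapping the bsg_job
 * @vha: host adapter
 * @cmd_pkt: IOCB packet to populate
 * @tot_dsds: total number of data segment descriptors
 *
 * Only one DSD fits in the base packet; the rest are chained through
 * Continuation Type 1 IOCBs.
 */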
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidir command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction, in this set both flags
	 * Also set the BD_WRAP_BACK flag, firmware will take care
	 * assigning DID=SID for outgoing pkts.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one dsd is available for bidirectional IOCB, remaining dsds
	 * are bundled in continuation iocb
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDS
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* For read request DSD will always goes to continuation IOCB
	 * and follow the write DSD. If there is room on the current IOCB
	 * then it is added to that IOCB else new continuation IOCB is
	 * allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDS
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should be same as number of IOCB required for this cmd */
	cmd_pkt->entry_count = entry_count;
}
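
/**
 * qla2x00_start_bidir() - Queue a bidirectional command on the default
 *	request queue.
 * @sp: SRB wrapping the bsg_job
 * @vha: host adapter
 * @tot_dsds: total number of data segment descriptors
 *
 * Free-slot accounting works the same way as in __qla2x00_alloc_iocbs():
 * if the ring index has wrapped past the firmware's out pointer, free
 * space is length - (ring_index - cnt). As an illustrative example
 * (values are hypothetical), with length=2048, ring_index=100 and cnt=90
 * the queue still has 2048 - (100 - 90) = 2038 entries free.
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY when no handle or
 * queue space is available, EXT_STATUS_MAILBOX if the marker IOCB fails.
 */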
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCB required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha)*/
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);