2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
13 #include <scsi/scsi_tcq.h>
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
19 * Returns the proper CF_* direction based on CDB.
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t
*sp
)
25 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
26 struct scsi_qla_host
*vha
= sp
->vha
;
30 /* Set transfer direction */
31 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
33 vha
->qla_stats
.output_bytes
+= scsi_bufflen(cmd
);
34 vha
->qla_stats
.output_requests
++;
35 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
37 vha
->qla_stats
.input_bytes
+= scsi_bufflen(cmd
);
38 vha
->qla_stats
.input_requests
++;
44 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45 * Continuation Type 0 IOCBs to allocate.
47 * @dsds: number of data segment decriptors needed
49 * Returns the number of IOCB entries needed to store @dsds.
52 qla2x00_calc_iocbs_32(uint16_t dsds
)
58 iocbs
+= (dsds
- 3) / 7;
66 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67 * Continuation Type 1 IOCBs to allocate.
69 * @dsds: number of data segment decriptors needed
71 * Returns the number of IOCB entries needed to store @dsds.
74 qla2x00_calc_iocbs_64(uint16_t dsds
)
80 iocbs
+= (dsds
- 2) / 5;
88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
91 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 static inline cont_entry_t
*
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host
*vha
)
96 cont_entry_t
*cont_pkt
;
97 struct req_que
*req
= vha
->req
;
98 /* Adjust ring index. */
100 if (req
->ring_index
== req
->length
) {
102 req
->ring_ptr
= req
->ring
;
107 cont_pkt
= (cont_entry_t
*)req
->ring_ptr
;
109 /* Load packet defaults. */
110 *((uint32_t *)(&cont_pkt
->entry_type
)) = cpu_to_le32(CONTINUE_TYPE
);
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
118 * @req: request queue
120 * Returns a pointer to the continuation type 1 IOCB packet.
122 static inline cont_a64_entry_t
*
123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t
*vha
, struct req_que
*req
)
125 cont_a64_entry_t
*cont_pkt
;
127 /* Adjust ring index. */
129 if (req
->ring_index
== req
->length
) {
131 req
->ring_ptr
= req
->ring
;
136 cont_pkt
= (cont_a64_entry_t
*)req
->ring_ptr
;
138 /* Load packet defaults. */
139 *((uint32_t *)(&cont_pkt
->entry_type
)) = IS_QLAFX00(vha
->hw
) ?
140 cpu_to_le32(CONTINUE_A64_TYPE_FX00
) :
141 cpu_to_le32(CONTINUE_A64_TYPE
);
147 qla24xx_configure_prot_mode(srb_t
*sp
, uint16_t *fw_prot_opts
)
149 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
150 uint8_t guard
= scsi_host_get_guard(cmd
->device
->host
);
152 /* We always use DIFF Bundling for best performance */
155 /* Translate SCSI opcode to a protection opcode */
156 switch (scsi_get_prot_op(cmd
)) {
157 case SCSI_PROT_READ_STRIP
:
158 *fw_prot_opts
|= PO_MODE_DIF_REMOVE
;
160 case SCSI_PROT_WRITE_INSERT
:
161 *fw_prot_opts
|= PO_MODE_DIF_INSERT
;
163 case SCSI_PROT_READ_INSERT
:
164 *fw_prot_opts
|= PO_MODE_DIF_INSERT
;
166 case SCSI_PROT_WRITE_STRIP
:
167 *fw_prot_opts
|= PO_MODE_DIF_REMOVE
;
169 case SCSI_PROT_READ_PASS
:
170 case SCSI_PROT_WRITE_PASS
:
171 if (guard
& SHOST_DIX_GUARD_IP
)
172 *fw_prot_opts
|= PO_MODE_DIF_TCP_CKSUM
;
174 *fw_prot_opts
|= PO_MODE_DIF_PASS
;
176 default: /* Normal Request */
177 *fw_prot_opts
|= PO_MODE_DIF_PASS
;
181 return scsi_prot_sg_count(cmd
);
185 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
186 * capable IOCB types.
188 * @sp: SRB command to process
189 * @cmd_pkt: Command type 2 IOCB
190 * @tot_dsds: Total number of segments to transfer
192 void qla2x00_build_scsi_iocbs_32(srb_t
*sp
, cmd_entry_t
*cmd_pkt
,
197 scsi_qla_host_t
*vha
;
198 struct scsi_cmnd
*cmd
;
199 struct scatterlist
*sg
;
202 cmd
= GET_CMD_SP(sp
);
204 /* Update entry type to indicate Command Type 2 IOCB */
205 *((uint32_t *)(&cmd_pkt
->entry_type
)) =
206 cpu_to_le32(COMMAND_TYPE
);
208 /* No data transfer */
209 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
210 cmd_pkt
->byte_count
= cpu_to_le32(0);
215 cmd_pkt
->control_flags
|= cpu_to_le16(qla2x00_get_cmd_direction(sp
));
217 /* Three DSDs are available in the Command Type 2 IOCB */
219 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
221 /* Load data segments */
222 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
223 cont_entry_t
*cont_pkt
;
225 /* Allocate additional continuation packets? */
226 if (avail_dsds
== 0) {
228 * Seven DSDs are available in the Continuation
231 cont_pkt
= qla2x00_prep_cont_type0_iocb(vha
);
232 cur_dsd
= (uint32_t *)&cont_pkt
->dseg_0_address
;
236 *cur_dsd
++ = cpu_to_le32(sg_dma_address(sg
));
237 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
243 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
244 * capable IOCB types.
246 * @sp: SRB command to process
247 * @cmd_pkt: Command type 3 IOCB
248 * @tot_dsds: Total number of segments to transfer
250 void qla2x00_build_scsi_iocbs_64(srb_t
*sp
, cmd_entry_t
*cmd_pkt
,
255 scsi_qla_host_t
*vha
;
256 struct scsi_cmnd
*cmd
;
257 struct scatterlist
*sg
;
260 cmd
= GET_CMD_SP(sp
);
262 /* Update entry type to indicate Command Type 3 IOCB */
263 *((uint32_t *)(&cmd_pkt
->entry_type
)) = cpu_to_le32(COMMAND_A64_TYPE
);
265 /* No data transfer */
266 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
267 cmd_pkt
->byte_count
= cpu_to_le32(0);
272 cmd_pkt
->control_flags
|= cpu_to_le16(qla2x00_get_cmd_direction(sp
));
274 /* Two DSDs are available in the Command Type 3 IOCB */
276 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
278 /* Load data segments */
279 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
281 cont_a64_entry_t
*cont_pkt
;
283 /* Allocate additional continuation packets? */
284 if (avail_dsds
== 0) {
286 * Five DSDs are available in the Continuation
289 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, vha
->req
);
290 cur_dsd
= (uint32_t *)cont_pkt
->dseg_0_address
;
294 sle_dma
= sg_dma_address(sg
);
295 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
296 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
297 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
303 * qla2x00_start_scsi() - Send a SCSI command to the ISP
304 * @sp: command to send to the ISP
306 * Returns non-zero if a failure occurred, else zero.
309 qla2x00_start_scsi(srb_t
*sp
)
313 scsi_qla_host_t
*vha
;
314 struct scsi_cmnd
*cmd
;
318 cmd_entry_t
*cmd_pkt
;
322 struct device_reg_2xxx __iomem
*reg
;
323 struct qla_hw_data
*ha
;
327 /* Setup device pointers. */
330 reg
= &ha
->iobase
->isp
;
331 cmd
= GET_CMD_SP(sp
);
332 req
= ha
->req_q_map
[0];
333 rsp
= ha
->rsp_q_map
[0];
334 /* So we know we haven't pci_map'ed anything yet */
337 /* Send marker if required */
338 if (vha
->marker_needed
!= 0) {
339 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
341 return (QLA_FUNCTION_FAILED
);
343 vha
->marker_needed
= 0;
346 /* Acquire ring specific lock */
347 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
349 /* Check for room in outstanding command list. */
350 handle
= req
->current_outstanding_cmd
;
351 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
353 if (handle
== req
->num_outstanding_cmds
)
355 if (!req
->outstanding_cmds
[handle
])
358 if (index
== req
->num_outstanding_cmds
)
361 /* Map the sg table so we have an accurate count of sg entries needed */
362 if (scsi_sg_count(cmd
)) {
363 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
364 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
372 /* Calculate the number of request entries needed. */
373 req_cnt
= ha
->isp_ops
->calc_req_entries(tot_dsds
);
374 if (req
->cnt
< (req_cnt
+ 2)) {
375 cnt
= RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha
, reg
));
376 if (req
->ring_index
< cnt
)
377 req
->cnt
= cnt
- req
->ring_index
;
379 req
->cnt
= req
->length
-
380 (req
->ring_index
- cnt
);
381 /* If still no head room then bail out */
382 if (req
->cnt
< (req_cnt
+ 2))
386 /* Build command packet */
387 req
->current_outstanding_cmd
= handle
;
388 req
->outstanding_cmds
[handle
] = sp
;
390 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
393 cmd_pkt
= (cmd_entry_t
*)req
->ring_ptr
;
394 cmd_pkt
->handle
= handle
;
395 /* Zero out remaining portion of packet. */
396 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
397 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
398 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
400 /* Set target ID and LUN number*/
401 SET_TARGET_ID(ha
, cmd_pkt
->target
, sp
->fcport
->loop_id
);
402 cmd_pkt
->lun
= cpu_to_le16(cmd
->device
->lun
);
403 cmd_pkt
->control_flags
= cpu_to_le16(CF_SIMPLE_TAG
);
405 /* Load SCSI command packet. */
406 memcpy(cmd_pkt
->scsi_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
407 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
409 /* Build IOCB segments */
410 ha
->isp_ops
->build_iocbs(sp
, cmd_pkt
, tot_dsds
);
412 /* Set total data segment count. */
413 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
416 /* Adjust ring index. */
418 if (req
->ring_index
== req
->length
) {
420 req
->ring_ptr
= req
->ring
;
424 sp
->flags
|= SRB_DMA_VALID
;
426 /* Set chip new ring index. */
427 WRT_REG_WORD(ISP_REQ_Q_IN(ha
, reg
), req
->ring_index
);
428 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha
, reg
)); /* PCI Posting. */
430 /* Manage unprocessed RIO/ZIO commands in response queue. */
431 if (vha
->flags
.process_response_queue
&&
432 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
433 qla2x00_process_response_queue(rsp
);
435 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
436 return (QLA_SUCCESS
);
442 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
444 return (QLA_FUNCTION_FAILED
);
448 * qla2x00_start_iocbs() - Execute the IOCB command
450 * @req: request queue
453 qla2x00_start_iocbs(struct scsi_qla_host
*vha
, struct req_que
*req
)
455 struct qla_hw_data
*ha
= vha
->hw
;
456 device_reg_t
*reg
= ISP_QUE_REG(ha
, req
->id
);
458 if (IS_P3P_TYPE(ha
)) {
459 qla82xx_start_iocbs(vha
);
461 /* Adjust ring index. */
463 if (req
->ring_index
== req
->length
) {
465 req
->ring_ptr
= req
->ring
;
469 /* Set chip new ring index. */
470 if (ha
->mqenable
|| IS_QLA27XX(ha
)) {
471 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
472 } else if (IS_QLA83XX(ha
)) {
473 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
474 RD_REG_DWORD_RELAXED(&ha
->iobase
->isp24
.hccr
);
475 } else if (IS_QLAFX00(ha
)) {
476 WRT_REG_DWORD(®
->ispfx00
.req_q_in
, req
->ring_index
);
477 RD_REG_DWORD_RELAXED(®
->ispfx00
.req_q_in
);
478 QLAFX00_SET_HST_INTR(ha
, ha
->rqstq_intr_code
);
479 } else if (IS_FWI2_CAPABLE(ha
)) {
480 WRT_REG_DWORD(®
->isp24
.req_q_in
, req
->ring_index
);
481 RD_REG_DWORD_RELAXED(®
->isp24
.req_q_in
);
483 WRT_REG_WORD(ISP_REQ_Q_IN(ha
, ®
->isp
),
485 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha
, ®
->isp
));
491 * qla2x00_marker() - Send a marker IOCB to the firmware.
493 * @req: request queue
494 * @rsp: response queue
497 * @type: marker modifier
499 * Can be called from both normal and interrupt context.
501 * Returns non-zero if a failure occurred, else zero.
504 __qla2x00_marker(struct scsi_qla_host
*vha
, struct req_que
*req
,
505 struct rsp_que
*rsp
, uint16_t loop_id
,
506 uint64_t lun
, uint8_t type
)
509 struct mrk_entry_24xx
*mrk24
= NULL
;
511 struct qla_hw_data
*ha
= vha
->hw
;
512 scsi_qla_host_t
*base_vha
= pci_get_drvdata(ha
->pdev
);
514 req
= ha
->req_q_map
[0];
515 mrk
= (mrk_entry_t
*)qla2x00_alloc_iocbs(vha
, NULL
);
517 ql_log(ql_log_warn
, base_vha
, 0x3026,
518 "Failed to allocate Marker IOCB.\n");
520 return (QLA_FUNCTION_FAILED
);
523 mrk
->entry_type
= MARKER_TYPE
;
524 mrk
->modifier
= type
;
525 if (type
!= MK_SYNC_ALL
) {
526 if (IS_FWI2_CAPABLE(ha
)) {
527 mrk24
= (struct mrk_entry_24xx
*) mrk
;
528 mrk24
->nport_handle
= cpu_to_le16(loop_id
);
529 int_to_scsilun(lun
, (struct scsi_lun
*)&mrk24
->lun
);
530 host_to_fcp_swap(mrk24
->lun
, sizeof(mrk24
->lun
));
531 mrk24
->vp_index
= vha
->vp_idx
;
532 mrk24
->handle
= MAKE_HANDLE(req
->id
, mrk24
->handle
);
534 SET_TARGET_ID(ha
, mrk
->target
, loop_id
);
535 mrk
->lun
= cpu_to_le16((uint16_t)lun
);
540 qla2x00_start_iocbs(vha
, req
);
542 return (QLA_SUCCESS
);
546 qla2x00_marker(struct scsi_qla_host
*vha
, struct req_que
*req
,
547 struct rsp_que
*rsp
, uint16_t loop_id
, uint64_t lun
,
551 unsigned long flags
= 0;
553 spin_lock_irqsave(&vha
->hw
->hardware_lock
, flags
);
554 ret
= __qla2x00_marker(vha
, req
, rsp
, loop_id
, lun
, type
);
555 spin_unlock_irqrestore(&vha
->hw
->hardware_lock
, flags
);
561 * qla2x00_issue_marker
564 * Caller CAN have hardware lock held as specified by ha_locked parameter.
565 * Might release it, then reaquire.
567 int qla2x00_issue_marker(scsi_qla_host_t
*vha
, int ha_locked
)
570 if (__qla2x00_marker(vha
, vha
->req
, vha
->req
->rsp
, 0, 0,
571 MK_SYNC_ALL
) != QLA_SUCCESS
)
572 return QLA_FUNCTION_FAILED
;
574 if (qla2x00_marker(vha
, vha
->req
, vha
->req
->rsp
, 0, 0,
575 MK_SYNC_ALL
) != QLA_SUCCESS
)
576 return QLA_FUNCTION_FAILED
;
578 vha
->marker_needed
= 0;
584 qla24xx_build_scsi_type_6_iocbs(srb_t
*sp
, struct cmd_type_6
*cmd_pkt
,
587 uint32_t *cur_dsd
= NULL
;
588 scsi_qla_host_t
*vha
;
589 struct qla_hw_data
*ha
;
590 struct scsi_cmnd
*cmd
;
591 struct scatterlist
*cur_seg
;
595 uint8_t first_iocb
= 1;
596 uint32_t dsd_list_len
;
597 struct dsd_dma
*dsd_ptr
;
600 cmd
= GET_CMD_SP(sp
);
602 /* Update entry type to indicate Command Type 3 IOCB */
603 *((uint32_t *)(&cmd_pkt
->entry_type
)) = cpu_to_le32(COMMAND_TYPE_6
);
605 /* No data transfer */
606 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
607 cmd_pkt
->byte_count
= cpu_to_le32(0);
614 /* Set transfer direction */
615 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
616 cmd_pkt
->control_flags
= cpu_to_le16(CF_WRITE_DATA
);
617 vha
->qla_stats
.output_bytes
+= scsi_bufflen(cmd
);
618 vha
->qla_stats
.output_requests
++;
619 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
620 cmd_pkt
->control_flags
= cpu_to_le16(CF_READ_DATA
);
621 vha
->qla_stats
.input_bytes
+= scsi_bufflen(cmd
);
622 vha
->qla_stats
.input_requests
++;
625 cur_seg
= scsi_sglist(cmd
);
626 ctx
= GET_CMD_CTX_SP(sp
);
629 avail_dsds
= (tot_dsds
> QLA_DSDS_PER_IOCB
) ?
630 QLA_DSDS_PER_IOCB
: tot_dsds
;
631 tot_dsds
-= avail_dsds
;
632 dsd_list_len
= (avail_dsds
+ 1) * QLA_DSD_SIZE
;
634 dsd_ptr
= list_first_entry(&ha
->gbl_dsd_list
,
635 struct dsd_dma
, list
);
636 next_dsd
= dsd_ptr
->dsd_addr
;
637 list_del(&dsd_ptr
->list
);
639 list_add_tail(&dsd_ptr
->list
, &ctx
->dsd_list
);
645 dsd_seg
= (uint32_t *)&cmd_pkt
->fcp_data_dseg_address
;
646 *dsd_seg
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
647 *dsd_seg
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
648 cmd_pkt
->fcp_data_dseg_len
= cpu_to_le32(dsd_list_len
);
650 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
651 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
652 *cur_dsd
++ = cpu_to_le32(dsd_list_len
);
654 cur_dsd
= (uint32_t *)next_dsd
;
658 sle_dma
= sg_dma_address(cur_seg
);
659 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
660 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
661 *cur_dsd
++ = cpu_to_le32(sg_dma_len(cur_seg
));
662 cur_seg
= sg_next(cur_seg
);
667 /* Null termination */
671 cmd_pkt
->control_flags
|= CF_DATA_SEG_DESCR_ENABLE
;
676 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
677 * for Command Type 6.
679 * @dsds: number of data segment decriptors needed
681 * Returns the number of dsd list needed to store @dsds.
683 static inline uint16_t
684 qla24xx_calc_dsd_lists(uint16_t dsds
)
686 uint16_t dsd_lists
= 0;
688 dsd_lists
= (dsds
/QLA_DSDS_PER_IOCB
);
689 if (dsds
% QLA_DSDS_PER_IOCB
)
696 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
699 * @sp: SRB command to process
700 * @cmd_pkt: Command type 3 IOCB
701 * @tot_dsds: Total number of segments to transfer
702 * @req: pointer to request queue
705 qla24xx_build_scsi_iocbs(srb_t
*sp
, struct cmd_type_7
*cmd_pkt
,
706 uint16_t tot_dsds
, struct req_que
*req
)
710 scsi_qla_host_t
*vha
;
711 struct scsi_cmnd
*cmd
;
712 struct scatterlist
*sg
;
715 cmd
= GET_CMD_SP(sp
);
717 /* Update entry type to indicate Command Type 3 IOCB */
718 *((uint32_t *)(&cmd_pkt
->entry_type
)) = cpu_to_le32(COMMAND_TYPE_7
);
720 /* No data transfer */
721 if (!scsi_bufflen(cmd
) || cmd
->sc_data_direction
== DMA_NONE
) {
722 cmd_pkt
->byte_count
= cpu_to_le32(0);
728 /* Set transfer direction */
729 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
730 cmd_pkt
->task_mgmt_flags
= cpu_to_le16(TMF_WRITE_DATA
);
731 vha
->qla_stats
.output_bytes
+= scsi_bufflen(cmd
);
732 vha
->qla_stats
.output_requests
++;
733 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
734 cmd_pkt
->task_mgmt_flags
= cpu_to_le16(TMF_READ_DATA
);
735 vha
->qla_stats
.input_bytes
+= scsi_bufflen(cmd
);
736 vha
->qla_stats
.input_requests
++;
739 /* One DSD is available in the Command Type 3 IOCB */
741 cur_dsd
= (uint32_t *)&cmd_pkt
->dseg_0_address
;
743 /* Load data segments */
745 scsi_for_each_sg(cmd
, sg
, tot_dsds
, i
) {
747 cont_a64_entry_t
*cont_pkt
;
749 /* Allocate additional continuation packets? */
750 if (avail_dsds
== 0) {
752 * Five DSDs are available in the Continuation
755 cont_pkt
= qla2x00_prep_cont_type1_iocb(vha
, req
);
756 cur_dsd
= (uint32_t *)cont_pkt
->dseg_0_address
;
760 sle_dma
= sg_dma_address(sg
);
761 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
762 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
763 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
768 struct fw_dif_context
{
771 uint8_t ref_tag_mask
[4]; /* Validation/Replacement Mask*/
772 uint8_t app_tag_mask
[2]; /* Validation/Replacement Mask*/
776 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
780 qla24xx_set_t10dif_tags(srb_t
*sp
, struct fw_dif_context
*pkt
,
781 unsigned int protcnt
)
783 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
785 switch (scsi_get_prot_type(cmd
)) {
786 case SCSI_PROT_DIF_TYPE0
:
788 * No check for ql2xenablehba_err_chk, as it would be an
789 * I/O error if hba tag generation is not done.
791 pkt
->ref_tag
= cpu_to_le32((uint32_t)
792 (0xffffffff & scsi_get_lba(cmd
)));
794 if (!qla2x00_hba_err_chk_enabled(sp
))
797 pkt
->ref_tag_mask
[0] = 0xff;
798 pkt
->ref_tag_mask
[1] = 0xff;
799 pkt
->ref_tag_mask
[2] = 0xff;
800 pkt
->ref_tag_mask
[3] = 0xff;
804 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
805 * match LBA in CDB + N
807 case SCSI_PROT_DIF_TYPE2
:
808 pkt
->app_tag
= cpu_to_le16(0);
809 pkt
->app_tag_mask
[0] = 0x0;
810 pkt
->app_tag_mask
[1] = 0x0;
812 pkt
->ref_tag
= cpu_to_le32((uint32_t)
813 (0xffffffff & scsi_get_lba(cmd
)));
815 if (!qla2x00_hba_err_chk_enabled(sp
))
818 /* enable ALL bytes of the ref tag */
819 pkt
->ref_tag_mask
[0] = 0xff;
820 pkt
->ref_tag_mask
[1] = 0xff;
821 pkt
->ref_tag_mask
[2] = 0xff;
822 pkt
->ref_tag_mask
[3] = 0xff;
825 /* For Type 3 protection: 16 bit GUARD only */
826 case SCSI_PROT_DIF_TYPE3
:
827 pkt
->ref_tag_mask
[0] = pkt
->ref_tag_mask
[1] =
828 pkt
->ref_tag_mask
[2] = pkt
->ref_tag_mask
[3] =
833 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
836 case SCSI_PROT_DIF_TYPE1
:
837 pkt
->ref_tag
= cpu_to_le32((uint32_t)
838 (0xffffffff & scsi_get_lba(cmd
)));
839 pkt
->app_tag
= cpu_to_le16(0);
840 pkt
->app_tag_mask
[0] = 0x0;
841 pkt
->app_tag_mask
[1] = 0x0;
843 if (!qla2x00_hba_err_chk_enabled(sp
))
846 /* enable ALL bytes of the ref tag */
847 pkt
->ref_tag_mask
[0] = 0xff;
848 pkt
->ref_tag_mask
[1] = 0xff;
849 pkt
->ref_tag_mask
[2] = 0xff;
850 pkt
->ref_tag_mask
[3] = 0xff;
856 qla24xx_get_one_block_sg(uint32_t blk_sz
, struct qla2_sgx
*sgx
,
859 struct scatterlist
*sg
;
860 uint32_t cumulative_partial
, sg_len
;
861 dma_addr_t sg_dma_addr
;
863 if (sgx
->num_bytes
== sgx
->tot_bytes
)
867 cumulative_partial
= sgx
->tot_partial
;
869 sg_dma_addr
= sg_dma_address(sg
);
870 sg_len
= sg_dma_len(sg
);
872 sgx
->dma_addr
= sg_dma_addr
+ sgx
->bytes_consumed
;
874 if ((cumulative_partial
+ (sg_len
- sgx
->bytes_consumed
)) >= blk_sz
) {
875 sgx
->dma_len
= (blk_sz
- cumulative_partial
);
876 sgx
->tot_partial
= 0;
877 sgx
->num_bytes
+= blk_sz
;
880 sgx
->dma_len
= sg_len
- sgx
->bytes_consumed
;
881 sgx
->tot_partial
+= sgx
->dma_len
;
885 sgx
->bytes_consumed
+= sgx
->dma_len
;
887 if (sg_len
== sgx
->bytes_consumed
) {
891 sgx
->bytes_consumed
= 0;
898 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data
*ha
, srb_t
*sp
,
899 uint32_t *dsd
, uint16_t tot_dsds
, struct qla_tc_param
*tc
)
902 uint8_t avail_dsds
= 0;
903 uint32_t dsd_list_len
;
904 struct dsd_dma
*dsd_ptr
;
905 struct scatterlist
*sg_prot
;
906 uint32_t *cur_dsd
= dsd
;
907 uint16_t used_dsds
= tot_dsds
;
908 uint32_t prot_int
; /* protection interval */
912 uint32_t sle_dma_len
, tot_prot_dma_len
= 0;
913 struct scsi_cmnd
*cmd
;
915 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
917 cmd
= GET_CMD_SP(sp
);
918 prot_int
= cmd
->device
->sector_size
;
920 sgx
.tot_bytes
= scsi_bufflen(cmd
);
921 sgx
.cur_sg
= scsi_sglist(cmd
);
924 sg_prot
= scsi_prot_sglist(cmd
);
926 prot_int
= tc
->blk_sz
;
927 sgx
.tot_bytes
= tc
->bufflen
;
929 sg_prot
= tc
->prot_sg
;
935 while (qla24xx_get_one_block_sg(prot_int
, &sgx
, &partial
)) {
937 sle_dma
= sgx
.dma_addr
;
938 sle_dma_len
= sgx
.dma_len
;
940 /* Allocate additional continuation packets? */
941 if (avail_dsds
== 0) {
942 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
943 QLA_DSDS_PER_IOCB
: used_dsds
;
944 dsd_list_len
= (avail_dsds
+ 1) * 12;
945 used_dsds
-= avail_dsds
;
947 /* allocate tracking DS */
948 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
952 /* allocate new list */
953 dsd_ptr
->dsd_addr
= next_dsd
=
954 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
955 &dsd_ptr
->dsd_list_dma
);
959 * Need to cleanup only this dsd_ptr, rest
960 * will be done by sp_free_dma()
967 list_add_tail(&dsd_ptr
->list
,
968 &((struct crc_context
*)
969 sp
->u
.scmd
.ctx
)->dsd_list
);
971 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
973 list_add_tail(&dsd_ptr
->list
,
974 &(tc
->ctx
->dsd_list
));
975 *tc
->ctx_dsd_alloced
= 1;
979 /* add new list to cmd iocb or last list */
980 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
981 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
982 *cur_dsd
++ = dsd_list_len
;
983 cur_dsd
= (uint32_t *)next_dsd
;
985 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
986 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
987 *cur_dsd
++ = cpu_to_le32(sle_dma_len
);
991 /* Got a full protection interval */
992 sle_dma
= sg_dma_address(sg_prot
) + tot_prot_dma_len
;
995 tot_prot_dma_len
+= sle_dma_len
;
996 if (tot_prot_dma_len
== sg_dma_len(sg_prot
)) {
997 tot_prot_dma_len
= 0;
998 sg_prot
= sg_next(sg_prot
);
1001 partial
= 1; /* So as to not re-enter this block */
1002 goto alloc_and_fill
;
1005 /* Null termination */
1013 qla24xx_walk_and_build_sglist(struct qla_hw_data
*ha
, srb_t
*sp
, uint32_t *dsd
,
1014 uint16_t tot_dsds
, struct qla_tc_param
*tc
)
1017 uint8_t avail_dsds
= 0;
1018 uint32_t dsd_list_len
;
1019 struct dsd_dma
*dsd_ptr
;
1020 struct scatterlist
*sg
, *sgl
;
1021 uint32_t *cur_dsd
= dsd
;
1023 uint16_t used_dsds
= tot_dsds
;
1024 struct scsi_cmnd
*cmd
;
1027 cmd
= GET_CMD_SP(sp
);
1028 sgl
= scsi_sglist(cmd
);
1037 for_each_sg(sgl
, sg
, tot_dsds
, i
) {
1040 /* Allocate additional continuation packets? */
1041 if (avail_dsds
== 0) {
1042 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
1043 QLA_DSDS_PER_IOCB
: used_dsds
;
1044 dsd_list_len
= (avail_dsds
+ 1) * 12;
1045 used_dsds
-= avail_dsds
;
1047 /* allocate tracking DS */
1048 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
1052 /* allocate new list */
1053 dsd_ptr
->dsd_addr
= next_dsd
=
1054 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
1055 &dsd_ptr
->dsd_list_dma
);
1059 * Need to cleanup only this dsd_ptr, rest
1060 * will be done by sp_free_dma()
1067 list_add_tail(&dsd_ptr
->list
,
1068 &((struct crc_context
*)
1069 sp
->u
.scmd
.ctx
)->dsd_list
);
1071 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1073 list_add_tail(&dsd_ptr
->list
,
1074 &(tc
->ctx
->dsd_list
));
1075 *tc
->ctx_dsd_alloced
= 1;
1078 /* add new list to cmd iocb or last list */
1079 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1080 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1081 *cur_dsd
++ = dsd_list_len
;
1082 cur_dsd
= (uint32_t *)next_dsd
;
1084 sle_dma
= sg_dma_address(sg
);
1086 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1087 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1088 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
1092 /* Null termination */
1100 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data
*ha
, srb_t
*sp
,
1101 uint32_t *dsd
, uint16_t tot_dsds
, struct qla_tc_param
*tc
)
1104 uint8_t avail_dsds
= 0;
1105 uint32_t dsd_list_len
;
1106 struct dsd_dma
*dsd_ptr
;
1107 struct scatterlist
*sg
, *sgl
;
1109 struct scsi_cmnd
*cmd
;
1110 uint32_t *cur_dsd
= dsd
;
1111 uint16_t used_dsds
= tot_dsds
;
1112 struct scsi_qla_host
*vha
;
1115 cmd
= GET_CMD_SP(sp
);
1116 sgl
= scsi_prot_sglist(cmd
);
1126 ql_dbg(ql_dbg_tgt
, vha
, 0xe021,
1127 "%s: enter\n", __func__
);
1129 for_each_sg(sgl
, sg
, tot_dsds
, i
) {
1132 /* Allocate additional continuation packets? */
1133 if (avail_dsds
== 0) {
1134 avail_dsds
= (used_dsds
> QLA_DSDS_PER_IOCB
) ?
1135 QLA_DSDS_PER_IOCB
: used_dsds
;
1136 dsd_list_len
= (avail_dsds
+ 1) * 12;
1137 used_dsds
-= avail_dsds
;
1139 /* allocate tracking DS */
1140 dsd_ptr
= kzalloc(sizeof(struct dsd_dma
), GFP_ATOMIC
);
1144 /* allocate new list */
1145 dsd_ptr
->dsd_addr
= next_dsd
=
1146 dma_pool_alloc(ha
->dl_dma_pool
, GFP_ATOMIC
,
1147 &dsd_ptr
->dsd_list_dma
);
1151 * Need to cleanup only this dsd_ptr, rest
1152 * will be done by sp_free_dma()
1159 list_add_tail(&dsd_ptr
->list
,
1160 &((struct crc_context
*)
1161 sp
->u
.scmd
.ctx
)->dsd_list
);
1163 sp
->flags
|= SRB_CRC_CTX_DSD_VALID
;
1165 list_add_tail(&dsd_ptr
->list
,
1166 &(tc
->ctx
->dsd_list
));
1167 *tc
->ctx_dsd_alloced
= 1;
1170 /* add new list to cmd iocb or last list */
1171 *cur_dsd
++ = cpu_to_le32(LSD(dsd_ptr
->dsd_list_dma
));
1172 *cur_dsd
++ = cpu_to_le32(MSD(dsd_ptr
->dsd_list_dma
));
1173 *cur_dsd
++ = dsd_list_len
;
1174 cur_dsd
= (uint32_t *)next_dsd
;
1176 sle_dma
= sg_dma_address(sg
);
1178 *cur_dsd
++ = cpu_to_le32(LSD(sle_dma
));
1179 *cur_dsd
++ = cpu_to_le32(MSD(sle_dma
));
1180 *cur_dsd
++ = cpu_to_le32(sg_dma_len(sg
));
1184 /* Null termination */
1192 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1193 * Type 6 IOCB types.
1195 * @sp: SRB command to process
1196 * @cmd_pkt: Command type 3 IOCB
1197 * @tot_dsds: Total number of segments to transfer
1202 qla24xx_build_scsi_crc_2_iocbs(srb_t
*sp
, struct cmd_type_crc_2
*cmd_pkt
,
1203 uint16_t tot_dsds
, uint16_t tot_prot_dsds
, uint16_t fw_prot_opts
)
1205 uint32_t *cur_dsd
, *fcp_dl
;
1206 scsi_qla_host_t
*vha
;
1207 struct scsi_cmnd
*cmd
;
1208 uint32_t total_bytes
= 0;
1209 uint32_t data_bytes
;
1211 uint8_t bundling
= 1;
1213 struct crc_context
*crc_ctx_pkt
= NULL
;
1214 struct qla_hw_data
*ha
;
1215 uint8_t additional_fcpcdb_len
;
1216 uint16_t fcp_cmnd_len
;
1217 struct fcp_cmnd
*fcp_cmnd
;
1218 dma_addr_t crc_ctx_dma
;
1220 cmd
= GET_CMD_SP(sp
);
1222 /* Update entry type to indicate Command Type CRC_2 IOCB */
1223 *((uint32_t *)(&cmd_pkt
->entry_type
)) = cpu_to_le32(COMMAND_TYPE_CRC_2
);
1228 /* No data transfer */
1229 data_bytes
= scsi_bufflen(cmd
);
1230 if (!data_bytes
|| cmd
->sc_data_direction
== DMA_NONE
) {
1231 cmd_pkt
->byte_count
= cpu_to_le32(0);
1235 cmd_pkt
->vp_index
= sp
->vha
->vp_idx
;
1237 /* Set transfer direction */
1238 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
) {
1239 cmd_pkt
->control_flags
=
1240 cpu_to_le16(CF_WRITE_DATA
);
1241 } else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
) {
1242 cmd_pkt
->control_flags
=
1243 cpu_to_le16(CF_READ_DATA
);
1246 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1247 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
) ||
1248 (scsi_get_prot_op(cmd
) == SCSI_PROT_READ_STRIP
) ||
1249 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_INSERT
))
1252 /* Allocate CRC context from global pool */
1253 crc_ctx_pkt
= sp
->u
.scmd
.ctx
=
1254 dma_pool_zalloc(ha
->dl_dma_pool
, GFP_ATOMIC
, &crc_ctx_dma
);
1257 goto crc_queuing_error
;
1259 crc_ctx_pkt
->crc_ctx_dma
= crc_ctx_dma
;
1261 sp
->flags
|= SRB_CRC_CTX_DMA_VALID
;
1264 crc_ctx_pkt
->handle
= cmd_pkt
->handle
;
1266 INIT_LIST_HEAD(&crc_ctx_pkt
->dsd_list
);
1268 qla24xx_set_t10dif_tags(sp
, (struct fw_dif_context
*)
1269 &crc_ctx_pkt
->ref_tag
, tot_prot_dsds
);
1271 cmd_pkt
->crc_context_address
[0] = cpu_to_le32(LSD(crc_ctx_dma
));
1272 cmd_pkt
->crc_context_address
[1] = cpu_to_le32(MSD(crc_ctx_dma
));
1273 cmd_pkt
->crc_context_len
= CRC_CONTEXT_LEN_FW
;
1275 /* Determine SCSI command length -- align to 4 byte boundary */
1276 if (cmd
->cmd_len
> 16) {
1277 additional_fcpcdb_len
= cmd
->cmd_len
- 16;
1278 if ((cmd
->cmd_len
% 4) != 0) {
1279 /* SCSI cmd > 16 bytes must be multiple of 4 */
1280 goto crc_queuing_error
;
1282 fcp_cmnd_len
= 12 + cmd
->cmd_len
+ 4;
1284 additional_fcpcdb_len
= 0;
1285 fcp_cmnd_len
= 12 + 16 + 4;
1288 fcp_cmnd
= &crc_ctx_pkt
->fcp_cmnd
;
1290 fcp_cmnd
->additional_cdb_len
= additional_fcpcdb_len
;
1291 if (cmd
->sc_data_direction
== DMA_TO_DEVICE
)
1292 fcp_cmnd
->additional_cdb_len
|= 1;
1293 else if (cmd
->sc_data_direction
== DMA_FROM_DEVICE
)
1294 fcp_cmnd
->additional_cdb_len
|= 2;
1296 int_to_scsilun(cmd
->device
->lun
, &fcp_cmnd
->lun
);
1297 memcpy(fcp_cmnd
->cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1298 cmd_pkt
->fcp_cmnd_dseg_len
= cpu_to_le16(fcp_cmnd_len
);
1299 cmd_pkt
->fcp_cmnd_dseg_address
[0] = cpu_to_le32(
1300 LSD(crc_ctx_dma
+ CRC_CONTEXT_FCPCMND_OFF
));
1301 cmd_pkt
->fcp_cmnd_dseg_address
[1] = cpu_to_le32(
1302 MSD(crc_ctx_dma
+ CRC_CONTEXT_FCPCMND_OFF
));
1303 fcp_cmnd
->task_management
= 0;
1304 fcp_cmnd
->task_attribute
= TSK_SIMPLE
;
1306 cmd_pkt
->fcp_rsp_dseg_len
= 0; /* Let response come in status iocb */
1308 /* Compute dif len and adjust data len to incude protection */
1310 blk_size
= cmd
->device
->sector_size
;
1311 dif_bytes
= (data_bytes
/ blk_size
) * 8;
1313 switch (scsi_get_prot_op(GET_CMD_SP(sp
))) {
1314 case SCSI_PROT_READ_INSERT
:
1315 case SCSI_PROT_WRITE_STRIP
:
1316 total_bytes
= data_bytes
;
1317 data_bytes
+= dif_bytes
;
1320 case SCSI_PROT_READ_STRIP
:
1321 case SCSI_PROT_WRITE_INSERT
:
1322 case SCSI_PROT_READ_PASS
:
1323 case SCSI_PROT_WRITE_PASS
:
1324 total_bytes
= data_bytes
+ dif_bytes
;
1330 if (!qla2x00_hba_err_chk_enabled(sp
))
1331 fw_prot_opts
|= 0x10; /* Disable Guard tag checking */
1332 /* HBA error checking enabled */
1333 else if (IS_PI_UNINIT_CAPABLE(ha
)) {
1334 if ((scsi_get_prot_type(GET_CMD_SP(sp
)) == SCSI_PROT_DIF_TYPE1
)
1335 || (scsi_get_prot_type(GET_CMD_SP(sp
)) ==
1336 SCSI_PROT_DIF_TYPE2
))
1337 fw_prot_opts
|= BIT_10
;
1338 else if (scsi_get_prot_type(GET_CMD_SP(sp
)) ==
1339 SCSI_PROT_DIF_TYPE3
)
1340 fw_prot_opts
|= BIT_11
;
1344 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.nobundling
.data_address
;
1347 * Configure Bundling if we need to fetch interlaving
1348 * protection PCI accesses
1350 fw_prot_opts
|= PO_ENABLE_DIF_BUNDLING
;
1351 crc_ctx_pkt
->u
.bundling
.dif_byte_count
= cpu_to_le32(dif_bytes
);
1352 crc_ctx_pkt
->u
.bundling
.dseg_count
= cpu_to_le16(tot_dsds
-
1354 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.bundling
.data_address
;
1357 /* Finish the common fields of CRC pkt */
1358 crc_ctx_pkt
->blk_size
= cpu_to_le16(blk_size
);
1359 crc_ctx_pkt
->prot_opts
= cpu_to_le16(fw_prot_opts
);
1360 crc_ctx_pkt
->byte_count
= cpu_to_le32(data_bytes
);
1361 crc_ctx_pkt
->guard_seed
= cpu_to_le16(0);
1362 /* Fibre channel byte count */
1363 cmd_pkt
->byte_count
= cpu_to_le32(total_bytes
);
1364 fcp_dl
= (uint32_t *)(crc_ctx_pkt
->fcp_cmnd
.cdb
+ 16 +
1365 additional_fcpcdb_len
);
1366 *fcp_dl
= htonl(total_bytes
);
1368 if (!data_bytes
|| cmd
->sc_data_direction
== DMA_NONE
) {
1369 cmd_pkt
->byte_count
= cpu_to_le32(0);
1372 /* Walks data segments */
1374 cmd_pkt
->control_flags
|= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE
);
1376 if (!bundling
&& tot_prot_dsds
) {
1377 if (qla24xx_walk_and_build_sglist_no_difb(ha
, sp
,
1378 cur_dsd
, tot_dsds
, NULL
))
1379 goto crc_queuing_error
;
1380 } else if (qla24xx_walk_and_build_sglist(ha
, sp
, cur_dsd
,
1381 (tot_dsds
- tot_prot_dsds
), NULL
))
1382 goto crc_queuing_error
;
1384 if (bundling
&& tot_prot_dsds
) {
1385 /* Walks dif segments */
1386 cmd_pkt
->control_flags
|= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE
);
1387 cur_dsd
= (uint32_t *) &crc_ctx_pkt
->u
.bundling
.dif_address
;
1388 if (qla24xx_walk_and_build_prot_sglist(ha
, sp
, cur_dsd
,
1389 tot_prot_dsds
, NULL
))
1390 goto crc_queuing_error
;
1395 /* Cleanup will be performed by the caller */
1397 return QLA_FUNCTION_FAILED
;
1401 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1402 * @sp: command to send to the ISP
1404 * Returns non-zero if a failure occurred, else zero.
1407 qla24xx_start_scsi(srb_t
*sp
)
1410 unsigned long flags
;
1414 struct cmd_type_7
*cmd_pkt
;
1418 struct req_que
*req
= NULL
;
1419 struct rsp_que
*rsp
= NULL
;
1420 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1421 struct scsi_qla_host
*vha
= sp
->vha
;
1422 struct qla_hw_data
*ha
= vha
->hw
;
1424 /* Setup device pointers. */
1428 /* So we know we haven't pci_map'ed anything yet */
1431 /* Send marker if required */
1432 if (vha
->marker_needed
!= 0) {
1433 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1435 return QLA_FUNCTION_FAILED
;
1436 vha
->marker_needed
= 0;
1439 /* Acquire ring specific lock */
1440 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1442 /* Check for room in outstanding command list. */
1443 handle
= req
->current_outstanding_cmd
;
1444 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
1446 if (handle
== req
->num_outstanding_cmds
)
1448 if (!req
->outstanding_cmds
[handle
])
1451 if (index
== req
->num_outstanding_cmds
)
1454 /* Map the sg table so we have an accurate count of sg entries needed */
1455 if (scsi_sg_count(cmd
)) {
1456 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1457 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1458 if (unlikely(!nseg
))
1464 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
1465 if (req
->cnt
< (req_cnt
+ 2)) {
1466 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
1467 RD_REG_DWORD_RELAXED(req
->req_q_out
);
1468 if (req
->ring_index
< cnt
)
1469 req
->cnt
= cnt
- req
->ring_index
;
1471 req
->cnt
= req
->length
-
1472 (req
->ring_index
- cnt
);
1473 if (req
->cnt
< (req_cnt
+ 2))
1477 /* Build command packet. */
1478 req
->current_outstanding_cmd
= handle
;
1479 req
->outstanding_cmds
[handle
] = sp
;
1480 sp
->handle
= handle
;
1481 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1482 req
->cnt
-= req_cnt
;
1484 cmd_pkt
= (struct cmd_type_7
*)req
->ring_ptr
;
1485 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1487 /* Zero out remaining portion of packet. */
1488 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1489 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1490 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1491 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1493 /* Set NPORT-ID and LUN number*/
1494 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1495 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1496 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1497 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1498 cmd_pkt
->vp_index
= sp
->vha
->vp_idx
;
1500 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
1501 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1503 cmd_pkt
->task
= TSK_SIMPLE
;
1505 /* Load SCSI command packet. */
1506 memcpy(cmd_pkt
->fcp_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1507 host_to_fcp_swap(cmd_pkt
->fcp_cdb
, sizeof(cmd_pkt
->fcp_cdb
));
1509 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
1511 /* Build IOCB segments */
1512 qla24xx_build_scsi_iocbs(sp
, cmd_pkt
, tot_dsds
, req
);
1514 /* Set total data segment count. */
1515 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1517 /* Adjust ring index. */
1519 if (req
->ring_index
== req
->length
) {
1520 req
->ring_index
= 0;
1521 req
->ring_ptr
= req
->ring
;
1525 sp
->flags
|= SRB_DMA_VALID
;
1527 /* Set chip new ring index. */
1528 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1530 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1535 scsi_dma_unmap(cmd
);
1537 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1539 return QLA_FUNCTION_FAILED
;
1543 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1544 * @sp: command to send to the ISP
1546 * Returns non-zero if a failure occurred, else zero.
1549 qla24xx_dif_start_scsi(srb_t
*sp
)
1552 unsigned long flags
;
1557 uint16_t req_cnt
= 0;
1559 uint16_t tot_prot_dsds
;
1560 uint16_t fw_prot_opts
= 0;
1561 struct req_que
*req
= NULL
;
1562 struct rsp_que
*rsp
= NULL
;
1563 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1564 struct scsi_qla_host
*vha
= sp
->vha
;
1565 struct qla_hw_data
*ha
= vha
->hw
;
1566 struct cmd_type_crc_2
*cmd_pkt
;
1567 uint32_t status
= 0;
1569 #define QDSS_GOT_Q_SPACE BIT_0
1571 /* Only process protection or >16 cdb in this routine */
1572 if (scsi_get_prot_op(cmd
) == SCSI_PROT_NORMAL
) {
1573 if (cmd
->cmd_len
<= 16)
1574 return qla24xx_start_scsi(sp
);
1577 /* Setup device pointers. */
1581 /* So we know we haven't pci_map'ed anything yet */
1584 /* Send marker if required */
1585 if (vha
->marker_needed
!= 0) {
1586 if (qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1588 return QLA_FUNCTION_FAILED
;
1589 vha
->marker_needed
= 0;
1592 /* Acquire ring specific lock */
1593 spin_lock_irqsave(&ha
->hardware_lock
, flags
);
1595 /* Check for room in outstanding command list. */
1596 handle
= req
->current_outstanding_cmd
;
1597 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
1599 if (handle
== req
->num_outstanding_cmds
)
1601 if (!req
->outstanding_cmds
[handle
])
1605 if (index
== req
->num_outstanding_cmds
)
1608 /* Compute number of required data segments */
1609 /* Map the sg table so we have an accurate count of sg entries needed */
1610 if (scsi_sg_count(cmd
)) {
1611 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1612 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1613 if (unlikely(!nseg
))
1616 sp
->flags
|= SRB_DMA_VALID
;
1618 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1619 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1620 struct qla2_sgx sgx
;
1623 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
1624 sgx
.tot_bytes
= scsi_bufflen(cmd
);
1625 sgx
.cur_sg
= scsi_sglist(cmd
);
1629 while (qla24xx_get_one_block_sg(
1630 cmd
->device
->sector_size
, &sgx
, &partial
))
1636 /* number of required data segments */
1639 /* Compute number of required protection segments */
1640 if (qla24xx_configure_prot_mode(sp
, &fw_prot_opts
)) {
1641 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_prot_sglist(cmd
),
1642 scsi_prot_sg_count(cmd
), cmd
->sc_data_direction
);
1643 if (unlikely(!nseg
))
1646 sp
->flags
|= SRB_CRC_PROT_DMA_VALID
;
1648 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1649 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1650 nseg
= scsi_bufflen(cmd
) / cmd
->device
->sector_size
;
1657 /* Total Data and protection sg segment(s) */
1658 tot_prot_dsds
= nseg
;
1660 if (req
->cnt
< (req_cnt
+ 2)) {
1661 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
1662 RD_REG_DWORD_RELAXED(req
->req_q_out
);
1663 if (req
->ring_index
< cnt
)
1664 req
->cnt
= cnt
- req
->ring_index
;
1666 req
->cnt
= req
->length
-
1667 (req
->ring_index
- cnt
);
1668 if (req
->cnt
< (req_cnt
+ 2))
1672 status
|= QDSS_GOT_Q_SPACE
;
1674 /* Build header part of command packet (excluding the OPCODE). */
1675 req
->current_outstanding_cmd
= handle
;
1676 req
->outstanding_cmds
[handle
] = sp
;
1677 sp
->handle
= handle
;
1678 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1679 req
->cnt
-= req_cnt
;
1681 /* Fill-in common area */
1682 cmd_pkt
= (struct cmd_type_crc_2
*)req
->ring_ptr
;
1683 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1685 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1686 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1688 /* Set NPORT-ID and LUN number*/
1689 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1690 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1691 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1692 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1694 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
1695 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1697 /* Total Data and protection segment(s) */
1698 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1700 /* Build IOCB segments and adjust for data protection segments */
1701 if (qla24xx_build_scsi_crc_2_iocbs(sp
, (struct cmd_type_crc_2
*)
1702 req
->ring_ptr
, tot_dsds
, tot_prot_dsds
, fw_prot_opts
) !=
1706 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1707 /* Specify response queue number where completion should happen */
1708 cmd_pkt
->entry_status
= (uint8_t) rsp
->id
;
1709 cmd_pkt
->timeout
= cpu_to_le16(0);
1712 /* Adjust ring index. */
1714 if (req
->ring_index
== req
->length
) {
1715 req
->ring_index
= 0;
1716 req
->ring_ptr
= req
->ring
;
1720 /* Set chip new ring index. */
1721 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1723 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1728 if (status
& QDSS_GOT_Q_SPACE
) {
1729 req
->outstanding_cmds
[handle
] = NULL
;
1730 req
->cnt
+= req_cnt
;
1732 /* Cleanup will be performed by the caller (queuecommand) */
1734 spin_unlock_irqrestore(&ha
->hardware_lock
, flags
);
1735 return QLA_FUNCTION_FAILED
;
1739 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1740 * @sp: command to send to the ISP
1742 * Returns non-zero if a failure occurred, else zero.
1745 qla2xxx_start_scsi_mq(srb_t
*sp
)
1748 unsigned long flags
;
1752 struct cmd_type_7
*cmd_pkt
;
1756 struct req_que
*req
= NULL
;
1757 struct rsp_que
*rsp
= NULL
;
1758 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1759 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1760 struct qla_hw_data
*ha
= vha
->hw
;
1761 struct qla_qpair
*qpair
= sp
->qpair
;
1763 /* Acquire qpair specific lock */
1764 spin_lock_irqsave(&qpair
->qp_lock
, flags
);
1766 /* Setup qpair pointers */
1770 /* So we know we haven't pci_map'ed anything yet */
1773 /* Send marker if required */
1774 if (vha
->marker_needed
!= 0) {
1775 if (__qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1777 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
1778 return QLA_FUNCTION_FAILED
;
1780 vha
->marker_needed
= 0;
1783 /* Check for room in outstanding command list. */
1784 handle
= req
->current_outstanding_cmd
;
1785 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
1787 if (handle
== req
->num_outstanding_cmds
)
1789 if (!req
->outstanding_cmds
[handle
])
1792 if (index
== req
->num_outstanding_cmds
)
1795 /* Map the sg table so we have an accurate count of sg entries needed */
1796 if (scsi_sg_count(cmd
)) {
1797 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1798 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1799 if (unlikely(!nseg
))
1805 req_cnt
= qla24xx_calc_iocbs(vha
, tot_dsds
);
1806 if (req
->cnt
< (req_cnt
+ 2)) {
1807 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
1808 RD_REG_DWORD_RELAXED(req
->req_q_out
);
1809 if (req
->ring_index
< cnt
)
1810 req
->cnt
= cnt
- req
->ring_index
;
1812 req
->cnt
= req
->length
-
1813 (req
->ring_index
- cnt
);
1814 if (req
->cnt
< (req_cnt
+ 2))
1818 /* Build command packet. */
1819 req
->current_outstanding_cmd
= handle
;
1820 req
->outstanding_cmds
[handle
] = sp
;
1821 sp
->handle
= handle
;
1822 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
1823 req
->cnt
-= req_cnt
;
1825 cmd_pkt
= (struct cmd_type_7
*)req
->ring_ptr
;
1826 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
1828 /* Zero out remaining portion of packet. */
1829 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1830 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
1831 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
1832 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
1834 /* Set NPORT-ID and LUN number*/
1835 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
1836 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
1837 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
1838 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
1839 cmd_pkt
->vp_index
= sp
->fcport
->vha
->vp_idx
;
1841 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
1842 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
1844 cmd_pkt
->task
= TSK_SIMPLE
;
1846 /* Load SCSI command packet. */
1847 memcpy(cmd_pkt
->fcp_cdb
, cmd
->cmnd
, cmd
->cmd_len
);
1848 host_to_fcp_swap(cmd_pkt
->fcp_cdb
, sizeof(cmd_pkt
->fcp_cdb
));
1850 cmd_pkt
->byte_count
= cpu_to_le32((uint32_t)scsi_bufflen(cmd
));
1852 /* Build IOCB segments */
1853 qla24xx_build_scsi_iocbs(sp
, cmd_pkt
, tot_dsds
, req
);
1855 /* Set total data segment count. */
1856 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
1858 /* Adjust ring index. */
1860 if (req
->ring_index
== req
->length
) {
1861 req
->ring_index
= 0;
1862 req
->ring_ptr
= req
->ring
;
1866 sp
->flags
|= SRB_DMA_VALID
;
1868 /* Set chip new ring index. */
1869 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
1871 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
1876 scsi_dma_unmap(cmd
);
1878 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
1880 return QLA_FUNCTION_FAILED
;
1885 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1886 * @sp: command to send to the ISP
1888 * Returns non-zero if a failure occurred, else zero.
1891 qla2xxx_dif_start_scsi_mq(srb_t
*sp
)
1894 unsigned long flags
;
1899 uint16_t req_cnt
= 0;
1901 uint16_t tot_prot_dsds
;
1902 uint16_t fw_prot_opts
= 0;
1903 struct req_que
*req
= NULL
;
1904 struct rsp_que
*rsp
= NULL
;
1905 struct scsi_cmnd
*cmd
= GET_CMD_SP(sp
);
1906 struct scsi_qla_host
*vha
= sp
->fcport
->vha
;
1907 struct qla_hw_data
*ha
= vha
->hw
;
1908 struct cmd_type_crc_2
*cmd_pkt
;
1909 uint32_t status
= 0;
1910 struct qla_qpair
*qpair
= sp
->qpair
;
1912 #define QDSS_GOT_Q_SPACE BIT_0
1914 /* Check for host side state */
1915 if (!qpair
->online
) {
1916 cmd
->result
= DID_NO_CONNECT
<< 16;
1917 return QLA_INTERFACE_ERROR
;
1920 if (!qpair
->difdix_supported
&&
1921 scsi_get_prot_op(cmd
) != SCSI_PROT_NORMAL
) {
1922 cmd
->result
= DID_NO_CONNECT
<< 16;
1923 return QLA_INTERFACE_ERROR
;
1926 /* Only process protection or >16 cdb in this routine */
1927 if (scsi_get_prot_op(cmd
) == SCSI_PROT_NORMAL
) {
1928 if (cmd
->cmd_len
<= 16)
1929 return qla2xxx_start_scsi_mq(sp
);
1932 spin_lock_irqsave(&qpair
->qp_lock
, flags
);
1934 /* Setup qpair pointers */
1938 /* So we know we haven't pci_map'ed anything yet */
1941 /* Send marker if required */
1942 if (vha
->marker_needed
!= 0) {
1943 if (__qla2x00_marker(vha
, req
, rsp
, 0, 0, MK_SYNC_ALL
) !=
1945 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
1946 return QLA_FUNCTION_FAILED
;
1948 vha
->marker_needed
= 0;
1951 /* Check for room in outstanding command list. */
1952 handle
= req
->current_outstanding_cmd
;
1953 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
1955 if (handle
== req
->num_outstanding_cmds
)
1957 if (!req
->outstanding_cmds
[handle
])
1961 if (index
== req
->num_outstanding_cmds
)
1964 /* Compute number of required data segments */
1965 /* Map the sg table so we have an accurate count of sg entries needed */
1966 if (scsi_sg_count(cmd
)) {
1967 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_sglist(cmd
),
1968 scsi_sg_count(cmd
), cmd
->sc_data_direction
);
1969 if (unlikely(!nseg
))
1972 sp
->flags
|= SRB_DMA_VALID
;
1974 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
1975 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
1976 struct qla2_sgx sgx
;
1979 memset(&sgx
, 0, sizeof(struct qla2_sgx
));
1980 sgx
.tot_bytes
= scsi_bufflen(cmd
);
1981 sgx
.cur_sg
= scsi_sglist(cmd
);
1985 while (qla24xx_get_one_block_sg(
1986 cmd
->device
->sector_size
, &sgx
, &partial
))
1992 /* number of required data segments */
1995 /* Compute number of required protection segments */
1996 if (qla24xx_configure_prot_mode(sp
, &fw_prot_opts
)) {
1997 nseg
= dma_map_sg(&ha
->pdev
->dev
, scsi_prot_sglist(cmd
),
1998 scsi_prot_sg_count(cmd
), cmd
->sc_data_direction
);
1999 if (unlikely(!nseg
))
2002 sp
->flags
|= SRB_CRC_PROT_DMA_VALID
;
2004 if ((scsi_get_prot_op(cmd
) == SCSI_PROT_READ_INSERT
) ||
2005 (scsi_get_prot_op(cmd
) == SCSI_PROT_WRITE_STRIP
)) {
2006 nseg
= scsi_bufflen(cmd
) / cmd
->device
->sector_size
;
2013 /* Total Data and protection sg segment(s) */
2014 tot_prot_dsds
= nseg
;
2016 if (req
->cnt
< (req_cnt
+ 2)) {
2017 cnt
= IS_SHADOW_REG_CAPABLE(ha
) ? *req
->out_ptr
:
2018 RD_REG_DWORD_RELAXED(req
->req_q_out
);
2019 if (req
->ring_index
< cnt
)
2020 req
->cnt
= cnt
- req
->ring_index
;
2022 req
->cnt
= req
->length
-
2023 (req
->ring_index
- cnt
);
2024 if (req
->cnt
< (req_cnt
+ 2))
2028 status
|= QDSS_GOT_Q_SPACE
;
2030 /* Build header part of command packet (excluding the OPCODE). */
2031 req
->current_outstanding_cmd
= handle
;
2032 req
->outstanding_cmds
[handle
] = sp
;
2033 sp
->handle
= handle
;
2034 cmd
->host_scribble
= (unsigned char *)(unsigned long)handle
;
2035 req
->cnt
-= req_cnt
;
2037 /* Fill-in common area */
2038 cmd_pkt
= (struct cmd_type_crc_2
*)req
->ring_ptr
;
2039 cmd_pkt
->handle
= MAKE_HANDLE(req
->id
, handle
);
2041 clr_ptr
= (uint32_t *)cmd_pkt
+ 2;
2042 memset(clr_ptr
, 0, REQUEST_ENTRY_SIZE
- 8);
2044 /* Set NPORT-ID and LUN number*/
2045 cmd_pkt
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2046 cmd_pkt
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2047 cmd_pkt
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2048 cmd_pkt
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2050 int_to_scsilun(cmd
->device
->lun
, &cmd_pkt
->lun
);
2051 host_to_fcp_swap((uint8_t *)&cmd_pkt
->lun
, sizeof(cmd_pkt
->lun
));
2053 /* Total Data and protection segment(s) */
2054 cmd_pkt
->dseg_count
= cpu_to_le16(tot_dsds
);
2056 /* Build IOCB segments and adjust for data protection segments */
2057 if (qla24xx_build_scsi_crc_2_iocbs(sp
, (struct cmd_type_crc_2
*)
2058 req
->ring_ptr
, tot_dsds
, tot_prot_dsds
, fw_prot_opts
) !=
2062 cmd_pkt
->entry_count
= (uint8_t)req_cnt
;
2063 cmd_pkt
->timeout
= cpu_to_le16(0);
2066 /* Adjust ring index. */
2068 if (req
->ring_index
== req
->length
) {
2069 req
->ring_index
= 0;
2070 req
->ring_ptr
= req
->ring
;
2074 /* Set chip new ring index. */
2075 WRT_REG_DWORD(req
->req_q_in
, req
->ring_index
);
2077 /* Manage unprocessed RIO/ZIO commands in response queue. */
2078 if (vha
->flags
.process_response_queue
&&
2079 rsp
->ring_ptr
->signature
!= RESPONSE_PROCESSED
)
2080 qla24xx_process_response_queue(vha
, rsp
);
2082 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
2087 if (status
& QDSS_GOT_Q_SPACE
) {
2088 req
->outstanding_cmds
[handle
] = NULL
;
2089 req
->cnt
+= req_cnt
;
2091 /* Cleanup will be performed by the caller (queuecommand) */
2093 spin_unlock_irqrestore(&qpair
->qp_lock
, flags
);
2094 return QLA_FUNCTION_FAILED
;
2097 /* Generic Control-SRB manipulation functions. */
2099 /* hardware_lock assumed to be held. */
2102 __qla2x00_alloc_iocbs(struct qla_qpair
*qpair
, srb_t
*sp
)
2104 scsi_qla_host_t
*vha
= qpair
->vha
;
2105 struct qla_hw_data
*ha
= vha
->hw
;
2106 struct req_que
*req
= qpair
->req
;
2107 device_reg_t
*reg
= ISP_QUE_REG(ha
, req
->id
);
2108 uint32_t index
, handle
;
2110 uint16_t cnt
, req_cnt
;
2116 if (sp
&& (sp
->type
!= SRB_SCSI_CMD
)) {
2117 /* Adjust entry-counts as needed. */
2118 req_cnt
= sp
->iocbs
;
2121 /* Check for room on request queue. */
2122 if (req
->cnt
< req_cnt
+ 2) {
2123 if (qpair
->use_shadow_reg
)
2124 cnt
= *req
->out_ptr
;
2125 else if (ha
->mqenable
|| IS_QLA83XX(ha
) || IS_QLA27XX(ha
))
2126 cnt
= RD_REG_DWORD(®
->isp25mq
.req_q_out
);
2127 else if (IS_P3P_TYPE(ha
))
2128 cnt
= RD_REG_DWORD(®
->isp82
.req_q_out
);
2129 else if (IS_FWI2_CAPABLE(ha
))
2130 cnt
= RD_REG_DWORD(®
->isp24
.req_q_out
);
2131 else if (IS_QLAFX00(ha
))
2132 cnt
= RD_REG_DWORD(®
->ispfx00
.req_q_out
);
2134 cnt
= qla2x00_debounce_register(
2135 ISP_REQ_Q_OUT(ha
, ®
->isp
));
2137 if (req
->ring_index
< cnt
)
2138 req
->cnt
= cnt
- req
->ring_index
;
2140 req
->cnt
= req
->length
-
2141 (req
->ring_index
- cnt
);
2143 if (req
->cnt
< req_cnt
+ 2)
2147 /* Check for room in outstanding command list. */
2148 handle
= req
->current_outstanding_cmd
;
2149 for (index
= 1; index
< req
->num_outstanding_cmds
; index
++) {
2151 if (handle
== req
->num_outstanding_cmds
)
2153 if (!req
->outstanding_cmds
[handle
])
2156 if (index
== req
->num_outstanding_cmds
) {
2157 ql_log(ql_log_warn
, vha
, 0x700b,
2158 "No room on outstanding cmd array.\n");
2162 /* Prep command array. */
2163 req
->current_outstanding_cmd
= handle
;
2164 req
->outstanding_cmds
[handle
] = sp
;
2165 sp
->handle
= handle
;
2169 req
->cnt
-= req_cnt
;
2170 pkt
= req
->ring_ptr
;
2171 memset(pkt
, 0, REQUEST_ENTRY_SIZE
);
2172 if (IS_QLAFX00(ha
)) {
2173 WRT_REG_BYTE((void __iomem
*)&pkt
->entry_count
, req_cnt
);
2174 WRT_REG_WORD((void __iomem
*)&pkt
->handle
, handle
);
2176 pkt
->entry_count
= req_cnt
;
2177 pkt
->handle
= handle
;
2183 qpair
->tgt_counters
.num_alloc_iocb_failed
++;
2188 qla2x00_alloc_iocbs_ready(struct qla_qpair
*qpair
, srb_t
*sp
)
2190 scsi_qla_host_t
*vha
= qpair
->vha
;
2192 if (qla2x00_reset_active(vha
))
2195 return __qla2x00_alloc_iocbs(qpair
, sp
);
2199 qla2x00_alloc_iocbs(struct scsi_qla_host
*vha
, srb_t
*sp
)
2201 return __qla2x00_alloc_iocbs(vha
->hw
->base_qpair
, sp
);
2205 qla24xx_prli_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
2207 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
2209 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
2210 logio
->control_flags
= cpu_to_le16(LCF_COMMAND_PRLI
);
2211 if (lio
->u
.logio
.flags
& SRB_LOGIN_NVME_PRLI
)
2212 logio
->control_flags
|= LCF_NVME_PRLI
;
2214 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2215 logio
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2216 logio
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2217 logio
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2218 logio
->vp_index
= sp
->vha
->vp_idx
;
2222 qla24xx_login_iocb(srb_t
*sp
, struct logio_entry_24xx
*logio
)
2224 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
2226 logio
->entry_type
= LOGINOUT_PORT_IOCB_TYPE
;
2227 if (lio
->u
.logio
.flags
& SRB_LOGIN_PRLI_ONLY
) {
2228 logio
->control_flags
= cpu_to_le16(LCF_COMMAND_PRLI
);
2230 logio
->control_flags
= cpu_to_le16(LCF_COMMAND_PLOGI
);
2231 if (lio
->u
.logio
.flags
& SRB_LOGIN_COND_PLOGI
)
2232 logio
->control_flags
|= cpu_to_le16(LCF_COND_PLOGI
);
2233 if (lio
->u
.logio
.flags
& SRB_LOGIN_SKIP_PRLI
)
2234 logio
->control_flags
|= cpu_to_le16(LCF_SKIP_PRLI
);
2236 logio
->nport_handle
= cpu_to_le16(sp
->fcport
->loop_id
);
2237 logio
->port_id
[0] = sp
->fcport
->d_id
.b
.al_pa
;
2238 logio
->port_id
[1] = sp
->fcport
->d_id
.b
.area
;
2239 logio
->port_id
[2] = sp
->fcport
->d_id
.b
.domain
;
2240 logio
->vp_index
= sp
->vha
->vp_idx
;
2244 qla2x00_login_iocb(srb_t
*sp
, struct mbx_entry
*mbx
)
2246 struct qla_hw_data
*ha
= sp
->vha
->hw
;
2247 struct srb_iocb
*lio
= &sp
->u
.iocb_cmd
;
2250 mbx
->entry_type
= MBX_IOCB_TYPE
;
2251 SET_TARGET_ID(ha
, mbx
->loop_id
, sp
->fcport
->loop_id
);
2252 mbx
->mb0
= cpu_to_le16(MBC_LOGIN_FABRIC_PORT
);
2253 opts
= lio
->u
.logio
.flags
& SRB_LOGIN_COND_PLOGI
? BIT_0
: 0;
2254 opts
|= lio
->u
.logio
.flags
& SRB_LOGIN_SKIP_PRLI
? BIT_1
: 0;
2255 if (HAS_EXTENDED_IDS(ha
)) {
2256 mbx
->mb1
= cpu_to_le16(sp
->fcport
->loop_id
);
2257 mbx
->mb10
= cpu_to_le16(opts
);
2259 mbx
->mb1
= cpu_to_le16((sp
->fcport
->loop_id
<< 8) | opts
);
2261 mbx
->mb2
= cpu_to_le16(sp
->fcport
->d_id
.b
.domain
);
2262 mbx
->mb3
= cpu_to_le16(sp
->fcport
->d_id
.b
.area
<< 8 |
2263 sp
->fcport
->d_id
.b
.al_pa
);
2264 mbx
->mb9
= cpu_to_le16(sp
->vha
->vp_idx
);
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags =
        cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
    if (!sp->fcport->se_sess ||
        !sp->fcport->keep_nport_handle)
        logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
    mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
        cpu_to_le16(sp->fcport->loop_id) :
        cpu_to_le16(sp->fcport->loop_id << 8);
    mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
    mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
        sp->fcport->d_id.b.al_pa);
    mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
    /* Implicit: mbx->mbx10 = 0. */
}

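/**
 * qla24xx_adisc_iocb() - Prepare an ADISC Login/Logout Port IOCB.
 * @sp: SRB command to process
 * @logio: login/logout entry to populate
 */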
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
    if (HAS_EXTENDED_IDS(ha)) {
        mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
        mbx->mb10 = cpu_to_le16(BIT_0);
    } else {
        mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
    }
    mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
    mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
    mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
    mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
    mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

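/**
 * qla24xx_tm_iocb() - Prepare a Task Management IOCB.
 * @sp: SRB command to process
 * @tsk: task management entry to populate
 */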
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
    uint32_t flags;
    uint64_t lun;
    struct fc_port *fcport = sp->fcport;
    scsi_qla_host_t *vha = fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct srb_iocb *iocb = &sp->u.iocb_cmd;
    struct req_que *req = vha->req;

    flags = iocb->u.tmf.flags;
    lun = iocb->u.tmf.lun;

    tsk->entry_type = TSK_MGMT_IOCB_TYPE;
    tsk->entry_count = 1;
    tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
    tsk->nport_handle = cpu_to_le16(fcport->loop_id);
    tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
    tsk->control_flags = cpu_to_le32(flags);
    tsk->port_id[0] = fcport->d_id.b.al_pa;
    tsk->port_id[1] = fcport->d_id.b.area;
    tsk->port_id[2] = fcport->d_id.b.domain;
    tsk->vp_index = fcport->vha->vp_idx;

    if (flags == TCF_LUN_RESET) {
        int_to_scsilun(lun, &tsk->lun);
        host_to_fcp_swap((uint8_t *)&tsk->lun,
            sizeof(tsk->lun));
    }
}

static void
qla2x00_els_dcmd_sp_free(void *data)
{
    srb_t *sp = data;
    struct srb_iocb *elsio = &sp->u.iocb_cmd;

    kfree(sp->fcport);

    if (elsio->u.els_logo.els_logo_pyld)
        dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
            elsio->u.els_logo.els_logo_pyld,
            elsio->u.els_logo.els_logo_pyld_dma);

    del_timer(&elsio->timer);
    qla2x00_rel_sp(sp);
}

static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
    srb_t *sp = data;
    fc_port_t *fcport = sp->fcport;
    struct scsi_qla_host *vha = sp->vha;
    struct srb_iocb *lio = &sp->u.iocb_cmd;

    ql_dbg(ql_dbg_io, vha, 0x3069,
        "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
        sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
        fcport->d_id.b.al_pa);

    complete(&lio->u.els_logo.comp);
}

static void
qla2x00_els_dcmd_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    fc_port_t *fcport = sp->fcport;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    struct scsi_qla_host *vha = sp->vha;

    ql_dbg(ql_dbg_io, vha, 0x3072,
        "%s hdl=%x, portid=%02x%02x%02x done\n",
        sp->name, sp->handle, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa);

    complete(&lio->u.els_logo.comp);
}

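/**
 * qla24xx_els_dcmd_iocb() - Issue a driver-generated ELS (LOGO) to a remote
 *	port and wait for its completion.
 * @vha: HA context
 * @els_opcode: ELS command opcode
 * @remote_did: destination port ID
 *
 * Returns QLA_SUCCESS on success, a failure code otherwise.
 */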
int
qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
    port_id_t remote_did)
{
    srb_t *sp;
    fc_port_t *fcport = NULL;
    struct srb_iocb *elsio = NULL;
    struct qla_hw_data *ha = vha->hw;
    struct els_logo_payload logo_pyld;
    int rval = QLA_SUCCESS;

    fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (!fcport) {
        ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
        return -ENOMEM;
    }

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        kfree(fcport);
        ql_log(ql_log_info, vha, 0x70e6,
            "SRB allocation failed\n");
        return -ENOMEM;
    }

    elsio = &sp->u.iocb_cmd;
    fcport->loop_id = 0xFFFF;
    fcport->d_id.b.domain = remote_did.b.domain;
    fcport->d_id.b.area = remote_did.b.area;
    fcport->d_id.b.al_pa = remote_did.b.al_pa;

    ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

    sp->type = SRB_ELS_DCMD;
    sp->name = "ELS_DCMD";
    sp->fcport = fcport;
    elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
    qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
    init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
    sp->done = qla2x00_els_dcmd_sp_done;
    sp->free = qla2x00_els_dcmd_sp_free;

    elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
        DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
        GFP_KERNEL);

    if (!elsio->u.els_logo.els_logo_pyld) {
        sp->free(sp);
        return QLA_FUNCTION_FAILED;
    }

    memset(&logo_pyld, 0, sizeof(struct els_logo_payload));

    elsio->u.els_logo.els_cmd = els_opcode;
    logo_pyld.opcode = els_opcode;
    logo_pyld.s_id[0] = vha->d_id.b.al_pa;
    logo_pyld.s_id[1] = vha->d_id.b.area;
    logo_pyld.s_id[2] = vha->d_id.b.domain;
    host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
    memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);

    memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
        sizeof(struct els_logo_payload));

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        sp->free(sp);
        return QLA_FUNCTION_FAILED;
    }

    ql_dbg(ql_dbg_io, vha, 0x3074,
        "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
        sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa);

    wait_for_completion(&elsio->u.els_logo.comp);

    sp->free(sp);
    return rval;
}

static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
    scsi_qla_host_t *vha = sp->vha;
    struct srb_iocb *elsio = &sp->u.iocb_cmd;

    els_iocb->entry_type = ELS_IOCB_TYPE;
    els_iocb->entry_count = 1;
    els_iocb->sys_define = 0;
    els_iocb->entry_status = 0;
    els_iocb->handle = sp->handle;
    els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    els_iocb->tx_dsd_count = 1;
    els_iocb->vp_index = vha->vp_idx;
    els_iocb->sof_type = EST_SOFI3;
    els_iocb->rx_dsd_count = 0;
    els_iocb->opcode = elsio->u.els_logo.els_cmd;

    els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
    els_iocb->port_id[1] = sp->fcport->d_id.b.area;
    els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
    els_iocb->s_id[0] = vha->d_id.b.al_pa;
    els_iocb->s_id[1] = vha->d_id.b.area;
    els_iocb->s_id[2] = vha->d_id.b.domain;
    els_iocb->control_flags = 0;

    if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
        els_iocb->tx_byte_count = els_iocb->tx_len =
            sizeof(struct els_plogi_payload);
        els_iocb->tx_address[0] =
            cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
        els_iocb->tx_address[1] =
            cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));

        els_iocb->rx_dsd_count = 1;
        els_iocb->rx_byte_count = els_iocb->rx_len =
            sizeof(struct els_plogi_payload);
        els_iocb->rx_address[0] =
            cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
        els_iocb->rx_address[1] =
            cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));

        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
            "PLOGI ELS IOCB:\n");
        ql_dump_buffer(ql_log_info, vha, 0x0109,
            (uint8_t *)els_iocb, 0x70);
    } else {
        els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
        els_iocb->tx_address[0] =
            cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
        els_iocb->tx_address[1] =
            cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
        els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));

        els_iocb->rx_byte_count = 0;
        els_iocb->rx_address[0] = 0;
        els_iocb->rx_address[1] = 0;
        els_iocb->rx_len = 0;
    }

    sp->vha->qla_stats.control_requests++;
}

static void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
    srb_t *sp = data;
    fc_port_t *fcport = sp->fcport;
    struct scsi_qla_host *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags = 0;
    int res;

    ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
        "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
        sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);

    /* Abort the exchange */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    res = ha->isp_ops->abort_command(sp);
    ql_dbg(ql_dbg_io, vha, 0x3070,
        "mbx abort_command %s\n",
        (res == QLA_SUCCESS) ? "successful" : "failed");
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    sp->done(sp, QLA_FUNCTION_TIMEOUT);
}

static void
qla2x00_els_dcmd2_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    fc_port_t *fcport = sp->fcport;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    struct scsi_qla_host *vha = sp->vha;
    struct event_arg ea;
    struct qla_work_evt *e;

    ql_dbg(ql_dbg_disc, vha, 0x3072,
        "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
        sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);

    fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
    del_timer(&sp->u.iocb_cmd.timer);

    if (sp->flags & SRB_WAKEUP_ON_COMP)
        complete(&lio->u.els_plogi.comp);
    else {
        if (res) {
            set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        } else {
            memset(&ea, 0, sizeof(ea));
            ea.fcport = fcport;
            ea.rc = res;
            ea.event = FCME_ELS_PLOGI_DONE;
            qla2x00_fcport_event_handler(vha, &ea);
        }

        e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
        if (!e) {
            struct srb_iocb *elsio = &sp->u.iocb_cmd;

            if (elsio->u.els_plogi.els_plogi_pyld)
                dma_free_coherent(&sp->vha->hw->pdev->dev,
                    elsio->u.els_plogi.tx_size,
                    elsio->u.els_plogi.els_plogi_pyld,
                    elsio->u.els_plogi.els_plogi_pyld_dma);

            if (elsio->u.els_plogi.els_resp_pyld)
                dma_free_coherent(&sp->vha->hw->pdev->dev,
                    elsio->u.els_plogi.rx_size,
                    elsio->u.els_plogi.els_resp_pyld,
                    elsio->u.els_plogi.els_resp_pyld_dma);
            sp->free(sp);
            return;
        }
        e->u.iosb.sp = sp;
        qla2x00_post_work(vha, e);
    }
}

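/**
 * qla24xx_els_dcmd2_iocb() - Issue a driver-generated ELS PLOGI to a remote
 *	port, optionally waiting for its completion.
 * @vha: HA context
 * @els_opcode: ELS command opcode
 * @fcport: remote port the PLOGI is sent to
 * @wait: wait for command completion when true
 */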
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
    fc_port_t *fcport, bool wait)
{
    srb_t *sp;
    struct srb_iocb *elsio = NULL;
    struct qla_hw_data *ha = vha->hw;
    int rval = QLA_SUCCESS;
    void *ptr, *resp_ptr;
    dma_addr_t ptr_dma;

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        ql_log(ql_log_info, vha, 0x70e6,
            "SRB allocation failed\n");
        return -ENOMEM;
    }

    elsio = &sp->u.iocb_cmd;
    ql_dbg(ql_dbg_io, vha, 0x3073,
        "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);

    fcport->flags |= FCF_ASYNC_SENT;
    sp->type = SRB_ELS_DCMD;
    sp->name = "ELS_DCMD";
    sp->fcport = fcport;

    elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
    init_completion(&elsio->u.els_plogi.comp);
    if (wait)
        sp->flags = SRB_WAKEUP_ON_COMP;

    qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);

    sp->done = qla2x00_els_dcmd2_sp_done;
    elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;

    ptr = elsio->u.els_plogi.els_plogi_pyld =
        dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
            &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
    ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;

    if (!elsio->u.els_plogi.els_plogi_pyld) {
        rval = QLA_FUNCTION_FAILED;
        goto out;
    }

    resp_ptr = elsio->u.els_plogi.els_resp_pyld =
        dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
            &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);

    if (!elsio->u.els_plogi.els_resp_pyld) {
        rval = QLA_FUNCTION_FAILED;
        goto out;
    }

    ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);

    memset(ptr, 0, sizeof(struct els_plogi_payload));
    memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
    memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
        &ha->plogi_els_payld.data,
        sizeof(elsio->u.els_plogi.els_plogi_pyld->data));

    elsio->u.els_plogi.els_cmd = els_opcode;
    elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;

    ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
    ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
        (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
        sizeof(*elsio->u.els_plogi.els_plogi_pyld));

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        rval = QLA_FUNCTION_FAILED;
    } else {
        ql_dbg(ql_dbg_disc, vha, 0x3074,
            "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
            sp->name, sp->handle, fcport->loop_id,
            fcport->d_id.b24, vha->d_id.b24);
    }

    if (wait) {
        wait_for_completion(&elsio->u.els_plogi.comp);

        if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
            rval = QLA_FUNCTION_FAILED;
    } else {
        goto done;
    }

out:
    fcport->flags &= ~(FCF_ASYNC_SENT);
    if (elsio->u.els_plogi.els_plogi_pyld)
        dma_free_coherent(&sp->vha->hw->pdev->dev,
            elsio->u.els_plogi.tx_size,
            elsio->u.els_plogi.els_plogi_pyld,
            elsio->u.els_plogi.els_plogi_pyld_dma);

    if (elsio->u.els_plogi.els_resp_pyld)
        dma_free_coherent(&sp->vha->hw->pdev->dev,
            elsio->u.els_plogi.rx_size,
            elsio->u.els_plogi.els_resp_pyld,
            elsio->u.els_plogi.els_resp_pyld_dma);

    sp->free(sp);
done:
    return rval;
}

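/**
 * qla24xx_els_iocb() - Prepare an ELS pass-through IOCB from a bsg_job.
 * @sp: SRB command to process
 * @els_iocb: ELS entry to populate
 */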
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
    struct bsg_job *bsg_job = sp->u.bsg_job;
    struct fc_bsg_request *bsg_request = bsg_job->request;

    els_iocb->entry_type = ELS_IOCB_TYPE;
    els_iocb->entry_count = 1;
    els_iocb->sys_define = 0;
    els_iocb->entry_status = 0;
    els_iocb->handle = sp->handle;
    els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
    els_iocb->vp_index = sp->vha->vp_idx;
    els_iocb->sof_type = EST_SOFI3;
    els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);

    els_iocb->opcode =
        sp->type == SRB_ELS_CMD_RPT ?
        bsg_request->rqst_data.r_els.els_code :
        bsg_request->rqst_data.h_els.command_code;
    els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
    els_iocb->port_id[1] = sp->fcport->d_id.b.area;
    els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
    els_iocb->control_flags = 0;
    els_iocb->rx_byte_count =
        cpu_to_le32(bsg_job->reply_payload.payload_len);
    els_iocb->tx_byte_count =
        cpu_to_le32(bsg_job->request_payload.payload_len);

    els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    els_iocb->tx_len = cpu_to_le32(sg_dma_len
        (bsg_job->request_payload.sg_list));

    els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    els_iocb->rx_len = cpu_to_le32(sg_dma_len
        (bsg_job->reply_payload.sg_list));

    sp->vha->qla_stats.control_requests++;
}

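/**
 * qla2x00_ct_iocb() - Prepare a CT pass-through IOCB from a bsg_job.
 * @sp: SRB command to process
 * @ct_iocb: MS IOCB entry to populate
 */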
static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    struct scatterlist *sg;
    int index;
    uint16_t tot_dsds;
    scsi_qla_host_t *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct bsg_job *bsg_job = sp->u.bsg_job;
    int loop_iterartion = 0;
    int entry_count = 1;

    memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
    ct_iocb->entry_type = CT_IOCB_TYPE;
    ct_iocb->entry_status = 0;
    ct_iocb->handle1 = sp->handle;
    SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
    ct_iocb->status = cpu_to_le16(0);
    ct_iocb->control_flags = cpu_to_le16(0);
    ct_iocb->timeout = 0;
    ct_iocb->cmd_dsd_count =
        cpu_to_le16(bsg_job->request_payload.sg_cnt);
    ct_iocb->total_dsd_count =
        cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
    ct_iocb->req_bytecount =
        cpu_to_le32(bsg_job->request_payload.payload_len);
    ct_iocb->rsp_bytecount =
        cpu_to_le32(bsg_job->reply_payload.payload_len);

    ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

    ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

    avail_dsds = 1;
    cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
    index = 0;
    tot_dsds = bsg_job->reply_payload.sg_cnt;

    for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                vha->hw->req_q_map[0]);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        loop_iterartion++;
        avail_dsds--;
    }
    ct_iocb->entry_count = entry_count;

    sp->vha->qla_stats.control_requests++;
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    struct scatterlist *sg;
    int index;
    uint16_t cmd_dsds, rsp_dsds;
    scsi_qla_host_t *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct bsg_job *bsg_job = sp->u.bsg_job;
    int entry_count = 1;
    cont_a64_entry_t *cont_pkt = NULL;

    ct_iocb->entry_type = CT_IOCB_TYPE;
    ct_iocb->entry_status = 0;
    ct_iocb->sys_define = 0;
    ct_iocb->handle = sp->handle;

    ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    ct_iocb->vp_index = sp->vha->vp_idx;
    ct_iocb->comp_status = cpu_to_le16(0);

    cmd_dsds = bsg_job->request_payload.sg_cnt;
    rsp_dsds = bsg_job->reply_payload.sg_cnt;

    ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
    ct_iocb->timeout = 0;
    ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
    ct_iocb->cmd_byte_count =
        cpu_to_le32(bsg_job->request_payload.payload_len);

    avail_dsds = 2;
    cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
    index = 0;

    for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
        dma_addr_t sle_dma;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(
                vha, ha->req_q_map[0]);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }

    index = 0;

    for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
        dma_addr_t sle_dma;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Continuation
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                ha->req_q_map[0]);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
    ct_iocb->entry_count = entry_count;
}

/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    struct scsi_cmnd *cmd;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct device_reg_82xx __iomem *reg;
    uint32_t dbval;
    uint32_t *fcp_dl;
    uint8_t additional_cdb_len;
    struct ct6_dsd *ctx;
    struct scsi_qla_host *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;

    /* Setup device pointers. */
    reg = &ha->iobase->isp82;
    cmd = GET_CMD_SP(sp);
    req = vha->req;
    rsp = ha->rsp_q_map[0];

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    dbval = 0x04 | (ha->portnum << 5);

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req,
            rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x300c,
                "qla2x00_marker failed for cmd=%p.\n", cmd);
            return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }
    if (index == req->num_outstanding_cmds)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;

    if (tot_dsds > ql2xshiftctondsd) {
        struct cmd_type_6 *cmd_pkt;
        uint16_t more_dsd_lists = 0;
        struct dsd_dma *dsd_ptr;
        uint16_t i;

        more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
        if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
            ql_dbg(ql_dbg_io, vha, 0x300d,
                "Num of DSD list %d is than %d for cmd=%p.\n",
                more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
                cmd);
            goto queuing_error;
        }

        if (more_dsd_lists <= ha->gbl_dsd_avail)
            goto sufficient_dsds;
        else
            more_dsd_lists -= ha->gbl_dsd_avail;

        for (i = 0; i < more_dsd_lists; i++) {
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr) {
                ql_log(ql_log_fatal, vha, 0x300e,
                    "Failed to allocate memory for dsd_dma "
                    "for cmd=%p.\n", cmd);
                goto queuing_error;
            }

            dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
            if (!dsd_ptr->dsd_addr) {
                kfree(dsd_ptr);
                ql_log(ql_log_fatal, vha, 0x300f,
                    "Failed to allocate memory for dsd_addr "
                    "for cmd=%p.\n", cmd);
                goto queuing_error;
            }
            list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
            ha->gbl_dsd_avail++;
        }

sufficient_dsds:
        req_cnt = 1;

        if (req->cnt < (req_cnt + 2)) {
            cnt = (uint16_t)RD_REG_DWORD_RELAXED(
                &reg->req_q_out[0]);
            if (req->ring_index < cnt)
                req->cnt = cnt - req->ring_index;
            else
                req->cnt = req->length -
                    (req->ring_index - cnt);
            if (req->cnt < (req_cnt + 2))
                goto queuing_error;
        }

        ctx = sp->u.scmd.ctx =
            mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
        if (!ctx) {
            ql_log(ql_log_fatal, vha, 0x3010,
                "Failed to allocate ctx for cmd=%p.\n", cmd);
            goto queuing_error;
        }

        memset(ctx, 0, sizeof(struct ct6_dsd));
        ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
            GFP_ATOMIC, &ctx->fcp_cmnd_dma);
        if (!ctx->fcp_cmnd) {
            ql_log(ql_log_fatal, vha, 0x3011,
                "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
            goto queuing_error;
        }

        /* Initialize the DSD list and dma handle */
        INIT_LIST_HEAD(&ctx->dsd_list);
        ctx->dsd_use_cnt = 0;

        if (cmd->cmd_len > 16) {
            additional_cdb_len = cmd->cmd_len - 16;
            if ((cmd->cmd_len % 4) != 0) {
                /* SCSI command bigger than 16 bytes must be
                 * multiple of 4
                 */
                ql_log(ql_log_warn, vha, 0x3012,
                    "scsi cmd len %d not multiple of 4 "
                    "for cmd=%p.\n", cmd->cmd_len, cmd);
                goto queuing_error_fcp_cmnd;
            }
            ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
            additional_cdb_len = 0;
            ctx->fcp_cmnd_len = 12 + 16 + 4;
        }

        cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number*/
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Build IOCB segments */
        if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
            goto queuing_error_fcp_cmnd;

        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* build FCP_CMND IU */
        int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
        ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

        if (cmd->sc_data_direction == DMA_TO_DEVICE)
            ctx->fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
            ctx->fcp_cmnd->additional_cdb_len |= 2;

        /* Populate the FCP_PRIO. */
        if (ha->flags.fcp_prio_enabled)
            ctx->fcp_cmnd->task_attribute |=
                sp->fcport->fcp_prio << 3;

        memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

        fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
            additional_cdb_len);
        *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] =
            cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
        cmd_pkt->fcp_cmnd_dseg_address[1] =
            cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

        sp->flags |= SRB_FCP_CMND_DMA_VALID;
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where
         * completion should happen
         */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
    } else {
        struct cmd_type_7 *cmd_pkt;

        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
            cnt = (uint16_t)RD_REG_DWORD_RELAXED(
                &reg->req_q_out[0]);
            if (req->ring_index < cnt)
                req->cnt = cnt - req->ring_index;
            else
                req->cnt = req->length -
                    (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number*/
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->vha->vp_idx;

        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
            sizeof(cmd_pkt->lun));

        /* Populate the FCP_PRIO. */
        if (ha->flags.fcp_prio_enabled)
            cmd_pkt->task |= sp->fcport->fcp_prio << 3;

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where
         * completion should happen.
         */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
    }
    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    /* write, read and verify logic */
    dbval = dbval | (req->id << 8) | (req->ring_index << 16);
    if (ql2xdbwr)
        qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
    else {
        WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
        wmb();
        while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
            WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
            wmb();
        }
    }

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_SUCCESS;

queuing_error_fcp_cmnd:
    dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    if (sp->u.scmd.ctx) {
        mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
        sp->u.scmd.ctx = NULL;
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}

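/**
 * qla24xx_abort_iocb() - Prepare an Abort IOCB for an outstanding command.
 * @sp: SRB command to process
 * @abt_iocb: abort entry to populate
 */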
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
    struct srb_iocb *aio = &sp->u.iocb_cmd;
    scsi_qla_host_t *vha = sp->vha;
    struct req_que *req = sp->qpair->req;

    memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
    abt_iocb->entry_type = ABORT_IOCB_TYPE;
    abt_iocb->entry_count = 1;
    abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
    if (sp->fcport) {
        abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
        abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
    }
    abt_iocb->handle_to_abort =
        cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
            aio->u.abt.cmd_hndl));
    abt_iocb->vp_index = vha->vp_idx;
    abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
    /* Send the command to the firmware */
    wmb();
}

static void
qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
    int i, sz;

    mbx->entry_type = MBX_IOCB_TYPE;
    mbx->handle = sp->handle;
    sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));

    for (i = 0; i < sz; i++)
        mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
}

static void
qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
{
    sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
    qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
    ct_pkt->handle = sp->handle;
}

static void qla2x00_send_notify_ack_iocb(srb_t *sp,
    struct nack_to_isp *nack)
{
    struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;

    nack->entry_type = NOTIFY_ACK_TYPE;
    nack->entry_count = 1;
    nack->ox_id = ntfy->ox_id;

    nack->u.isp24.handle = sp->handle;
    nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
    if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
        nack->u.isp24.flags = ntfy->u.isp24.flags &
            cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
    }
    nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
    nack->u.isp24.status = ntfy->u.isp24.status;
    nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
    nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
    nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
    nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
    nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
    nack->u.isp24.srr_flags = 0;
    nack->u.isp24.srr_reject_code = 0;
    nack->u.isp24.srr_reject_code_expl = 0;
    nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}

/*
 * Build NVME LS request
 */
static int
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
    struct srb_iocb *nvme;
    int rval = QLA_SUCCESS;

    nvme = &sp->u.iocb_cmd;
    cmd_pkt->entry_type = PT_LS4_REQUEST;
    cmd_pkt->entry_count = 1;
    cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;

    cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

    cmd_pkt->tx_dseg_count = 1;
    cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
    cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
    cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
    cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));

    cmd_pkt->rx_dseg_count = 1;
    cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
    cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
    cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
    cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));

    return rval;
}

static void
qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
{
    int map, pos;

    vce->entry_type = VP_CTRL_IOCB_TYPE;
    vce->handle = sp->handle;
    vce->entry_count = 1;
    vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
    vce->vp_count = cpu_to_le16(1);

    /*
     * index map in firmware starts with 1; decrement index
     * this is ok as we never use index 0
     */
    map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
    pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
    vce->vp_idx_map[map] |= 1 << pos;
}

static void
qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags =
        cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->fcport->vha->vp_idx;
}

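/**
 * qla2x00_start_sp() - Allocate an IOCB for @sp, build it according to the
 *	SRB type, and hand it to the firmware.
 * @sp: SRB command to process
 *
 * Returns QLA_SUCCESS if the IOCB was queued, QLA_FUNCTION_FAILED otherwise.
 */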
int
qla2x00_start_sp(srb_t *sp)
{
    int rval;
    scsi_qla_host_t *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    void *pkt;
    unsigned long flags;

    rval = QLA_FUNCTION_FAILED;
    spin_lock_irqsave(&ha->hardware_lock, flags);
    pkt = qla2x00_alloc_iocbs(vha, sp);
    if (!pkt) {
        ql_log(ql_log_warn, vha, 0x700c,
            "qla2x00_alloc_iocbs failed.\n");
        goto done;
    }

    rval = QLA_SUCCESS;
    switch (sp->type) {
    case SRB_LOGIN_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_login_iocb(sp, pkt) :
            qla2x00_login_iocb(sp, pkt);
        break;
    case SRB_PRLI_CMD:
        qla24xx_prli_iocb(sp, pkt);
        break;
    case SRB_LOGOUT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_logout_iocb(sp, pkt) :
            qla2x00_logout_iocb(sp, pkt);
        break;
    case SRB_ELS_CMD_RPT:
    case SRB_ELS_CMD_HST:
        qla24xx_els_iocb(sp, pkt);
        break;
    case SRB_CT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_ct_iocb(sp, pkt) :
            qla2x00_ct_iocb(sp, pkt);
        break;
    case SRB_ADISC_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_adisc_iocb(sp, pkt) :
            qla2x00_adisc_iocb(sp, pkt);
        break;
    case SRB_TM_CMD:
        IS_QLAFX00(ha) ?
            qlafx00_tm_iocb(sp, pkt) :
            qla24xx_tm_iocb(sp, pkt);
        break;
    case SRB_FXIOCB_DCMD:
    case SRB_FXIOCB_BCMD:
        qlafx00_fxdisc_iocb(sp, pkt);
        break;
    case SRB_NVME_LS:
        qla_nvme_ls(sp, pkt);
        break;
    case SRB_ABT_CMD:
        IS_QLAFX00(ha) ?
            qlafx00_abort_iocb(sp, pkt) :
            qla24xx_abort_iocb(sp, pkt);
        break;
    case SRB_ELS_DCMD:
        qla24xx_els_logo_iocb(sp, pkt);
        break;
    case SRB_CT_PTHRU_CMD:
        qla2x00_ctpthru_cmd_iocb(sp, pkt);
        break;
    case SRB_MB_IOCB:
        qla2x00_mb_iocb(sp, pkt);
        break;
    case SRB_NACK_PLOGI:
    case SRB_NACK_PRLI:
    case SRB_NACK_LOGO:
        qla2x00_send_notify_ack_iocb(sp, pkt);
        break;
    case SRB_CTRL_VP:
        qla25xx_ctrlvp_iocb(sp, pkt);
        break;
    case SRB_PRLO_CMD:
        qla24xx_prlo_iocb(sp, pkt);
        break;
    default:
        break;
    }

    wmb();
    qla2x00_start_iocbs(vha, ha->req_q_map[0]);
done:
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return rval;
}

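/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB and its
 *	continuation IOCBs from the bsg_job payloads.
 * @sp: SRB command to process
 * @vha: HA context
 * @cmd_pkt: bidirectional command entry to populate
 * @tot_dsds: total number of data segment descriptors
 */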
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    uint32_t req_data_len = 0;
    uint32_t rsp_data_len = 0;
    struct scatterlist *sg;
    int index;
    int entry_count = 1;
    struct bsg_job *bsg_job = sp->u.bsg_job;

    /*Update entry type to indicate bidir command */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        cpu_to_le32(COMMAND_BIDIRECTIONAL);

    /* Set the transfer direction, in this set both flags
     * Also set the BD_WRAP_BACK flag, firmware will take care
     * assigning DID=SID for outgoing pkts.
     */
    cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
    cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
    cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
        BD_WRAP_BACK);

    req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
    cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
    cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
    cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

    vha->bidi_stats.transfer_bytes += req_data_len;
    vha->bidi_stats.io_count++;

    vha->qla_stats.output_bytes += req_data_len;
    vha->qla_stats.output_requests++;

    /* Only one dsd is available for bidirectional IOCB, remaining dsds
     * are bundled in continuation iocb
     */
    avail_dsds = 1;
    cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

    index = 0;

    for_each_sg(bsg_job->request_payload.sg_list, sg,
        bsg_job->request_payload.sg_cnt, index) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets */
        if (avail_dsds == 0) {
            /* Continuation type 1 IOCB can accomodate
             * 5 DSDS
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }
        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
    /* For read request DSD will always goes to continuation IOCB
     * and follow the write DSD. If there is room on the current IOCB
     * then it is added to that IOCB else new continuation IOCB is
     * allocated.
     */
    for_each_sg(bsg_job->reply_payload.sg_list, sg,
        bsg_job->reply_payload.sg_cnt, index) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets */
        if (avail_dsds == 0) {
            /* Continuation type 1 IOCB can accomodate
             * 5 DSDS
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }
        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
    /* This value should be same as number of IOCB required for this cmd */
    cmd_pkt->entry_count = entry_count;
}

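/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the firmware.
 * @sp: SRB command to process
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns 0 on success or an EXT_STATUS_* error code.
 */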
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    uint32_t handle;
    uint32_t index;
    uint16_t req_cnt;
    uint16_t cnt;
    uint32_t *clr_ptr;
    struct cmd_bidir *cmd_pkt = NULL;
    struct rsp_que *rsp;
    struct req_que *req;
    int rval = EXT_STATUS_OK;

    rval = QLA_SUCCESS;

    rsp = ha->rsp_q_map[0];
    req = vha->req;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req,
            rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
            return EXT_STATUS_MAILBOX;
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }

    if (index == req->num_outstanding_cmds) {
        rval = EXT_STATUS_BUSY;
        goto queuing_error;
    }

    /* Calculate number of IOCB required */
    req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

    /* Check for room on request queue. */
    if (req->cnt < req_cnt + 2) {
        cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
            RD_REG_DWORD_RELAXED(req->req_q_out);
        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
    }
    if (req->cnt < req_cnt + 2) {
        rval = EXT_STATUS_BUSY;
        goto queuing_error;
    }

    cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
    cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

    /* Zero out remaining portion of packet. */
    /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    /* Set NPORT-ID (of vha)*/
    cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
    cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
    cmd_pkt->port_id[1] = vha->d_id.b.area;
    cmd_pkt->port_id[2] = vha->d_id.b.domain;

    qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
    cmd_pkt->entry_status = (uint8_t) rsp->id;
    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    req->cnt -= req_cnt;

    /* Send the command to the firmware */
    wmb();
    qla2x00_start_iocbs(vha, req);
queuing_error:
    spin_unlock_irqrestore(&ha->hardware_lock, flags);