/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

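/*
 * Illustrative note (editorial, not part of the original source): with two
 * DSDs in the Command Type 3 IOCB and five per Continuation Type 1 IOCB,
 * dsds = 12 needs 1 + (12 - 2) / 5 = 3 entries (2 + 5 + 5 segments); any
 * remainder from the division would add one more entry.
 */
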
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

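/*
 * Note (editorial): both prep_cont helpers above treat req->ring as a
 * circular array of req->length entries; when ring_index reaches
 * req->length the pointer is rewound to the base of the ring rather than
 * advanced past the end.
 */
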
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

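/*
 * Illustrative example (editorial, not from the original source): for a
 * WRITE with scsi_get_prot_op() == SCSI_PROT_WRITE_PASS on a host whose DIX
 * guard type is SHOST_DIX_GUARD_IP, *fw_prot_opts ends up with
 * PO_MODE_DIF_TCP_CKSUM set; the returned scsi_prot_sg_count() tells the
 * caller how many protection scatter/gather entries still need DMA mapping.
 */
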
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

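/*
 * Note (editorial): in the 64-bit variant each DSD occupies three
 * little-endian dwords -- address low, address high, length -- which is why
 * cur_dsd advances three times per scatter/gather element here, versus two
 * in the 32-bit builder above.
 */
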
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

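/*
 * Worked example (editorial, not from the original source) of the
 * free-space computation above: with req->length = 128, req->ring_index =
 * 120 and a hardware out-pointer cnt = 10, ring_index >= cnt, so
 * req->cnt = 128 - (120 - 10) = 18 request entries remain before the ring
 * would overrun the IOCBs the firmware has not yet consumed.
 */
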
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: SCSI ID
 * @lun: SCSI LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of dsd list needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}

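/*
 * Illustrative arithmetic (editorial, not part of the original source):
 * this is a ceiling division, so dsds = QLA_DSDS_PER_IOCB yields one list
 * while dsds = QLA_DSDS_PER_IOCB + 1 yields two, the second list holding a
 * single DSD.
 */
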
/*
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

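/*
 * Note (editorial): each call above emits at most one DMA descriptor and
 * never crosses a protection-interval boundary; *partial is cleared only
 * once a full blk_sz bytes have been accumulated, which is the caller's cue
 * that the next 8-byte DIF tuple can be interleaved.
 */
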
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int      = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg    = tc->sg;
		sg_prot       = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as not to re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

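/*
 * Note (editorial): the sle_dma_len = 8 step above interleaves one 8-byte
 * DIF tuple from the protection scatterlist after every full protection
 * interval of data, producing the non-bundled layout in which data and
 * protection bytes alternate in the DSD stream.
 */
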
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}


	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
		"%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
						QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

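/*
 * Worked example (editorial, not from the original source): a 4096-byte
 * transfer on a 512-byte-sector device carries 4096 / 512 = 8 blocks of
 * protection, so dif_bytes = 8 * 8 = 64; for the *_PASS operations the wire
 * count becomes total_bytes = 4096 + 64, and that same value is stored
 * big-endian in the FCP_CMND fcp_dl field via htonl().
 */
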
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}

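/*
 * Note (editorial): the QDSS_GOT_Q_SPACE status bit exists so the error
 * path can tell whether ring space and an outstanding-command slot were
 * already claimed; only then are req->outstanding_cmds[handle] and req->cnt
 * rolled back before returning QLA_FUNCTION_FAILED.
 */
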
/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;
	struct qla_qpair	*qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_FUNCTION_FAILED;
}

/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */

void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (sp && (sp->type != SRB_SCSI_CMD)) {
		/* Adjust entry-counts as needed. */
		req_cnt = sp->iocbs;
	}

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (qpair->use_shadow_reg)
			cnt = *req->out_ptr;
		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < req_cnt + 2)
			goto queuing_error;
	}

	if (sp) {
		/* Check for room in outstanding command list. */
		handle = req->current_outstanding_cmd;
		for (index = 1; index < req->num_outstanding_cmds; index++) {
			handle++;
			if (handle == req->num_outstanding_cmds)
				handle = 1;
			if (!req->outstanding_cmds[handle])
				break;
		}
		if (index == req->num_outstanding_cmds) {
			ql_log(ql_log_warn, vha, 0x700b,
			    "No room on outstanding cmd array.\n");
			goto queuing_error;
		}

		/* Prep command array. */
		req->current_outstanding_cmd = handle;
		req->outstanding_cmds[handle] = sp;
		sp->handle = handle;
	}

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

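/*
 * Note (editorial): the outstanding-command search starts at index 1 and
 * wraps back to 1, never 0, so handle 0 is never assigned to an SRB; that
 * leaves it free to mean "no command", as in the sp == NULL path above
 * where pkt->handle stays 0.
 */
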
void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}

void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}

static void
qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
		logio->control_flags |= LCF_NVME_PRLI;

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
		logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	} else {
		logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
		if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
			logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
		if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
			logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	}
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	if (!sp->fcport->se_sess ||
	    !sp->fcport->keep_nport_handle)
		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
    mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
        cpu_to_le16(sp->fcport->loop_id) :
        cpu_to_le16(sp->fcport->loop_id << 8);
    mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
    mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
        sp->fcport->d_id.b.al_pa);
    mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
    /* Implicit: mbx->mbx10 = 0. */
}

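/**
 * qla24xx_adisc_iocb() - Prepare an ADISC IOCB for ISP24xx.
 * @sp: SRB carrying the request
 * @logio: login/logout IOCB to populate
 */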
static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
    struct qla_hw_data *ha = sp->vha->hw;

    mbx->entry_type = MBX_IOCB_TYPE;
    SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
    mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
    if (HAS_EXTENDED_IDS(ha)) {
        mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
        mbx->mb10 = cpu_to_le16(BIT_0);
    } else {
        mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
    }
    mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
    mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
    mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
    mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
    mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

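/**
 * qla24xx_tm_iocb() - Prepare a task management IOCB.
 * @sp: SRB carrying the TM flags and LUN
 * @tsk: task management IOCB to populate
 */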
static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
    uint32_t flags;
    uint64_t lun;
    struct fc_port *fcport = sp->fcport;
    scsi_qla_host_t *vha = fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct srb_iocb *iocb = &sp->u.iocb_cmd;
    struct req_que *req = vha->req;

    flags = iocb->u.tmf.flags;
    lun = iocb->u.tmf.lun;

    tsk->entry_type = TSK_MGMT_IOCB_TYPE;
    tsk->entry_count = 1;
    tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
    tsk->nport_handle = cpu_to_le16(fcport->loop_id);
    tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
    tsk->control_flags = cpu_to_le32(flags);
    tsk->port_id[0] = fcport->d_id.b.al_pa;
    tsk->port_id[1] = fcport->d_id.b.area;
    tsk->port_id[2] = fcport->d_id.b.domain;
    tsk->vp_index = fcport->vha->vp_idx;

    if (flags == TCF_LUN_RESET) {
        int_to_scsilun(lun, &tsk->lun);
        host_to_fcp_swap((uint8_t *)&tsk->lun,
            sizeof(tsk->lun));
    }
}

static void
qla2x00_els_dcmd_sp_free(void *data)
{
    srb_t *sp = data;
    struct srb_iocb *elsio = &sp->u.iocb_cmd;

    kfree(sp->fcport);

    if (elsio->u.els_logo.els_logo_pyld)
        dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
            elsio->u.els_logo.els_logo_pyld,
            elsio->u.els_logo.els_logo_pyld_dma);

    del_timer(&elsio->timer);
    qla2x00_rel_sp(sp);
}

static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
    srb_t *sp = data;
    fc_port_t *fcport = sp->fcport;
    struct scsi_qla_host *vha = sp->vha;
    struct srb_iocb *lio = &sp->u.iocb_cmd;

    ql_dbg(ql_dbg_io, vha, 0x3069,
        "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
        sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
        fcport->d_id.b.al_pa);

    complete(&lio->u.els_logo.comp);
}

static void
qla2x00_els_dcmd_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    fc_port_t *fcport = sp->fcport;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    struct scsi_qla_host *vha = sp->vha;

    ql_dbg(ql_dbg_io, vha, 0x3072,
        "%s hdl=%x, portid=%02x%02x%02x done\n",
        sp->name, sp->handle, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa);

    complete(&lio->u.els_logo.comp);
}

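/**
 * qla24xx_els_dcmd_iocb() - Issue a driver-generated ELS LOGO and wait
 * for its completion.
 * @vha: HA context
 * @els_opcode: ELS command opcode to send
 * @remote_did: destination port ID
 *
 * Returns QLA_SUCCESS on completion; -ENOMEM or QLA_FUNCTION_FAILED on
 * allocation or submission failure.
 */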
int
qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
    port_id_t remote_did)
{
    srb_t *sp;
    fc_port_t *fcport = NULL;
    struct srb_iocb *elsio = NULL;
    struct qla_hw_data *ha = vha->hw;
    struct els_logo_payload logo_pyld;
    int rval = QLA_SUCCESS;

    fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (!fcport) {
        ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
        return -ENOMEM;
    }

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        kfree(fcport);
        ql_log(ql_log_info, vha, 0x70e6,
            "SRB allocation failed\n");
        return -ENOMEM;
    }

    elsio = &sp->u.iocb_cmd;
    fcport->loop_id = 0xFFFF;
    fcport->d_id.b.domain = remote_did.b.domain;
    fcport->d_id.b.area = remote_did.b.area;
    fcport->d_id.b.al_pa = remote_did.b.al_pa;

    ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

    sp->type = SRB_ELS_DCMD;
    sp->name = "ELS_DCMD";
    sp->fcport = fcport;
    elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
    qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
    init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
    sp->done = qla2x00_els_dcmd_sp_done;
    sp->free = qla2x00_els_dcmd_sp_free;

    elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
        DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
        GFP_KERNEL);

    if (!elsio->u.els_logo.els_logo_pyld) {
        sp->free(sp);
        return QLA_FUNCTION_FAILED;
    }

    memset(&logo_pyld, 0, sizeof(struct els_logo_payload));

    elsio->u.els_logo.els_cmd = els_opcode;
    logo_pyld.opcode = els_opcode;
    logo_pyld.s_id[0] = vha->d_id.b.al_pa;
    logo_pyld.s_id[1] = vha->d_id.b.area;
    logo_pyld.s_id[2] = vha->d_id.b.domain;
    host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
    memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);

    memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
        sizeof(struct els_logo_payload));

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        sp->free(sp);
        return QLA_FUNCTION_FAILED;
    }

    ql_dbg(ql_dbg_io, vha, 0x3074,
        "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
        sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
        fcport->d_id.b.area, fcport->d_id.b.al_pa);

    wait_for_completion(&elsio->u.els_logo.comp);

    sp->free(sp);
    return rval;
}

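/**
 * qla24xx_els_logo_iocb() - Build the ELS IOCB for a driver-generated
 * LOGO or PLOGI.
 * @sp: SRB carrying the ELS payload
 * @els_iocb: ELS IOCB to populate
 */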
static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
    scsi_qla_host_t *vha = sp->vha;
    struct srb_iocb *elsio = &sp->u.iocb_cmd;

    els_iocb->entry_type = ELS_IOCB_TYPE;
    els_iocb->entry_count = 1;
    els_iocb->sys_define = 0;
    els_iocb->entry_status = 0;
    els_iocb->handle = sp->handle;
    els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    els_iocb->tx_dsd_count = 1;
    els_iocb->vp_index = vha->vp_idx;
    els_iocb->sof_type = EST_SOFI3;
    els_iocb->rx_dsd_count = 0;
    els_iocb->opcode = elsio->u.els_logo.els_cmd;

    els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
    els_iocb->port_id[1] = sp->fcport->d_id.b.area;
    els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
    els_iocb->s_id[0] = vha->d_id.b.al_pa;
    els_iocb->s_id[1] = vha->d_id.b.area;
    els_iocb->s_id[2] = vha->d_id.b.domain;
    els_iocb->control_flags = 0;

    if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
        els_iocb->tx_byte_count = els_iocb->tx_len =
            sizeof(struct els_plogi_payload);
        els_iocb->tx_address[0] =
            cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
        els_iocb->tx_address[1] =
            cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));

        els_iocb->rx_dsd_count = 1;
        els_iocb->rx_byte_count = els_iocb->rx_len =
            sizeof(struct els_plogi_payload);
        els_iocb->rx_address[0] =
            cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
        els_iocb->rx_address[1] =
            cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));

        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
            "PLOGI ELS IOCB:\n");
        ql_dump_buffer(ql_log_info, vha, 0x0109,
            (uint8_t *)els_iocb, 0x70);
    } else {
        els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
        els_iocb->tx_address[0] =
            cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
        els_iocb->tx_address[1] =
            cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
        els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));

        els_iocb->rx_byte_count = 0;
        els_iocb->rx_address[0] = 0;
        els_iocb->rx_address[1] = 0;
        els_iocb->rx_len = 0;
    }

    sp->vha->qla_stats.control_requests++;
}

static void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
    srb_t *sp = data;
    fc_port_t *fcport = sp->fcport;
    struct scsi_qla_host *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags = 0;
    int res;

    ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
        "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
        sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);

    /* Abort the exchange */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    res = ha->isp_ops->abort_command(sp);
    ql_dbg(ql_dbg_io, vha, 0x3070,
        "mbx abort_command %s\n",
        (res == QLA_SUCCESS) ? "successful" : "failed");
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    sp->done(sp, QLA_FUNCTION_TIMEOUT);
}

static void
qla2x00_els_dcmd2_sp_done(void *ptr, int res)
{
    srb_t *sp = ptr;
    fc_port_t *fcport = sp->fcport;
    struct srb_iocb *lio = &sp->u.iocb_cmd;
    struct scsi_qla_host *vha = sp->vha;
    struct event_arg ea;
    struct qla_work_evt *e;

    ql_dbg(ql_dbg_disc, vha, 0x3072,
        "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
        sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);

    fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
    del_timer(&sp->u.iocb_cmd.timer);

    if (sp->flags & SRB_WAKEUP_ON_COMP)
        complete(&lio->u.els_plogi.comp);
    else {
        if (res) {
            set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
        } else {
            memset(&ea, 0, sizeof(ea));
            ea.fcport = fcport;
            ea.rc = res;
            ea.event = FCME_ELS_PLOGI_DONE;
            qla2x00_fcport_event_handler(vha, &ea);
        }

        e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
        if (!e) {
            struct srb_iocb *elsio = &sp->u.iocb_cmd;

            if (elsio->u.els_plogi.els_plogi_pyld)
                dma_free_coherent(&sp->vha->hw->pdev->dev,
                    elsio->u.els_plogi.tx_size,
                    elsio->u.els_plogi.els_plogi_pyld,
                    elsio->u.els_plogi.els_plogi_pyld_dma);

            if (elsio->u.els_plogi.els_resp_pyld)
                dma_free_coherent(&sp->vha->hw->pdev->dev,
                    elsio->u.els_plogi.rx_size,
                    elsio->u.els_plogi.els_resp_pyld,
                    elsio->u.els_plogi.els_resp_pyld_dma);
            sp->free(sp);
            return;
        }
        e->u.iosb.sp = sp;
        qla2x00_post_work(vha, e);
    }
}

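/**
 * qla24xx_els_dcmd2_iocb() - Issue a driver-generated ELS PLOGI.
 * @vha: HA context
 * @els_opcode: ELS command opcode to send
 * @fcport: remote port the PLOGI is addressed to
 * @wait: wait for completion when true
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED; -ENOMEM when no SRB
 * could be allocated.
 */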
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
    fc_port_t *fcport, bool wait)
{
    srb_t *sp;
    struct srb_iocb *elsio = NULL;
    struct qla_hw_data *ha = vha->hw;
    int rval = QLA_SUCCESS;
    void *ptr, *resp_ptr;
    dma_addr_t ptr_dma;

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        ql_log(ql_log_info, vha, 0x70e6,
            "SRB allocation failed\n");
        return -ENOMEM;
    }

    elsio = &sp->u.iocb_cmd;
    ql_dbg(ql_dbg_io, vha, 0x3073,
        "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);

    fcport->flags |= FCF_ASYNC_SENT;
    sp->type = SRB_ELS_DCMD;
    sp->name = "ELS_DCMD";
    sp->fcport = fcport;

    elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
    init_completion(&elsio->u.els_plogi.comp);
    if (wait)
        sp->flags = SRB_WAKEUP_ON_COMP;

    qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);

    sp->done = qla2x00_els_dcmd2_sp_done;
    elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;

    ptr = elsio->u.els_plogi.els_plogi_pyld =
        dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
            &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
    ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;

    if (!elsio->u.els_plogi.els_plogi_pyld) {
        rval = QLA_FUNCTION_FAILED;
        goto out;
    }

    resp_ptr = elsio->u.els_plogi.els_resp_pyld =
        dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
            &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);

    if (!elsio->u.els_plogi.els_resp_pyld) {
        rval = QLA_FUNCTION_FAILED;
        goto out;
    }

    ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);

    memset(ptr, 0, sizeof(struct els_plogi_payload));
    memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
    memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
        &ha->plogi_els_payld.data,
        sizeof(elsio->u.els_plogi.els_plogi_pyld->data));

    elsio->u.els_plogi.els_cmd = els_opcode;
    elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;

    ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
    ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
        (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        rval = QLA_FUNCTION_FAILED;
    } else {
        ql_dbg(ql_dbg_disc, vha, 0x3074,
            "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
            sp->name, sp->handle, fcport->loop_id,
            fcport->d_id.b24, vha->d_id.b24);
    }

    if (wait) {
        wait_for_completion(&elsio->u.els_plogi.comp);

        if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
            rval = QLA_FUNCTION_FAILED;
    } else {
        goto done;
    }

out:
    fcport->flags &= ~(FCF_ASYNC_SENT);
    if (elsio->u.els_plogi.els_plogi_pyld)
        dma_free_coherent(&sp->vha->hw->pdev->dev,
            elsio->u.els_plogi.tx_size,
            elsio->u.els_plogi.els_plogi_pyld,
            elsio->u.els_plogi.els_plogi_pyld_dma);

    if (elsio->u.els_plogi.els_resp_pyld)
        dma_free_coherent(&sp->vha->hw->pdev->dev,
            elsio->u.els_plogi.rx_size,
            elsio->u.els_plogi.els_resp_pyld,
            elsio->u.els_plogi.els_resp_pyld_dma);

    sp->free(sp);
done:
    return rval;
}

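/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a bsg_job.
 * @sp: SRB carrying the bsg_job
 * @els_iocb: ELS IOCB to populate
 */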
static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
    struct bsg_job *bsg_job = sp->u.bsg_job;
    struct fc_bsg_request *bsg_request = bsg_job->request;

    els_iocb->entry_type = ELS_IOCB_TYPE;
    els_iocb->entry_count = 1;
    els_iocb->sys_define = 0;
    els_iocb->entry_status = 0;
    els_iocb->handle = sp->handle;
    els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
    els_iocb->vp_index = sp->vha->vp_idx;
    els_iocb->sof_type = EST_SOFI3;
    els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);

    els_iocb->opcode =
        sp->type == SRB_ELS_CMD_RPT ?
        bsg_request->rqst_data.r_els.els_code :
        bsg_request->rqst_data.h_els.command_code;
    els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
    els_iocb->port_id[1] = sp->fcport->d_id.b.area;
    els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
    els_iocb->control_flags = 0;
    els_iocb->rx_byte_count =
        cpu_to_le32(bsg_job->reply_payload.payload_len);
    els_iocb->tx_byte_count =
        cpu_to_le32(bsg_job->request_payload.payload_len);

    els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    els_iocb->tx_len = cpu_to_le32(sg_dma_len
        (bsg_job->request_payload.sg_list));

    els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    els_iocb->rx_len = cpu_to_le32(sg_dma_len
        (bsg_job->reply_payload.sg_list));

    sp->vha->qla_stats.control_requests++;
}

static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    struct scatterlist *sg;
    int index;
    uint16_t tot_dsds;
    scsi_qla_host_t *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct bsg_job *bsg_job = sp->u.bsg_job;
    int loop_iteration = 0;
    int entry_count = 1;

    memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
    ct_iocb->entry_type = CT_IOCB_TYPE;
    ct_iocb->entry_status = 0;
    ct_iocb->handle1 = sp->handle;
    SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
    ct_iocb->status = cpu_to_le16(0);
    ct_iocb->control_flags = cpu_to_le16(0);
    ct_iocb->timeout = 0;
    ct_iocb->cmd_dsd_count =
        cpu_to_le16(bsg_job->request_payload.sg_cnt);
    ct_iocb->total_dsd_count =
        cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
    ct_iocb->req_bytecount =
        cpu_to_le32(bsg_job->request_payload.payload_len);
    ct_iocb->rsp_bytecount =
        cpu_to_le32(bsg_job->reply_payload.payload_len);

    ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->request_payload.sg_list)));
    ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

    ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
        (bsg_job->reply_payload.sg_list)));
    ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

    avail_dsds = 1;
    cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
    index = 0;
    tot_dsds = bsg_job->reply_payload.sg_cnt;

    for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Cont.
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                vha->hw->req_q_map[0]);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        loop_iteration++;
        avail_dsds--;
    }
    ct_iocb->entry_count = entry_count;

    sp->vha->qla_stats.control_requests++;
}

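/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB for ISP24xx.
 * @sp: SRB carrying the bsg_job
 * @ct_iocb: CT IOCB to populate
 */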
static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    struct scatterlist *sg;
    int index;
    uint16_t cmd_dsds, rsp_dsds;
    scsi_qla_host_t *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct bsg_job *bsg_job = sp->u.bsg_job;
    int entry_count = 1;
    cont_a64_entry_t *cont_pkt = NULL;

    ct_iocb->entry_type = CT_IOCB_TYPE;
    ct_iocb->entry_status = 0;
    ct_iocb->sys_define = 0;
    ct_iocb->handle = sp->handle;

    ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    ct_iocb->vp_index = sp->vha->vp_idx;
    ct_iocb->comp_status = cpu_to_le16(0);

    cmd_dsds = bsg_job->request_payload.sg_cnt;
    rsp_dsds = bsg_job->reply_payload.sg_cnt;

    ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
    ct_iocb->timeout = 0;
    ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
    ct_iocb->cmd_byte_count =
        cpu_to_le32(bsg_job->request_payload.payload_len);

    avail_dsds = 2;
    cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
    index = 0;

    for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
        dma_addr_t sle_dma;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Cont.
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(
                vha, ha->req_q_map[0]);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }

    index = 0;

    for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
        dma_addr_t sle_dma;

        /* Allocate additional continuation packets? */
        if (avail_dsds == 0) {
            /*
             * Five DSDs are available in the Cont.
             * Type 1 IOCB.
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
                ha->req_q_map[0]);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }

        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
    ct_iocb->entry_count = entry_count;
}

/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla82xx_start_scsi(srb_t *sp)
{
    int nseg;
    unsigned long flags;
    struct scsi_cmnd *cmd;
    uint32_t *clr_ptr;
    uint32_t index;
    uint32_t handle;
    uint16_t cnt;
    uint16_t req_cnt;
    uint16_t tot_dsds;
    struct device_reg_82xx __iomem *reg;
    uint32_t dbval;
    uint32_t *fcp_dl;
    uint8_t additional_cdb_len;
    struct ct6_dsd *ctx;
    struct scsi_qla_host *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = NULL;
    struct rsp_que *rsp = NULL;

    /* Setup device pointers. */
    reg = &ha->iobase->isp82;
    cmd = GET_CMD_SP(sp);
    req = vha->req;
    rsp = ha->rsp_q_map[0];

    /* So we know we haven't pci_map'ed anything yet */
    tot_dsds = 0;

    dbval = 0x04 | (ha->portnum << 5);

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req,
            rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x300c,
                "qla2x00_marker failed for cmd=%p.\n", cmd);
            return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }
    if (index == req->num_outstanding_cmds)
        goto queuing_error;

    /* Map the sg table so we have an accurate count of sg entries needed */
    if (scsi_sg_count(cmd)) {
        nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
            scsi_sg_count(cmd), cmd->sc_data_direction);
        if (unlikely(!nseg))
            goto queuing_error;
    } else
        nseg = 0;

    tot_dsds = nseg;

    if (tot_dsds > ql2xshiftctondsd) {
        struct cmd_type_6 *cmd_pkt;
        uint16_t more_dsd_lists = 0;
        struct dsd_dma *dsd_ptr;
        uint16_t i;

        more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
        if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
            ql_dbg(ql_dbg_io, vha, 0x300d,
                "Num of DSD list %d is more than %d for cmd=%p.\n",
                more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
                cmd);
            goto queuing_error;
        }

        if (more_dsd_lists <= ha->gbl_dsd_avail)
            goto sufficient_dsds;
        else
            more_dsd_lists -= ha->gbl_dsd_avail;

        for (i = 0; i < more_dsd_lists; i++) {
            dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
            if (!dsd_ptr) {
                ql_log(ql_log_fatal, vha, 0x300e,
                    "Failed to allocate memory for dsd_dma "
                    "for cmd=%p.\n", cmd);
                goto queuing_error;
            }

            dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
                GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
            if (!dsd_ptr->dsd_addr) {
                kfree(dsd_ptr);
                ql_log(ql_log_fatal, vha, 0x300f,
                    "Failed to allocate memory for dsd_addr "
                    "for cmd=%p.\n", cmd);
                goto queuing_error;
            }
            list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
            ha->gbl_dsd_avail++;
        }

sufficient_dsds:
        req_cnt = 1;

        if (req->cnt < (req_cnt + 2)) {
            cnt = (uint16_t)RD_REG_DWORD_RELAXED(
                &reg->req_q_out[0]);
            if (req->ring_index < cnt)
                req->cnt = cnt - req->ring_index;
            else
                req->cnt = req->length -
                    (req->ring_index - cnt);
            if (req->cnt < (req_cnt + 2))
                goto queuing_error;
        }

        ctx = sp->u.scmd.ctx =
            mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
        if (!ctx) {
            ql_log(ql_log_fatal, vha, 0x3010,
                "Failed to allocate ctx for cmd=%p.\n", cmd);
            goto queuing_error;
        }

        memset(ctx, 0, sizeof(struct ct6_dsd));
        ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
            GFP_ATOMIC, &ctx->fcp_cmnd_dma);
        if (!ctx->fcp_cmnd) {
            ql_log(ql_log_fatal, vha, 0x3011,
                "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
            goto queuing_error;
        }

        /* Initialize the DSD list and dma handle */
        INIT_LIST_HEAD(&ctx->dsd_list);
        ctx->dsd_use_cnt = 0;

        if (cmd->cmd_len > 16) {
            additional_cdb_len = cmd->cmd_len - 16;
            if ((cmd->cmd_len % 4) != 0) {
                /* SCSI command bigger than 16 bytes must be
                 * a multiple of 4
                 */
                ql_log(ql_log_warn, vha, 0x3012,
                    "scsi cmd len %d not multiple of 4 "
                    "for cmd=%p.\n", cmd->cmd_len, cmd);
                goto queuing_error_fcp_cmnd;
            }
            ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
            additional_cdb_len = 0;
            ctx->fcp_cmnd_len = 12 + 16 + 4;
        }

        cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number*/
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Build IOCB segments */
        if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
            goto queuing_error_fcp_cmnd;

        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* build FCP_CMND IU */
        int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
        ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

        if (cmd->sc_data_direction == DMA_TO_DEVICE)
            ctx->fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
            ctx->fcp_cmnd->additional_cdb_len |= 2;

        /* Populate the FCP_PRIO. */
        if (ha->flags.fcp_prio_enabled)
            ctx->fcp_cmnd->task_attribute |=
                sp->fcport->fcp_prio << 3;

        memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

        fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
            additional_cdb_len);
        *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] =
            cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
        cmd_pkt->fcp_cmnd_dseg_address[1] =
            cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

        sp->flags |= SRB_FCP_CMND_DMA_VALID;
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where
         * completion should happen
         */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
    } else {
        struct cmd_type_7 *cmd_pkt;

        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
            cnt = (uint16_t)RD_REG_DWORD_RELAXED(
                &reg->req_q_out[0]);
            if (req->ring_index < cnt)
                req->cnt = cnt - req->ring_index;
            else
                req->cnt = req->length -
                    (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
            goto queuing_error;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number*/
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->vha->vp_idx;

        int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
            sizeof(cmd_pkt->lun));

        /* Populate the FCP_PRIO. */
        if (ha->flags.fcp_prio_enabled)
            cmd_pkt->task |= sp->fcport->fcp_prio << 3;

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where
         * completion should happen.
         */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
    }
    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    cmd->host_scribble = (unsigned char *)(unsigned long)handle;
    req->cnt -= req_cnt;
    wmb();

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    sp->flags |= SRB_DMA_VALID;

    /* Set chip new ring index. */
    /* write, read and verify logic */
    dbval = dbval | (req->id << 8) | (req->ring_index << 16);
    if (ql2xdbwr)
        qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
    else {
        WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
        wmb();
        while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
            WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
            wmb();
        }
    }

    /* Manage unprocessed RIO/ZIO commands in response queue. */
    if (vha->flags.process_response_queue &&
        rsp->ring_ptr->signature != RESPONSE_PROCESSED)
        qla24xx_process_response_queue(vha, rsp);

    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return QLA_SUCCESS;

queuing_error_fcp_cmnd:
    dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
    if (tot_dsds)
        scsi_dma_unmap(cmd);

    if (sp->u.scmd.ctx) {
        mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
        sp->u.scmd.ctx = NULL;
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return QLA_FUNCTION_FAILED;
}

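/**
 * qla24xx_abort_iocb() - Prepare an abort IOCB for an outstanding command.
 * @sp: SRB carrying the abort parameters
 * @abt_iocb: abort IOCB to populate
 */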
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
    struct srb_iocb *aio = &sp->u.iocb_cmd;
    scsi_qla_host_t *vha = sp->vha;
    struct req_que *req = sp->qpair->req;

    memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
    abt_iocb->entry_type = ABORT_IOCB_TYPE;
    abt_iocb->entry_count = 1;
    abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
    if (sp->fcport) {
        abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
        abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
    }
    abt_iocb->handle_to_abort =
        cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
            aio->u.abt.cmd_hndl));
    abt_iocb->vp_index = vha->vp_idx;
    abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
    /* Send the command to the firmware */
    wmb();
}

static void
qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
    int i, sz;

    mbx->entry_type = MBX_IOCB_TYPE;
    mbx->handle = sp->handle;
    sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));

    for (i = 0; i < sz; i++)
        mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
}

static void
qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
{
    sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
    qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
    ct_pkt->handle = sp->handle;
}

static void qla2x00_send_notify_ack_iocb(srb_t *sp,
    struct nack_to_isp *nack)
{
    struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;

    nack->entry_type = NOTIFY_ACK_TYPE;
    nack->entry_count = 1;
    nack->ox_id = ntfy->ox_id;

    nack->u.isp24.handle = sp->handle;
    nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
    if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
        nack->u.isp24.flags = ntfy->u.isp24.flags &
            cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
    }
    nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
    nack->u.isp24.status = ntfy->u.isp24.status;
    nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
    nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
    nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
    nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
    nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
    nack->u.isp24.srr_flags = 0;
    nack->u.isp24.srr_reject_code = 0;
    nack->u.isp24.srr_reject_code_expl = 0;
    nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}

/*
 * Build NVME LS request
 */
static int
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
    struct srb_iocb *nvme;
    int rval = QLA_SUCCESS;

    nvme = &sp->u.iocb_cmd;
    cmd_pkt->entry_type = PT_LS4_REQUEST;
    cmd_pkt->entry_count = 1;
    cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;

    cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
    cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

    cmd_pkt->tx_dseg_count = 1;
    cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
    cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
    cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
    cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));

    cmd_pkt->rx_dseg_count = 1;
    cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
    cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
    cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
    cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));

    return rval;
}

static void
qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
{
    int map, pos;

    vce->entry_type = VP_CTRL_IOCB_TYPE;
    vce->handle = sp->handle;
    vce->entry_count = 1;
    vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
    vce->vp_count = cpu_to_le16(1);

    /*
     * index map in firmware starts with 1; decrement index
     * this is ok as we never use index 0
     */
    map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
    pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
    vce->vp_idx_map[map] |= 1 << pos;
}

static void
qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
    logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
    logio->control_flags =
        cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);

    logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
    logio->port_id[0] = sp->fcport->d_id.b.al_pa;
    logio->port_id[1] = sp->fcport->d_id.b.area;
    logio->port_id[2] = sp->fcport->d_id.b.domain;
    logio->vp_index = sp->fcport->vha->vp_idx;
}

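/**
 * qla2x00_start_sp() - Allocate an IOCB for @sp, build it according to
 * the SRB type, and hand it to the firmware.
 * @sp: SRB to start
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED if no IOCB could be
 * allocated.
 */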
int
qla2x00_start_sp(srb_t *sp)
{
    int rval;
    scsi_qla_host_t *vha = sp->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_qpair *qp = sp->qpair;
    void *pkt;
    unsigned long flags;

    rval = QLA_FUNCTION_FAILED;
    spin_lock_irqsave(qp->qp_lock_ptr, flags);
    pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
    if (!pkt) {
        ql_log(ql_log_warn, vha, 0x700c,
            "qla2x00_alloc_iocbs failed.\n");
        goto done;
    }

    rval = QLA_SUCCESS;
    switch (sp->type) {
    case SRB_LOGIN_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_login_iocb(sp, pkt) :
            qla2x00_login_iocb(sp, pkt);
        break;
    case SRB_PRLI_CMD:
        qla24xx_prli_iocb(sp, pkt);
        break;
    case SRB_LOGOUT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_logout_iocb(sp, pkt) :
            qla2x00_logout_iocb(sp, pkt);
        break;
    case SRB_ELS_CMD_RPT:
    case SRB_ELS_CMD_HST:
        qla24xx_els_iocb(sp, pkt);
        break;
    case SRB_CT_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_ct_iocb(sp, pkt) :
            qla2x00_ct_iocb(sp, pkt);
        break;
    case SRB_ADISC_CMD:
        IS_FWI2_CAPABLE(ha) ?
            qla24xx_adisc_iocb(sp, pkt) :
            qla2x00_adisc_iocb(sp, pkt);
        break;
    case SRB_TM_CMD:
        IS_QLAFX00(ha) ?
            qlafx00_tm_iocb(sp, pkt) :
            qla24xx_tm_iocb(sp, pkt);
        break;
    case SRB_FXIOCB_DCMD:
    case SRB_FXIOCB_BCMD:
        qlafx00_fxdisc_iocb(sp, pkt);
        break;
    case SRB_NVME_LS:
        qla_nvme_ls(sp, pkt);
        break;
    case SRB_ABT_CMD:
        IS_QLAFX00(ha) ?
            qlafx00_abort_iocb(sp, pkt) :
            qla24xx_abort_iocb(sp, pkt);
        break;
    case SRB_ELS_DCMD:
        qla24xx_els_logo_iocb(sp, pkt);
        break;
    case SRB_CT_PTHRU_CMD:
        qla2x00_ctpthru_cmd_iocb(sp, pkt);
        break;
    case SRB_MB_IOCB:
        qla2x00_mb_iocb(sp, pkt);
        break;
    case SRB_NACK_PLOGI:
    case SRB_NACK_PRLI:
    case SRB_NACK_LOGO:
        qla2x00_send_notify_ack_iocb(sp, pkt);
        break;
    case SRB_CTRL_VP:
        qla25xx_ctrlvp_iocb(sp, pkt);
        break;
    case SRB_PRLO_CMD:
        qla24xx_prlo_iocb(sp, pkt);
        break;
    default:
        break;
    }

    wmb();
    qla2x00_start_iocbs(vha, qp->req);
done:
    spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
    return rval;
}

static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
    uint16_t avail_dsds;
    uint32_t *cur_dsd;
    uint32_t req_data_len = 0;
    uint32_t rsp_data_len = 0;
    struct scatterlist *sg;
    int index;
    int entry_count = 1;
    struct bsg_job *bsg_job = sp->u.bsg_job;

    /* Update entry type to indicate bidir command */
    *((uint32_t *)(&cmd_pkt->entry_type)) =
        cpu_to_le32(COMMAND_BIDIRECTIONAL);

    /* Set the transfer direction; in this case set both flags.
     * Also set the BD_WRAP_BACK flag: firmware will take care of
     * assigning DID=SID for outgoing pkts.
     */
    cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
    cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
    cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
        BD_WRAP_BACK);

    req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
    cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
    cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
    cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

    vha->bidi_stats.transfer_bytes += req_data_len;
    vha->bidi_stats.io_count++;

    vha->qla_stats.output_bytes += req_data_len;
    vha->qla_stats.output_requests++;

    /* Only one dsd is available for bidirectional IOCB, remaining dsds
     * are bundled in continuation iocb
     */
    avail_dsds = 1;
    cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

    index = 0;

    for_each_sg(bsg_job->request_payload.sg_list, sg,
        bsg_job->request_payload.sg_cnt, index) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets */
        if (avail_dsds == 0) {
            /* Continuation type 1 IOCB can accommodate
             * 5 DSDS
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }
        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
    /* For read requests the DSDs always go to the continuation IOCB
     * and follow the write DSDs. If there is room on the current IOCB
     * then it is added to that IOCB, else a new continuation IOCB is
     * allocated.
     */
    for_each_sg(bsg_job->reply_payload.sg_list, sg,
        bsg_job->reply_payload.sg_cnt, index) {
        dma_addr_t sle_dma;
        cont_a64_entry_t *cont_pkt;

        /* Allocate additional continuation packets */
        if (avail_dsds == 0) {
            /* Continuation type 1 IOCB can accommodate
             * 5 DSDS
             */
            cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
            cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
            avail_dsds = 5;
            entry_count++;
        }
        sle_dma = sg_dma_address(sg);
        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
        *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
        avail_dsds--;
    }
    /* This value should be same as number of IOCB required for this cmd */
    cmd_pkt->entry_count = entry_count;
}

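/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the firmware.
 * @sp: SRB carrying the bsg_job
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY when no handle or
 * ring room is available, or EXT_STATUS_MAILBOX if the marker IOCB
 * fails.
 */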
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    uint32_t handle;
    uint32_t index;
    uint16_t req_cnt;
    uint16_t cnt;
    uint32_t *clr_ptr;
    struct cmd_bidir *cmd_pkt = NULL;
    struct rsp_que *rsp;
    struct req_que *req;
    int rval = EXT_STATUS_OK;

    rval = QLA_SUCCESS;

    rsp = ha->rsp_q_map[0];
    req = vha->req;

    /* Send marker if required */
    if (vha->marker_needed != 0) {
        if (qla2x00_marker(vha, req,
            rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
            return EXT_STATUS_MAILBOX;
        vha->marker_needed = 0;
    }

    /* Acquire ring specific lock */
    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Check for room in outstanding command list. */
    handle = req->current_outstanding_cmd;
    for (index = 1; index < req->num_outstanding_cmds; index++) {
        handle++;
        if (handle == req->num_outstanding_cmds)
            handle = 1;
        if (!req->outstanding_cmds[handle])
            break;
    }

    if (index == req->num_outstanding_cmds) {
        rval = EXT_STATUS_BUSY;
        goto queuing_error;
    }

    /* Calculate number of IOCB required */
    req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

    /* Check for room on request queue. */
    if (req->cnt < req_cnt + 2) {
        cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
            RD_REG_DWORD_RELAXED(req->req_q_out);
        if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
        else
            req->cnt = req->length -
                (req->ring_index - cnt);
    }
    if (req->cnt < req_cnt + 2) {
        rval = EXT_STATUS_BUSY;
        goto queuing_error;
    }

    cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
    cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

    /* Zero out remaining portion of packet. */
    /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
    clr_ptr = (uint32_t *)cmd_pkt + 2;
    memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

    /* Set NPORT-ID (of vha)*/
    cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
    cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
    cmd_pkt->port_id[1] = vha->d_id.b.area;
    cmd_pkt->port_id[2] = vha->d_id.b.domain;

    qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
    cmd_pkt->entry_status = (uint8_t) rsp->id;
    /* Build command packet. */
    req->current_outstanding_cmd = handle;
    req->outstanding_cmds[handle] = sp;
    sp->handle = handle;
    req->cnt -= req_cnt;

    /* Send the command to the firmware */
    wmb();
    qla2x00_start_iocbs(vha, req);
queuing_error:
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return rval;
}