/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
        struct rsp_que *);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI request block
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }

        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

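/*
 * qla2x00_calc_iocbs_32() example: dsds = 17 takes one Command Type 2
 * IOCB (3 DSDs) plus (17 - 3) / 7 = 2 Continuation Type 0 IOCBs, for 3
 * entries in all; dsds = 18 leaves a remainder of 1 and rounds up to 4.
 */
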
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

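/*
 * qla2x00_calc_iocbs_64() example: dsds = 17 takes one Command Type 3
 * IOCB (2 DSDs) plus (17 - 2) / 5 = 3 Continuation Type 1 IOCBs, for 4
 * entries in all.
 */
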
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

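/*
 * Both prep helpers above hand back the next request-ring slot with the
 * entry type already set; the IOCB builders below call them each time a
 * command's data segment descriptors overflow into a continuation entry.
 */
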
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        struct req_que *req;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = sp->que;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        struct req_que *req;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = sp->que;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

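/*
 * Each 64bit DSD above is three little-endian words: address bits 31:0,
 * address bits 63:32, then the length. A 4096-byte segment at DMA
 * address 0x123456000 is emitted as 0x23456000, 0x00000001, 0x00001000.
 */
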
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int ret, nseg;
        unsigned long flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        cmd_entry_t *cmd_pkt;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
                    != QLA_SUCCESS)
                        return (QLA_FUNCTION_FAILED);
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

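        /*
         * The search below begins one past the last handle issued and
         * wraps around outstanding_cmds[]; handles wrap to 1, never 0,
         * so a zero handle is free to mean "no command".
         */
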
        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }

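        /*
         * Free-space example: with a 4096-entry ring, ring_index = 4000
         * (driver in-pointer) and cnt = 100 (chip out-pointer), free
         * space is 4096 - (4000 - 100) = 196 entries; were ring_index
         * below cnt, it would simply be cnt - ring_index.
         */
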
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->que = req;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

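        /*
         * Stashing the handle in host_scribble lets the completion path
         * map the handle returned by the firmware back to this
         * scsi_cmnd and its srb.
         */
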
        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

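/*
 * Callers reach the start_scsi routines through the ISP operations
 * table from the queuecommand path in qla_os.c, e.g.
 *
 *      rval = ha->isp_ops->start_scsi(sp);
 *
 * qla2x00_start_scsi() is the hook for the pre-FWI2 ISPs, and
 * qla24xx_start_scsi() below serves the FWI2-capable parts.
 */
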
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
        if (mrk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                    __func__, base_vha->host_no));

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

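/*
 * The pairing above follows the usual __locked/unlocked split:
 * __qla2x00_marker() expects hardware_lock to be held by the caller,
 * while qla2x00_marker() wraps it with the lock for everyone else.
 */
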
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        request_t *pkt = NULL;
        uint32_t *dword_ptr;
        uint32_t timer;
        uint16_t cnt;
        uint16_t req_cnt = 1;

        /* Wait 1 second for slot. */
        for (timer = HZ; timer; timer--) {
                if ((req_cnt + 2) >= req->cnt) {
                        /* Calculate number of free request entries. */
                        if (ha->mqenable)
                                cnt = (uint16_t)
                                    RD_REG_DWORD(&reg->isp25mq.req_q_out);
                        else if (IS_FWI2_CAPABLE(ha))
                                cnt = (uint16_t)RD_REG_DWORD(
                                    &reg->isp24.req_q_out);
                        else
                                cnt = qla2x00_debounce_register(
                                    ISP_REQ_Q_OUT(ha, &reg->isp));
                        if (req->ring_index < cnt)
                                req->cnt = cnt - req->ring_index;
                        else
                                req->cnt = req->length -
                                    (req->ring_index - cnt);
                }
                /* If room for request in request ring. */
                if ((req_cnt + 2) < req->cnt) {
                        req->cnt--;
                        pkt = req->ring_ptr;

                        /* Zero out packet. */
                        dword_ptr = (uint32_t *)pkt;
                        for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
                                *dword_ptr++ = 0;

                        /* Set system defined field. */
                        pkt->sys_define = (uint8_t)req->ring_index;

                        /* Set entry count. */
                        pkt->entry_count = 1;

                        break;
                }

                /* Release ring specific lock */
                spin_unlock_irq(&ha->hardware_lock);

                udelay(2);      /* 2 us */

                /* Check for pending interrupts. */
                /* During init we issue marker directly */
                if (!vha->marker_needed && !vha->flags.init_done)
                        qla2x00_poll(rsp);
                spin_lock_irq(&ha->hardware_lock);
        }
        if (!pkt)
                DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));

        return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

        DEBUG5(printk("%s(): IOCB data:\n", __func__));
        DEBUG5(qla2x00_dump_buffer(
            (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        if (ha->mqenable) {
                WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
                RD_REG_DWORD(&ioreg->hccr);     /* PCI Posting. */
        } else if (IS_FWI2_CAPABLE(ha)) {
                WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
                WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                    req->ring_index);
                RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return (iocbs);
}

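/*
 * qla24xx_calc_iocbs() example: dsds = 11 takes one command IOCB
 * (1 DSD) plus (11 - 1) / 5 = 2 Continuation Type 1 IOCBs, for 3
 * entries in all.
 */
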
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        struct req_que *req;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = sp->que;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int ret, nseg;
        unsigned long flags;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        struct cmd_type_7 *cmd_pkt;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        uint16_t que_id;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = sp->cmd;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;

        /* Setup device pointers. */
        ret = 0;
        que_id = vha->req_ques[0];

        req = ha->req_q_map[que_id];
        sp->que = req;

        rsp = ha->rsp_q_map[que_id];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
                    != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        req_cnt = qla24xx_calc_iocbs(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = handle;

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vp_idx;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

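        /*
         * The LUN and CDB are byte streams that must arrive big-endian
         * in the FCP_CMND payload, while the firmware moves the IOCB as
         * little-endian 32bit words; host_to_fcp_swap() pre-swaps each
         * word so the bytes land on the wire in the right order.
         */
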
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);  /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}