/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
    struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}
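/*
 * Note: besides returning the CF_* flag, qla2x00_get_cmd_direction()
 * bumps the per-HBA input/output byte statistics as a side effect, so
 * it is meant to be called once per command.
 */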
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
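/*
 * Worked example: with dsds = 17, the Command Type 2 IOCB holds the
 * first 3 DSDs and the remaining 14 fill exactly two 7-DSD
 * Continuation Type 0 IOCBs, so the function returns 1 + 2 = 3.
 */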
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
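/*
 * Worked example: with dsds = 13, the Command Type 3 IOCB holds the
 * first 2 DSDs; the remaining 11 need two full 5-DSD Continuation
 * Type 1 IOCBs plus a partial third, so the function returns 4.
 */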
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
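/*
 * In both prep helpers above, the 32-bit store that loads the packet
 * defaults covers the four adjacent one-byte header fields of the IOCB
 * (entry_type, entry_count, sys_define, entry_status), so a single
 * write sets the type and zeroes the other three.
 */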
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
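/*
 * A 64-bit DSD takes three 32-bit words (address LSD, address MSD,
 * length) versus two for the 32-bit variant, which is why the 64-bit
 * command and continuation IOCBs hold fewer DSDs (2 and 5) than their
 * 32-bit counterparts (3 and 7).
 */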
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
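/*
 * The free-space check above computes req->cnt as the ring distance
 * between the driver's in-index and the firmware's out pointer, and
 * insists on req_cnt + 2 spare entries; keeping a small cushion means
 * the ring is never driven completely full, avoiding the ambiguity of
 * in == out meaning either empty or full.
 */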
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
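/*
 * qla2x00_marker() is the locking wrapper around __qla2x00_marker():
 * it takes hardware_lock itself, while callers that already hold the
 * lock call the double-underscore variant directly.
 */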
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
				    RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
					    ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
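/*
 * Note: while polling for a free slot above, the routine drops
 * hardware_lock around each 2 us delay so interrupt processing can
 * drain the ring, and re-takes it before looping; the lock is
 * therefore held again on return, as the caller contract requires.
 */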
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
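/*
 * Worked example: with dsds = 11, the command IOCB holds the first DSD
 * and the remaining 10 fill exactly two 5-DSD Continuation Type 1
 * IOCBs, so the function returns 1 + 2 = 3.
 */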
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
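/*
 * Note: qla24xx_start_scsi() stores rsp->id in the IOCB's entry_status
 * byte (see above) so the firmware knows which response queue should
 * receive the completion.
 */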
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ql2xmultique_tag && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
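/*
 * With ql2xmultique_tag set, the submitting CPU picks one of the extra
 * response queues (indices 1 .. max_rsp_queues - 1); out-of-range
 * affinities fall back to the default queue 0.
 */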