/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
    struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command SRB
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
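/*
 * Worked example (illustrative, not part of the original source): for
 * dsds = 10 the first three descriptors ride in the Command Type 2 IOCB
 * and the remaining seven exactly fill one Continuation Type 0 IOCB, so
 * two entries suffice; dsds = 11 leaves a remainder of one and needs a
 * third entry.
 */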
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
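/*
 * Worked example (illustrative, not part of the original source): for
 * dsds = 12 the first two descriptors ride in the Command Type 3 IOCB
 * and the remaining ten fill exactly two Continuation Type 1 IOCBs
 * (five DSDs each), for three entries in total.
 */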
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
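/*
 * Note on the ring-index adjustment in both continuation helpers above:
 * the request ring is a fixed-size circular buffer, so once ring_index
 * reaches req->length it wraps to 0 and ring_ptr is reset to the ring
 * base; e.g. with a 128-entry ring (an illustrative size, not a driver
 * constant), entry 127 is followed by entry 0.
 */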
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
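/*
 * Illustrative DSD layout (not part of the original source): with
 * tot_dsds = 12, qla2x00_build_scsi_iocbs_32() places three DSDs in the
 * Command Type 2 IOCB, then seven in a first Continuation Type 0 IOCB
 * and the last two in a second -- the three entries that
 * qla2x00_calc_iocbs_32(12) reports.
 */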
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
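/*
 * Illustrative DSD layout (not part of the original source): with
 * tot_dsds = 12, qla2x00_build_scsi_iocbs_64() places two DSDs in the
 * Command Type 3 IOCB and five in each of two Continuation Type 1 IOCBs
 * -- the three entries that qla2x00_calc_iocbs_64(12) reports. Each
 * 64-bit DSD consumes three words: address low, address high, length.
 */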
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
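/*
 * Free-space check above, worked with illustrative numbers: the request
 * ring is circular, so with req->length == 128, req->ring_index == 120
 * and a hardware out-pointer cnt == 8, the driver computes
 * req->cnt = 128 - (120 - 8) = 16 free entries; had the out-pointer led
 * the in-pointer (ring_index < cnt), it would use cnt - ring_index
 * instead.
 */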
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
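/*
 * Typical use, as in qla2x00_start_scsi() above -- synchronize all
 * outstanding commands before queuing new work:
 *
 *	if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
 *		return (QLA_FUNCTION_FAILED);
 */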
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t *pkt = NULL;
	uint16_t cnt;
	uint32_t *dword_ptr;
	uint32_t timer;
	uint16_t req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
				    RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
					    ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);	/* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
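/*
 * Worked example (illustrative, not part of the original source): for
 * dsds = 6 the first descriptor rides in the Command Type 7 IOCB and
 * the remaining five exactly fill one Continuation Type 1 IOCB, so two
 * entries suffice; dsds = 7 would need a third.
 */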
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
		    != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ql2xmultique_tag && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
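/*
 * Illustrative mapping (not part of the original source): with
 * ql2xmultique_tag set and, say, ha->max_rsp_queues == 4, a command
 * issued from CPU 2 completes on rsp_q_map[3], while CPU 3
 * (>= max_rsp_queues - 1) or a negative affinity falls back to the
 * default queue, rsp_q_map[0].
 */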