drivers/scsi/qla2xxx/qla_iocb.c (Linux 4.16.11)
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
13 #include <scsi/scsi_tcq.h>
15 /**
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
17 * @sp: SRB containing the SCSI command
19 * Returns the proper CF_* direction based on CDB.
20 */
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t *sp)
24 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
26 struct scsi_qla_host *vha = sp->vha;
28 cflags = 0;
30 /* Set transfer direction */
31 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE;
33 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
34 vha->qla_stats.output_requests++;
35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ;
37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38 vha->qla_stats.input_requests++;
40 return (cflags);
43 /**
44 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45 * Continuation Type 0 IOCBs to allocate.
47 * @dsds: number of data segment descriptors needed
49 * Returns the number of IOCB entries needed to store @dsds.
50 */
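/*
 * Worked example of the arithmetic below: dsds = 10 fits in one
 * Command Type 2 IOCB (3 DSDs) plus one Continuation Type 0 IOCB
 * (7 DSDs), i.e. 2 entries.
 */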
51 uint16_t
52 qla2x00_calc_iocbs_32(uint16_t dsds)
54 uint16_t iocbs;
56 iocbs = 1;
57 if (dsds > 3) {
58 iocbs += (dsds - 3) / 7;
59 if ((dsds - 3) % 7)
60 iocbs++;
62 return (iocbs);
65 /**
66 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67 * Continuation Type 1 IOCBs to allocate.
69 * @dsds: number of data segment descriptors needed
71 * Returns the number of IOCB entries needed to store @dsds.
72 */
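/*
 * Worked example of the arithmetic below: dsds = 10 needs one
 * Command Type 3 IOCB (2 DSDs) plus ceil(8 / 5) = 2 Continuation
 * Type 1 IOCBs, i.e. 3 entries.
 */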
73 uint16_t
74 qla2x00_calc_iocbs_64(uint16_t dsds)
76 uint16_t iocbs;
78 iocbs = 1;
79 if (dsds > 2) {
80 iocbs += (dsds - 2) / 5;
81 if ((dsds - 2) % 5)
82 iocbs++;
84 return (iocbs);
87 /**
88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
89 * @vha: HA context
91 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96 cont_entry_t *cont_pkt;
97 struct req_que *req = vha->req;
98 /* Adjust ring index. */
99 req->ring_index++;
100 if (req->ring_index == req->length) {
101 req->ring_index = 0;
102 req->ring_ptr = req->ring;
103 } else {
104 req->ring_ptr++;
107 cont_pkt = (cont_entry_t *)req->ring_ptr;
109 /* Load packet defaults. */
110 *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
112 return (cont_pkt);
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117 * @vha: HA context
119 * Returns a pointer to the continuation type 1 IOCB packet.
121 static inline cont_a64_entry_t *
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
124 cont_a64_entry_t *cont_pkt;
126 /* Adjust ring index. */
127 req->ring_index++;
128 if (req->ring_index == req->length) {
129 req->ring_index = 0;
130 req->ring_ptr = req->ring;
131 } else {
132 req->ring_ptr++;
135 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
137 /* Load packet defaults. */
138 *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
139 cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
140 cpu_to_le32(CONTINUE_A64_TYPE);
142 return (cont_pkt);
145 inline int
146 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
148 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
149 uint8_t guard = scsi_host_get_guard(cmd->device->host);
151 /* We always use DIFF Bundling for best performance */
152 *fw_prot_opts = 0;
154 /* Translate SCSI opcode to a protection opcode */
155 switch (scsi_get_prot_op(cmd)) {
156 case SCSI_PROT_READ_STRIP:
157 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
158 break;
159 case SCSI_PROT_WRITE_INSERT:
160 *fw_prot_opts |= PO_MODE_DIF_INSERT;
161 break;
162 case SCSI_PROT_READ_INSERT:
163 *fw_prot_opts |= PO_MODE_DIF_INSERT;
164 break;
165 case SCSI_PROT_WRITE_STRIP:
166 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
167 break;
168 case SCSI_PROT_READ_PASS:
169 case SCSI_PROT_WRITE_PASS:
170 if (guard & SHOST_DIX_GUARD_IP)
171 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
172 else
173 *fw_prot_opts |= PO_MODE_DIF_PASS;
174 break;
175 default: /* Normal Request */
176 *fw_prot_opts |= PO_MODE_DIF_PASS;
177 break;
180 return scsi_prot_sg_count(cmd);
184 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
185 * capable IOCB types.
187 * @sp: SRB command to process
188 * @cmd_pkt: Command type 2 IOCB
189 * @tot_dsds: Total number of segments to transfer
191 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192 uint16_t tot_dsds)
194 uint16_t avail_dsds;
195 uint32_t *cur_dsd;
196 scsi_qla_host_t *vha;
197 struct scsi_cmnd *cmd;
198 struct scatterlist *sg;
199 int i;
201 cmd = GET_CMD_SP(sp);
203 /* Update entry type to indicate Command Type 2 IOCB */
204 *((uint32_t *)(&cmd_pkt->entry_type)) =
205 cpu_to_le32(COMMAND_TYPE);
207 /* No data transfer */
208 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
209 cmd_pkt->byte_count = cpu_to_le32(0);
210 return;
213 vha = sp->vha;
214 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
216 /* Three DSDs are available in the Command Type 2 IOCB */
217 avail_dsds = 3;
218 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
220 /* Load data segments */
221 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
222 cont_entry_t *cont_pkt;
224 /* Allocate additional continuation packets? */
225 if (avail_dsds == 0) {
227 * Seven DSDs are available in the Continuation
228 * Type 0 IOCB.
230 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
231 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
232 avail_dsds = 7;
235 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
236 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
237 avail_dsds--;
242 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
243 * capable IOCB types.
245 * @sp: SRB command to process
246 * @cmd_pkt: Command type 3 IOCB
247 * @tot_dsds: Total number of segments to transfer
249 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250 uint16_t tot_dsds)
252 uint16_t avail_dsds;
253 uint32_t *cur_dsd;
254 scsi_qla_host_t *vha;
255 struct scsi_cmnd *cmd;
256 struct scatterlist *sg;
257 int i;
259 cmd = GET_CMD_SP(sp);
261 /* Update entry type to indicate Command Type 3 IOCB */
262 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
264 /* No data transfer */
265 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
266 cmd_pkt->byte_count = cpu_to_le32(0);
267 return;
270 vha = sp->vha;
271 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
273 /* Two DSDs are available in the Command Type 3 IOCB */
274 avail_dsds = 2;
275 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
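/*
 * Note: each 64-bit DSD written in the loop below occupies three
 * 32-bit words (address low, address high, length), versus the two
 * words per DSD used by the 32-bit path above.
 */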
277 /* Load data segments */
278 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
279 dma_addr_t sle_dma;
280 cont_a64_entry_t *cont_pkt;
282 /* Allocate additional continuation packets? */
283 if (avail_dsds == 0) {
285 * Five DSDs are available in the Continuation
286 * Type 1 IOCB.
288 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
289 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
290 avail_dsds = 5;
293 sle_dma = sg_dma_address(sg);
294 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
295 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
296 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
297 avail_dsds--;
302 * qla2x00_start_scsi() - Send a SCSI command to the ISP
303 * @sp: command to send to the ISP
305 * Returns non-zero if a failure occurred, else zero.
306 */
307 int
308 qla2x00_start_scsi(srb_t *sp)
309 {
310 int nseg;
311 unsigned long flags;
312 scsi_qla_host_t *vha;
313 struct scsi_cmnd *cmd;
314 uint32_t *clr_ptr;
315 uint32_t index;
316 uint32_t handle;
317 cmd_entry_t *cmd_pkt;
318 uint16_t cnt;
319 uint16_t req_cnt;
320 uint16_t tot_dsds;
321 struct device_reg_2xxx __iomem *reg;
322 struct qla_hw_data *ha;
323 struct req_que *req;
324 struct rsp_que *rsp;
326 /* Setup device pointers. */
327 vha = sp->vha;
328 ha = vha->hw;
329 reg = &ha->iobase->isp;
330 cmd = GET_CMD_SP(sp);
331 req = ha->req_q_map[0];
332 rsp = ha->rsp_q_map[0];
333 /* So we know we haven't pci_map'ed anything yet */
334 tot_dsds = 0;
336 /* Send marker if required */
337 if (vha->marker_needed != 0) {
338 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
339 QLA_SUCCESS) {
340 return (QLA_FUNCTION_FAILED);
342 vha->marker_needed = 0;
345 /* Acquire ring specific lock */
346 spin_lock_irqsave(&ha->hardware_lock, flags);
348 /* Check for room in outstanding command list. */
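/*
 * The search below starts just past the last handle issued and wraps
 * back to 1, never 0; handle 0 is presumably reserved as the "no
 * command" value.
 */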
349 handle = req->current_outstanding_cmd;
350 for (index = 1; index < req->num_outstanding_cmds; index++) {
351 handle++;
352 if (handle == req->num_outstanding_cmds)
353 handle = 1;
354 if (!req->outstanding_cmds[handle])
355 break;
357 if (index == req->num_outstanding_cmds)
358 goto queuing_error;
360 /* Map the sg table so we have an accurate count of sg entries needed */
361 if (scsi_sg_count(cmd)) {
362 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
363 scsi_sg_count(cmd), cmd->sc_data_direction);
364 if (unlikely(!nseg))
365 goto queuing_error;
366 } else
367 nseg = 0;
369 tot_dsds = nseg;
371 /* Calculate the number of request entries needed. */
372 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
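/*
 * Ring-space check (assumed semantics): req->cnt caches the free
 * entry count; when it looks too small it is recomputed as the
 * distance from ring_index to the chip's request-queue-out pointer,
 * and 2 entries of slack are kept so the producer never fully
 * catches up with the consumer.
 */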
373 if (req->cnt < (req_cnt + 2)) {
374 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
375 if (req->ring_index < cnt)
376 req->cnt = cnt - req->ring_index;
377 else
378 req->cnt = req->length -
379 (req->ring_index - cnt);
380 /* If still no head room then bail out */
381 if (req->cnt < (req_cnt + 2))
382 goto queuing_error;
385 /* Build command packet */
386 req->current_outstanding_cmd = handle;
387 req->outstanding_cmds[handle] = sp;
388 sp->handle = handle;
389 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
390 req->cnt -= req_cnt;
392 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
393 cmd_pkt->handle = handle;
394 /* Zero out remaining portion of packet. */
395 clr_ptr = (uint32_t *)cmd_pkt + 2;
396 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
397 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
399 /* Set target ID and LUN number*/
400 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
401 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
402 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
404 /* Load SCSI command packet. */
405 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
406 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
408 /* Build IOCB segments */
409 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
411 /* Set total data segment count. */
412 cmd_pkt->entry_count = (uint8_t)req_cnt;
413 wmb();
415 /* Adjust ring index. */
416 req->ring_index++;
417 if (req->ring_index == req->length) {
418 req->ring_index = 0;
419 req->ring_ptr = req->ring;
420 } else
421 req->ring_ptr++;
423 sp->flags |= SRB_DMA_VALID;
425 /* Set chip new ring index. */
426 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
427 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
429 /* Manage unprocessed RIO/ZIO commands in response queue. */
430 if (vha->flags.process_response_queue &&
431 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
432 qla2x00_process_response_queue(rsp);
434 spin_unlock_irqrestore(&ha->hardware_lock, flags);
435 return (QLA_SUCCESS);
437 queuing_error:
438 if (tot_dsds)
439 scsi_dma_unmap(cmd);
441 spin_unlock_irqrestore(&ha->hardware_lock, flags);
443 return (QLA_FUNCTION_FAILED);
447 * qla2x00_start_iocbs() - Execute the IOCB command
449 void
450 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
452 struct qla_hw_data *ha = vha->hw;
453 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
455 if (IS_P3P_TYPE(ha)) {
456 qla82xx_start_iocbs(vha);
457 } else {
458 /* Adjust ring index. */
459 req->ring_index++;
460 if (req->ring_index == req->length) {
461 req->ring_index = 0;
462 req->ring_ptr = req->ring;
463 } else
464 req->ring_ptr++;
466 /* Set chip new ring index. */
467 if (ha->mqenable || IS_QLA27XX(ha)) {
468 WRT_REG_DWORD(req->req_q_in, req->ring_index);
469 } else if (IS_QLA83XX(ha)) {
470 WRT_REG_DWORD(req->req_q_in, req->ring_index);
471 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
472 } else if (IS_QLAFX00(ha)) {
473 WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
474 RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
475 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
476 } else if (IS_FWI2_CAPABLE(ha)) {
477 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
478 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
479 } else {
480 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
481 req->ring_index);
482 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
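/*
 * Every branch above posts the new ring index through the
 * chip-specific request-queue-in register; the read-back that
 * follows on most branches flushes PCI posting.
 */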
488 * qla2x00_marker() - Send a marker IOCB to the firmware.
489 * @ha: HA context
490 * @loop_id: loop ID
491 * @lun: LUN
492 * @type: marker modifier
494 * Can be called from both normal and interrupt context.
496 * Returns non-zero if a failure occurred, else zero.
498 static int
499 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
500 struct rsp_que *rsp, uint16_t loop_id,
501 uint64_t lun, uint8_t type)
503 mrk_entry_t *mrk;
504 struct mrk_entry_24xx *mrk24 = NULL;
506 struct qla_hw_data *ha = vha->hw;
507 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
509 req = ha->req_q_map[0];
510 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
511 if (mrk == NULL) {
512 ql_log(ql_log_warn, base_vha, 0x3026,
513 "Failed to allocate Marker IOCB.\n");
515 return (QLA_FUNCTION_FAILED);
518 mrk->entry_type = MARKER_TYPE;
519 mrk->modifier = type;
520 if (type != MK_SYNC_ALL) {
521 if (IS_FWI2_CAPABLE(ha)) {
522 mrk24 = (struct mrk_entry_24xx *) mrk;
523 mrk24->nport_handle = cpu_to_le16(loop_id);
524 int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
525 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
526 mrk24->vp_index = vha->vp_idx;
527 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
528 } else {
529 SET_TARGET_ID(ha, mrk->target, loop_id);
530 mrk->lun = cpu_to_le16((uint16_t)lun);
533 wmb();
535 qla2x00_start_iocbs(vha, req);
537 return (QLA_SUCCESS);
538 }
540 int
541 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
542 struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
543 uint8_t type)
545 int ret;
546 unsigned long flags = 0;
548 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
549 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
550 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
552 return (ret);
556 * qla2x00_issue_marker
558 * Issue marker
559 * Caller CAN have hardware lock held as specified by ha_locked parameter.
560 * Might release it, then reacquire.
562 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
564 if (ha_locked) {
565 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
566 MK_SYNC_ALL) != QLA_SUCCESS)
567 return QLA_FUNCTION_FAILED;
568 } else {
569 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
570 MK_SYNC_ALL) != QLA_SUCCESS)
571 return QLA_FUNCTION_FAILED;
573 vha->marker_needed = 0;
575 return QLA_SUCCESS;
578 static inline int
579 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
580 uint16_t tot_dsds)
582 uint32_t *cur_dsd = NULL;
583 scsi_qla_host_t *vha;
584 struct qla_hw_data *ha;
585 struct scsi_cmnd *cmd;
586 struct scatterlist *cur_seg;
587 uint32_t *dsd_seg;
588 void *next_dsd;
589 uint8_t avail_dsds;
590 uint8_t first_iocb = 1;
591 uint32_t dsd_list_len;
592 struct dsd_dma *dsd_ptr;
593 struct ct6_dsd *ctx;
595 cmd = GET_CMD_SP(sp);
597 /* Update entry type to indicate Command Type 6 IOCB */
598 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
600 /* No data transfer */
601 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
602 cmd_pkt->byte_count = cpu_to_le32(0);
603 return 0;
606 vha = sp->vha;
607 ha = vha->hw;
609 /* Set transfer direction */
610 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
611 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
612 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
613 vha->qla_stats.output_requests++;
614 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
615 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
616 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
617 vha->qla_stats.input_requests++;
620 cur_seg = scsi_sglist(cmd);
621 ctx = GET_CMD_CTX_SP(sp);
623 while (tot_dsds) {
624 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
625 QLA_DSDS_PER_IOCB : tot_dsds;
626 tot_dsds -= avail_dsds;
627 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
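/*
 * Each DSD entry is QLA_DSD_SIZE bytes (12, matching the literal
 * used by the other sglist builders below); the "+ 1" reserves one
 * extra slot per list, used either to chain to the next list or for
 * the null terminator written at the end of this function.
 */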
629 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
630 struct dsd_dma, list);
631 next_dsd = dsd_ptr->dsd_addr;
632 list_del(&dsd_ptr->list);
633 ha->gbl_dsd_avail--;
634 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
635 ctx->dsd_use_cnt++;
636 ha->gbl_dsd_inuse++;
638 if (first_iocb) {
639 first_iocb = 0;
640 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
641 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
642 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
643 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
644 } else {
645 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
646 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
647 *cur_dsd++ = cpu_to_le32(dsd_list_len);
649 cur_dsd = (uint32_t *)next_dsd;
650 while (avail_dsds) {
651 dma_addr_t sle_dma;
653 sle_dma = sg_dma_address(cur_seg);
654 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
655 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
656 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
657 cur_seg = sg_next(cur_seg);
658 avail_dsds--;
662 /* Null termination */
663 *cur_dsd++ = 0;
664 *cur_dsd++ = 0;
665 *cur_dsd++ = 0;
666 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
667 return 0;
671 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
672 * for Command Type 6.
674 * @dsds: number of data segment descriptors needed
676 * Returns the number of DSD lists needed to store @dsds.
677 */
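/*
 * Worked example: dsds = 2 * QLA_DSDS_PER_IOCB + 1 requires three
 * DSD lists (two full lists plus one for the remainder).
 */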
678 static inline uint16_t
679 qla24xx_calc_dsd_lists(uint16_t dsds)
681 uint16_t dsd_lists = 0;
683 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
684 if (dsds % QLA_DSDS_PER_IOCB)
685 dsd_lists++;
686 return dsd_lists;
691 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
692 * IOCB types.
694 * @sp: SRB command to process
695 * @cmd_pkt: Command type 3 IOCB
696 * @tot_dsds: Total number of segments to transfer
697 * @req: pointer to request queue
699 inline void
700 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
701 uint16_t tot_dsds, struct req_que *req)
703 uint16_t avail_dsds;
704 uint32_t *cur_dsd;
705 scsi_qla_host_t *vha;
706 struct scsi_cmnd *cmd;
707 struct scatterlist *sg;
708 int i;
710 cmd = GET_CMD_SP(sp);
712 /* Update entry type to indicate Command Type 7 IOCB */
713 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
715 /* No data transfer */
716 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
717 cmd_pkt->byte_count = cpu_to_le32(0);
718 return;
721 vha = sp->vha;
723 /* Set transfer direction */
724 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
725 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
726 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
727 vha->qla_stats.output_requests++;
728 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
729 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
730 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
731 vha->qla_stats.input_requests++;
734 /* One DSD is available in the Command Type 7 IOCB */
735 avail_dsds = 1;
736 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
738 /* Load data segments */
740 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
741 dma_addr_t sle_dma;
742 cont_a64_entry_t *cont_pkt;
744 /* Allocate additional continuation packets? */
745 if (avail_dsds == 0) {
747 * Five DSDs are available in the Continuation
748 * Type 1 IOCB.
750 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
751 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
752 avail_dsds = 5;
755 sle_dma = sg_dma_address(sg);
756 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
757 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
758 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
759 avail_dsds--;
763 struct fw_dif_context {
764 uint32_t ref_tag;
765 uint16_t app_tag;
766 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
767 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
768 };
770 /*
771 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
772 * passed in.
773 */
774 static inline void
775 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
776 unsigned int protcnt)
778 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
780 switch (scsi_get_prot_type(cmd)) {
781 case SCSI_PROT_DIF_TYPE0:
783 * No check for ql2xenablehba_err_chk, as it would be an
784 * I/O error if hba tag generation is not done.
786 pkt->ref_tag = cpu_to_le32((uint32_t)
787 (0xffffffff & scsi_get_lba(cmd)));
789 if (!qla2x00_hba_err_chk_enabled(sp))
790 break;
792 pkt->ref_tag_mask[0] = 0xff;
793 pkt->ref_tag_mask[1] = 0xff;
794 pkt->ref_tag_mask[2] = 0xff;
795 pkt->ref_tag_mask[3] = 0xff;
796 break;
799 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
800 * match LBA in CDB + N
802 case SCSI_PROT_DIF_TYPE2:
803 pkt->app_tag = cpu_to_le16(0);
804 pkt->app_tag_mask[0] = 0x0;
805 pkt->app_tag_mask[1] = 0x0;
807 pkt->ref_tag = cpu_to_le32((uint32_t)
808 (0xffffffff & scsi_get_lba(cmd)));
810 if (!qla2x00_hba_err_chk_enabled(sp))
811 break;
813 /* enable ALL bytes of the ref tag */
814 pkt->ref_tag_mask[0] = 0xff;
815 pkt->ref_tag_mask[1] = 0xff;
816 pkt->ref_tag_mask[2] = 0xff;
817 pkt->ref_tag_mask[3] = 0xff;
818 break;
820 /* For Type 3 protection: 16 bit GUARD only */
821 case SCSI_PROT_DIF_TYPE3:
822 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
823 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
824 0x00;
825 break;
828 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
829 * 16 bit app tag.
831 case SCSI_PROT_DIF_TYPE1:
832 pkt->ref_tag = cpu_to_le32((uint32_t)
833 (0xffffffff & scsi_get_lba(cmd)));
834 pkt->app_tag = cpu_to_le16(0);
835 pkt->app_tag_mask[0] = 0x0;
836 pkt->app_tag_mask[1] = 0x0;
838 if (!qla2x00_hba_err_chk_enabled(sp))
839 break;
841 /* enable ALL bytes of the ref tag */
842 pkt->ref_tag_mask[0] = 0xff;
843 pkt->ref_tag_mask[1] = 0xff;
844 pkt->ref_tag_mask[2] = 0xff;
845 pkt->ref_tag_mask[3] = 0xff;
846 break;
851 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
852 uint32_t *partial)
854 struct scatterlist *sg;
855 uint32_t cumulative_partial, sg_len;
856 dma_addr_t sg_dma_addr;
858 if (sgx->num_bytes == sgx->tot_bytes)
859 return 0;
861 sg = sgx->cur_sg;
862 cumulative_partial = sgx->tot_partial;
864 sg_dma_addr = sg_dma_address(sg);
865 sg_len = sg_dma_len(sg);
867 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
869 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
870 sgx->dma_len = (blk_sz - cumulative_partial);
871 sgx->tot_partial = 0;
872 sgx->num_bytes += blk_sz;
873 *partial = 0;
874 } else {
875 sgx->dma_len = sg_len - sgx->bytes_consumed;
876 sgx->tot_partial += sgx->dma_len;
877 *partial = 1;
880 sgx->bytes_consumed += sgx->dma_len;
882 if (sg_len == sgx->bytes_consumed) {
883 sg = sg_next(sg);
884 sgx->num_sg++;
885 sgx->cur_sg = sg;
886 sgx->bytes_consumed = 0;
889 return 1;
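/*
 * Summary of the helper above: it carves the data S/G list into
 * blk_sz sized chunks, returning 1 while bytes remain; *partial is
 * set when the current chunk stops short of a full protection
 * interval because it straddles an S/G element boundary.
 */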
893 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
894 uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
896 void *next_dsd;
897 uint8_t avail_dsds = 0;
898 uint32_t dsd_list_len;
899 struct dsd_dma *dsd_ptr;
900 struct scatterlist *sg_prot;
901 uint32_t *cur_dsd = dsd;
902 uint16_t used_dsds = tot_dsds;
903 uint32_t prot_int; /* protection interval */
904 uint32_t partial;
905 struct qla2_sgx sgx;
906 dma_addr_t sle_dma;
907 uint32_t sle_dma_len, tot_prot_dma_len = 0;
908 struct scsi_cmnd *cmd;
910 memset(&sgx, 0, sizeof(struct qla2_sgx));
911 if (sp) {
912 cmd = GET_CMD_SP(sp);
913 prot_int = cmd->device->sector_size;
915 sgx.tot_bytes = scsi_bufflen(cmd);
916 sgx.cur_sg = scsi_sglist(cmd);
917 sgx.sp = sp;
919 sg_prot = scsi_prot_sglist(cmd);
920 } else if (tc) {
921 prot_int = tc->blk_sz;
922 sgx.tot_bytes = tc->bufflen;
923 sgx.cur_sg = tc->sg;
924 sg_prot = tc->prot_sg;
925 } else {
926 BUG();
927 return 1;
930 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
932 sle_dma = sgx.dma_addr;
933 sle_dma_len = sgx.dma_len;
934 alloc_and_fill:
935 /* Allocate additional continuation packets? */
936 if (avail_dsds == 0) {
937 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
938 QLA_DSDS_PER_IOCB : used_dsds;
939 dsd_list_len = (avail_dsds + 1) * 12;
940 used_dsds -= avail_dsds;
942 /* allocate tracking DS */
943 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
944 if (!dsd_ptr)
945 return 1;
947 /* allocate new list */
948 dsd_ptr->dsd_addr = next_dsd =
949 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
950 &dsd_ptr->dsd_list_dma);
952 if (!next_dsd) {
954 * Need to cleanup only this dsd_ptr, rest
955 * will be done by sp_free_dma()
957 kfree(dsd_ptr);
958 return 1;
961 if (sp) {
962 list_add_tail(&dsd_ptr->list,
963 &((struct crc_context *)
964 sp->u.scmd.ctx)->dsd_list);
966 sp->flags |= SRB_CRC_CTX_DSD_VALID;
967 } else {
968 list_add_tail(&dsd_ptr->list,
969 &(tc->ctx->dsd_list));
970 *tc->ctx_dsd_alloced = 1;
974 /* add new list to cmd iocb or last list */
975 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
976 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
977 *cur_dsd++ = dsd_list_len;
978 cur_dsd = (uint32_t *)next_dsd;
980 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
981 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
982 *cur_dsd++ = cpu_to_le32(sle_dma_len);
983 avail_dsds--;
985 if (partial == 0) {
986 /* Got a full protection interval */
987 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
988 sle_dma_len = 8;
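/* 8 bytes: one T10 DIF tuple (guard, app tag, ref tag) per protection interval. */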
990 tot_prot_dma_len += sle_dma_len;
991 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
992 tot_prot_dma_len = 0;
993 sg_prot = sg_next(sg_prot);
996 partial = 1; /* So as to not re-enter this block */
997 goto alloc_and_fill;
1000 /* Null termination */
1001 *cur_dsd++ = 0;
1002 *cur_dsd++ = 0;
1003 *cur_dsd++ = 0;
1004 return 0;
1008 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1009 uint16_t tot_dsds, struct qla_tc_param *tc)
1011 void *next_dsd;
1012 uint8_t avail_dsds = 0;
1013 uint32_t dsd_list_len;
1014 struct dsd_dma *dsd_ptr;
1015 struct scatterlist *sg, *sgl;
1016 uint32_t *cur_dsd = dsd;
1017 int i;
1018 uint16_t used_dsds = tot_dsds;
1019 struct scsi_cmnd *cmd;
1021 if (sp) {
1022 cmd = GET_CMD_SP(sp);
1023 sgl = scsi_sglist(cmd);
1024 } else if (tc) {
1025 sgl = tc->sg;
1026 } else {
1027 BUG();
1028 return 1;
1032 for_each_sg(sgl, sg, tot_dsds, i) {
1033 dma_addr_t sle_dma;
1035 /* Allocate additional continuation packets? */
1036 if (avail_dsds == 0) {
1037 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1038 QLA_DSDS_PER_IOCB : used_dsds;
1039 dsd_list_len = (avail_dsds + 1) * 12;
1040 used_dsds -= avail_dsds;
1042 /* allocate tracking DS */
1043 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1044 if (!dsd_ptr)
1045 return 1;
1047 /* allocate new list */
1048 dsd_ptr->dsd_addr = next_dsd =
1049 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1050 &dsd_ptr->dsd_list_dma);
1052 if (!next_dsd) {
1054 * Need to cleanup only this dsd_ptr, rest
1055 * will be done by sp_free_dma()
1057 kfree(dsd_ptr);
1058 return 1;
1061 if (sp) {
1062 list_add_tail(&dsd_ptr->list,
1063 &((struct crc_context *)
1064 sp->u.scmd.ctx)->dsd_list);
1066 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1067 } else {
1068 list_add_tail(&dsd_ptr->list,
1069 &(tc->ctx->dsd_list));
1070 *tc->ctx_dsd_alloced = 1;
1073 /* add new list to cmd iocb or last list */
1074 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1075 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1076 *cur_dsd++ = dsd_list_len;
1077 cur_dsd = (uint32_t *)next_dsd;
1079 sle_dma = sg_dma_address(sg);
1081 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1082 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1083 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1084 avail_dsds--;
1087 /* Null termination */
1088 *cur_dsd++ = 0;
1089 *cur_dsd++ = 0;
1090 *cur_dsd++ = 0;
1091 return 0;
1095 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1096 uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
1098 void *next_dsd;
1099 uint8_t avail_dsds = 0;
1100 uint32_t dsd_list_len;
1101 struct dsd_dma *dsd_ptr;
1102 struct scatterlist *sg, *sgl;
1103 int i;
1104 struct scsi_cmnd *cmd;
1105 uint32_t *cur_dsd = dsd;
1106 uint16_t used_dsds = tot_dsds;
1107 struct scsi_qla_host *vha;
1109 if (sp) {
1110 cmd = GET_CMD_SP(sp);
1111 sgl = scsi_prot_sglist(cmd);
1112 vha = sp->vha;
1113 } else if (tc) {
1114 vha = tc->vha;
1115 sgl = tc->prot_sg;
1116 } else {
1117 BUG();
1118 return 1;
1121 ql_dbg(ql_dbg_tgt, vha, 0xe021,
1122 "%s: enter\n", __func__);
1124 for_each_sg(sgl, sg, tot_dsds, i) {
1125 dma_addr_t sle_dma;
1127 /* Allocate additional continuation packets? */
1128 if (avail_dsds == 0) {
1129 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1130 QLA_DSDS_PER_IOCB : used_dsds;
1131 dsd_list_len = (avail_dsds + 1) * 12;
1132 used_dsds -= avail_dsds;
1134 /* allocate tracking DS */
1135 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1136 if (!dsd_ptr)
1137 return 1;
1139 /* allocate new list */
1140 dsd_ptr->dsd_addr = next_dsd =
1141 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1142 &dsd_ptr->dsd_list_dma);
1144 if (!next_dsd) {
1146 * Need to cleanup only this dsd_ptr, rest
1147 * will be done by sp_free_dma()
1149 kfree(dsd_ptr);
1150 return 1;
1153 if (sp) {
1154 list_add_tail(&dsd_ptr->list,
1155 &((struct crc_context *)
1156 sp->u.scmd.ctx)->dsd_list);
1158 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1159 } else {
1160 list_add_tail(&dsd_ptr->list,
1161 &(tc->ctx->dsd_list));
1162 *tc->ctx_dsd_alloced = 1;
1165 /* add new list to cmd iocb or last list */
1166 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1167 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1168 *cur_dsd++ = dsd_list_len;
1169 cur_dsd = (uint32_t *)next_dsd;
1171 sle_dma = sg_dma_address(sg);
1173 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1174 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1175 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1177 avail_dsds--;
1179 /* Null termination */
1180 *cur_dsd++ = 0;
1181 *cur_dsd++ = 0;
1182 *cur_dsd++ = 0;
1183 return 0;
1187 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1188 * Type CRC_2 IOCB types.
1190 * @sp: SRB command to process
1191 * @cmd_pkt: Command type 3 IOCB
1192 * @tot_dsds: Total number of segments to transfer
1194 inline int
1195 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1196 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1198 uint32_t *cur_dsd, *fcp_dl;
1199 scsi_qla_host_t *vha;
1200 struct scsi_cmnd *cmd;
1201 uint32_t total_bytes = 0;
1202 uint32_t data_bytes;
1203 uint32_t dif_bytes;
1204 uint8_t bundling = 1;
1205 uint16_t blk_size;
1206 uint8_t *clr_ptr;
1207 struct crc_context *crc_ctx_pkt = NULL;
1208 struct qla_hw_data *ha;
1209 uint8_t additional_fcpcdb_len;
1210 uint16_t fcp_cmnd_len;
1211 struct fcp_cmnd *fcp_cmnd;
1212 dma_addr_t crc_ctx_dma;
1214 cmd = GET_CMD_SP(sp);
1216 /* Update entry type to indicate Command Type CRC_2 IOCB */
1217 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
1219 vha = sp->vha;
1220 ha = vha->hw;
1222 /* No data transfer */
1223 data_bytes = scsi_bufflen(cmd);
1224 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1225 cmd_pkt->byte_count = cpu_to_le32(0);
1226 return QLA_SUCCESS;
1229 cmd_pkt->vp_index = sp->vha->vp_idx;
1231 /* Set transfer direction */
1232 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1233 cmd_pkt->control_flags =
1234 cpu_to_le16(CF_WRITE_DATA);
1235 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1236 cmd_pkt->control_flags =
1237 cpu_to_le16(CF_READ_DATA);
1240 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1241 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1242 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1243 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1244 bundling = 0;
1246 /* Allocate CRC context from global pool */
1247 crc_ctx_pkt = sp->u.scmd.ctx =
1248 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1250 if (!crc_ctx_pkt)
1251 goto crc_queuing_error;
1253 /* Zero out CTX area. */
1254 clr_ptr = (uint8_t *)crc_ctx_pkt;
1255 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1257 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1259 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1261 /* Set handle */
1262 crc_ctx_pkt->handle = cmd_pkt->handle;
1264 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1266 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1267 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1269 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1270 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1271 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1273 /* Determine SCSI command length -- align to 4 byte boundary */
1274 if (cmd->cmd_len > 16) {
1275 additional_fcpcdb_len = cmd->cmd_len - 16;
1276 if ((cmd->cmd_len % 4) != 0) {
1277 /* SCSI cmd > 16 bytes must be multiple of 4 */
1278 goto crc_queuing_error;
1280 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1281 } else {
1282 additional_fcpcdb_len = 0;
1283 fcp_cmnd_len = 12 + 16 + 4;
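/*
 * Assumed FCP_CMND layout behind this math: an 8-byte LUN plus four
 * control bytes (12), the CDB area, then the 4-byte FCP_DL that is
 * filled in through *fcp_dl further down.
 */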
1286 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1288 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1289 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1290 fcp_cmnd->additional_cdb_len |= 1;
1291 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1292 fcp_cmnd->additional_cdb_len |= 2;
1294 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1295 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1296 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1297 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1298 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1299 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1300 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1301 fcp_cmnd->task_management = 0;
1302 fcp_cmnd->task_attribute = TSK_SIMPLE;
1304 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1306 /* Compute dif len and adjust data len to include protection */
1307 dif_bytes = 0;
1308 blk_size = cmd->device->sector_size;
1309 dif_bytes = (data_bytes / blk_size) * 8;
1311 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1312 case SCSI_PROT_READ_INSERT:
1313 case SCSI_PROT_WRITE_STRIP:
1314 total_bytes = data_bytes;
1315 data_bytes += dif_bytes;
1316 break;
1318 case SCSI_PROT_READ_STRIP:
1319 case SCSI_PROT_WRITE_INSERT:
1320 case SCSI_PROT_READ_PASS:
1321 case SCSI_PROT_WRITE_PASS:
1322 total_bytes = data_bytes + dif_bytes;
1323 break;
1324 default:
1325 BUG();
1328 if (!qla2x00_hba_err_chk_enabled(sp))
1329 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1330 /* HBA error checking enabled */
1331 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1332 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1333 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1334 SCSI_PROT_DIF_TYPE2))
1335 fw_prot_opts |= BIT_10;
1336 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1337 SCSI_PROT_DIF_TYPE3)
1338 fw_prot_opts |= BIT_11;
1341 if (!bundling) {
1342 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1343 } else {
1345 * Configure Bundling if we need to fetch interleaving
1346 * protection PCI accesses
1348 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1349 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1350 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1351 tot_prot_dsds);
1352 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1355 /* Finish the common fields of CRC pkt */
1356 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1357 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1358 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1359 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1360 /* Fibre channel byte count */
1361 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1362 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1363 additional_fcpcdb_len);
1364 *fcp_dl = htonl(total_bytes);
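/*
 * FCP_DL sits immediately after the (possibly extended) CDB and is
 * big-endian on the wire, hence htonl() rather than cpu_to_le32().
 */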
1366 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1367 cmd_pkt->byte_count = cpu_to_le32(0);
1368 return QLA_SUCCESS;
1370 /* Walks data segments */
1372 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1374 if (!bundling && tot_prot_dsds) {
1375 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1376 cur_dsd, tot_dsds, NULL))
1377 goto crc_queuing_error;
1378 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1379 (tot_dsds - tot_prot_dsds), NULL))
1380 goto crc_queuing_error;
1382 if (bundling && tot_prot_dsds) {
1383 /* Walks dif segments */
1384 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1385 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1386 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1387 tot_prot_dsds, NULL))
1388 goto crc_queuing_error;
1390 return QLA_SUCCESS;
1392 crc_queuing_error:
1393 /* Cleanup will be performed by the caller */
1395 return QLA_FUNCTION_FAILED;
1399 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1400 * @sp: command to send to the ISP
1402 * Returns non-zero if a failure occurred, else zero.
1403 */
1404 int
1405 qla24xx_start_scsi(srb_t *sp)
1406 {
1407 int nseg;
1408 unsigned long flags;
1409 uint32_t *clr_ptr;
1410 uint32_t index;
1411 uint32_t handle;
1412 struct cmd_type_7 *cmd_pkt;
1413 uint16_t cnt;
1414 uint16_t req_cnt;
1415 uint16_t tot_dsds;
1416 struct req_que *req = NULL;
1417 struct rsp_que *rsp = NULL;
1418 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1419 struct scsi_qla_host *vha = sp->vha;
1420 struct qla_hw_data *ha = vha->hw;
1422 /* Setup device pointers. */
1423 req = vha->req;
1424 rsp = req->rsp;
1426 /* So we know we haven't pci_map'ed anything yet */
1427 tot_dsds = 0;
1429 /* Send marker if required */
1430 if (vha->marker_needed != 0) {
1431 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1432 QLA_SUCCESS)
1433 return QLA_FUNCTION_FAILED;
1434 vha->marker_needed = 0;
1437 /* Acquire ring specific lock */
1438 spin_lock_irqsave(&ha->hardware_lock, flags);
1440 /* Check for room in outstanding command list. */
1441 handle = req->current_outstanding_cmd;
1442 for (index = 1; index < req->num_outstanding_cmds; index++) {
1443 handle++;
1444 if (handle == req->num_outstanding_cmds)
1445 handle = 1;
1446 if (!req->outstanding_cmds[handle])
1447 break;
1449 if (index == req->num_outstanding_cmds)
1450 goto queuing_error;
1452 /* Map the sg table so we have an accurate count of sg entries needed */
1453 if (scsi_sg_count(cmd)) {
1454 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1455 scsi_sg_count(cmd), cmd->sc_data_direction);
1456 if (unlikely(!nseg))
1457 goto queuing_error;
1458 } else
1459 nseg = 0;
1461 tot_dsds = nseg;
1462 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1463 if (req->cnt < (req_cnt + 2)) {
1464 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1465 RD_REG_DWORD_RELAXED(req->req_q_out);
1466 if (req->ring_index < cnt)
1467 req->cnt = cnt - req->ring_index;
1468 else
1469 req->cnt = req->length -
1470 (req->ring_index - cnt);
1471 if (req->cnt < (req_cnt + 2))
1472 goto queuing_error;
1475 /* Build command packet. */
1476 req->current_outstanding_cmd = handle;
1477 req->outstanding_cmds[handle] = sp;
1478 sp->handle = handle;
1479 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1480 req->cnt -= req_cnt;
1482 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1483 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1485 /* Zero out remaining portion of packet. */
1486 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1487 clr_ptr = (uint32_t *)cmd_pkt + 2;
1488 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1489 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1491 /* Set NPORT-ID and LUN number*/
1492 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1493 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1494 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1495 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1496 cmd_pkt->vp_index = sp->vha->vp_idx;
1498 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1499 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1501 cmd_pkt->task = TSK_SIMPLE;
1503 /* Load SCSI command packet. */
1504 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1505 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1507 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1509 /* Build IOCB segments */
1510 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1512 /* Set total data segment count. */
1513 cmd_pkt->entry_count = (uint8_t)req_cnt;
1514 wmb();
1515 /* Adjust ring index. */
1516 req->ring_index++;
1517 if (req->ring_index == req->length) {
1518 req->ring_index = 0;
1519 req->ring_ptr = req->ring;
1520 } else
1521 req->ring_ptr++;
1523 sp->flags |= SRB_DMA_VALID;
1525 /* Set chip new ring index. */
1526 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1527 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1529 /* Manage unprocessed RIO/ZIO commands in response queue. */
1530 if (vha->flags.process_response_queue &&
1531 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1532 qla24xx_process_response_queue(vha, rsp);
1534 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1535 return QLA_SUCCESS;
1537 queuing_error:
1538 if (tot_dsds)
1539 scsi_dma_unmap(cmd);
1541 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1543 return QLA_FUNCTION_FAILED;
1547 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1548 * @sp: command to send to the ISP
1550 * Returns non-zero if a failure occurred, else zero.
1551 */
1552 int
1553 qla24xx_dif_start_scsi(srb_t *sp)
1554 {
1555 int nseg;
1556 unsigned long flags;
1557 uint32_t *clr_ptr;
1558 uint32_t index;
1559 uint32_t handle;
1560 uint16_t cnt;
1561 uint16_t req_cnt = 0;
1562 uint16_t tot_dsds;
1563 uint16_t tot_prot_dsds;
1564 uint16_t fw_prot_opts = 0;
1565 struct req_que *req = NULL;
1566 struct rsp_que *rsp = NULL;
1567 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1568 struct scsi_qla_host *vha = sp->vha;
1569 struct qla_hw_data *ha = vha->hw;
1570 struct cmd_type_crc_2 *cmd_pkt;
1571 uint32_t status = 0;
1573 #define QDSS_GOT_Q_SPACE BIT_0
1575 /* Only process protection or >16 cdb in this routine */
1576 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1577 if (cmd->cmd_len <= 16)
1578 return qla24xx_start_scsi(sp);
1581 /* Setup device pointers. */
1582 req = vha->req;
1583 rsp = req->rsp;
1585 /* So we know we haven't pci_map'ed anything yet */
1586 tot_dsds = 0;
1588 /* Send marker if required */
1589 if (vha->marker_needed != 0) {
1590 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1591 QLA_SUCCESS)
1592 return QLA_FUNCTION_FAILED;
1593 vha->marker_needed = 0;
1596 /* Acquire ring specific lock */
1597 spin_lock_irqsave(&ha->hardware_lock, flags);
1599 /* Check for room in outstanding command list. */
1600 handle = req->current_outstanding_cmd;
1601 for (index = 1; index < req->num_outstanding_cmds; index++) {
1602 handle++;
1603 if (handle == req->num_outstanding_cmds)
1604 handle = 1;
1605 if (!req->outstanding_cmds[handle])
1606 break;
1609 if (index == req->num_outstanding_cmds)
1610 goto queuing_error;
1612 /* Compute number of required data segments */
1613 /* Map the sg table so we have an accurate count of sg entries needed */
1614 if (scsi_sg_count(cmd)) {
1615 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1616 scsi_sg_count(cmd), cmd->sc_data_direction);
1617 if (unlikely(!nseg))
1618 goto queuing_error;
1619 else
1620 sp->flags |= SRB_DMA_VALID;
1622 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1623 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1624 struct qla2_sgx sgx;
1625 uint32_t partial;
1627 memset(&sgx, 0, sizeof(struct qla2_sgx));
1628 sgx.tot_bytes = scsi_bufflen(cmd);
1629 sgx.cur_sg = scsi_sglist(cmd);
1630 sgx.sp = sp;
1632 nseg = 0;
1633 while (qla24xx_get_one_block_sg(
1634 cmd->device->sector_size, &sgx, &partial))
1635 nseg++;
1637 } else
1638 nseg = 0;
1640 /* number of required data segments */
1641 tot_dsds = nseg;
1643 /* Compute number of required protection segments */
1644 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1645 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1646 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1647 if (unlikely(!nseg))
1648 goto queuing_error;
1649 else
1650 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1652 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1653 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1654 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1656 } else {
1657 nseg = 0;
1660 req_cnt = 1;
1661 /* Total Data and protection sg segment(s) */
1662 tot_prot_dsds = nseg;
1663 tot_dsds += nseg;
1664 if (req->cnt < (req_cnt + 2)) {
1665 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1666 RD_REG_DWORD_RELAXED(req->req_q_out);
1667 if (req->ring_index < cnt)
1668 req->cnt = cnt - req->ring_index;
1669 else
1670 req->cnt = req->length -
1671 (req->ring_index - cnt);
1672 if (req->cnt < (req_cnt + 2))
1673 goto queuing_error;
1676 status |= QDSS_GOT_Q_SPACE;
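/*
 * Remember that ring space has been claimed so the error path below
 * can hand it back (req->cnt += req_cnt) and clear the handle.
 */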
1678 /* Build header part of command packet (excluding the OPCODE). */
1679 req->current_outstanding_cmd = handle;
1680 req->outstanding_cmds[handle] = sp;
1681 sp->handle = handle;
1682 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1683 req->cnt -= req_cnt;
1685 /* Fill-in common area */
1686 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1687 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1689 clr_ptr = (uint32_t *)cmd_pkt + 2;
1690 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1692 /* Set NPORT-ID and LUN number*/
1693 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1694 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1695 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1696 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1698 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1699 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1701 /* Total Data and protection segment(s) */
1702 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1704 /* Build IOCB segments and adjust for data protection segments */
1705 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1706 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1707 QLA_SUCCESS)
1708 goto queuing_error;
1710 cmd_pkt->entry_count = (uint8_t)req_cnt;
1711 /* Specify response queue number where completion should happen */
1712 cmd_pkt->entry_status = (uint8_t) rsp->id;
1713 cmd_pkt->timeout = cpu_to_le16(0);
1714 wmb();
1716 /* Adjust ring index. */
1717 req->ring_index++;
1718 if (req->ring_index == req->length) {
1719 req->ring_index = 0;
1720 req->ring_ptr = req->ring;
1721 } else
1722 req->ring_ptr++;
1724 /* Set chip new ring index. */
1725 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1726 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1728 /* Manage unprocessed RIO/ZIO commands in response queue. */
1729 if (vha->flags.process_response_queue &&
1730 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1731 qla24xx_process_response_queue(vha, rsp);
1733 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1735 return QLA_SUCCESS;
1737 queuing_error:
1738 if (status & QDSS_GOT_Q_SPACE) {
1739 req->outstanding_cmds[handle] = NULL;
1740 req->cnt += req_cnt;
1742 /* Cleanup will be performed by the caller (queuecommand) */
1744 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1745 return QLA_FUNCTION_FAILED;
1749 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1750 * @sp: command to send to the ISP
1752 * Returns non-zero if a failure occurred, else zero.
1753 */
1754 static int
1755 qla2xxx_start_scsi_mq(srb_t *sp)
1756 {
1757 int nseg;
1758 unsigned long flags;
1759 uint32_t *clr_ptr;
1760 uint32_t index;
1761 uint32_t handle;
1762 struct cmd_type_7 *cmd_pkt;
1763 uint16_t cnt;
1764 uint16_t req_cnt;
1765 uint16_t tot_dsds;
1766 struct req_que *req = NULL;
1767 struct rsp_que *rsp = NULL;
1768 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1769 struct scsi_qla_host *vha = sp->fcport->vha;
1770 struct qla_hw_data *ha = vha->hw;
1771 struct qla_qpair *qpair = sp->qpair;
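/*
 * Multi-queue variant: everything below is serialized on the
 * per-queue-pair lock and uses the qpair's own request/response
 * rings, instead of the global hardware_lock and vha->req used by
 * qla24xx_start_scsi().
 */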
1773 /* Acquire qpair specific lock */
1774 spin_lock_irqsave(&qpair->qp_lock, flags);
1776 /* Setup qpair pointers */
1777 rsp = qpair->rsp;
1778 req = qpair->req;
1780 /* So we know we haven't pci_map'ed anything yet */
1781 tot_dsds = 0;
1783 /* Send marker if required */
1784 if (vha->marker_needed != 0) {
1785 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1786 QLA_SUCCESS) {
1787 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1788 return QLA_FUNCTION_FAILED;
1790 vha->marker_needed = 0;
1793 /* Check for room in outstanding command list. */
1794 handle = req->current_outstanding_cmd;
1795 for (index = 1; index < req->num_outstanding_cmds; index++) {
1796 handle++;
1797 if (handle == req->num_outstanding_cmds)
1798 handle = 1;
1799 if (!req->outstanding_cmds[handle])
1800 break;
1802 if (index == req->num_outstanding_cmds)
1803 goto queuing_error;
1805 /* Map the sg table so we have an accurate count of sg entries needed */
1806 if (scsi_sg_count(cmd)) {
1807 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1808 scsi_sg_count(cmd), cmd->sc_data_direction);
1809 if (unlikely(!nseg))
1810 goto queuing_error;
1811 } else
1812 nseg = 0;
1814 tot_dsds = nseg;
1815 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1816 if (req->cnt < (req_cnt + 2)) {
1817 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1818 RD_REG_DWORD_RELAXED(req->req_q_out);
1819 if (req->ring_index < cnt)
1820 req->cnt = cnt - req->ring_index;
1821 else
1822 req->cnt = req->length -
1823 (req->ring_index - cnt);
1824 if (req->cnt < (req_cnt + 2))
1825 goto queuing_error;
1828 /* Build command packet. */
1829 req->current_outstanding_cmd = handle;
1830 req->outstanding_cmds[handle] = sp;
1831 sp->handle = handle;
1832 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1833 req->cnt -= req_cnt;
1835 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1836 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1838 /* Zero out remaining portion of packet. */
1839 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1840 clr_ptr = (uint32_t *)cmd_pkt + 2;
1841 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1842 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1844 /* Set NPORT-ID and LUN number*/
1845 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1846 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1847 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1848 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1849 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1851 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1852 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1854 cmd_pkt->task = TSK_SIMPLE;
1856 /* Load SCSI command packet. */
1857 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1858 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1860 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1862 /* Build IOCB segments */
1863 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1865 /* Set total data segment count. */
1866 cmd_pkt->entry_count = (uint8_t)req_cnt;
1867 wmb();
1868 /* Adjust ring index. */
1869 req->ring_index++;
1870 if (req->ring_index == req->length) {
1871 req->ring_index = 0;
1872 req->ring_ptr = req->ring;
1873 } else
1874 req->ring_ptr++;
1876 sp->flags |= SRB_DMA_VALID;
1878 /* Set chip new ring index. */
1879 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1881 /* Manage unprocessed RIO/ZIO commands in response queue. */
1882 if (vha->flags.process_response_queue &&
1883 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1884 qla24xx_process_response_queue(vha, rsp);
1886 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1887 return QLA_SUCCESS;
1889 queuing_error:
1890 if (tot_dsds)
1891 scsi_dma_unmap(cmd);
1893 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1895 return QLA_FUNCTION_FAILED;
1900 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1901 * @sp: command to send to the ISP
1903 * Returns non-zero if a failure occurred, else zero.
1904 */
1905 int
1906 qla2xxx_dif_start_scsi_mq(srb_t *sp)
1907 {
1908 int nseg;
1909 unsigned long flags;
1910 uint32_t *clr_ptr;
1911 uint32_t index;
1912 uint32_t handle;
1913 uint16_t cnt;
1914 uint16_t req_cnt = 0;
1915 uint16_t tot_dsds;
1916 uint16_t tot_prot_dsds;
1917 uint16_t fw_prot_opts = 0;
1918 struct req_que *req = NULL;
1919 struct rsp_que *rsp = NULL;
1920 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1921 struct scsi_qla_host *vha = sp->fcport->vha;
1922 struct qla_hw_data *ha = vha->hw;
1923 struct cmd_type_crc_2 *cmd_pkt;
1924 uint32_t status = 0;
1925 struct qla_qpair *qpair = sp->qpair;
1927 #define QDSS_GOT_Q_SPACE BIT_0
1929 /* Check for host side state */
1930 if (!qpair->online) {
1931 cmd->result = DID_NO_CONNECT << 16;
1932 return QLA_INTERFACE_ERROR;
1935 if (!qpair->difdix_supported &&
1936 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1937 cmd->result = DID_NO_CONNECT << 16;
1938 return QLA_INTERFACE_ERROR;
1941 /* Only process protection or >16 cdb in this routine */
1942 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1943 if (cmd->cmd_len <= 16)
1944 return qla2xxx_start_scsi_mq(sp);
1947 spin_lock_irqsave(&qpair->qp_lock, flags);
1949 /* Setup qpair pointers */
1950 rsp = qpair->rsp;
1951 req = qpair->req;
1953 /* So we know we haven't pci_map'ed anything yet */
1954 tot_dsds = 0;
1956 /* Send marker if required */
1957 if (vha->marker_needed != 0) {
1958 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1959 QLA_SUCCESS) {
1960 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1961 return QLA_FUNCTION_FAILED;
1963 vha->marker_needed = 0;
1966 /* Check for room in outstanding command list. */
1967 handle = req->current_outstanding_cmd;
1968 for (index = 1; index < req->num_outstanding_cmds; index++) {
1969 handle++;
1970 if (handle == req->num_outstanding_cmds)
1971 handle = 1;
1972 if (!req->outstanding_cmds[handle])
1973 break;
1976 if (index == req->num_outstanding_cmds)
1977 goto queuing_error;
1979 /* Compute number of required data segments */
1980 /* Map the sg table so we have an accurate count of sg entries needed */
1981 if (scsi_sg_count(cmd)) {
1982 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1983 scsi_sg_count(cmd), cmd->sc_data_direction);
1984 if (unlikely(!nseg))
1985 goto queuing_error;
1986 else
1987 sp->flags |= SRB_DMA_VALID;
1989 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1990 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1991 struct qla2_sgx sgx;
1992 uint32_t partial;
1994 memset(&sgx, 0, sizeof(struct qla2_sgx));
1995 sgx.tot_bytes = scsi_bufflen(cmd);
1996 sgx.cur_sg = scsi_sglist(cmd);
1997 sgx.sp = sp;
1999 nseg = 0;
2000 while (qla24xx_get_one_block_sg(
2001 cmd->device->sector_size, &sgx, &partial))
2002 nseg++;
2004 } else
2005 nseg = 0;
2007 /* number of required data segments */
2008 tot_dsds = nseg;
2010 /* Compute number of required protection segments */
2011 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2012 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2013 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2014 if (unlikely(!nseg))
2015 goto queuing_error;
2016 else
2017 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2019 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2020 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2021 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2023 } else {
2024 nseg = 0;
2027 req_cnt = 1;
2028 /* Total Data and protection sg segment(s) */
2029 tot_prot_dsds = nseg;
2030 tot_dsds += nseg;
2031 if (req->cnt < (req_cnt + 2)) {
2032 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2033 RD_REG_DWORD_RELAXED(req->req_q_out);
2034 if (req->ring_index < cnt)
2035 req->cnt = cnt - req->ring_index;
2036 else
2037 req->cnt = req->length -
2038 (req->ring_index - cnt);
2039 if (req->cnt < (req_cnt + 2))
2040 goto queuing_error;
2043 status |= QDSS_GOT_Q_SPACE;
2045 /* Build header part of command packet (excluding the OPCODE). */
2046 req->current_outstanding_cmd = handle;
2047 req->outstanding_cmds[handle] = sp;
2048 sp->handle = handle;
2049 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2050 req->cnt -= req_cnt;
2052 /* Fill-in common area */
2053 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2054 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2056 clr_ptr = (uint32_t *)cmd_pkt + 2;
2057 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2059 /* Set NPORT-ID and LUN number*/
2060 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2061 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2062 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2063 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2065 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2066 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2068 /* Total Data and protection segment(s) */
2069 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2071 /* Build IOCB segments and adjust for data protection segments */
2072 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2073 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2074 QLA_SUCCESS)
2075 goto queuing_error;
2077 cmd_pkt->entry_count = (uint8_t)req_cnt;
2078 cmd_pkt->timeout = cpu_to_le16(0);
2079 wmb();
2081 /* Adjust ring index. */
2082 req->ring_index++;
2083 if (req->ring_index == req->length) {
2084 req->ring_index = 0;
2085 req->ring_ptr = req->ring;
2086 } else
2087 req->ring_ptr++;
2089 /* Set chip new ring index. */
2090 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2092 /* Manage unprocessed RIO/ZIO commands in response queue. */
2093 if (vha->flags.process_response_queue &&
2094 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2095 qla24xx_process_response_queue(vha, rsp);
2097 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2099 return QLA_SUCCESS;
2101 queuing_error:
2102 if (status & QDSS_GOT_Q_SPACE) {
2103 req->outstanding_cmds[handle] = NULL;
2104 req->cnt += req_cnt;
2106 /* Cleanup will be performed by the caller (queuecommand) */
2108 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2109 return QLA_FUNCTION_FAILED;
2112 /* Generic Control-SRB manipulation functions. */
2114 /* hardware_lock assumed to be held. */
2116 void *
2117 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2119 scsi_qla_host_t *vha = qpair->vha;
2120 struct qla_hw_data *ha = vha->hw;
2121 struct req_que *req = qpair->req;
2122 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2123 uint32_t index, handle;
2124 request_t *pkt;
2125 uint16_t cnt, req_cnt;
2127 pkt = NULL;
2128 req_cnt = 1;
2129 handle = 0;
2131 if (!sp)
2132 goto skip_cmd_array;
2134 /* Check for room in outstanding command list. */
2135 handle = req->current_outstanding_cmd;
2136 for (index = 1; index < req->num_outstanding_cmds; index++) {
2137 handle++;
2138 if (handle == req->num_outstanding_cmds)
2139 handle = 1;
2140 if (!req->outstanding_cmds[handle])
2141 break;
2143 if (index == req->num_outstanding_cmds) {
2144 ql_log(ql_log_warn, vha, 0x700b,
2145 "No room on outstanding cmd array.\n");
2146 goto queuing_error;
2149 /* Prep command array. */
2150 req->current_outstanding_cmd = handle;
2151 req->outstanding_cmds[handle] = sp;
2152 sp->handle = handle;
2154 /* Adjust entry-counts as needed. */
2155 if (sp->type != SRB_SCSI_CMD)
2156 req_cnt = sp->iocbs;
2158 skip_cmd_array:
2159 /* Check for room on request queue. */
2160 if (req->cnt < req_cnt + 2) {
2161 if (qpair->use_shadow_reg)
2162 cnt = *req->out_ptr;
2163 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2164 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2165 else if (IS_P3P_TYPE(ha))
2166 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2167 else if (IS_FWI2_CAPABLE(ha))
2168 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2169 else if (IS_QLAFX00(ha))
2170 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2171 else
2172 cnt = qla2x00_debounce_register(
2173 ISP_REQ_Q_OUT(ha, &reg->isp));
2175 if (req->ring_index < cnt)
2176 req->cnt = cnt - req->ring_index;
2177 else
2178 req->cnt = req->length -
2179 (req->ring_index - cnt);
2181 if (req->cnt < req_cnt + 2)
2182 goto queuing_error;
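/*
 * req->cnt caches the number of free request-ring entries.  When it
 * runs low, the firmware's out pointer is re-read (from the shadow
 * area when supported, otherwise from the register appropriate to
 * the ISP type) and the free count is recomputed with ring wrap
 * taken into account; two entries beyond req_cnt are always kept in
 * reserve.
 */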
2184 /* Prep packet */
2185 req->cnt -= req_cnt;
2186 pkt = req->ring_ptr;
2187 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2188 if (IS_QLAFX00(ha)) {
2189 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2190 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2191 } else {
2192 pkt->entry_count = req_cnt;
2193 pkt->handle = handle;
2196 queuing_error:
2197 qpair->tgt_counters.num_alloc_iocb_failed++;
2198 return pkt;
2201 void *
2202 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2204 scsi_qla_host_t *vha = qpair->vha;
2206 if (qla2x00_reset_active(vha))
2207 return NULL;
2209 return __qla2x00_alloc_iocbs(qpair, sp);
2212 void *
2213 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2215 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2218 static void
2219 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2221 struct srb_iocb *lio = &sp->u.iocb_cmd;
2223 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2224 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2225 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2226 logio->control_flags |= LCF_NVME_PRLI;
2228 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2229 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2230 logio->port_id[1] = sp->fcport->d_id.b.area;
2231 logio->port_id[2] = sp->fcport->d_id.b.domain;
2232 logio->vp_index = sp->vha->vp_idx;
2235 static void
2236 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2238 struct srb_iocb *lio = &sp->u.iocb_cmd;
2240 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2241 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2243 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2244 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2245 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2246 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2247 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2248 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2249 logio->port_id[1] = sp->fcport->d_id.b.area;
2250 logio->port_id[2] = sp->fcport->d_id.b.domain;
2251 logio->vp_index = sp->vha->vp_idx;
2254 static void
2255 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2257 struct qla_hw_data *ha = sp->vha->hw;
2258 struct srb_iocb *lio = &sp->u.iocb_cmd;
2259 uint16_t opts;
2261 mbx->entry_type = MBX_IOCB_TYPE;
2262 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2263 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2264 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2265 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2266 if (HAS_EXTENDED_IDS(ha)) {
2267 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2268 mbx->mb10 = cpu_to_le16(opts);
2269 } else {
2270 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2272 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2273 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2274 sp->fcport->d_id.b.al_pa);
2275 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2278 static void
2279 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2281 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2282 logio->control_flags =
2283 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2284 if (!sp->fcport->se_sess ||
2285 !sp->fcport->keep_nport_handle)
2286 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2287 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2288 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2289 logio->port_id[1] = sp->fcport->d_id.b.area;
2290 logio->port_id[2] = sp->fcport->d_id.b.domain;
2291 logio->vp_index = sp->vha->vp_idx;
2294 static void
2295 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2297 struct qla_hw_data *ha = sp->vha->hw;
2299 mbx->entry_type = MBX_IOCB_TYPE;
2300 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2301 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2302 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2303 cpu_to_le16(sp->fcport->loop_id):
2304 cpu_to_le16(sp->fcport->loop_id << 8);
2305 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2306 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2307 sp->fcport->d_id.b.al_pa);
2308 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2309 /* Implicit: mbx->mbx10 = 0. */
2312 static void
2313 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2315 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2316 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2317 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2318 logio->vp_index = sp->vha->vp_idx;
2321 static void
2322 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2324 struct qla_hw_data *ha = sp->vha->hw;
2326 mbx->entry_type = MBX_IOCB_TYPE;
2327 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2328 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2329 if (HAS_EXTENDED_IDS(ha)) {
2330 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2331 mbx->mb10 = cpu_to_le16(BIT_0);
2332 } else {
2333 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2335 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2336 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2337 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2338 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2339 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2342 static void
2343 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2345 uint32_t flags;
2346 uint64_t lun;
2347 struct fc_port *fcport = sp->fcport;
2348 scsi_qla_host_t *vha = fcport->vha;
2349 struct qla_hw_data *ha = vha->hw;
2350 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2351 struct req_que *req = vha->req;
2353 flags = iocb->u.tmf.flags;
2354 lun = iocb->u.tmf.lun;
2356 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2357 tsk->entry_count = 1;
2358 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2359 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2360 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2361 tsk->control_flags = cpu_to_le32(flags);
2362 tsk->port_id[0] = fcport->d_id.b.al_pa;
2363 tsk->port_id[1] = fcport->d_id.b.area;
2364 tsk->port_id[2] = fcport->d_id.b.domain;
2365 tsk->vp_index = fcport->vha->vp_idx;
2367 if (flags == TCF_LUN_RESET) {
2368 int_to_scsilun(lun, &tsk->lun);
2369 host_to_fcp_swap((uint8_t *)&tsk->lun,
2370 sizeof(tsk->lun));
2374 static void
2375 qla2x00_els_dcmd_sp_free(void *data)
2377 srb_t *sp = data;
2378 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2380 kfree(sp->fcport);
2382 if (elsio->u.els_logo.els_logo_pyld)
2383 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2384 elsio->u.els_logo.els_logo_pyld,
2385 elsio->u.els_logo.els_logo_pyld_dma);
2387 del_timer(&elsio->timer);
2388 qla2x00_rel_sp(sp);
2391 static void
2392 qla2x00_els_dcmd_iocb_timeout(void *data)
2394 srb_t *sp = data;
2395 fc_port_t *fcport = sp->fcport;
2396 struct scsi_qla_host *vha = sp->vha;
2397 struct srb_iocb *lio = &sp->u.iocb_cmd;
2399 ql_dbg(ql_dbg_io, vha, 0x3069,
2400 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2401 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2402 fcport->d_id.b.al_pa);
2404 complete(&lio->u.els_logo.comp);
2407 static void
2408 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2410 srb_t *sp = ptr;
2411 fc_port_t *fcport = sp->fcport;
2412 struct srb_iocb *lio = &sp->u.iocb_cmd;
2413 struct scsi_qla_host *vha = sp->vha;
2415 ql_dbg(ql_dbg_io, vha, 0x3072,
2416 "%s hdl=%x, portid=%02x%02x%02x done\n",
2417 sp->name, sp->handle, fcport->d_id.b.domain,
2418 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2420 complete(&lio->u.els_logo.comp);
2424 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2425 port_id_t remote_did)
2427 srb_t *sp;
2428 fc_port_t *fcport = NULL;
2429 struct srb_iocb *elsio = NULL;
2430 struct qla_hw_data *ha = vha->hw;
2431 struct els_logo_payload logo_pyld;
2432 int rval = QLA_SUCCESS;
2434 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2435 if (!fcport) {
2436 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2437 return -ENOMEM;
2440 /* Alloc SRB structure */
2441 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2442 if (!sp) {
2443 kfree(fcport);
2444 ql_log(ql_log_info, vha, 0x70e6,
2445 "SRB allocation failed\n");
2446 return -ENOMEM;
2449 elsio = &sp->u.iocb_cmd;
2450 fcport->loop_id = 0xFFFF;
2451 fcport->d_id.b.domain = remote_did.b.domain;
2452 fcport->d_id.b.area = remote_did.b.area;
2453 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2455 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2456 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2458 sp->type = SRB_ELS_DCMD;
2459 sp->name = "ELS_DCMD";
2460 sp->fcport = fcport;
2461 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2462 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2463 sp->done = qla2x00_els_dcmd_sp_done;
2464 sp->free = qla2x00_els_dcmd_sp_free;
2466 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2467 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2468 GFP_KERNEL);
2470 if (!elsio->u.els_logo.els_logo_pyld) {
2471 sp->free(sp);
2472 return QLA_FUNCTION_FAILED;
2475 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2477 elsio->u.els_logo.els_cmd = els_opcode;
2478 logo_pyld.opcode = els_opcode;
2479 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2480 logo_pyld.s_id[1] = vha->d_id.b.area;
2481 logo_pyld.s_id[2] = vha->d_id.b.domain;
2482 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2483 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2485 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2486 sizeof(struct els_logo_payload));
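/*
 * The LOGO payload built above carries the local port id
 * (al_pa/area/domain, byte-swapped into FC wire order) followed by
 * the port WWPN, and is copied into the DMA-coherent buffer that the
 * ELS IOCB will point the firmware at.
 */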
2488 rval = qla2x00_start_sp(sp);
2489 if (rval != QLA_SUCCESS) {
2490 sp->free(sp);
2491 return QLA_FUNCTION_FAILED;
2494 ql_dbg(ql_dbg_io, vha, 0x3074,
2495 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2496 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2497 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2499 wait_for_completion(&elsio->u.els_logo.comp);
2501 sp->free(sp);
2502 return rval;
2505 static void
2506 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2508 scsi_qla_host_t *vha = sp->vha;
2509 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2510 uint32_t dsd_len = 24;
2512 els_iocb->entry_type = ELS_IOCB_TYPE;
2513 els_iocb->entry_count = 1;
2514 els_iocb->sys_define = 0;
2515 els_iocb->entry_status = 0;
2516 els_iocb->handle = sp->handle;
2517 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2518 els_iocb->tx_dsd_count = 1;
2519 els_iocb->vp_index = vha->vp_idx;
2520 els_iocb->sof_type = EST_SOFI3;
2521 els_iocb->rx_dsd_count = 0;
2522 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2524 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2525 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2526 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2527 els_iocb->s_id[0] = vha->d_id.b.al_pa;
2528 els_iocb->s_id[1] = vha->d_id.b.area;
2529 els_iocb->s_id[2] = vha->d_id.b.domain;
2530 els_iocb->control_flags = 0;
2532 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2533 els_iocb->tx_byte_count = sizeof(struct els_plogi_payload);
2534 els_iocb->tx_address[0] =
2535 cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2536 els_iocb->tx_address[1] =
2537 cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2538 els_iocb->tx_len = dsd_len;
2540 els_iocb->rx_dsd_count = 1;
2541 els_iocb->rx_byte_count = sizeof(struct els_plogi_payload);
2542 els_iocb->rx_address[0] =
2543 cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2544 els_iocb->rx_address[1] =
2545 cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2546 els_iocb->rx_len = dsd_len;
2547 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2548 "PLOGI ELS IOCB:\n");
2549 ql_dump_buffer(ql_log_info, vha, 0x0109,
2550 (uint8_t *)els_iocb, 0x70);
2551 } else {
2552 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2553 els_iocb->tx_address[0] =
2554 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2555 els_iocb->tx_address[1] =
2556 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2557 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2559 els_iocb->rx_byte_count = 0;
2560 els_iocb->rx_address[0] = 0;
2561 els_iocb->rx_address[1] = 0;
2562 els_iocb->rx_len = 0;
2565 sp->vha->qla_stats.control_requests++;
2568 static void
2569 qla2x00_els_dcmd2_sp_free(void *data)
2571 srb_t *sp = data;
2572 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2574 if (elsio->u.els_plogi.els_plogi_pyld)
2575 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2576 elsio->u.els_plogi.els_plogi_pyld,
2577 elsio->u.els_plogi.els_plogi_pyld_dma);
2579 if (elsio->u.els_plogi.els_resp_pyld)
2580 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2581 elsio->u.els_plogi.els_resp_pyld,
2582 elsio->u.els_plogi.els_resp_pyld_dma);
2584 del_timer(&elsio->timer);
2585 qla2x00_rel_sp(sp);
2588 static void
2589 qla2x00_els_dcmd2_iocb_timeout(void *data)
2591 srb_t *sp = data;
2592 fc_port_t *fcport = sp->fcport;
2593 struct scsi_qla_host *vha = sp->vha;
2594 struct qla_hw_data *ha = vha->hw;
2595 struct srb_iocb *lio = &sp->u.iocb_cmd;
2596 unsigned long flags = 0;
2597 int res;
2599 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2600 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2601 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2603 /* Abort the exchange */
2604 spin_lock_irqsave(&ha->hardware_lock, flags);
2605 res = ha->isp_ops->abort_command(sp);
2606 ql_dbg(ql_dbg_io, vha, 0x3070,
2607 "mbx abort_command %s\n",
2608 (res == QLA_SUCCESS) ? "successful" : "failed");
2609 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2611 complete(&lio->u.els_plogi.comp);
2614 static void
2615 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2617 srb_t *sp = ptr;
2618 fc_port_t *fcport = sp->fcport;
2619 struct srb_iocb *lio = &sp->u.iocb_cmd;
2620 struct scsi_qla_host *vha = sp->vha;
2622 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
2623 "%s ELS hdl=%x, portid=%06x done %8phC\n",
2624 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
2626 complete(&lio->u.els_plogi.comp);
2630 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2631 fc_port_t *fcport, port_id_t remote_did)
2633 srb_t *sp;
2634 struct srb_iocb *elsio = NULL;
2635 struct qla_hw_data *ha = vha->hw;
2636 int rval = QLA_SUCCESS;
2637 void *ptr, *resp_ptr;
2638 dma_addr_t ptr_dma;
2640 /* Alloc SRB structure */
2641 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2642 if (!sp) {
2643 ql_log(ql_log_info, vha, 0x70e6,
2644 "SRB allocation failed\n");
2645 return -ENOMEM;
2648 elsio = &sp->u.iocb_cmd;
2649 fcport->d_id.b.domain = remote_did.b.domain;
2650 fcport->d_id.b.area = remote_did.b.area;
2651 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2653 ql_dbg(ql_dbg_io, vha, 0x3073,
2654 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2656 sp->type = SRB_ELS_DCMD;
2657 sp->name = "ELS_DCMD";
2658 sp->fcport = fcport;
2659 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2660 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2661 sp->done = qla2x00_els_dcmd2_sp_done;
2662 sp->free = qla2x00_els_dcmd2_sp_free;
2664 ptr = elsio->u.els_plogi.els_plogi_pyld =
2665 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2666 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2667 ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
2669 if (!elsio->u.els_plogi.els_plogi_pyld) {
2670 rval = QLA_FUNCTION_FAILED;
2671 goto out;
2674 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2675 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2676 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2678 if (!elsio->u.els_plogi.els_resp_pyld) {
2679 rval = QLA_FUNCTION_FAILED;
2680 goto out;
2683 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2685 memset(ptr, 0, sizeof(struct els_plogi_payload));
2686 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2687 elsio->u.els_plogi.els_cmd = els_opcode;
2688 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2689 qla24xx_get_port_login_templ(vha, ptr_dma + 4,
2690 &elsio->u.els_plogi.els_plogi_pyld->data[0],
2691 sizeof(struct els_plogi_payload));
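/*
 * The body of the PLOGI payload is filled from the firmware's port
 * login template; it is read in at ptr_dma + 4 so the first word,
 * the ELS opcode set just above, is left untouched.
 */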
2693 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2694 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
2695 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2697 init_completion(&elsio->u.els_plogi.comp);
2698 rval = qla2x00_start_sp(sp);
2699 if (rval != QLA_SUCCESS) {
2700 rval = QLA_FUNCTION_FAILED;
2701 goto out;
2704 ql_dbg(ql_dbg_io, vha, 0x3074,
2705 "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
2706 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
2708 wait_for_completion(&elsio->u.els_plogi.comp);
2710 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2711 rval = QLA_FUNCTION_FAILED;
2713 out:
2714 sp->free(sp);
2715 return rval;
2718 static void
2719 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2721 struct bsg_job *bsg_job = sp->u.bsg_job;
2722 struct fc_bsg_request *bsg_request = bsg_job->request;
2724 els_iocb->entry_type = ELS_IOCB_TYPE;
2725 els_iocb->entry_count = 1;
2726 els_iocb->sys_define = 0;
2727 els_iocb->entry_status = 0;
2728 els_iocb->handle = sp->handle;
2729 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2730 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2731 els_iocb->vp_index = sp->vha->vp_idx;
2732 els_iocb->sof_type = EST_SOFI3;
2733 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2735 els_iocb->opcode =
2736 sp->type == SRB_ELS_CMD_RPT ?
2737 bsg_request->rqst_data.r_els.els_code :
2738 bsg_request->rqst_data.h_els.command_code;
2739 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2740 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2741 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2742 els_iocb->control_flags = 0;
2743 els_iocb->rx_byte_count =
2744 cpu_to_le32(bsg_job->reply_payload.payload_len);
2745 els_iocb->tx_byte_count =
2746 cpu_to_le32(bsg_job->request_payload.payload_len);
2748 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2749 (bsg_job->request_payload.sg_list)));
2750 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2751 (bsg_job->request_payload.sg_list)));
2752 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2753 (bsg_job->request_payload.sg_list));
2755 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2756 (bsg_job->reply_payload.sg_list)));
2757 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2758 (bsg_job->reply_payload.sg_list)));
2759 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2760 (bsg_job->reply_payload.sg_list));
2762 sp->vha->qla_stats.control_requests++;
2765 static void
2766 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2768 uint16_t avail_dsds;
2769 uint32_t *cur_dsd;
2770 struct scatterlist *sg;
2771 int index;
2772 uint16_t tot_dsds;
2773 scsi_qla_host_t *vha = sp->vha;
2774 struct qla_hw_data *ha = vha->hw;
2775 struct bsg_job *bsg_job = sp->u.bsg_job;
2776 int loop_iteration = 0;
2777 int entry_count = 1;
2779 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2780 ct_iocb->entry_type = CT_IOCB_TYPE;
2781 ct_iocb->entry_status = 0;
2782 ct_iocb->handle1 = sp->handle;
2783 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2784 ct_iocb->status = cpu_to_le16(0);
2785 ct_iocb->control_flags = cpu_to_le16(0);
2786 ct_iocb->timeout = 0;
2787 ct_iocb->cmd_dsd_count =
2788 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2789 ct_iocb->total_dsd_count =
2790 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2791 ct_iocb->req_bytecount =
2792 cpu_to_le32(bsg_job->request_payload.payload_len);
2793 ct_iocb->rsp_bytecount =
2794 cpu_to_le32(bsg_job->reply_payload.payload_len);
2796 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2797 (bsg_job->request_payload.sg_list)));
2798 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2799 (bsg_job->request_payload.sg_list)));
2800 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2802 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2803 (bsg_job->reply_payload.sg_list)));
2804 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2805 (bsg_job->reply_payload.sg_list)));
2806 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2808 avail_dsds = 1;
2809 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2810 index = 0;
2811 tot_dsds = bsg_job->reply_payload.sg_cnt;
2813 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2814 dma_addr_t sle_dma;
2815 cont_a64_entry_t *cont_pkt;
2817 /* Allocate additional continuation packets? */
2818 if (avail_dsds == 0) {
2820 * Five DSDs are available in the Cont.
2821 * Type 1 IOCB.
2823 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2824 vha->hw->req_q_map[0]);
2825 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2826 avail_dsds = 5;
2827 entry_count++;
2830 sle_dma = sg_dma_address(sg);
2831 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2832 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2833 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2834 loop_iteration++;
2835 avail_dsds--;
2837 ct_iocb->entry_count = entry_count;
2839 sp->vha->qla_stats.control_requests++;
2842 static void
2843 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2845 uint16_t avail_dsds;
2846 uint32_t *cur_dsd;
2847 struct scatterlist *sg;
2848 int index;
2849 uint16_t cmd_dsds, rsp_dsds;
2850 scsi_qla_host_t *vha = sp->vha;
2851 struct qla_hw_data *ha = vha->hw;
2852 struct bsg_job *bsg_job = sp->u.bsg_job;
2853 int entry_count = 1;
2854 cont_a64_entry_t *cont_pkt = NULL;
2856 ct_iocb->entry_type = CT_IOCB_TYPE;
2857 ct_iocb->entry_status = 0;
2858 ct_iocb->sys_define = 0;
2859 ct_iocb->handle = sp->handle;
2861 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2862 ct_iocb->vp_index = sp->vha->vp_idx;
2863 ct_iocb->comp_status = cpu_to_le16(0);
2865 cmd_dsds = bsg_job->request_payload.sg_cnt;
2866 rsp_dsds = bsg_job->reply_payload.sg_cnt;
2868 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
2869 ct_iocb->timeout = 0;
2870 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
2871 ct_iocb->cmd_byte_count =
2872 cpu_to_le32(bsg_job->request_payload.payload_len);
2874 avail_dsds = 2;
2875 cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
2876 index = 0;
2878 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
2879 dma_addr_t sle_dma;
2881 /* Allocate additional continuation packets? */
2882 if (avail_dsds == 0) {
2884 * Five DSDs are available in the Cont.
2885 * Type 1 IOCB.
2887 cont_pkt = qla2x00_prep_cont_type1_iocb(
2888 vha, ha->req_q_map[0]);
2889 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2890 avail_dsds = 5;
2891 entry_count++;
2894 sle_dma = sg_dma_address(sg);
2895 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2896 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2897 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2898 avail_dsds--;
2901 index = 0;
2903 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2904 dma_addr_t sle_dma;
2906 /* Allocate additional continuation packets? */
2907 if (avail_dsds == 0) {
2909 * Five DSDs are available in the Cont.
2910 * Type 1 IOCB.
2912 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2913 ha->req_q_map[0]);
2914 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2915 avail_dsds = 5;
2916 entry_count++;
2919 sle_dma = sg_dma_address(sg);
2920 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2921 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2922 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2923 avail_dsds--;
2925 ct_iocb->entry_count = entry_count;
2929 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2930 * @sp: command to send to the ISP
2932 * Returns non-zero if a failure occurred, else zero.
2935 qla82xx_start_scsi(srb_t *sp)
2937 int nseg;
2938 unsigned long flags;
2939 struct scsi_cmnd *cmd;
2940 uint32_t *clr_ptr;
2941 uint32_t index;
2942 uint32_t handle;
2943 uint16_t cnt;
2944 uint16_t req_cnt;
2945 uint16_t tot_dsds;
2946 struct device_reg_82xx __iomem *reg;
2947 uint32_t dbval;
2948 uint32_t *fcp_dl;
2949 uint8_t additional_cdb_len;
2950 struct ct6_dsd *ctx;
2951 struct scsi_qla_host *vha = sp->vha;
2952 struct qla_hw_data *ha = vha->hw;
2953 struct req_que *req = NULL;
2954 struct rsp_que *rsp = NULL;
2956 /* Setup device pointers. */
2957 reg = &ha->iobase->isp82;
2958 cmd = GET_CMD_SP(sp);
2959 req = vha->req;
2960 rsp = ha->rsp_q_map[0];
2962 /* So we know we haven't pci_map'ed anything yet */
2963 tot_dsds = 0;
2965 dbval = 0x04 | (ha->portnum << 5);
2967 /* Send marker if required */
2968 if (vha->marker_needed != 0) {
2969 if (qla2x00_marker(vha, req,
2970 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2971 ql_log(ql_log_warn, vha, 0x300c,
2972 "qla2x00_marker failed for cmd=%p.\n", cmd);
2973 return QLA_FUNCTION_FAILED;
2975 vha->marker_needed = 0;
2978 /* Acquire ring specific lock */
2979 spin_lock_irqsave(&ha->hardware_lock, flags);
2981 /* Check for room in outstanding command list. */
2982 handle = req->current_outstanding_cmd;
2983 for (index = 1; index < req->num_outstanding_cmds; index++) {
2984 handle++;
2985 if (handle == req->num_outstanding_cmds)
2986 handle = 1;
2987 if (!req->outstanding_cmds[handle])
2988 break;
2990 if (index == req->num_outstanding_cmds)
2991 goto queuing_error;
2993 /* Map the sg table so we have an accurate count of sg entries needed */
2994 if (scsi_sg_count(cmd)) {
2995 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2996 scsi_sg_count(cmd), cmd->sc_data_direction);
2997 if (unlikely(!nseg))
2998 goto queuing_error;
2999 } else
3000 nseg = 0;
3002 tot_dsds = nseg;
3004 if (tot_dsds > ql2xshiftctondsd) {
3005 struct cmd_type_6 *cmd_pkt;
3006 uint16_t more_dsd_lists = 0;
3007 struct dsd_dma *dsd_ptr;
3008 uint16_t i;
3010 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3011 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3012 ql_dbg(ql_dbg_io, vha, 0x300d,
3013 "Num of DSD list %d is than %d for cmd=%p.\n",
3014 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3015 cmd);
3016 goto queuing_error;
3019 if (more_dsd_lists <= ha->gbl_dsd_avail)
3020 goto sufficient_dsds;
3021 else
3022 more_dsd_lists -= ha->gbl_dsd_avail;
3024 for (i = 0; i < more_dsd_lists; i++) {
3025 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3026 if (!dsd_ptr) {
3027 ql_log(ql_log_fatal, vha, 0x300e,
3028 "Failed to allocate memory for dsd_dma "
3029 "for cmd=%p.\n", cmd);
3030 goto queuing_error;
3033 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3034 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3035 if (!dsd_ptr->dsd_addr) {
3036 kfree(dsd_ptr);
3037 ql_log(ql_log_fatal, vha, 0x300f,
3038 "Failed to allocate memory for dsd_addr "
3039 "for cmd=%p.\n", cmd);
3040 goto queuing_error;
3042 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3043 ha->gbl_dsd_avail++;
3046 sufficient_dsds:
3047 req_cnt = 1;
3049 if (req->cnt < (req_cnt + 2)) {
3050 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3051 &reg->req_q_out[0]);
3052 if (req->ring_index < cnt)
3053 req->cnt = cnt - req->ring_index;
3054 else
3055 req->cnt = req->length -
3056 (req->ring_index - cnt);
3057 if (req->cnt < (req_cnt + 2))
3058 goto queuing_error;
3061 ctx = sp->u.scmd.ctx =
3062 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3063 if (!ctx) {
3064 ql_log(ql_log_fatal, vha, 0x3010,
3065 "Failed to allocate ctx for cmd=%p.\n", cmd);
3066 goto queuing_error;
3069 memset(ctx, 0, sizeof(struct ct6_dsd));
3070 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
3071 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3072 if (!ctx->fcp_cmnd) {
3073 ql_log(ql_log_fatal, vha, 0x3011,
3074 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3075 goto queuing_error;
3078 /* Initialize the DSD list and dma handle */
3079 INIT_LIST_HEAD(&ctx->dsd_list);
3080 ctx->dsd_use_cnt = 0;
3082 if (cmd->cmd_len > 16) {
3083 additional_cdb_len = cmd->cmd_len - 16;
3084 if ((cmd->cmd_len % 4) != 0) {
3085 /* SCSI command bigger than 16 bytes must be
3086 * multiple of 4
3088 ql_log(ql_log_warn, vha, 0x3012,
3089 "scsi cmd len %d not multiple of 4 "
3090 "for cmd=%p.\n", cmd->cmd_len, cmd);
3091 goto queuing_error_fcp_cmnd;
3093 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3094 } else {
3095 additional_cdb_len = 0;
3096 ctx->fcp_cmnd_len = 12 + 16 + 4;
3099 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3100 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3102 /* Zero out remaining portion of packet. */
3103 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3104 clr_ptr = (uint32_t *)cmd_pkt + 2;
3105 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3106 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3108 /* Set NPORT-ID and LUN number*/
3109 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3110 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3111 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3112 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3113 cmd_pkt->vp_index = sp->vha->vp_idx;
3115 /* Build IOCB segments */
3116 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3117 goto queuing_error_fcp_cmnd;
3119 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3120 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3122 /* build FCP_CMND IU */
3123 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
3124 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3125 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3127 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3128 ctx->fcp_cmnd->additional_cdb_len |= 1;
3129 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3130 ctx->fcp_cmnd->additional_cdb_len |= 2;
3132 /* Populate the FCP_PRIO. */
3133 if (ha->flags.fcp_prio_enabled)
3134 ctx->fcp_cmnd->task_attribute |=
3135 sp->fcport->fcp_prio << 3;
3137 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3139 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3140 additional_cdb_len);
3141 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
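/*
 * FCP_DL (the total data transfer length) is placed immediately
 * after the CDB in the FCP_CMND IU: 12 header bytes, the 16-byte CDB
 * plus additional_cdb_len, then the 4-byte FCP_DL, which matches the
 * fcp_cmnd_len computed above.
 */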
3143 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3144 cmd_pkt->fcp_cmnd_dseg_address[0] =
3145 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3146 cmd_pkt->fcp_cmnd_dseg_address[1] =
3147 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3149 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3150 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3151 /* Set total data segment count. */
3152 cmd_pkt->entry_count = (uint8_t)req_cnt;
3153 /* Specify response queue number where
3154 * completion should happen
3156 cmd_pkt->entry_status = (uint8_t) rsp->id;
3157 } else {
3158 struct cmd_type_7 *cmd_pkt;
3159 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3160 if (req->cnt < (req_cnt + 2)) {
3161 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3162 &reg->req_q_out[0]);
3163 if (req->ring_index < cnt)
3164 req->cnt = cnt - req->ring_index;
3165 else
3166 req->cnt = req->length -
3167 (req->ring_index - cnt);
3169 if (req->cnt < (req_cnt + 2))
3170 goto queuing_error;
3172 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3173 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3175 /* Zero out remaining portion of packet. */
3176 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3177 clr_ptr = (uint32_t *)cmd_pkt + 2;
3178 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3179 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3181 /* Set NPORT-ID and LUN number*/
3182 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3183 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3184 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3185 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3186 cmd_pkt->vp_index = sp->vha->vp_idx;
3188 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3189 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3190 sizeof(cmd_pkt->lun));
3192 /* Populate the FCP_PRIO. */
3193 if (ha->flags.fcp_prio_enabled)
3194 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3196 /* Load SCSI command packet. */
3197 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3198 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3200 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3202 /* Build IOCB segments */
3203 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3205 /* Set total data segment count. */
3206 cmd_pkt->entry_count = (uint8_t)req_cnt;
3207 /* Specify response queue number where
3208 * completion should happen.
3210 cmd_pkt->entry_status = (uint8_t) rsp->id;
3213 /* Build command packet. */
3214 req->current_outstanding_cmd = handle;
3215 req->outstanding_cmds[handle] = sp;
3216 sp->handle = handle;
3217 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3218 req->cnt -= req_cnt;
3219 wmb();
3221 /* Adjust ring index. */
3222 req->ring_index++;
3223 if (req->ring_index == req->length) {
3224 req->ring_index = 0;
3225 req->ring_ptr = req->ring;
3226 } else
3227 req->ring_ptr++;
3229 sp->flags |= SRB_DMA_VALID;
3231 /* Set chip new ring index. */
3232 /* write, read and verify logic */
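/*
 * When ql2xdbwr is not set, the ISP82xx doorbell value is written
 * directly and the read-back location is then polled until it
 * reflects the new value, re-issuing the write as needed so the
 * update is known to have reached the hardware.
 */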
3233 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3234 if (ql2xdbwr)
3235 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3236 else {
3237 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3238 wmb();
3239 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3240 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3241 wmb();
3245 /* Manage unprocessed RIO/ZIO commands in response queue. */
3246 if (vha->flags.process_response_queue &&
3247 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3248 qla24xx_process_response_queue(vha, rsp);
3250 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3251 return QLA_SUCCESS;
3253 queuing_error_fcp_cmnd:
3254 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3255 queuing_error:
3256 if (tot_dsds)
3257 scsi_dma_unmap(cmd);
3259 if (sp->u.scmd.ctx) {
3260 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3261 sp->u.scmd.ctx = NULL;
3263 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3265 return QLA_FUNCTION_FAILED;
3268 static void
3269 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3271 struct srb_iocb *aio = &sp->u.iocb_cmd;
3272 scsi_qla_host_t *vha = sp->vha;
3273 struct req_que *req = vha->req;
3275 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3276 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3277 abt_iocb->entry_count = 1;
3278 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3279 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3280 abt_iocb->handle_to_abort =
3281 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3282 aio->u.abt.cmd_hndl));
3283 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3284 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3285 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3286 abt_iocb->vp_index = vha->vp_idx;
3287 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3288 /* Send the command to the firmware */
3289 wmb();
3292 static void
3293 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3295 int i, sz;
3297 mbx->entry_type = MBX_IOCB_TYPE;
3298 mbx->handle = sp->handle;
3299 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3301 for (i = 0; i < sz; i++)
3302 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3305 static void
3306 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3308 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3309 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3310 ct_pkt->handle = sp->handle;
3313 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3314 struct nack_to_isp *nack)
3316 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3318 nack->entry_type = NOTIFY_ACK_TYPE;
3319 nack->entry_count = 1;
3320 nack->ox_id = ntfy->ox_id;
3322 nack->u.isp24.handle = sp->handle;
3323 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3324 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3325 nack->u.isp24.flags = ntfy->u.isp24.flags &
3326 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3328 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3329 nack->u.isp24.status = ntfy->u.isp24.status;
3330 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3331 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3332 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3333 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3334 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3335 nack->u.isp24.srr_flags = 0;
3336 nack->u.isp24.srr_reject_code = 0;
3337 nack->u.isp24.srr_reject_code_expl = 0;
3338 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3342 * Build NVME LS request
3344 static int
3345 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3347 struct srb_iocb *nvme;
3348 int rval = QLA_SUCCESS;
3350 nvme = &sp->u.iocb_cmd;
3351 cmd_pkt->entry_type = PT_LS4_REQUEST;
3352 cmd_pkt->entry_count = 1;
3353 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3355 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3356 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3357 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3359 cmd_pkt->tx_dseg_count = 1;
3360 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3361 cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3362 cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3363 cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3365 cmd_pkt->rx_dseg_count = 1;
3366 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3367 cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
3368 cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3369 cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3371 return rval;
3374 static void
3375 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3377 int map, pos;
3379 vce->entry_type = VP_CTRL_IOCB_TYPE;
3380 vce->handle = sp->handle;
3381 vce->entry_count = 1;
3382 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3383 vce->vp_count = cpu_to_le16(1);
3386 * the index map in firmware starts at 1, so decrement the index;
3387 * this is ok as index 0 is never used
3389 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3390 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3391 vce->vp_idx_map[map] |= 1 << pos;
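/*
 * vp_idx_map is a bitmap of VP indices: (vp_index - 1) / 8 selects
 * the byte and (vp_index - 1) & 7 the bit within it, since the
 * firmware numbers VPs starting at 1.
 */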
3394 static void
3395 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3397 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3398 logio->control_flags =
3399 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3401 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3402 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3403 logio->port_id[1] = sp->fcport->d_id.b.area;
3404 logio->port_id[2] = sp->fcport->d_id.b.domain;
3405 logio->vp_index = sp->fcport->vha->vp_idx;
3409 qla2x00_start_sp(srb_t *sp)
3411 int rval;
3412 scsi_qla_host_t *vha = sp->vha;
3413 struct qla_hw_data *ha = vha->hw;
3414 void *pkt;
3415 unsigned long flags;
3417 rval = QLA_FUNCTION_FAILED;
3418 spin_lock_irqsave(&ha->hardware_lock, flags);
3419 pkt = qla2x00_alloc_iocbs(vha, sp);
3420 if (!pkt) {
3421 ql_log(ql_log_warn, vha, 0x700c,
3422 "qla2x00_alloc_iocbs failed.\n");
3423 goto done;
3426 rval = QLA_SUCCESS;
3427 switch (sp->type) {
3428 case SRB_LOGIN_CMD:
3429 IS_FWI2_CAPABLE(ha) ?
3430 qla24xx_login_iocb(sp, pkt) :
3431 qla2x00_login_iocb(sp, pkt);
3432 break;
3433 case SRB_PRLI_CMD:
3434 qla24xx_prli_iocb(sp, pkt);
3435 break;
3436 case SRB_LOGOUT_CMD:
3437 IS_FWI2_CAPABLE(ha) ?
3438 qla24xx_logout_iocb(sp, pkt) :
3439 qla2x00_logout_iocb(sp, pkt);
3440 break;
3441 case SRB_ELS_CMD_RPT:
3442 case SRB_ELS_CMD_HST:
3443 qla24xx_els_iocb(sp, pkt);
3444 break;
3445 case SRB_CT_CMD:
3446 IS_FWI2_CAPABLE(ha) ?
3447 qla24xx_ct_iocb(sp, pkt) :
3448 qla2x00_ct_iocb(sp, pkt);
3449 break;
3450 case SRB_ADISC_CMD:
3451 IS_FWI2_CAPABLE(ha) ?
3452 qla24xx_adisc_iocb(sp, pkt) :
3453 qla2x00_adisc_iocb(sp, pkt);
3454 break;
3455 case SRB_TM_CMD:
3456 IS_QLAFX00(ha) ?
3457 qlafx00_tm_iocb(sp, pkt) :
3458 qla24xx_tm_iocb(sp, pkt);
3459 break;
3460 case SRB_FXIOCB_DCMD:
3461 case SRB_FXIOCB_BCMD:
3462 qlafx00_fxdisc_iocb(sp, pkt);
3463 break;
3464 case SRB_NVME_LS:
3465 qla_nvme_ls(sp, pkt);
3466 break;
3467 case SRB_ABT_CMD:
3468 IS_QLAFX00(ha) ?
3469 qlafx00_abort_iocb(sp, pkt) :
3470 qla24xx_abort_iocb(sp, pkt);
3471 break;
3472 case SRB_ELS_DCMD:
3473 qla24xx_els_logo_iocb(sp, pkt);
3474 break;
3475 case SRB_CT_PTHRU_CMD:
3476 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3477 break;
3478 case SRB_MB_IOCB:
3479 qla2x00_mb_iocb(sp, pkt);
3480 break;
3481 case SRB_NACK_PLOGI:
3482 case SRB_NACK_PRLI:
3483 case SRB_NACK_LOGO:
3484 qla2x00_send_notify_ack_iocb(sp, pkt);
3485 break;
3486 case SRB_CTRL_VP:
3487 qla25xx_ctrlvp_iocb(sp, pkt);
3488 break;
3489 case SRB_PRLO_CMD:
3490 qla24xx_prlo_iocb(sp, pkt);
3491 break;
3492 default:
3493 break;
3496 wmb();
3497 qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3498 done:
3499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3500 return rval;
3503 static void
3504 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3505 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3507 uint16_t avail_dsds;
3508 uint32_t *cur_dsd;
3509 uint32_t req_data_len = 0;
3510 uint32_t rsp_data_len = 0;
3511 struct scatterlist *sg;
3512 int index;
3513 int entry_count = 1;
3514 struct bsg_job *bsg_job = sp->u.bsg_job;
3516 /*Update entry type to indicate bidir command */
3517 *((uint32_t *)(&cmd_pkt->entry_type)) =
3518 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3520 /* Set the transfer direction; in this case both flags are set.
3521 * Also set the BD_WRAP_BACK flag; the firmware will take care of
3522 * assigning DID=SID for outgoing pkts.
3524 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3525 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3526 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3527 BD_WRAP_BACK);
3529 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3530 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3531 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3532 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3534 vha->bidi_stats.transfer_bytes += req_data_len;
3535 vha->bidi_stats.io_count++;
3537 vha->qla_stats.output_bytes += req_data_len;
3538 vha->qla_stats.output_requests++;
3540 /* Only one dsd is available for bidirectional IOCB, remaining dsds
3541 * are bundled in continuation iocb
3543 avail_dsds = 1;
3544 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3546 index = 0;
3548 for_each_sg(bsg_job->request_payload.sg_list, sg,
3549 bsg_job->request_payload.sg_cnt, index) {
3550 dma_addr_t sle_dma;
3551 cont_a64_entry_t *cont_pkt;
3553 /* Allocate additional continuation packets */
3554 if (avail_dsds == 0) {
3555 /* Continuation type 1 IOCB can accommodate
3556 * 5 DSDS
3558 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3559 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3560 avail_dsds = 5;
3561 entry_count++;
3563 sle_dma = sg_dma_address(sg);
3564 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3565 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3566 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3567 avail_dsds--;
3569 /* For a read request the DSDs always go to the continuation IOCB
3570 * and follow the write DSDs. If there is room on the current IOCB
3571 * they are added to that IOCB, else a new continuation IOCB is
3572 * allocated.
3574 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3575 bsg_job->reply_payload.sg_cnt, index) {
3576 dma_addr_t sle_dma;
3577 cont_a64_entry_t *cont_pkt;
3579 /* Allocate additional continuation packets */
3580 if (avail_dsds == 0) {
3581 /* Continuation type 1 IOCB can accommodate
3582 * 5 DSDS
3584 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3585 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3586 avail_dsds = 5;
3587 entry_count++;
3589 sle_dma = sg_dma_address(sg);
3590 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3591 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3592 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3593 avail_dsds--;
3595 /* This value should be the same as the number of IOCBs required for this cmd */
3596 cmd_pkt->entry_count = entry_count;
3600 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3603 struct qla_hw_data *ha = vha->hw;
3604 unsigned long flags;
3605 uint32_t handle;
3606 uint32_t index;
3607 uint16_t req_cnt;
3608 uint16_t cnt;
3609 uint32_t *clr_ptr;
3610 struct cmd_bidir *cmd_pkt = NULL;
3611 struct rsp_que *rsp;
3612 struct req_que *req;
3613 int rval = EXT_STATUS_OK;
3615 rval = QLA_SUCCESS;
3617 rsp = ha->rsp_q_map[0];
3618 req = vha->req;
3620 /* Send marker if required */
3621 if (vha->marker_needed != 0) {
3622 if (qla2x00_marker(vha, req,
3623 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3624 return EXT_STATUS_MAILBOX;
3625 vha->marker_needed = 0;
3628 /* Acquire ring specific lock */
3629 spin_lock_irqsave(&ha->hardware_lock, flags);
3631 /* Check for room in outstanding command list. */
3632 handle = req->current_outstanding_cmd;
3633 for (index = 1; index < req->num_outstanding_cmds; index++) {
3634 handle++;
3635 if (handle == req->num_outstanding_cmds)
3636 handle = 1;
3637 if (!req->outstanding_cmds[handle])
3638 break;
3641 if (index == req->num_outstanding_cmds) {
3642 rval = EXT_STATUS_BUSY;
3643 goto queuing_error;
3646 /* Calculate number of IOCB required */
3647 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3649 /* Check for room on request queue. */
3650 if (req->cnt < req_cnt + 2) {
3651 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3652 RD_REG_DWORD_RELAXED(req->req_q_out);
3653 if (req->ring_index < cnt)
3654 req->cnt = cnt - req->ring_index;
3655 else
3656 req->cnt = req->length -
3657 (req->ring_index - cnt);
3659 if (req->cnt < req_cnt + 2) {
3660 rval = EXT_STATUS_BUSY;
3661 goto queuing_error;
3664 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3665 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3667 /* Zero out remaining portion of packet. */
3668 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3669 clr_ptr = (uint32_t *)cmd_pkt + 2;
3670 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3672 /* Set NPORT-ID (of vha)*/
3673 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3674 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3675 cmd_pkt->port_id[1] = vha->d_id.b.area;
3676 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3678 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3679 cmd_pkt->entry_status = (uint8_t) rsp->id;
3680 /* Build command packet. */
3681 req->current_outstanding_cmd = handle;
3682 req->outstanding_cmds[handle] = sp;
3683 sp->handle = handle;
3684 req->cnt -= req_cnt;
3686 /* Send the command to the firmware */
3687 wmb();
3688 qla2x00_start_iocbs(vha, req);
3689 queuing_error:
3690 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3691 return rval;