/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
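/*
 * Worked example for the two helpers above: with dsds == 24 on a
 * 64-bit-capable ISP, the command IOCB holds 2 DSDs and each
 * Continuation Type 1 IOCB holds 5 more, so 1 + (24 - 2) / 5 = 5 IOCBs
 * plus one for the remainder of 2, i.e. 6 entries in total.
 */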
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
	    CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}
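/*
 * Both prep helpers above simply hand back the next entry on the
 * request ring, stamped with the right continuation type; the caller is
 * expected to have verified ring space and to hold the ring lock.
 */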
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
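/*
 * In short: STRIP operations ask the firmware to remove protection data
 * (PO_MODE_DIF_REMOVE), INSERT operations ask it to generate protection
 * data (PO_MODE_DIF_INSERT), and PASS operations forward it unchanged,
 * using an IP-checksum guard (PO_MODE_DIF_TCP_CKSUM) when the host
 * advertises SHOST_DIX_GUARD_IP.
 */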
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
	cur_dsd = cmd_pkt->dsd64;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}
/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}

	return 0;
}
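/*
 * Handle 0 is never returned as a valid handle: the scan above wraps
 * from the top of the outstanding-command table back to 1, so 0
 * unambiguously means "table full", and handles are reused round-robin.
 */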
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
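	/*
	 * Ring space is recomputed from the chip's OUT pointer only when
	 * the cached count looks too small; the "+ 2" appears to keep a
	 * little slack so the ring never fills completely.
	 */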
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
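	/*
	 * tot_dsds is nonzero only after dma_map_sg() succeeded, so it
	 * doubles as the "needs unmapping" flag on this error path.
	 */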
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = make_handle(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
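/*
 * Markers ask the firmware to (re)synchronize command state for a LUN,
 * a target, or everything (MK_SYNC_ALL), typically after a reset.
 * __qla2x00_marker() relies on the caller holding the qpair lock;
 * qla2x00_marker() below is the locking wrapper.
 */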
int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->u.scmd.ct6_ctx;

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
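/*
 * Example, assuming QLA_DSDS_PER_IOCB keeps its usual value of 37: a
 * command needing 80 data segment descriptors takes 80 / 37 = 2 full
 * DSD lists plus one more for the remaining 6, i.e. 3 lists.
 */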
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};
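/*
 * In the masks above, a 0xff byte asks the firmware to validate (or
 * replace) the corresponding tag byte, while 0x00 makes it ignore it;
 * Type 3 below uses all-zero ref-tag masks to disable ref-tag checking.
 */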
/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
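/*
 * Each call above emits at most one DMA chunk and never crosses a
 * protection interval: e.g. with 512-byte blocks, a 700-byte SG element
 * yields a 512-byte chunk (*partial == 0) and then a 188-byte one
 * (*partial == 1) that is completed from the start of the next element.
 */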
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	struct dsd64 *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}
	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
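			/*
			 * Each struct dsd64 is 12 bytes (64-bit address +
			 * 32-bit length); the "+ 1" below reserves room for
			 * the entry that links to or terminates the list.
			 */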
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sle_dma_len);
		cur_dsd++;
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	struct dsd64 *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	int i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.crc_ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
		    __func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			u64 sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						&dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_ptr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_needed = 1;
					ldma_sg_len = 0;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
			    DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			put_unaligned_le64(dif_dsd->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(sglen);
			cur_dsd++;
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
		    difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			append_dsd64(&cur_dsd, sg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}
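/*
 * Note on the local-DMA ("bundling") path above: it is taken when
 * ql2xdifbundlinginternalbuffers is set for a write, or when a
 * protection SGE would flip the upper 32 address bits; protection data
 * is then copied into driver-allocated pool buffers rather than being
 * mapped in place, which is what the dif_bundle_* counters track.
 */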
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	struct dsd64 *cur_dsd;
	uint32_t *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.crc_ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
	    &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
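	/*
	 * FCP_DL (the total data length expected on the wire) lives in the
	 * four bytes immediately following the CDB in the FCP_CMND payload
	 * and, like the rest of the FCP header, is big-endian.
	 */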
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}
2037 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2038 * @sp: command to send to the ISP
2040 * Returns non-zero if a failure occurred, else zero.
2043 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2045 int nseg;
2046 unsigned long flags;
2047 uint32_t *clr_ptr;
2048 uint32_t handle;
2049 uint16_t cnt;
2050 uint16_t req_cnt = 0;
2051 uint16_t tot_dsds;
2052 uint16_t tot_prot_dsds;
2053 uint16_t fw_prot_opts = 0;
2054 struct req_que *req = NULL;
2055 struct rsp_que *rsp = NULL;
2056 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2057 struct scsi_qla_host *vha = sp->fcport->vha;
2058 struct qla_hw_data *ha = vha->hw;
2059 struct cmd_type_crc_2 *cmd_pkt;
2060 uint32_t status = 0;
2061 struct qla_qpair *qpair = sp->qpair;
2063 #define QDSS_GOT_Q_SPACE BIT_0
2065 /* Check for host side state */
2066 if (!qpair->online) {
2067 cmd->result = DID_NO_CONNECT << 16;
2068 return QLA_INTERFACE_ERROR;
2071 if (!qpair->difdix_supported &&
2072 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2073 cmd->result = DID_NO_CONNECT << 16;
2074 return QLA_INTERFACE_ERROR;
2077 /* Only process protection or >16 cdb in this routine */
2078 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2079 if (cmd->cmd_len <= 16)
2080 return qla2xxx_start_scsi_mq(sp);
2083 spin_lock_irqsave(&qpair->qp_lock, flags);
2085 /* Setup qpair pointers */
2086 rsp = qpair->rsp;
2087 req = qpair->req;
2089 /* So we know we haven't pci_map'ed anything yet */
2090 tot_dsds = 0;
2092 /* Send marker if required */
2093 if (vha->marker_needed != 0) {
2094 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2095 QLA_SUCCESS) {
2096 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2097 return QLA_FUNCTION_FAILED;
2099 vha->marker_needed = 0;
2102 handle = qla2xxx_get_next_handle(req);
2103 if (handle == 0)
2104 goto queuing_error;
2106 /* Compute number of required data segments */
2107 /* Map the sg table so we have an accurate count of sg entries needed */
2108 if (scsi_sg_count(cmd)) {
2109 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2110 scsi_sg_count(cmd), cmd->sc_data_direction);
2111 if (unlikely(!nseg))
2112 goto queuing_error;
2113 else
2114 sp->flags |= SRB_DMA_VALID;
2116 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2117 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2118 struct qla2_sgx sgx;
2119 uint32_t partial;
2121 memset(&sgx, 0, sizeof(struct qla2_sgx));
2122 sgx.tot_bytes = scsi_bufflen(cmd);
2123 sgx.cur_sg = scsi_sglist(cmd);
2124 sgx.sp = sp;
2126 nseg = 0;
2127 while (qla24xx_get_one_block_sg(
2128 cmd->device->sector_size, &sgx, &partial))
2129 nseg++;
2131 } else
2132 nseg = 0;
2134 /* number of required data segments */
2135 tot_dsds = nseg;
2137 /* Compute number of required protection segments */
2138 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2139 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2140 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2141 if (unlikely(!nseg))
2142 goto queuing_error;
2143 else
2144 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2146 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2147 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2148 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2150 } else {
2151 nseg = 0;
2154 req_cnt = 1;
2155 /* Total Data and protection sg segment(s) */
2156 tot_prot_dsds = nseg;
2157 tot_dsds += nseg;
2158 if (req->cnt < (req_cnt + 2)) {
2159 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2160 RD_REG_DWORD_RELAXED(req->req_q_out);
2161 if (req->ring_index < cnt)
2162 req->cnt = cnt - req->ring_index;
2163 else
2164 req->cnt = req->length -
2165 (req->ring_index - cnt);
2166 if (req->cnt < (req_cnt + 2))
2167 goto queuing_error;
2170 status |= QDSS_GOT_Q_SPACE;
2172 /* Build header part of command packet (excluding the OPCODE). */
2173 req->current_outstanding_cmd = handle;
2174 req->outstanding_cmds[handle] = sp;
2175 sp->handle = handle;
2176 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2177 req->cnt -= req_cnt;
2179 /* Fill-in common area */
2180 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2181 cmd_pkt->handle = make_handle(req->id, handle);
2183 clr_ptr = (uint32_t *)cmd_pkt + 2;
2184 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2186 /* Set NPORT-ID and LUN number */
2187 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2188 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2189 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2190 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2192 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2193 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2195 /* Total Data and protection segment(s) */
2196 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2198 /* Build IOCB segments and adjust for data protection segments */
2199 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2200 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2201 QLA_SUCCESS)
2202 goto queuing_error;
2204 cmd_pkt->entry_count = (uint8_t)req_cnt;
2205 cmd_pkt->timeout = cpu_to_le16(0);
2206 wmb();
2208 /* Adjust ring index. */
2209 req->ring_index++;
2210 if (req->ring_index == req->length) {
2211 req->ring_index = 0;
2212 req->ring_ptr = req->ring;
2213 } else
2214 req->ring_ptr++;
2216 /* Set chip new ring index. */
2217 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2219 /* Manage unprocessed RIO/ZIO commands in response queue. */
2220 if (vha->flags.process_response_queue &&
2221 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2222 qla24xx_process_response_queue(vha, rsp);
2224 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2226 return QLA_SUCCESS;
2228 queuing_error:
2229 if (status & QDSS_GOT_Q_SPACE) {
2230 req->outstanding_cmds[handle] = NULL;
2231 req->cnt += req_cnt;
2233 /* Cleanup will be performed by the caller (queuecommand) */
2235 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2236 return QLA_FUNCTION_FAILED;
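/*
 * The room check above recomputes req->cnt from the consumer ("out")
 * index that the ISP exposes, via the shadow register or the req_q_out
 * register. In isolation the arithmetic is the classic circular-buffer
 * free-span calculation; a sketch with hypothetical toy names, not the
 * driver API:
 */
static unsigned int toy_ring_free_slots(unsigned int in, unsigned int out,
					unsigned int length)
{
	/*
	 * Producer behind consumer: the free span is the direct gap.
	 * Producer at or ahead of consumer: the free span is everything
	 * the producer has not yet wrapped around to.
	 */
	if (in < out)
		return out - in;
	return length - (in - out);
}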
2239 /* Generic Control-SRB manipulation functions. */
2241 /* hardware_lock assumed to be held. */
2243 void *
2244 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2246 scsi_qla_host_t *vha = qpair->vha;
2247 struct qla_hw_data *ha = vha->hw;
2248 struct req_que *req = qpair->req;
2249 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2250 uint32_t handle;
2251 request_t *pkt;
2252 uint16_t cnt, req_cnt;
2254 pkt = NULL;
2255 req_cnt = 1;
2256 handle = 0;
2258 if (sp && (sp->type != SRB_SCSI_CMD)) {
2259 /* Adjust entry-counts as needed. */
2260 req_cnt = sp->iocbs;
2263 /* Check for room on request queue. */
2264 if (req->cnt < req_cnt + 2) {
2265 if (qpair->use_shadow_reg)
2266 cnt = *req->out_ptr;
2267 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2268 IS_QLA28XX(ha))
2269 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2270 else if (IS_P3P_TYPE(ha))
2271 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2272 else if (IS_FWI2_CAPABLE(ha))
2273 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2274 else if (IS_QLAFX00(ha))
2275 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2276 else
2277 cnt = qla2x00_debounce_register(
2278 ISP_REQ_Q_OUT(ha, &reg->isp));
2280 if (req->ring_index < cnt)
2281 req->cnt = cnt - req->ring_index;
2282 else
2283 req->cnt = req->length -
2284 (req->ring_index - cnt);
2286 if (req->cnt < req_cnt + 2)
2287 goto queuing_error;
2289 if (sp) {
2290 handle = qla2xxx_get_next_handle(req);
2291 if (handle == 0) {
2292 ql_log(ql_log_warn, vha, 0x700b,
2293 "No room on outstanding cmd array.\n");
2294 goto queuing_error;
2297 /* Prep command array. */
2298 req->current_outstanding_cmd = handle;
2299 req->outstanding_cmds[handle] = sp;
2300 sp->handle = handle;
2303 /* Prep packet */
2304 req->cnt -= req_cnt;
2305 pkt = req->ring_ptr;
2306 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2307 if (IS_QLAFX00(ha)) {
2308 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2309 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2310 } else {
2311 pkt->entry_count = req_cnt;
2312 pkt->handle = handle;
2315 return pkt;
2317 queuing_error:
2318 qpair->tgt_counters.num_alloc_iocb_failed++;
2319 return pkt;
2322 void *
2323 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2325 scsi_qla_host_t *vha = qpair->vha;
2327 if (qla2x00_reset_active(vha))
2328 return NULL;
2330 return __qla2x00_alloc_iocbs(qpair, sp);
2333 void *
2334 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2336 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
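/*
 * Both allocation paths above obtain a command handle from
 * qla2xxx_get_next_handle(), which is defined elsewhere in the driver.
 * Conceptually it performs a circular scan of the outstanding-command
 * array for a free slot, with handle 0 reserved to mean "none". A sketch
 * under those assumptions (toy code, not the driver's implementation):
 */
static unsigned int toy_get_next_handle(void **outstanding,
					unsigned int num_handles,
					unsigned int *last_handle)
{
	unsigned int tries, handle = *last_handle;

	for (tries = 1; tries < num_handles; tries++) {
		handle++;
		if (handle == num_handles)
			handle = 1;	/* handle 0 stays reserved */
		if (!outstanding[handle]) {
			*last_handle = handle;
			return handle;
		}
	}
	return 0;			/* every slot is in use */
}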
2339 static void
2340 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2342 struct srb_iocb *lio = &sp->u.iocb_cmd;
2344 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2345 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2346 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2347 logio->control_flags |= LCF_NVME_PRLI;
2348 if (sp->vha->flags.nvme_first_burst)
2349 logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2352 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2353 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2354 logio->port_id[1] = sp->fcport->d_id.b.area;
2355 logio->port_id[2] = sp->fcport->d_id.b.domain;
2356 logio->vp_index = sp->vha->vp_idx;
2359 static void
2360 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2362 struct srb_iocb *lio = &sp->u.iocb_cmd;
2364 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2365 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2367 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2368 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2369 } else {
2370 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2371 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2372 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2373 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2374 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2376 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2377 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2378 logio->port_id[1] = sp->fcport->d_id.b.area;
2379 logio->port_id[2] = sp->fcport->d_id.b.domain;
2380 logio->vp_index = sp->vha->vp_idx;
2383 static void
2384 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2386 struct qla_hw_data *ha = sp->vha->hw;
2387 struct srb_iocb *lio = &sp->u.iocb_cmd;
2388 uint16_t opts;
2390 mbx->entry_type = MBX_IOCB_TYPE;
2391 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2392 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2393 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2394 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2395 if (HAS_EXTENDED_IDS(ha)) {
2396 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2397 mbx->mb10 = cpu_to_le16(opts);
2398 } else {
2399 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2401 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2402 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2403 sp->fcport->d_id.b.al_pa);
2404 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2407 static void
2408 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2410 u16 control_flags = LCF_COMMAND_LOGO;
2411 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2413 if (sp->fcport->explicit_logout) {
2414 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2415 } else {
2416 control_flags |= LCF_IMPL_LOGO;
2418 if (!sp->fcport->keep_nport_handle)
2419 control_flags |= LCF_FREE_NPORT;
2422 logio->control_flags = cpu_to_le16(control_flags);
2423 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2424 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2425 logio->port_id[1] = sp->fcport->d_id.b.area;
2426 logio->port_id[2] = sp->fcport->d_id.b.domain;
2427 logio->vp_index = sp->vha->vp_idx;
2430 static void
2431 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2433 struct qla_hw_data *ha = sp->vha->hw;
2435 mbx->entry_type = MBX_IOCB_TYPE;
2436 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2437 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2438 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2439 cpu_to_le16(sp->fcport->loop_id) :
2440 cpu_to_le16(sp->fcport->loop_id << 8);
2441 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2442 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2443 sp->fcport->d_id.b.al_pa);
2444 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2445 /* Implicit: mbx->mbx10 = 0. */
2448 static void
2449 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2451 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2452 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2453 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2454 logio->vp_index = sp->vha->vp_idx;
2457 static void
2458 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2460 struct qla_hw_data *ha = sp->vha->hw;
2462 mbx->entry_type = MBX_IOCB_TYPE;
2463 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2464 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2465 if (HAS_EXTENDED_IDS(ha)) {
2466 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2467 mbx->mb10 = cpu_to_le16(BIT_0);
2468 } else {
2469 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2471 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2472 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2473 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2474 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2475 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2478 static void
2479 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2481 uint32_t flags;
2482 uint64_t lun;
2483 struct fc_port *fcport = sp->fcport;
2484 scsi_qla_host_t *vha = fcport->vha;
2485 struct qla_hw_data *ha = vha->hw;
2486 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2487 struct req_que *req = vha->req;
2489 flags = iocb->u.tmf.flags;
2490 lun = iocb->u.tmf.lun;
2492 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2493 tsk->entry_count = 1;
2494 tsk->handle = make_handle(req->id, tsk->handle);
2495 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2496 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2497 tsk->control_flags = cpu_to_le32(flags);
2498 tsk->port_id[0] = fcport->d_id.b.al_pa;
2499 tsk->port_id[1] = fcport->d_id.b.area;
2500 tsk->port_id[2] = fcport->d_id.b.domain;
2501 tsk->vp_index = fcport->vha->vp_idx;
2503 if (flags == TCF_LUN_RESET) {
2504 int_to_scsilun(lun, &tsk->lun);
2505 host_to_fcp_swap((uint8_t *)&tsk->lun,
2506 sizeof(tsk->lun));
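/*
 * int_to_scsilun() formats the LUN in SAM format and host_to_fcp_swap()
 * then converts the field to the byte order the firmware expects. The
 * swap helper is defined elsewhere; a sketch assuming it byte-reverses
 * each 32-bit word of the buffer, which is what its use on 4-byte-aligned
 * fields in this file suggests (toy helper, not the driver's):
 */
static void toy_fcp_swap(unsigned char *buf, unsigned int len)
{
	unsigned int i;

	/* Reverse each 4-byte group: 00 11 22 33 -> 33 22 11 00. */
	for (i = 0; i + 3 < len; i += 4) {
		unsigned char tmp;

		tmp = buf[i];
		buf[i] = buf[i + 3];
		buf[i + 3] = tmp;
		tmp = buf[i + 1];
		buf[i + 1] = buf[i + 2];
		buf[i + 2] = tmp;
	}
}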
2510 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2512 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2513 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2514 sp->free = qla2x00_sp_free;
2515 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2516 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2517 sp->start_timer = 1;
2520 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2522 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2524 kfree(sp->fcport);
2526 if (elsio->u.els_logo.els_logo_pyld)
2527 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2528 elsio->u.els_logo.els_logo_pyld,
2529 elsio->u.els_logo.els_logo_pyld_dma);
2531 del_timer(&elsio->timer);
2532 qla2x00_rel_sp(sp);
2535 static void
2536 qla2x00_els_dcmd_iocb_timeout(void *data)
2538 srb_t *sp = data;
2539 fc_port_t *fcport = sp->fcport;
2540 struct scsi_qla_host *vha = sp->vha;
2541 struct srb_iocb *lio = &sp->u.iocb_cmd;
2542 unsigned long flags = 0;
2543 int res, h;
2545 ql_dbg(ql_dbg_io, vha, 0x3069,
2546 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2547 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2548 fcport->d_id.b.al_pa);
2550 /* Abort the exchange */
2551 res = qla24xx_async_abort_cmd(sp, false);
2552 if (res) {
2553 ql_dbg(ql_dbg_io, vha, 0x3070,
2554 "mbx abort_command failed.\n");
2555 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2556 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2557 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2558 sp->qpair->req->outstanding_cmds[h] = NULL;
2559 break;
2562 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2563 complete(&lio->u.els_logo.comp);
2564 } else {
2565 ql_dbg(ql_dbg_io, vha, 0x3071,
2566 "mbx abort_command success.\n");
2570 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2572 fc_port_t *fcport = sp->fcport;
2573 struct srb_iocb *lio = &sp->u.iocb_cmd;
2574 struct scsi_qla_host *vha = sp->vha;
2576 ql_dbg(ql_dbg_io, vha, 0x3072,
2577 "%s hdl=%x, portid=%02x%02x%02x done\n",
2578 sp->name, sp->handle, fcport->d_id.b.domain,
2579 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2581 complete(&lio->u.els_logo.comp);
2585 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2586 port_id_t remote_did)
2588 srb_t *sp;
2589 fc_port_t *fcport = NULL;
2590 struct srb_iocb *elsio = NULL;
2591 struct qla_hw_data *ha = vha->hw;
2592 struct els_logo_payload logo_pyld;
2593 int rval = QLA_SUCCESS;
2595 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2596 if (!fcport) {
2597 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2598 return -ENOMEM;
2601 /* Alloc SRB structure */
2602 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2603 if (!sp) {
2604 kfree(fcport);
2605 ql_log(ql_log_info, vha, 0x70e6,
2606 "SRB allocation failed\n");
2607 return -ENOMEM;
2610 elsio = &sp->u.iocb_cmd;
2611 fcport->loop_id = 0xFFFF;
2612 fcport->d_id.b.domain = remote_did.b.domain;
2613 fcport->d_id.b.area = remote_did.b.area;
2614 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2616 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2617 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2619 sp->type = SRB_ELS_DCMD;
2620 sp->name = "ELS_DCMD";
2621 sp->fcport = fcport;
2622 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2623 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2624 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2625 sp->done = qla2x00_els_dcmd_sp_done;
2626 sp->free = qla2x00_els_dcmd_sp_free;
2628 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2629 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2630 GFP_KERNEL);
2632 if (!elsio->u.els_logo.els_logo_pyld) {
2633 sp->free(sp);
2634 return QLA_FUNCTION_FAILED;
2637 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2639 elsio->u.els_logo.els_cmd = els_opcode;
2640 logo_pyld.opcode = els_opcode;
2641 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2642 logo_pyld.s_id[1] = vha->d_id.b.area;
2643 logo_pyld.s_id[2] = vha->d_id.b.domain;
2644 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2645 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2647 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2648 sizeof(struct els_logo_payload));
2649 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2650 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2651 elsio->u.els_logo.els_logo_pyld,
2652 sizeof(*elsio->u.els_logo.els_logo_pyld));
2654 rval = qla2x00_start_sp(sp);
2655 if (rval != QLA_SUCCESS) {
2656 sp->free(sp);
2657 return QLA_FUNCTION_FAILED;
2660 ql_dbg(ql_dbg_io, vha, 0x3074,
2661 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2662 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2663 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2665 wait_for_completion(&elsio->u.els_logo.comp);
2667 sp->free(sp);
2668 return rval;
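/*
 * The LOGO payload above follows the usual coherent-DMA lifecycle:
 * allocate a buffer the device can see, stage the payload, hand the bus
 * address to the hardware through the IOCB, and free after completion.
 * A condensed sketch of that pattern (the IOCB plumbing is elided; only
 * the DMA-API calls are real kernel interfaces):
 */
#include <linux/dma-mapping.h>
#include <linux/string.h>

static int toy_send_payload(struct device *dev, const void *src, size_t len)
{
	dma_addr_t bus_addr;
	void *vaddr;

	vaddr = dma_alloc_coherent(dev, len, &bus_addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	memcpy(vaddr, src, len);	/* stage the payload */
	/* ... point the IOCB DSD at bus_addr and start the SRB ... */
	dma_free_coherent(dev, len, vaddr, bus_addr);
	return 0;
}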
2671 static void
2672 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2674 scsi_qla_host_t *vha = sp->vha;
2675 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2677 els_iocb->entry_type = ELS_IOCB_TYPE;
2678 els_iocb->entry_count = 1;
2679 els_iocb->sys_define = 0;
2680 els_iocb->entry_status = 0;
2681 els_iocb->handle = sp->handle;
2682 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2683 els_iocb->tx_dsd_count = 1;
2684 els_iocb->vp_index = vha->vp_idx;
2685 els_iocb->sof_type = EST_SOFI3;
2686 els_iocb->rx_dsd_count = 0;
2687 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2689 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2690 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2691 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2692 /* For SID the byte order is different than DID */
2693 els_iocb->s_id[1] = vha->d_id.b.al_pa;
2694 els_iocb->s_id[2] = vha->d_id.b.area;
2695 els_iocb->s_id[0] = vha->d_id.b.domain;
2697 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2698 els_iocb->control_flags = 0;
2699 els_iocb->tx_byte_count = els_iocb->tx_len =
2700 cpu_to_le32(sizeof(struct els_plogi_payload));
2701 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2702 &els_iocb->tx_address);
2703 els_iocb->rx_dsd_count = 1;
2704 els_iocb->rx_byte_count = els_iocb->rx_len =
2705 cpu_to_le32(sizeof(struct els_plogi_payload));
2706 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2707 &els_iocb->rx_address);
2709 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2710 "PLOGI ELS IOCB:\n");
2711 ql_dump_buffer(ql_log_info, vha, 0x0109,
2712 (uint8_t *)els_iocb,
2713 sizeof(*els_iocb));
2714 } else {
2715 els_iocb->control_flags = 1 << 13;
2716 els_iocb->tx_byte_count =
2717 cpu_to_le32(sizeof(struct els_logo_payload));
2718 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2719 &els_iocb->tx_address);
2720 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2722 els_iocb->rx_byte_count = 0;
2723 els_iocb->rx_address = 0;
2724 els_iocb->rx_len = 0;
2725 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2726 "LOGO ELS IOCB:");
2727 ql_dump_buffer(ql_log_info, vha, 0x010b,
2728 els_iocb,
2729 sizeof(*els_iocb));
2732 sp->vha->qla_stats.control_requests++;
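/*
 * The S_ID quirk noted above, laid out explicitly: for a port id with
 * domain aa, area bb, al_pa cc, the assignments in this function produce
 *
 *	d_id[0] = cc (al_pa)	s_id[0] = aa (domain)
 *	d_id[1] = bb (area)	s_id[1] = cc (al_pa)
 *	d_id[2] = aa (domain)	s_id[2] = bb (area)
 *
 * so the two fields cannot be filled with the same helper.
 */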
2735 static void
2736 qla2x00_els_dcmd2_iocb_timeout(void *data)
2738 srb_t *sp = data;
2739 fc_port_t *fcport = sp->fcport;
2740 struct scsi_qla_host *vha = sp->vha;
2741 unsigned long flags = 0;
2742 int res, h;
2744 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2745 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2746 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2748 /* Abort the exchange */
2749 res = qla24xx_async_abort_cmd(sp, false);
2750 ql_dbg(ql_dbg_io, vha, 0x3070,
2751 "mbx abort_command %s\n",
2752 (res == QLA_SUCCESS) ? "successful" : "failed");
2753 if (res) {
2754 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2755 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2756 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2757 sp->qpair->req->outstanding_cmds[h] = NULL;
2758 break;
2761 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2762 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2766 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2768 if (els_plogi->els_plogi_pyld)
2769 dma_free_coherent(&vha->hw->pdev->dev,
2770 els_plogi->tx_size,
2771 els_plogi->els_plogi_pyld,
2772 els_plogi->els_plogi_pyld_dma);
2774 if (els_plogi->els_resp_pyld)
2775 dma_free_coherent(&vha->hw->pdev->dev,
2776 els_plogi->rx_size,
2777 els_plogi->els_resp_pyld,
2778 els_plogi->els_resp_pyld_dma);
2781 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2783 fc_port_t *fcport = sp->fcport;
2784 struct srb_iocb *lio = &sp->u.iocb_cmd;
2785 struct scsi_qla_host *vha = sp->vha;
2786 struct event_arg ea;
2787 struct qla_work_evt *e;
2788 struct fc_port *conflict_fcport;
2789 port_id_t cid; /* conflict Nport id */
2790 u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2791 u16 lid;
2793 ql_dbg(ql_dbg_disc, vha, 0x3072,
2794 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2795 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2797 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2798 del_timer(&sp->u.iocb_cmd.timer);
2800 if (sp->flags & SRB_WAKEUP_ON_COMP)
2801 complete(&lio->u.els_plogi.comp);
2802 else {
2803 switch (fw_status[0]) {
2804 case CS_DATA_UNDERRUN:
2805 case CS_COMPLETE:
2806 memset(&ea, 0, sizeof(ea));
2807 ea.fcport = fcport;
2808 ea.rc = res;
2809 qla_handle_els_plogi_done(vha, &ea);
2810 break;
2812 case CS_IOCB_ERROR:
2813 switch (fw_status[1]) {
2814 case LSC_SCODE_PORTID_USED:
2815 lid = fw_status[2] & 0xffff;
2816 qlt_find_sess_invalidate_other(vha,
2817 wwn_to_u64(fcport->port_name),
2818 fcport->d_id, lid, &conflict_fcport);
2819 if (conflict_fcport) {
2821 /* Another fcport shares the same
2822 * loop_id & nport id; conflict
2823 * fcport needs to finish cleanup
2824 * before this fcport can proceed
2825 * to login. */
2827 conflict_fcport->conflict = fcport;
2828 fcport->login_pause = 1;
2829 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2830 "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2831 __func__, __LINE__,
2832 fcport->port_name,
2833 fcport->d_id.b24, lid);
2834 } else {
2835 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2836 "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2837 __func__, __LINE__,
2838 fcport->port_name,
2839 fcport->d_id.b24, lid);
2840 qla2x00_clear_loop_id(fcport);
2841 set_bit(lid, vha->hw->loop_id_map);
2842 fcport->loop_id = lid;
2843 fcport->keep_nport_handle = 0;
2844 qlt_schedule_sess_for_deletion(fcport);
2846 break;
2848 case LSC_SCODE_NPORT_USED:
2849 cid.b.domain = (fw_status[2] >> 16) & 0xff;
2850 cid.b.area = (fw_status[2] >> 8) & 0xff;
2851 cid.b.al_pa = fw_status[2] & 0xff;
2852 cid.b.rsvd_1 = 0;
2854 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2855 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2856 __func__, __LINE__, fcport->port_name,
2857 fcport->loop_id, cid.b24);
2858 set_bit(fcport->loop_id,
2859 vha->hw->loop_id_map);
2860 fcport->loop_id = FC_NO_LOOP_ID;
2861 qla24xx_post_gnl_work(vha, fcport);
2862 break;
2864 case LSC_SCODE_NOXCB:
2865 vha->hw->exch_starvation++;
2866 if (vha->hw->exch_starvation > 5) {
2867 ql_log(ql_log_warn, vha, 0xd046,
2868 "Exchange starvation. Resetting RISC\n");
2869 vha->hw->exch_starvation = 0;
2870 set_bit(ISP_ABORT_NEEDED,
2871 &vha->dpc_flags);
2872 qla2xxx_wake_dpc(vha);
2874 /* fall through */
2875 default:
2876 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2877 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2878 __func__, sp->fcport->port_name,
2879 fw_status[0], fw_status[1], fw_status[2]);
2881 fcport->flags &= ~FCF_ASYNC_SENT;
2882 qla2x00_set_fcport_disc_state(fcport,
2883 DSC_LOGIN_FAILED);
2884 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2885 break;
2887 break;
2889 default:
2890 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2891 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2892 __func__, sp->fcport->port_name,
2893 fw_status[0], fw_status[1], fw_status[2]);
2895 sp->fcport->flags &= ~FCF_ASYNC_SENT;
2896 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
2897 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2898 break;
2901 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2902 if (!e) {
2903 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2905 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2906 sp->free(sp);
2907 return;
2909 e->u.iosb.sp = sp;
2910 qla2x00_post_work(vha, e);
2915 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2916 fc_port_t *fcport, bool wait)
2918 srb_t *sp;
2919 struct srb_iocb *elsio = NULL;
2920 struct qla_hw_data *ha = vha->hw;
2921 int rval = QLA_SUCCESS;
2922 void *ptr, *resp_ptr;
2924 /* Alloc SRB structure */
2925 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2926 if (!sp) {
2927 ql_log(ql_log_info, vha, 0x70e6,
2928 "SRB allocation failed\n");
2929 fcport->flags &= ~FCF_ASYNC_ACTIVE;
2930 return -ENOMEM;
2933 fcport->flags |= FCF_ASYNC_SENT;
2934 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
2935 elsio = &sp->u.iocb_cmd;
2936 ql_dbg(ql_dbg_io, vha, 0x3073,
2937 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2939 sp->type = SRB_ELS_DCMD;
2940 sp->name = "ELS_DCMD";
2941 sp->fcport = fcport;
2943 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2944 if (wait)
2945 sp->flags = SRB_WAKEUP_ON_COMP;
2947 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2949 sp->done = qla2x00_els_dcmd2_sp_done;
2950 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2952 ptr = elsio->u.els_plogi.els_plogi_pyld =
2953 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
2954 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2956 if (!elsio->u.els_plogi.els_plogi_pyld) {
2957 rval = QLA_FUNCTION_FAILED;
2958 goto out;
2961 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2962 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
2963 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2965 if (!elsio->u.els_plogi.els_resp_pyld) {
2966 rval = QLA_FUNCTION_FAILED;
2967 goto out;
2970 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2972 memset(ptr, 0, sizeof(struct els_plogi_payload));
2973 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2974 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2975 &ha->plogi_els_payld.data,
2976 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2978 elsio->u.els_plogi.els_cmd = els_opcode;
2979 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2981 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2982 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2983 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
2984 sizeof(*elsio->u.els_plogi.els_plogi_pyld));
2986 init_completion(&elsio->u.els_plogi.comp);
2987 rval = qla2x00_start_sp(sp);
2988 if (rval != QLA_SUCCESS) {
2989 rval = QLA_FUNCTION_FAILED;
2990 } else {
2991 ql_dbg(ql_dbg_disc, vha, 0x3074,
2992 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2993 sp->name, sp->handle, fcport->loop_id,
2994 fcport->d_id.b24, vha->d_id.b24);
2997 if (wait) {
2998 wait_for_completion(&elsio->u.els_plogi.comp);
3000 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3001 rval = QLA_FUNCTION_FAILED;
3002 } else {
3003 goto done;
3006 out:
3007 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3008 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3009 sp->free(sp);
3010 done:
3011 return rval;
3014 static void
3015 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3017 struct bsg_job *bsg_job = sp->u.bsg_job;
3018 struct fc_bsg_request *bsg_request = bsg_job->request;
3020 els_iocb->entry_type = ELS_IOCB_TYPE;
3021 els_iocb->entry_count = 1;
3022 els_iocb->sys_define = 0;
3023 els_iocb->entry_status = 0;
3024 els_iocb->handle = sp->handle;
3025 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3026 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3027 els_iocb->vp_index = sp->vha->vp_idx;
3028 els_iocb->sof_type = EST_SOFI3;
3029 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3031 els_iocb->opcode =
3032 sp->type == SRB_ELS_CMD_RPT ?
3033 bsg_request->rqst_data.r_els.els_code :
3034 bsg_request->rqst_data.h_els.command_code;
3035 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3036 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3037 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3038 els_iocb->control_flags = 0;
3039 els_iocb->rx_byte_count =
3040 cpu_to_le32(bsg_job->reply_payload.payload_len);
3041 els_iocb->tx_byte_count =
3042 cpu_to_le32(bsg_job->request_payload.payload_len);
3044 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3045 &els_iocb->tx_address);
3046 els_iocb->tx_len = cpu_to_le32(sg_dma_len
3047 (bsg_job->request_payload.sg_list));
3049 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3050 &els_iocb->rx_address);
3051 els_iocb->rx_len = cpu_to_le32(sg_dma_len
3052 (bsg_job->reply_payload.sg_list));
3054 sp->vha->qla_stats.control_requests++;
3057 static void
3058 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3060 uint16_t avail_dsds;
3061 struct dsd64 *cur_dsd;
3062 struct scatterlist *sg;
3063 int index;
3064 uint16_t tot_dsds;
3065 scsi_qla_host_t *vha = sp->vha;
3066 struct qla_hw_data *ha = vha->hw;
3067 struct bsg_job *bsg_job = sp->u.bsg_job;
3068 int entry_count = 1;
3070 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3071 ct_iocb->entry_type = CT_IOCB_TYPE;
3072 ct_iocb->entry_status = 0;
3073 ct_iocb->handle1 = sp->handle;
3074 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3075 ct_iocb->status = cpu_to_le16(0);
3076 ct_iocb->control_flags = cpu_to_le16(0);
3077 ct_iocb->timeout = 0;
3078 ct_iocb->cmd_dsd_count =
3079 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3080 ct_iocb->total_dsd_count =
3081 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3082 ct_iocb->req_bytecount =
3083 cpu_to_le32(bsg_job->request_payload.payload_len);
3084 ct_iocb->rsp_bytecount =
3085 cpu_to_le32(bsg_job->reply_payload.payload_len);
3087 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3088 &ct_iocb->req_dsd.address);
3089 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3091 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3092 &ct_iocb->rsp_dsd.address);
3093 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3095 avail_dsds = 1;
3096 cur_dsd = &ct_iocb->rsp_dsd;
3097 index = 0;
3098 tot_dsds = bsg_job->reply_payload.sg_cnt;
3100 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3101 cont_a64_entry_t *cont_pkt;
3103 /* Allocate additional continuation packets? */
3104 if (avail_dsds == 0) {
3106 /* Five DSDs are available in the Cont.
3107 * Type 1 IOCB. */
3109 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3110 vha->hw->req_q_map[0]);
3111 cur_dsd = cont_pkt->dsd;
3112 avail_dsds = 5;
3113 entry_count++;
3116 append_dsd64(&cur_dsd, sg);
3117 avail_dsds--;
3119 ct_iocb->entry_count = entry_count;
3121 sp->vha->qla_stats.control_requests++;
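/*
 * The loop above emits the first DSD(s) in the base IOCB and then five
 * per Continuation Type 1 IOCB, so the final entry_count is a pure
 * function of the segment count. A sketch of that arithmetic,
 * parameterized on how many DSDs the base IOCB holds (toy helper):
 */
static unsigned int toy_calc_entries(unsigned int dsds,
				     unsigned int inline_dsds)
{
	unsigned int entries = 1;

	if (dsds > inline_dsds) {
		entries += (dsds - inline_dsds) / 5;
		if ((dsds - inline_dsds) % 5)
			entries++;
	}
	return entries;
}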
3124 static void
3125 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3127 uint16_t avail_dsds;
3128 struct dsd64 *cur_dsd;
3129 struct scatterlist *sg;
3130 int index;
3131 uint16_t cmd_dsds, rsp_dsds;
3132 scsi_qla_host_t *vha = sp->vha;
3133 struct qla_hw_data *ha = vha->hw;
3134 struct bsg_job *bsg_job = sp->u.bsg_job;
3135 int entry_count = 1;
3136 cont_a64_entry_t *cont_pkt = NULL;
3138 ct_iocb->entry_type = CT_IOCB_TYPE;
3139 ct_iocb->entry_status = 0;
3140 ct_iocb->sys_define = 0;
3141 ct_iocb->handle = sp->handle;
3143 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3144 ct_iocb->vp_index = sp->vha->vp_idx;
3145 ct_iocb->comp_status = cpu_to_le16(0);
3147 cmd_dsds = bsg_job->request_payload.sg_cnt;
3148 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3150 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3151 ct_iocb->timeout = 0;
3152 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3153 ct_iocb->cmd_byte_count =
3154 cpu_to_le32(bsg_job->request_payload.payload_len);
3156 avail_dsds = 2;
3157 cur_dsd = ct_iocb->dsd;
3158 index = 0;
3160 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3161 /* Allocate additional continuation packets? */
3162 if (avail_dsds == 0) {
3164 /* Five DSDs are available in the Cont.
3165 * Type 1 IOCB. */
3167 cont_pkt = qla2x00_prep_cont_type1_iocb(
3168 vha, ha->req_q_map[0]);
3169 cur_dsd = cont_pkt->dsd;
3170 avail_dsds = 5;
3171 entry_count++;
3174 append_dsd64(&cur_dsd, sg);
3175 avail_dsds--;
3178 index = 0;
3180 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3181 /* Allocate additional continuation packets? */
3182 if (avail_dsds == 0) {
3184 /* Five DSDs are available in the Cont.
3185 * Type 1 IOCB. */
3187 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3188 ha->req_q_map[0]);
3189 cur_dsd = cont_pkt->dsd;
3190 avail_dsds = 5;
3191 entry_count++;
3194 append_dsd64(&cur_dsd, sg);
3195 avail_dsds--;
3197 ct_iocb->entry_count = entry_count;
3201 /** qla82xx_start_scsi() - Send a SCSI command to the ISP
3202 * @sp: command to send to the ISP
3204 * Returns non-zero if a failure occurred, else zero. */
3207 int qla82xx_start_scsi(srb_t *sp)
3209 int nseg;
3210 unsigned long flags;
3211 struct scsi_cmnd *cmd;
3212 uint32_t *clr_ptr;
3213 uint32_t handle;
3214 uint16_t cnt;
3215 uint16_t req_cnt;
3216 uint16_t tot_dsds;
3217 struct device_reg_82xx __iomem *reg;
3218 uint32_t dbval;
3219 uint32_t *fcp_dl;
3220 uint8_t additional_cdb_len;
3221 struct ct6_dsd *ctx;
3222 struct scsi_qla_host *vha = sp->vha;
3223 struct qla_hw_data *ha = vha->hw;
3224 struct req_que *req = NULL;
3225 struct rsp_que *rsp = NULL;
3227 /* Setup device pointers. */
3228 reg = &ha->iobase->isp82;
3229 cmd = GET_CMD_SP(sp);
3230 req = vha->req;
3231 rsp = ha->rsp_q_map[0];
3233 /* So we know we haven't pci_map'ed anything yet */
3234 tot_dsds = 0;
3236 dbval = 0x04 | (ha->portnum << 5);
3238 /* Send marker if required */
3239 if (vha->marker_needed != 0) {
3240 if (qla2x00_marker(vha, ha->base_qpair,
3241 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3242 ql_log(ql_log_warn, vha, 0x300c,
3243 "qla2x00_marker failed for cmd=%p.\n", cmd);
3244 return QLA_FUNCTION_FAILED;
3246 vha->marker_needed = 0;
3249 /* Acquire ring specific lock */
3250 spin_lock_irqsave(&ha->hardware_lock, flags);
3252 handle = qla2xxx_get_next_handle(req);
3253 if (handle == 0)
3254 goto queuing_error;
3256 /* Map the sg table so we have an accurate count of sg entries needed */
3257 if (scsi_sg_count(cmd)) {
3258 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3259 scsi_sg_count(cmd), cmd->sc_data_direction);
3260 if (unlikely(!nseg))
3261 goto queuing_error;
3262 } else
3263 nseg = 0;
3265 tot_dsds = nseg;
3267 if (tot_dsds > ql2xshiftctondsd) {
3268 struct cmd_type_6 *cmd_pkt;
3269 uint16_t more_dsd_lists = 0;
3270 struct dsd_dma *dsd_ptr;
3271 uint16_t i;
3273 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3274 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3275 ql_dbg(ql_dbg_io, vha, 0x300d,
3276 "Num of DSD list %d is than %d for cmd=%p.\n",
3277 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3278 cmd);
3279 goto queuing_error;
3282 if (more_dsd_lists <= ha->gbl_dsd_avail)
3283 goto sufficient_dsds;
3284 else
3285 more_dsd_lists -= ha->gbl_dsd_avail;
3287 for (i = 0; i < more_dsd_lists; i++) {
3288 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3289 if (!dsd_ptr) {
3290 ql_log(ql_log_fatal, vha, 0x300e,
3291 "Failed to allocate memory for dsd_dma "
3292 "for cmd=%p.\n", cmd);
3293 goto queuing_error;
3296 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3297 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3298 if (!dsd_ptr->dsd_addr) {
3299 kfree(dsd_ptr);
3300 ql_log(ql_log_fatal, vha, 0x300f,
3301 "Failed to allocate memory for dsd_addr "
3302 "for cmd=%p.\n", cmd);
3303 goto queuing_error;
3305 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3306 ha->gbl_dsd_avail++;
3309 sufficient_dsds:
3310 req_cnt = 1;
3312 if (req->cnt < (req_cnt + 2)) {
3313 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3314 &reg->req_q_out[0]);
3315 if (req->ring_index < cnt)
3316 req->cnt = cnt - req->ring_index;
3317 else
3318 req->cnt = req->length -
3319 (req->ring_index - cnt);
3320 if (req->cnt < (req_cnt + 2))
3321 goto queuing_error;
3324 ctx = sp->u.scmd.ct6_ctx =
3325 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3326 if (!ctx) {
3327 ql_log(ql_log_fatal, vha, 0x3010,
3328 "Failed to allocate ctx for cmd=%p.\n", cmd);
3329 goto queuing_error;
3332 memset(ctx, 0, sizeof(struct ct6_dsd));
3333 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3334 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3335 if (!ctx->fcp_cmnd) {
3336 ql_log(ql_log_fatal, vha, 0x3011,
3337 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3338 goto queuing_error;
3341 /* Initialize the DSD list and dma handle */
3342 INIT_LIST_HEAD(&ctx->dsd_list);
3343 ctx->dsd_use_cnt = 0;
3345 if (cmd->cmd_len > 16) {
3346 additional_cdb_len = cmd->cmd_len - 16;
3347 if ((cmd->cmd_len % 4) != 0) {
3348 /* SCSI command bigger than 16 bytes must be
3349 * a multiple of 4 */
3351 ql_log(ql_log_warn, vha, 0x3012,
3352 "scsi cmd len %d not multiple of 4 "
3353 "for cmd=%p.\n", cmd->cmd_len, cmd);
3354 goto queuing_error_fcp_cmnd;
3356 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3357 } else {
3358 additional_cdb_len = 0;
3359 ctx->fcp_cmnd_len = 12 + 16 + 4;
3362 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3363 cmd_pkt->handle = make_handle(req->id, handle);
3365 /* Zero out remaining portion of packet. */
3366 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3367 clr_ptr = (uint32_t *)cmd_pkt + 2;
3368 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3369 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3371 /* Set NPORT-ID and LUN number */
3372 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3373 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3374 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3375 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3376 cmd_pkt->vp_index = sp->vha->vp_idx;
3378 /* Build IOCB segments */
3379 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3380 goto queuing_error_fcp_cmnd;
3382 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3383 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3385 /* build FCP_CMND IU */
3386 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3387 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3389 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3390 ctx->fcp_cmnd->additional_cdb_len |= 1;
3391 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3392 ctx->fcp_cmnd->additional_cdb_len |= 2;
3394 /* Populate the FCP_PRIO. */
3395 if (ha->flags.fcp_prio_enabled)
3396 ctx->fcp_cmnd->task_attribute |=
3397 sp->fcport->fcp_prio << 3;
3399 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3401 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3402 additional_cdb_len);
3403 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3405 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3406 put_unaligned_le64(ctx->fcp_cmnd_dma,
3407 &cmd_pkt->fcp_cmnd_dseg_address);
3409 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3410 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3411 /* Set total data segment count. */
3412 cmd_pkt->entry_count = (uint8_t)req_cnt;
3413 /* Specify response queue number where
3414 * completion should happen. */
3416 cmd_pkt->entry_status = (uint8_t) rsp->id;
3417 } else {
3418 struct cmd_type_7 *cmd_pkt;
3420 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3421 if (req->cnt < (req_cnt + 2)) {
3422 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3423 &reg->req_q_out[0]);
3424 if (req->ring_index < cnt)
3425 req->cnt = cnt - req->ring_index;
3426 else
3427 req->cnt = req->length -
3428 (req->ring_index - cnt);
3430 if (req->cnt < (req_cnt + 2))
3431 goto queuing_error;
3433 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3434 cmd_pkt->handle = make_handle(req->id, handle);
3436 /* Zero out remaining portion of packet. */
3437 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3438 clr_ptr = (uint32_t *)cmd_pkt + 2;
3439 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3440 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3442 /* Set NPORT-ID and LUN number */
3443 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3444 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3445 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3446 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3447 cmd_pkt->vp_index = sp->vha->vp_idx;
3449 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3450 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3451 sizeof(cmd_pkt->lun));
3453 /* Populate the FCP_PRIO. */
3454 if (ha->flags.fcp_prio_enabled)
3455 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3457 /* Load SCSI command packet. */
3458 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3459 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3461 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3463 /* Build IOCB segments */
3464 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3466 /* Set total data segment count. */
3467 cmd_pkt->entry_count = (uint8_t)req_cnt;
3468 /* Specify response queue number where
3469 * completion should happen. */
3471 cmd_pkt->entry_status = (uint8_t) rsp->id;
3474 /* Build command packet. */
3475 req->current_outstanding_cmd = handle;
3476 req->outstanding_cmds[handle] = sp;
3477 sp->handle = handle;
3478 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3479 req->cnt -= req_cnt;
3480 wmb();
3482 /* Adjust ring index. */
3483 req->ring_index++;
3484 if (req->ring_index == req->length) {
3485 req->ring_index = 0;
3486 req->ring_ptr = req->ring;
3487 } else
3488 req->ring_ptr++;
3490 sp->flags |= SRB_DMA_VALID;
3492 /* Set chip new ring index. */
3493 /* write, read and verify logic */
3494 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3495 if (ql2xdbwr)
3496 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3497 else {
3498 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3499 wmb();
3500 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3501 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3502 wmb();
3506 /* Manage unprocessed RIO/ZIO commands in response queue. */
3507 if (vha->flags.process_response_queue &&
3508 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3509 qla24xx_process_response_queue(vha, rsp);
3511 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3512 return QLA_SUCCESS;
3514 queuing_error_fcp_cmnd:
3515 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3516 queuing_error:
3517 if (tot_dsds)
3518 scsi_dma_unmap(cmd);
3520 if (sp->u.scmd.crc_ctx) {
3521 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3522 sp->u.scmd.crc_ctx = NULL;
3524 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3526 return QLA_FUNCTION_FAILED;
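/*
 * When ql2xdbwr is clear, the doorbell update above is a write-then-
 * verify loop: post the value, read the device-visible copy back, and
 * repost until they match. The same idiom over two plain MMIO words
 * (toy wrappers standing in for the RD/WRT register macros; barriers
 * elided for brevity):
 */
static void toy_doorbell_post(volatile unsigned int *wr_ptr,
			      volatile unsigned int *rd_ptr,
			      unsigned int dbval)
{
	*wr_ptr = dbval;
	/* Repost until the hardware reflects the value we wrote. */
	while (*rd_ptr != dbval)
		*wr_ptr = dbval;
}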
3529 static void
3530 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3532 struct srb_iocb *aio = &sp->u.iocb_cmd;
3533 scsi_qla_host_t *vha = sp->vha;
3534 struct req_que *req = sp->qpair->req;
3536 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3537 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3538 abt_iocb->entry_count = 1;
3539 abt_iocb->handle = cpu_to_le32(make_handle(req->id, sp->handle));
3540 if (sp->fcport) {
3541 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3542 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3543 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3544 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3546 abt_iocb->handle_to_abort =
3547 cpu_to_le32(make_handle(aio->u.abt.req_que_no,
3548 aio->u.abt.cmd_hndl));
3549 abt_iocb->vp_index = vha->vp_idx;
3550 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3551 /* Send the command to the firmware */
3552 wmb();
3555 static void
3556 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3558 int i, sz;
3560 mbx->entry_type = MBX_IOCB_TYPE;
3561 mbx->handle = sp->handle;
3562 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3564 for (i = 0; i < sz; i++)
3565 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3568 static void
3569 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3571 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3572 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3573 ct_pkt->handle = sp->handle;
3576 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3577 struct nack_to_isp *nack)
3579 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3581 nack->entry_type = NOTIFY_ACK_TYPE;
3582 nack->entry_count = 1;
3583 nack->ox_id = ntfy->ox_id;
3585 nack->u.isp24.handle = sp->handle;
3586 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3587 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3588 nack->u.isp24.flags = ntfy->u.isp24.flags &
3589 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3591 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3592 nack->u.isp24.status = ntfy->u.isp24.status;
3593 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3594 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3595 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3596 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3597 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3598 nack->u.isp24.srr_flags = 0;
3599 nack->u.isp24.srr_reject_code = 0;
3600 nack->u.isp24.srr_reject_code_expl = 0;
3601 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3605 /* Build NVME LS request */
3607 static int
3608 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3610 struct srb_iocb *nvme;
3611 int rval = QLA_SUCCESS;
3613 nvme = &sp->u.iocb_cmd;
3614 cmd_pkt->entry_type = PT_LS4_REQUEST;
3615 cmd_pkt->entry_count = 1;
3616 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3618 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3619 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3620 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3622 cmd_pkt->tx_dseg_count = 1;
3623 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3624 cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3625 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3627 cmd_pkt->rx_dseg_count = 1;
3628 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3629 cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
3630 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3632 return rval;
3635 static void
3636 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3638 int map, pos;
3640 vce->entry_type = VP_CTRL_IOCB_TYPE;
3641 vce->handle = sp->handle;
3642 vce->entry_count = 1;
3643 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3644 vce->vp_count = cpu_to_le16(1);
3647 /* index map in firmware starts with 1; decrement index,
3648 * this is ok as we never use index 0 */
3650 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3651 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3652 vce->vp_idx_map[map] |= 1 << pos;
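/*
 * The vp_idx_map update above is a plain bitmap set with a one-based
 * index: byte (index - 1) / 8, bit (index - 1) & 7. The same operation
 * in isolation (toy helper mirroring the arithmetic above):
 */
static void toy_set_vp_bit(unsigned char *bitmap, unsigned int vp_index)
{
	unsigned int map = (vp_index - 1) / 8;	/* byte holding the bit */
	unsigned int pos = (vp_index - 1) & 7;	/* bit within that byte */

	bitmap[map] |= 1 << pos;
}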
3655 static void
3656 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3658 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3659 logio->control_flags =
3660 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3662 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3663 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3664 logio->port_id[1] = sp->fcport->d_id.b.area;
3665 logio->port_id[2] = sp->fcport->d_id.b.domain;
3666 logio->vp_index = sp->fcport->vha->vp_idx;
3670 qla2x00_start_sp(srb_t *sp)
3672 int rval = QLA_SUCCESS;
3673 scsi_qla_host_t *vha = sp->vha;
3674 struct qla_hw_data *ha = vha->hw;
3675 struct qla_qpair *qp = sp->qpair;
3676 void *pkt;
3677 unsigned long flags;
3679 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3680 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3681 if (!pkt) {
3682 rval = EAGAIN;
3683 ql_log(ql_log_warn, vha, 0x700c,
3684 "qla2x00_alloc_iocbs failed.\n");
3685 goto done;
3688 switch (sp->type) {
3689 case SRB_LOGIN_CMD:
3690 IS_FWI2_CAPABLE(ha) ?
3691 qla24xx_login_iocb(sp, pkt) :
3692 qla2x00_login_iocb(sp, pkt);
3693 break;
3694 case SRB_PRLI_CMD:
3695 qla24xx_prli_iocb(sp, pkt);
3696 break;
3697 case SRB_LOGOUT_CMD:
3698 IS_FWI2_CAPABLE(ha) ?
3699 qla24xx_logout_iocb(sp, pkt) :
3700 qla2x00_logout_iocb(sp, pkt);
3701 break;
3702 case SRB_ELS_CMD_RPT:
3703 case SRB_ELS_CMD_HST:
3704 qla24xx_els_iocb(sp, pkt);
3705 break;
3706 case SRB_CT_CMD:
3707 IS_FWI2_CAPABLE(ha) ?
3708 qla24xx_ct_iocb(sp, pkt) :
3709 qla2x00_ct_iocb(sp, pkt);
3710 break;
3711 case SRB_ADISC_CMD:
3712 IS_FWI2_CAPABLE(ha) ?
3713 qla24xx_adisc_iocb(sp, pkt) :
3714 qla2x00_adisc_iocb(sp, pkt);
3715 break;
3716 case SRB_TM_CMD:
3717 IS_QLAFX00(ha) ?
3718 qlafx00_tm_iocb(sp, pkt) :
3719 qla24xx_tm_iocb(sp, pkt);
3720 break;
3721 case SRB_FXIOCB_DCMD:
3722 case SRB_FXIOCB_BCMD:
3723 qlafx00_fxdisc_iocb(sp, pkt);
3724 break;
3725 case SRB_NVME_LS:
3726 qla_nvme_ls(sp, pkt);
3727 break;
3728 case SRB_ABT_CMD:
3729 IS_QLAFX00(ha) ?
3730 qlafx00_abort_iocb(sp, pkt) :
3731 qla24xx_abort_iocb(sp, pkt);
3732 break;
3733 case SRB_ELS_DCMD:
3734 qla24xx_els_logo_iocb(sp, pkt);
3735 break;
3736 case SRB_CT_PTHRU_CMD:
3737 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3738 break;
3739 case SRB_MB_IOCB:
3740 qla2x00_mb_iocb(sp, pkt);
3741 break;
3742 case SRB_NACK_PLOGI:
3743 case SRB_NACK_PRLI:
3744 case SRB_NACK_LOGO:
3745 qla2x00_send_notify_ack_iocb(sp, pkt);
3746 break;
3747 case SRB_CTRL_VP:
3748 qla25xx_ctrlvp_iocb(sp, pkt);
3749 break;
3750 case SRB_PRLO_CMD:
3751 qla24xx_prlo_iocb(sp, pkt);
3752 break;
3753 default:
3754 break;
3757 if (sp->start_timer)
3758 add_timer(&sp->u.iocb_cmd.timer);
3760 wmb();
3761 qla2x00_start_iocbs(vha, qp->req);
3762 done:
3763 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3764 return rval;
3767 static void
3768 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3769 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3771 uint16_t avail_dsds;
3772 struct dsd64 *cur_dsd;
3773 uint32_t req_data_len = 0;
3774 uint32_t rsp_data_len = 0;
3775 struct scatterlist *sg;
3776 int index;
3777 int entry_count = 1;
3778 struct bsg_job *bsg_job = sp->u.bsg_job;
3780 /* Update entry type to indicate bidir command */
3781 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3783 /* Set the transfer direction; in this case set both flags.
3784 * Also set the BD_WRAP_BACK flag; the firmware will take care
3785 * of assigning DID=SID for outgoing pkts. */
3787 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3788 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3789 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3790 BD_WRAP_BACK);
3792 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3793 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3794 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3795 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3797 vha->bidi_stats.transfer_bytes += req_data_len;
3798 vha->bidi_stats.io_count++;
3800 vha->qla_stats.output_bytes += req_data_len;
3801 vha->qla_stats.output_requests++;
3803 /* Only one dsd is available for bidirectional IOCB, remaining dsds
3804 * are bundled in continuation iocb */
3806 avail_dsds = 1;
3807 cur_dsd = &cmd_pkt->fcp_dsd;
3809 index = 0;
3811 for_each_sg(bsg_job->request_payload.sg_list, sg,
3812 bsg_job->request_payload.sg_cnt, index) {
3813 cont_a64_entry_t *cont_pkt;
3815 /* Allocate additional continuation packets */
3816 if (avail_dsds == 0) {
3817 /* Continuation type 1 IOCB can accommodate
3818 * 5 DSDs */
3820 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3821 cur_dsd = cont_pkt->dsd;
3822 avail_dsds = 5;
3823 entry_count++;
3825 append_dsd64(&cur_dsd, sg);
3826 avail_dsds--;
3828 /* For a read request the DSD always goes to a continuation IOCB
3829 * and follows the write DSDs. If there is room on the current IOCB
3830 * it is added to that IOCB, else a new continuation IOCB is
3831 * allocated. */
3833 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3834 bsg_job->reply_payload.sg_cnt, index) {
3835 cont_a64_entry_t *cont_pkt;
3837 /* Allocate additional continuation packets */
3838 if (avail_dsds == 0) {
3839 /* Continuation type 1 IOCB can accommodate
3840 * 5 DSDs */
3842 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3843 cur_dsd = cont_pkt->dsd;
3844 avail_dsds = 5;
3845 entry_count++;
3847 append_dsd64(&cur_dsd, sg);
3848 avail_dsds--;
3850 /* This value should be the same as the number of IOCBs required for this cmd */
3851 cmd_pkt->entry_count = entry_count;
3855 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3858 struct qla_hw_data *ha = vha->hw;
3859 unsigned long flags;
3860 uint32_t handle;
3861 uint16_t req_cnt;
3862 uint16_t cnt;
3863 uint32_t *clr_ptr;
3864 struct cmd_bidir *cmd_pkt = NULL;
3865 struct rsp_que *rsp;
3866 struct req_que *req;
3867 int rval = EXT_STATUS_OK;
3869 rval = QLA_SUCCESS;
3871 rsp = ha->rsp_q_map[0];
3872 req = vha->req;
3874 /* Send marker if required */
3875 if (vha->marker_needed != 0) {
3876 if (qla2x00_marker(vha, ha->base_qpair,
3877 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3878 return EXT_STATUS_MAILBOX;
3879 vha->marker_needed = 0;
3882 /* Acquire ring specific lock */
3883 spin_lock_irqsave(&ha->hardware_lock, flags);
3885 handle = qla2xxx_get_next_handle(req);
3886 if (handle == 0) {
3887 rval = EXT_STATUS_BUSY;
3888 goto queuing_error;
3891 /* Calculate number of IOCBs required */
3892 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3894 /* Check for room on request queue. */
3895 if (req->cnt < req_cnt + 2) {
3896 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3897 RD_REG_DWORD_RELAXED(req->req_q_out);
3898 if (req->ring_index < cnt)
3899 req->cnt = cnt - req->ring_index;
3900 else
3901 req->cnt = req->length -
3902 (req->ring_index - cnt);
3904 if (req->cnt < req_cnt + 2) {
3905 rval = EXT_STATUS_BUSY;
3906 goto queuing_error;
3909 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3910 cmd_pkt->handle = make_handle(req->id, handle);
3912 /* Zero out remaining portion of packet. */
3913 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3914 clr_ptr = (uint32_t *)cmd_pkt + 2;
3915 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3917 /* Set NPORT-ID (of vha) */
3918 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3919 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3920 cmd_pkt->port_id[1] = vha->d_id.b.area;
3921 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3923 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3924 cmd_pkt->entry_status = (uint8_t) rsp->id;
3925 /* Build command packet. */
3926 req->current_outstanding_cmd = handle;
3927 req->outstanding_cmds[handle] = sp;
3928 sp->handle = handle;
3929 req->cnt -= req_cnt;
3931 /* Send the command to the firmware */
3932 wmb();
3933 qla2x00_start_iocbs(vha, req);
3934 queuing_error:
3935 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3936 return rval;