/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>
void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}
static void qedf_cmd_timeout(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;

		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");

	fcport = io_req->fcport;
	if (io_req->fcport == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");

	switch (io_req->cmd_type) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
		    io_req->xid);

		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
		    io_req->xid);
		/* Cleanup timed out ABTS */
		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);

		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
		kref_put(&io_req->refcount, qedf_release_cmd);

		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
		qedf_restart_rport(fcport);

		kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang.  For now just free
		 * the resources of the original I/O and the RRQ
		 */
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
		    io_req->xid);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		/* Call callback function to complete command */
		if (io_req->cb_func && io_req->cb_arg) {
			op = io_req->cb_arg->op;
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		qedf_initiate_cleanup(io_req, true);
		kref_put(&io_req->refcount, qedf_release_cmd);

	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
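
/*
 * Tear down everything qedf_cmd_mgr_alloc() set up: the DMA'able BD tables
 * and sense buffers, the io_bdt pool and the per-io_req task parameter
 * structures.
 */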
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		kfree(io_req->sgl_task_params);
		kfree(io_req->task_params);
		/* Make sure we free per command sense buffer */
		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	/* Free command manager itself */
static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	qedf_send_rrq(io_req);
}
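
/*
 * Allocate the command manager and the per-xid qedf_ioreq resources
 * (timeout/RRQ work items, DMA sense buffers, task parameters and the
 * io_bdt pool). Any allocation failure unwinds through qedf_cmd_mgr_free().
 */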
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	/* Make sure num_queues is already set before calling this function */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
			   "max_xid 0x%x.\n", min_xid, max_xid);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
		   "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");

	spin_lock_init(&cmgr->lock);

	/*
	 * Initialize I/O request fields.
	 */
	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		/* Allocate DMA memory to hold sense buffer */
		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		if (!io_req->sense_buffer)

		/* Allocate task parameters to pass to f/w init functions */
		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
		if (!io_req->task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate task_params for xid=0x%x\n",

		/*
		 * Allocate scatter/gather list info to pass to f/w init
		 */
		io_req->sgl_task_params = kzalloc(
		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
		if (!io_req->sgl_task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate sgl_task_params for xid=0x%x\n",

	/* Allocate pool of io_bdts - one for each qedf_ioreq */
	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc io_bdt_pool[%d].\n", i);

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc bdt_tbl[%d].\n", i);

	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		  "cmgr->free_list_cnt=%d.\n",
		  atomic_read(&cmgr->free_list_cnt));

	qedf_cmd_mgr_free(cmgr);
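
/*
 * Reserve a free qedf_ioreq from the command manager for this fcport,
 * subject to the per-connection and global reserved-task limits, and bind
 * it to its statically associated io_bdt entry.
 */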
struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;

	free_sqes = atomic_read(&fcport->free_sqes);

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Returning NULL, free_sqes=%d.\n ",

	/* Limit the number of outstanding R/W tasks */
	if ((atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Returning NULL, num_active_ios=%d.\n",
			  atomic_read(&fcport->num_active_ios));

	/* Limit global TIDs for certain tasks */
	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			  "Returning NULL, free_list_cnt=%d.\n",
			  atomic_read(&cmd_mgr->free_list_cnt));

	spin_lock_irqsave(&cmd_mgr->lock, flags);
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];

		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)

		/* Check to make sure command was previously freed */
		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);

	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);

	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;
	io_req->tm_flags = 0;

	/* Reset sequence offset data */
	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff;	/* No OX_ID */

	/* Record failure for stats and return NULL to caller */
	qedf->alloc_failures++;
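
/* Free the DMA buffers and BDs used by a middle path (ELS/TMF) request. */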
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct scsi_sge);

	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}

	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}

	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}

	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}
void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	if (atomic_read(&fcport->num_active_ios) < 0)
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

	/* Increment task retry identifier now that the request is released */
	io_req->task_retry_identifier++;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
}
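
/*
 * Split a scatter/gather element that exceeds the firmware BD size limit
 * into multiple BDs of at most QEDF_BD_SPLIT_SZ bytes each, starting at
 * bd_index in the io_req BD table.
 */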
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
    int bd_index)
{
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	if (sg_len > QEDF_BD_SPLIT_SZ)
		frag_size = QEDF_BD_SPLIT_SZ;

		bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
		bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
		bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;

		addr += (u64)frag_size;
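
/*
 * DMA map the SCSI scatter/gather list and translate it into the firmware
 * BD format. A single small SGE is sent as a cached SGL; otherwise each
 * element is checked for page alignment to decide whether the slow path
 * SGL handling must be used.
 */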
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);

	sg = scsi_sglist(sc);

	/*
	 * New condition to send single SGE as cached-SGL with length less
	 */
	if ((sg_count == 1) && (sg_dma_len(sg) <=
	    QEDF_MAX_SGLEN_FOR_CACHESGL)) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);

		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
		bd[bd_count].sge_addr.hi = (addr >> 32);
		bd[bd_count].sge_len = (u16)sg_len;

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (u64)(addr + sg_len);

		/*
		 * First s/g element in the list so check if the end_addr
		 * is page aligned. Also check to make sure the length is
		 * at least page size.
		 */
		if ((i == 0) && (sg_count > 1) &&
		    ((end_addr % QEDF_PAGE_SIZE) ||
		    sg_len < QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Last s/g element so check if the start address is page
		 * aligned.
		 */
		else if ((i == (sg_count - 1)) && (sg_count > 1) &&
		    (addr % QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Intermediate s/g element so check if start and end address
		 * are page aligned.
		 */
		else if ((i != 0) && (i != (sg_count - 1)) &&
		    ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
			io_req->use_slowpath = true;

		if (sg_len > QEDF_MAX_BD_LEN) {
			sg_frags = qedf_split_bd(io_req, addr, sg_len,
			    bd_count);

			bd[bd_count].sge_addr.lo = U64_LO(addr);
			bd[bd_count].sge_addr.hi = U64_HI(addr);
			bd[bd_count].sge_len = (uint16_t)sg_len;

		bd_count += sg_frags;
		byte_count += sg_len;

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
			  scsi_bufflen(sc), io_req->xid);
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);

		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;

	io_req->bd_tbl->bd_valid = bd_count;
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
	struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/* fcp_cmnd is 32 bytes */
	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	/* 8 bytes: SCSI LUN info */
	int_to_scsilun(sc_cmd->device->lun,
	    (struct scsi_lun *)&fcp_cmnd->fc_lun);

	/* 4 bytes: flag info */
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	/* Populate data direction */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;

		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	/* 16 bytes: CDB information */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	/* 4 bytes: FCP data length */
	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}
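
/*
 * Initialize the firmware task context and task/SGL parameters for a
 * read/write or TMF request and build the byte-swapped FCP_CMND payload
 * passed to init_initiator_rw_fcoe_task().
 */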
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
	struct fcoe_wqe *sqe)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	struct regpair sense_data_buffer_phys_addr;

	/* Note init_initiator_rw_fcoe_task memsets the task context */
	io_req->task = task_ctx;
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

	/* Set task type based on DMA direction of command */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;

		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
			tx_io_size = io_req->data_xfer_len;

			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
			rx_io_size = io_req->data_xfer_len;

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = task_type;
	io_req->task_params->tx_io_size = tx_io_size;
	io_req->task_params->rx_io_size = rx_io_size;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	io_req->task_params->cq_rss_number = cq_idx;
	io_req->task_params->is_tape_device = fcport->dev_type;

	/* Fill in information for scatter/gather list */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
		bd_count = bd_tbl->bd_valid;
		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
		io_req->sgl_task_params->sgl_phys_addr.lo =
		    U64_LO(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->sgl_phys_addr.hi =
		    U64_HI(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->num_sges = bd_count;
		io_req->sgl_task_params->total_buffer_size =
		    scsi_bufflen(io_req->sc_cmd);
		io_req->sgl_task_params->small_mid_sge =
		    io_req->use_slowpath;

	/* Fill in physical address of sense buffer */
	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

	/* fill FCP_CMND IU */
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

	/* Swap fcp_cmnd since FC is big endian */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
	for (i = 0; i < cnt; i++) {
		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
	}
	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

	init_initiator_rw_fcoe_task(io_req->task_params,
				    io_req->sgl_task_params,
				    sense_data_buffer_phys_addr,
				    io_req->task_retry_identifier, fcp_cmnd);

	/* Increment SGL type counters */
		qedf->single_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
	} else if (io_req->use_slowpath) {
		qedf->slow_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_SLOW_SGE;

		qedf->fast_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	struct fcoe_tx_mid_path_params task_fc_hdr;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Initializing MP task for cmd_type=%d\n",

	qedf->control_requests++;

	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

	/* Setup the task from io_req for easy reference */
	io_req->task = task_ctx;

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
	io_req->task_params->tx_io_size = io_req->data_xfer_len;
	/* rx_io_size tells the f/w how large a response buffer we have */
	io_req->task_params->rx_io_size = PAGE_SIZE;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	/* Return middle path commands on CQ 0 */
	io_req->task_params->cq_rss_number = 0;
	io_req->task_params->is_tape_device = fcport->dev_type;

	fc_hdr = &(mp_req->req_fc_hdr);
	/* Set OX_ID and RX_ID based on driver task id */
	fc_hdr->fh_ox_id = io_req->xid;
	fc_hdr->fh_rx_id = htons(0xffff);

	/* Set up FC header information */
	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	/* Set up s/g list parameters for request buffer */
	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
	tx_sgl_task_params.small_mid_sge = 0;

	/* Set up s/g list parameters for response buffer */
	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
	rx_sgl_task_params.small_mid_sge = 0;

	/*
	 * Last arg is 0 as previous code did not set that we wanted the
	 * fc header information.
	 */
	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);

	/* Midpath requests always consume 1 SGE */
	qedf->single_sge_ios++;
/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));

	rval = fcport->sq_prod_idx;

	/* Adjust ring index */
	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	return rval;
}
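
/*
 * Notify the hardware of new SQ entries by writing the firmware SQ
 * producer index to the connection's doorbell register.
 */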
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
	struct fcoe_db_data dbell = { 0 };

	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = fcport->fw_sq_prod_idx;
	writel(*(u32 *)&dbell, fcport->p_doorbell);
	/* Make sure SQ index is updated so f/w processes requests in order */
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
	int direction)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_io_log *io_log;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	spin_lock_irqsave(&qedf->io_trace_lock, flags);

	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = io_req->xid;
	io_log->port_id = fcport->rdata->ids.port_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = op = sc_cmd->cmnd[0];
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->refcount = kref_read(&io_req->refcount);

	if (direction == QEDF_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
		io_log->req_cpu = io_req->cpu;

	} else if (direction == QEDF_IO_TRACE_RSP) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = io_req->int_cpu;
		io_log->rsp_cpu = smp_processor_id();
	}

	io_log->sge_type = io_req->sge_type;

	qedf->io_trace_idx++;
	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
		qedf->io_trace_idx = 0;

	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct e4_fcoe_task_context *task_ctx;
	enum fcoe_task_type req_type = 0;
	struct fcoe_wqe *sqe;

	/* Initialize rest of io_req fields */
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;
	io_req->use_slowpath = false;	/* Assume fast SGL by default */

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
		io_req->io_req_flags = QEDF_READ;
		qedf->input_requests++;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->io_req_flags = QEDF_WRITE;
		qedf->output_requests++;

		io_req->io_req_flags = 0;
		qedf->control_requests++;

	/* Build buffer descriptor list for firmware from sg list */
	if (qedf_build_bd_list_from_sg(io_req)) {
		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);

	/* Obtain free SQE */
	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Get the task context */
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
			  xid);
		kref_put(&io_req->refcount, qedf_release_cmd);

	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

	qedf_ring_doorbell(fcport);

	if (qedf_io_tracing && io_req->sc_cmd)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport;
	struct qedf_ioreq *io_req;
	unsigned long flags = 0;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);

	if (!qedf->pdev->msix_enabled) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
		    sc_cmd);
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);

	rval = fc_remote_port_chkready(rport);
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);

	/* Retry command if we are doing a qed drain operation */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;

	if (lport->state != LPORT_ST_READY ||
	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		rc = SCSI_MLQUEUE_HOST_BUSY;

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	fcport = (struct qedf_rport *)&rp[1];

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;

	if (fcport->retry_delay_timestamp) {
		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
			fcport->retry_delay_timestamp = 0;

			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;

	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
		rc = SCSI_MLQUEUE_HOST_BUSY;

	io_req->sc_cmd = sc_cmd;

	/* Take fcport->rport_lock for posting to fcport send queue */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (qedf_post_io_req(fcport, io_req)) {
		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
	struct fcoe_cqe_rsp_info *fcp_rsp)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;
	uint8_t *rsp_info, *sense_data;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
	    fcp_rsp->scsi_status_code;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
		fcp_rsp_len = fcp_rsp->fcp_rsp_len;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
		fcp_sns_len = fcp_rsp->fcp_sns_len;

	io_req->fcp_rsp_len = fcp_rsp_len;
	io_req->fcp_sns_len = fcp_sns_len;
	rsp_info = sense_data = io_req->sense_buffer;

	/* fetch fcp_rsp_code */
	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
		/* Only for task management function */
		io_req->fcp_rsp_code = rsp_info[3];
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
		/* Adjust sense-data location. */
		sense_data += fcp_rsp_len;
	}

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Truncating sense buffer\n");
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
	}

	/* The sense buffer can be NULL for TMF commands */
	if (sc_cmd->sense_buffer) {
		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			memcpy(sc_cmd->sense_buffer, sense_data,
			    fcp_sns_len);
static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct e4_fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct qedf_rport *fcport;
	u16 scope, qualifier = 0;
	u8 fw_residual_flag = 0;

	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = io_req->sc_cmd;
	fcp_rsp = &cqe->cqe_info.rsp_info;

		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");

	if (!sc_cmd->request) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
		    "sc_cmd=%p.\n", sc_cmd);

	if (!sc_cmd->request->special) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
		    "request not valid, sc_cmd=%p.\n", sc_cmd);

	if (!sc_cmd->request->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		    "is not valid, sc_cmd=%p.\n", sc_cmd);

	fcport = io_req->fcport;

	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	qedf_unmap_sg_list(qedf, io_req);

	/* Check for FCP transport error */
	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
		    io_req->fcp_rsp_code);
		sc_cmd->result = DID_BUS_BUSY << 16;

	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
	if (fw_residual_flag) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
		    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
		    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
		    cqe->cqe_info.rsp_info.fw_residual);

		if (io_req->cdb_status == 0)
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;

			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

		/* Abort the command since we did not get all the data */
		init_completion(&io_req->abts_done);
		rval = qedf_initiate_abts(io_req, true);
			QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;

		/*
		 * Set resid to the whole buffer length so we won't try to
		 * reuse any previously read data.
		 */
		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	switch (io_req->fcp_status) {
		if (io_req->cdb_status == 0) {
			/* Good I/O completion */
			sc_cmd->result = DID_OK << 16;

			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
			    "lba=%02x%02x%02x%02x cdb_status=%d "
			    "fcp_resid=0x%x refcount=%d.\n",
			    qedf->lport->host->host_no, sc_cmd->device->id,
			    sc_cmd->device->lun, io_req->xid,
			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
			    io_req->cdb_status, io_req->fcp_resid,
			    refcount);

			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Check whether we need to set retry_delay at
				 * all based on retry_delay module parameter
				 * and the status qualifier.
				 */

				scope = fcp_rsp->retry_delay_timer & 0xC000;

				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

				if (qedf_retry_delay &&
				    scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					/* Check we don't go over the max */
					if (qualifier > QEDF_RETRY_DELAY_MAX)
						qualifier =
						    QEDF_RETRY_DELAY_MAX;
					fcport->retry_delay_timestamp =
					    jiffies + (qualifier * HZ / 10);

				if (io_req->cdb_status ==
				    SAM_STAT_TASK_SET_FULL)
					qedf->task_set_fulls++;

		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
		    io_req->fcp_status);

	if (qedf_io_tracing)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result)
{
	struct scsi_cmnd *sc_cmd;

	sc_cmd = io_req->sc_cmd;
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");

	qedf_unmap_sg_list(qedf, io_req);

	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	    refcount);

	/*
	 * Set resid to the whole buffer length so we won't try to reuse any
	 * previously read data
	 */
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	if (qedf_io_tracing)
		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * expiration.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct qedf_rport *fcport = io_req->fcport;
	u64 err_warn_bit_map;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
		  "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
		  "err_warn_bitmap=%08x:%08x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
		  "rx_buff_off=%08x, rx_id=%04x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	/* Normalize the error bitmap value to just an unsigned int */
	err_warn_bit_map = (u64)
	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & (u64)((u64)1 << i)) {

	/* Check if REC TOV expired if this is a tape device */
	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
				io_req->rx_buf_off =
				    cqe->cqe_info.err_info.rx_buf_off;
				io_req->tx_buf_off =
				    cqe->cqe_info.err_info.tx_buf_off;
				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
				rval = qedf_send_rec(io_req);
				/*
				 * We only want to abort the io_req if we
				 * can't queue the REC command as we want to
				 * keep the exchange open for recovery.
				 */

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
		  "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
		  "err_warn_bitmap=%08x:%08x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
		  "rx_buff_off=%08x, rx_id=%04x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
static void qedf_flush_els_req(struct qedf_ctx *qedf,
	struct qedf_ioreq *els_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
	    kref_read(&els_req->refcount));

	/*
	 * Need to distinguish this from a timeout when calling the
	 */
	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

	/* Cancel the timer */
	cancel_delayed_work_sync(&els_req->timeout_work);

	/* Call callback function to complete command */
	if (els_req->cb_func && els_req->cb_arg) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	/* Release kref for original initiate_els */
	kref_put(&els_req->refcount, qedf_release_cmd);
}
/* A value of -1 for lun is a wild card that means flush all
 * active SCSI I/Os for the target.
 */
void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
{
	struct qedf_ioreq *io_req;
	struct qedf_ctx *qedf;
	struct qedf_cmd_mgr *cmd_mgr;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");

	qedf = fcport->qedf;
	cmd_mgr = qedf->cmd_mgr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");

	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[i];

		if (io_req->fcport != fcport)

		if (io_req->cmd_type == QEDF_ELS) {
			rc = kref_get_unless_zero(&io_req->refcount);
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);

			qedf_flush_els_req(qedf, io_req);
			/*
			 * Release the kref and go back to the top of the
			 * loop.
			 */

		if (io_req->cmd_type == QEDF_ABTS) {
			rc = kref_get_unless_zero(&io_req->refcount);
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);

			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			    "Flushing abort xid=0x%x.\n", io_req->xid);

			clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

			if (io_req->sc_cmd) {
				if (io_req->return_scsi_cmd_on_abts)
					qedf_scsi_done(qedf, io_req, DID_ERROR);

			/* Notify eh_abort handler that ABTS is complete */
			complete(&io_req->abts_done);
			kref_put(&io_req->refcount, qedf_release_cmd);

		if (!io_req->sc_cmd)

		if (io_req->sc_cmd->device->lun !=
		    (u64)lun)

		/*
		 * Use kref_get_unless_zero in the unlikely case the command
		 * we're about to flush was completed in the normal SCSI path
		 */
		rc = kref_get_unless_zero(&io_req->refcount);
			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Cleanup xid=0x%x.\n", io_req->xid);

		/* Cleanup task and return I/O to the mid-layer */
		qedf_initiate_cleanup(io_req, true);

		kref_put(&io_req->refcount, qedf_release_cmd);
/*
 * Initiate an ABTS middle path command. Note that we don't have to initialize
 * the task context for an ABTS task.
 */
int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
	struct fc_lport *lport;
	struct qedf_rport *fcport = io_req->fcport;
	struct fc_rport_priv *rdata;
	struct qedf_ctx *qedf;
	unsigned long flags;
	struct fcoe_wqe *sqe;

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");

	rdata = fcport->rdata;
	r_a_tov = rdata->r_a_tov;
	qedf = fcport->qedf;
	lport = qedf->lport;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");

	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
			  "cleanup or abort processing or already "
			  "completed.\n", io_req->xid);

	kref_get(&io_req->refcount);

	qedf->control_requests++;
	qedf->packet_aborts++;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	/* Set the command type to abort */
	io_req->cmd_type = QEDF_ABTS;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
		  "0x%x\n", io_req->xid);

	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	/*
	 * If the ABTS task fails to queue then we need to cleanup the
	 * task at the firmware.
	 */
	qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
		  "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);

	cancel_delayed_work(&io_req->timeout_work);

	r_ctl = cqe->cqe_info.abts_info.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		kref_get(&io_req->refcount);
		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
		    msecs_to_jiffies(qedf->lport->r_a_tov));

	/* For error cases let the cleanup return the command */
	case FC_RCTL_BA_RJT:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - RJT\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;

		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");

	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);

	/* Notify eh_abort handler that ABTS is complete */
	complete(&io_req->abts_done);

	kref_put(&io_req->refcount, qedf_release_cmd);
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req;
	struct scsi_sge *mp_req_bd;
	struct scsi_sge *mp_resp_bd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");

	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct qedf_mp_req));

	if (io_req->cmd_type != QEDF_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;

		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
	    &mp_req->req_buf_dma, GFP_KERNEL);
	if (!mp_req->req_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
		qedf_free_mp_resc(io_req);

	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
	if (!mp_req->resp_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
			  "buffer\n");
		qedf_free_mp_resc(io_req);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct scsi_sge);
	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_req_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
		qedf_free_mp_resc(io_req);

	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_resp_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
		qedf_free_mp_resc(io_req);

	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->sge_addr.lo = U64_LO(addr);
	mp_req_bd->sge_addr.hi = U64_HI(addr);
	mp_req_bd->sge_len = QEDF_PAGE_SIZE;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->sge_addr.lo = U64_LO(addr);
	mp_resp_bd->sge_addr.hi = U64_HI(addr);
	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
/*
 * Last ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");

	/* Set bit to return all queuecommand requests as busy */
	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	/* Call qed drain request for function. Should be synchronous */
	qed_ops->common->drain(qedf->cdev);

	/* Settle time for CQEs to be returned */

	/* Unplug and continue */
	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
/*
 * Returns SUCCESS if the cleanup task does not timeout, otherwise return
 * FAILED.
 */
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	struct e4_fcoe_task_context *task;
	unsigned long flags;
	struct fcoe_wqe *sqe;

	fcport = io_req->fcport;
		QEDF_ERR(NULL, "fcport is NULL.\n");

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");

	qedf = fcport->qedf;
		QEDF_ERR(NULL, "qedf is NULL.\n");

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
			  "cleanup processing or already completed.\n",
			  io_req->xid);

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
		  io_req->xid);

	/* Cleanup cmds re-use the same TID as the original I/O */
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
			  "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);

		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;

		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
		  io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->tm_done);
}
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct e4_fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	unsigned long flags;
	struct fcoe_wqe *sqe;

		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
		  "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");

	if (tm_flags == FCP_TMF_LUN_RESET)

	else if (tm_flags == FCP_TMF_TGT_RESET)
		qedf->target_resets++;

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = true;

	/* Obtain exchange id */

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
		  "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");

	/* Check TMF response code */
	if (io_req->fcp_rsp_code == 0)

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
	else
		qedf_flush_active_ios(fcport, -1);

	kref_put(&io_req->refcount, qedf_release_cmd);

	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");

		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport;

	rval = fc_remote_port_chkready(rport);
		QEDF_ERR(NULL, "device_reset rport not ready\n");

	if (fcport == NULL) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");

	qedf = fcport->qedf;
	lport = qedf->lport;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}
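
/*
 * Handle an unsolicited frame CQE: copy the frame out of the BDQ buffer
 * into an fc_frame and queue it to a worker so libfc can process it in a
 * non-atomic context, then return the BDQ buffer to the firmware.
 */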
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	struct scsi_bd *p_bd_info;

	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
		  le32_to_cpu(p_bd_info->address.hi),
		  le32_to_cpu(p_bd_info->address.lo),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
		  qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
		    bdq_idx);
		goto increment_prod;

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
		    "unsolicited packet.\n");
		goto increment_prod;

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		    "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
		    (void *)bdq_addr, pktlen, false);

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context.
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
		    "work for I/O completion.\n");
		goto increment_prod;

	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);

increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	tmp = readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	tmp = readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);