// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>
void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}
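
/*
 * Delayed-work handler for a command whose timer has popped.  Depending on
 * the command type (ABTS, ELS or sequence cleanup) it issues a firmware
 * cleanup for the task and drops the references the original request held.
 */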
static void qedf_cmd_timeout(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	u8 op = 0;

	if (io_req == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
		return;
	}

	fcport = io_req->fcport;
	if (io_req->fcport == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
		return;
	}

	qedf = fcport->qedf;

	switch (io_req->cmd_type) {
	case QEDF_ABTS:
		if (qedf == NULL) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
				  "qedf is NULL for ABTS xid=0x%x.\n",
				  io_req->xid);
			return;
		}

		QEDF_ERR(&(qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
			 io_req->xid);
		/* Cleanup timed out ABTS */
		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);

		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
		kref_put(&io_req->refcount, qedf_release_cmd);

		/* Clear in abort bit now that we're done with the command */
		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
		qedf_restart_rport(fcport);
		break;
	case QEDF_ELS:
		if (!qedf) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
				  "qedf is NULL for ELS xid=0x%x.\n",
				  io_req->xid);
			return;
		}
		/* ELS request no longer outstanding since it timed out */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

		kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang.  For now just free
		 * the resources of the original I/O and the RRQ.
		 */
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
			 io_req->xid);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		/* Call callback function to complete command */
		if (io_req->cb_func && io_req->cb_arg) {
			op = io_req->cb_arg->op;
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		qedf_initiate_cleanup(io_req, true);
		kref_put(&io_req->refcount, qedf_release_cmd);
		break;
	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
		break;
	default:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Hit default case, xid=0x%x.\n", io_req->xid);
		break;
	}
}
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	int i = 0;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	int num_ios;
	int bd_tbl_sz;
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool) {
		QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
		goto free_cmd_pool;
	}

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		kfree(io_req->sgl_task_params);
		kfree(io_req->task_params);
		/* Make sure we free per command sense buffer */
		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	/* Free command manager itself */
	vfree(cmgr);
}
static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
	qedf_send_rrq(io_req);
}
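
/*
 * Allocate the per-adapter command manager: one qedf_ioreq per firmware
 * task id, each with a DMA-able sense buffer, task parameter block, SGL
 * parameter block and an io_bdt buffer descriptor table.
 */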
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	int i;
	u16 xid;
	int num_ios;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	/* Make sure num_queues is already set before calling this function */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
		return NULL;
	}

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
			   "max_xid 0x%x.\n", min_xid, max_xid);
		return NULL;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
		   "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
	if (!cmgr) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
		return NULL;
	}

	cmgr->qedf = qedf;
	spin_lock_init(&cmgr->lock);

	/*
	 * Initialize I/O request fields.
	 */
	xid = 0;

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		io_req->xid = xid++;

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		/* Allocate DMA memory to hold sense buffer */
		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		    GFP_KERNEL);
		if (!io_req->sense_buffer) {
			QEDF_ERR(&qedf->dbg_ctx,
				 "Failed to alloc sense buffer.\n");
			goto mem_err;
		}

		/* Allocate task parameters to pass to f/w init functions */
		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
					      GFP_KERNEL);
		if (!io_req->task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}

		/*
		 * Allocate scatter/gather list info to pass to f/w init
		 * functions.
		 */
		io_req->sgl_task_params = kzalloc(
		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
		if (!io_req->sgl_task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate sgl_task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}
	}

	/* Allocate pool of io_bdts - one for each qedf_ioreq */
	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
	    GFP_KERNEL);

	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
		goto mem_err;
	}

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
		    GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc io_bdt_pool[%d].\n", i);
			goto mem_err;
		}
	}

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc bdt_tbl[%d].\n", i);
			goto mem_err;
		}
	}
	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "cmgr->free_list_cnt=%d.\n",
	    atomic_read(&cmgr->free_list_cnt));

	return cmgr;

mem_err:
	qedf_cmd_mgr_free(cmgr);
	return NULL;
}
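
/*
 * Allocate a free qedf_ioreq for a new command on this rport.  The search
 * rotates through the command array starting at cmd_mgr->idx under
 * cmd_mgr->lock, and the allocation is refused when the SQ is full, when the
 * connection already has the maximum number of active I/Os, or when only the
 * globally reserved task ids remain free.
 */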
struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;
	u16 xid;
	uint32_t free_sqes;
	int i;
	unsigned long flags;

	free_sqes = atomic_read(&fcport->free_sqes);

	if (!free_sqes) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_sqes=%d.\n ",
		    free_sqes);
		goto out_failed;
	}

	/* Limit the number of outstanding R/W tasks */
	if ((atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, num_active_ios=%d.\n",
		    atomic_read(&fcport->num_active_ios));
		goto out_failed;
	}

	/* Limit global TIDs certain tasks */
	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_list_cnt=%d.\n",
		    atomic_read(&cmd_mgr->free_list_cnt));
		goto out_failed;
	}

	spin_lock_irqsave(&cmd_mgr->lock, flags);
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
		cmd_mgr->idx++;
		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
			cmd_mgr->idx = 0;

		/* Check to make sure command was previously freed */
		if (!io_req->alloc)
			break;
	}

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		goto out_failed;
	}

	if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req found to be dirty ox_id = 0x%x.\n",
			 io_req->xid);

	/* Clear any flags now that we've reallocated the xid */
	io_req->flags = 0;
	io_req->alloc = 1;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);
	xid = io_req->xid;
	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	/* Clear any stale sc_cmd back pointer */
	io_req->sc_cmd = NULL;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);	/* ID: 001 */
	atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto out_failed;
	}
	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;
	io_req->tm_flags = 0;

	/* Reset sequence offset data */
	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff;	/* No OX_ID */

	return io_req;

out_failed:
	/* Record failure for stats and return NULL to caller */
	qedf->alloc_failures++;

	return NULL;
}
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct scsi_sge);

	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}
void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;
	unsigned long flags;

	if (io_req->cmd_type == QEDF_SCSI_CMD) {
		QEDF_WARN(&fcport->qedf->dbg_ctx,
			  "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
			  io_req, io_req->xid);
		WARN_ON(io_req->sc_cmd);
	}

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
	if (atomic_read(&fcport->num_active_ios) < 0) {
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
	}

	/* Increment task retry identifier now that the request is released */
	io_req->task_retry_identifier++;
	io_req->fcport = NULL;

	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);

	spin_lock_irqsave(&cmd_mgr->lock, flags);
	io_req->fcport = NULL;
	io_req->alloc = 0;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
}
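
/*
 * DMA-map the scatter/gather list of a SCSI command into the io_req's buffer
 * descriptor table and classify the request as a fast or slow SGE (slow when
 * an intermediate element of a large write is smaller than a page).
 */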
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	u32 sg_len;
	u64 addr, end_addr;
	int i = 0;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);
	sg = scsi_sglist(sc);

	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;

	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = (u32)sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (u64)(addr + sg_len);

		/*
		 * Intermediate s/g element so check if start and end address
		 * is page aligned.  Only required for writes and only if the
		 * number of scatter/gather elements is 8 or more.
		 */
		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;

		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
		bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
		bd[bd_count].sge_len = cpu_to_le32(sg_len);
		bd_count++;
		byte_count += sg_len;
	}

	/* To catch a case where FAST and SLOW nothing is set, set FAST */
	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
			  scsi_bufflen(sc), io_req->xid);

	return bd_count;
}
static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);
	} else {
		bd_count = 0;
		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
		bd[0].sge_len = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
	struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/* fcp_cmnd is 32 bytes */
	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	/* 8 bytes: SCSI LUN info */
	int_to_scsilun(sc_cmd->device->lun,
			(struct scsi_lun *)&fcp_cmnd->fc_lun);

	/* 4 bytes: flag info */
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	/* Populate data direction */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	}

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	/* 16 bytes: CDB information */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	/* 4 bytes: FCP data length */
	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}
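
/*
 * Build the firmware task context and the task/SGL parameter blocks for a
 * regular read or write command, byte-swap the FCP_CMND IU to big endian and
 * hand everything to init_initiator_rw_fcoe_task().
 */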
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
	struct fcoe_wqe *sqe)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	u8 fcp_cmnd[32];
	u32 tmp_fcp_cmnd[8];
	int bd_count = 0;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	struct regpair sense_data_buffer_phys_addr;
	u32 tx_io_size = 0;
	u32 rx_io_size = 0;
	int i, cnt;

	/* Note init_initiator_rw_fcoe_task memsets the task context */
	io_req->task = task_ctx;
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

	/* Set task type based on DMA direction of command */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
			tx_io_size = io_req->data_xfer_len;
		} else {
			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
			rx_io_size = io_req->data_xfer_len;
		}
	}

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = task_type;
	io_req->task_params->tx_io_size = tx_io_size;
	io_req->task_params->rx_io_size = rx_io_size;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	io_req->task_params->cq_rss_number = cq_idx;
	io_req->task_params->is_tape_device = fcport->dev_type;

	/* Fill in information for scatter/gather list */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
		bd_count = bd_tbl->bd_valid;
		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
		io_req->sgl_task_params->sgl_phys_addr.lo =
			U64_LO(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->sgl_phys_addr.hi =
			U64_HI(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->num_sges = bd_count;
		io_req->sgl_task_params->total_buffer_size =
		    scsi_bufflen(io_req->sc_cmd);
		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
			io_req->sgl_task_params->small_mid_sge = 1;
		else
			io_req->sgl_task_params->small_mid_sge = 0;
	}

	/* Fill in physical address of sense buffer */
	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

	/* fill FCP_CMND IU */
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

	/* Swap fcp_cmnd since FC is big endian */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
	for (i = 0; i < cnt; i++) {
		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
	}
	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

	init_initiator_rw_fcoe_task(io_req->task_params,
				    io_req->sgl_task_params,
				    sense_data_buffer_phys_addr,
				    io_req->task_retry_identifier, fcp_cmnd);

	/* Increment SGL type counters */
	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
		qedf->slow_sge_ios++;
	else
		qedf->fast_sge_ios++;
}
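
/*
 * Build the task context for a middle path request (ELS or task management).
 * The request and response buffers allocated in qedf_init_mp_req() are each
 * described by a single SGE, and completions are steered to CQ 0.
 */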
void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	struct fcoe_tx_mid_path_params task_fc_hdr;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Initializing MP task for cmd_type=%d\n",
		  io_req->cmd_type);

	qedf->control_requests++;

	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

	/* Setup the task from io_req for easy reference */
	io_req->task = task_ctx;

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
	io_req->task_params->tx_io_size = io_req->data_xfer_len;
	/* rx_io_size tells the f/w how large a response buffer we have */
	io_req->task_params->rx_io_size = PAGE_SIZE;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	/* Return middle path commands on CQ 0 */
	io_req->task_params->cq_rss_number = 0;
	io_req->task_params->is_tape_device = fcport->dev_type;

	fc_hdr = &(mp_req->req_fc_hdr);
	/* Set OX_ID and RX_ID based on driver task id */
	fc_hdr->fh_ox_id = io_req->xid;
	fc_hdr->fh_rx_id = htons(0xffff);

	/* Set up FC header information */
	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	/* Set up s/g list parameters for request buffer */
	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
	tx_sgl_task_params.small_mid_sge = 0;

	/* Set up s/g list parameters for response buffer */
	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
	rx_sgl_task_params.small_mid_sge = 0;

	/*
	 * Last arg is 0 as previous code did not set that we wanted the
	 * fc header information.
	 */
	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);
}
/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
	u16 rval;

	rval = fcport->sq_prod_idx;

	/* Adjust ring index */
	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	return rval;
}
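
/*
 * Write the current firmware SQ producer index to the connection's doorbell
 * register so the firmware starts processing the newly posted WQEs.
 */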
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
	struct fcoe_db_data dbell = { 0 };

	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = fcport->fw_sq_prod_idx;
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	writel(*(u32 *)&dbell, fcport->p_doorbell);
	/*
	 * Fence required to flush the write combined buffer, since another
	 * CPU may write to the same doorbell address and data may be lost
	 * due to relaxed order nature of write combined bar.
	 */
	wmb();
}
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
			  int direction)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_io_log *io_log;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	unsigned long flags;
	uint8_t op;

	spin_lock_irqsave(&qedf->io_trace_lock, flags);

	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = io_req->xid;
	io_log->port_id = fcport->rdata->ids.port_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = op = sc_cmd->cmnd[0];
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->refcount = kref_read(&io_req->refcount);

	if (direction == QEDF_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
		io_log->req_cpu = io_req->cpu;
	} else if (direction == QEDF_IO_TRACE_RSP) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = io_req->int_cpu;
		io_log->rsp_cpu = smp_processor_id();
	}

	io_log->sge_type = io_req->sge_type;

	qedf->io_trace_idx++;
	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
		qedf->io_trace_idx = 0;

	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}
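
/*
 * Post a SCSI command that already has a qedf_ioreq bound to it: build the
 * BD list, grab an SQE, initialize the task context and ring the doorbell.
 * Called with fcport->rport_lock held by qedf_queuecommand().
 */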
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct e4_fcoe_task_context *task_ctx;
	u16 xid;
	enum fcoe_task_type req_type = 0;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	/* Initialize rest of io_req fields */
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;
	io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
		io_req->io_req_flags = QEDF_READ;
		qedf->input_requests++;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->io_req_flags = QEDF_WRITE;
		qedf->output_requests++;
	} else {
		io_req->io_req_flags = 0;
		qedf->control_requests++;
	}

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (qedf_build_bd_list_from_sg(io_req)) {
		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
		/* Release cmd will release io_req, but sc_cmd is assigned */
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EAGAIN;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
		/* Release cmd will release io_req, but sc_cmd is assigned */
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Record LUN number for later use if we need them */
	io_req->lun = (int)sc_cmd->device->lun;

	/* Obtain free SQE */
	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Get the task context */
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	if (!task_ctx) {
		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
			   xid);
		/* Release cmd will release io_req, but sc_cmd is assigned */
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);

	/* Set that command is with the firmware now */
	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	if (qedf_io_tracing && io_req->sc_cmd)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

	return 0;
}
int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport;
	struct qedf_ioreq *io_req;
	int rc = 0;
	int rval;
	unsigned long flags = 0;
	int num_sgs = 0;

	num_sgs = scsi_sg_count(sc_cmd);
	if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Number of SG elements %d exceeds the hardware limitation of %d.\n",
			 num_sgs, QEDF_MAX_BDS_PER_CMD);
		sc_cmd->result = DID_ERROR;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Returning DNC as unloading or stop io, flags 0x%lx.\n",
			  qedf->flags);
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if (!qedf->pdev->msix_enabled) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
		    sc_cmd);
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
			  rval, rport->port_id);
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	/* Retry command if we are doing a qed drain operation */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	if (lport->state != LPORT_ST_READY ||
	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	fcport = (struct qedf_rport *)&rp[1];

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	atomic_inc(&fcport->ios_to_queue);

	if (fcport->retry_delay_timestamp) {
		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
			fcport->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			atomic_dec(&fcport->ios_to_queue);
			goto exit_qcmd;
		}
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		atomic_dec(&fcport->ios_to_queue);
		goto exit_qcmd;
	}

	io_req->sc_cmd = sc_cmd;

	/* Take fcport->rport_lock for posting to fcport send queue */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (qedf_post_io_req(fcport, io_req)) {
		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
	atomic_dec(&fcport->ios_to_queue);

exit_qcmd:
	return rc;
}
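
/*
 * Decode the FCP_RSP IU from a completion CQE into the io_req: SCSI status,
 * residual count, response code and any sense data (copied into the midlayer
 * sense buffer, truncated to SCSI_SENSE_BUFFERSIZE).
 */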
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
	struct fcoe_cqe_rsp_info *fcp_rsp)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;
	uint8_t *rsp_info, *sense_data;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
	    fcp_rsp->scsi_status_code;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
		fcp_rsp_len = fcp_rsp->fcp_rsp_len;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
		fcp_sns_len = fcp_rsp->fcp_sns_len;

	io_req->fcp_rsp_len = fcp_rsp_len;
	io_req->fcp_sns_len = fcp_sns_len;
	rsp_info = sense_data = io_req->sense_buffer;

	/* fetch fcp_rsp_code */
	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
		/* Only for task management function */
		io_req->fcp_rsp_code = rsp_info[3];
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
		/* Adjust sense-data location. */
		sense_data += fcp_rsp_len;
	}

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Truncating sense buffer\n");
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
	}

	/* The sense buffer can be NULL for TMF commands */
	if (sc_cmd->sense_buffer) {
		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, sense_data,
			    fcp_sns_len);
	}
}
static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}
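
/*
 * Completion handler for a normal SCSI command CQE.  Parses the FCP response,
 * unmaps the S/G list, translates firmware underrun and transport errors into
 * a SCSI result and finally returns the command to the midlayer.
 */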
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	u16 xid;
	struct e4_fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct qedf_rport *fcport;
	int refcount;
	u16 scope, qualifier = 0;
	u8 fw_residual_flag = 0;

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
			 io_req->xid);
		return;
	}

	xid = io_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = io_req->sc_cmd;
	fcp_rsp = &cqe->cqe_info.rsp_info;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->device) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Device for sc_cmd %p is NULL.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
		    "sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		   "is not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	fcport = io_req->fcport;

	/*
	 * When flush is active, let the cmds be completed from the cleanup
	 * context
	 */
	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
	    (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
	     sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping good completion xid=0x%x as fcport is flushing",
			  io_req->xid);
		return;
	}

	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	qedf_unmap_sg_list(qedf, io_req);

	/* Check for FCP transport error */
	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
		    io_req->fcp_rsp_code);
		sc_cmd->result = DID_BUS_BUSY << 16;
		goto out;
	}

	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
	if (fw_residual_flag) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
			 io_req->xid, fcp_rsp->rsp_flags.flags,
			 io_req->fcp_resid,
			 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
			 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);

		if (io_req->cdb_status == 0)
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		else
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

		/*
		 * Set resid to the whole buffer length so we won't try to
		 * reuse any previously read data.
		 */
		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
		goto out;
	}

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good I/O completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
			    "lba=%02x%02x%02x%02x cdb_status=%d "
			    "fcp_resid=0x%x refcount=%d.\n",
			    qedf->lport->host->host_no, sc_cmd->device->id,
			    sc_cmd->device->lun, io_req->xid,
			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
			    io_req->cdb_status, io_req->fcp_resid,
			    refcount);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Check whether we need to set retry_delay at
				 * all based on retry_delay module parameter
				 * and the status qualifier.
				 */
				scope = fcp_rsp->retry_delay_timer & 0xC000;
				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

				if (qedf_retry_delay &&
				    scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					/* Check we don't go over the max */
					if (qualifier > QEDF_RETRY_DELAY_MAX)
						qualifier =
						    QEDF_RETRY_DELAY_MAX;
					fcport->retry_delay_timestamp =
					    jiffies + (qualifier * HZ / 10);
				}
				/* Record stats */
				if (io_req->cdb_status ==
				    SAM_STAT_TASK_SET_FULL)
					qedf->task_set_fulls++;
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
			   io_req->fcp_status);
		break;
	}

out:
	if (qedf_io_tracing)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

	/*
	 * We wait till the end of the function to clear the
	 * outstanding bit in case we need to send an abort
	 */
	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result)
{
	struct scsi_cmnd *sc_cmd;
	int refcount;

	if (!io_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
		return;
	}

	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "io_req:%p scsi_done handling already done\n",
			  io_req);
		return;
	}

	/*
	 * We will be done with this command after this call so clear the
	 * outstanding bit.
	 */
	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	sc_cmd = io_req->sc_cmd;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!virt_addr_valid(sc_cmd)) {
		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->device) {
		QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!virt_addr_valid(sc_cmd->device)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->sense_buffer) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!virt_addr_valid(sc_cmd->sense_buffer)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->scsi_done) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	qedf_unmap_sg_list(qedf, io_req);

	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	    refcount);

	/*
	 * Set resid to the whole buffer length so we won't try to reuse any
	 * previously read data.
	 */
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	if (qedf_io_tracing)
		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);

	return;

bad_scsi_ptr:
	/*
	 * Clear the io_req->sc_cmd backpointer so we don't try to process
	 * this again.
	 */
	io_req->sc_cmd = NULL;
	kref_put(&io_req->refcount, qedf_release_cmd);	/* ID: 001 */
}
/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * popping.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval, i;
	struct qedf_rport *fcport = io_req->fcport;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	if (!cqe) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "cqe is NULL for io_req %p xid=0x%x\n",
			  io_req, io_req->xid);
		return;
	}

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
		  "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
		  "err_warn_bitmap=%08x:%08x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
		  "rx_buff_off=%08x, rx_id=%04x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	/* Normalize the error bitmap value to just an unsigned int */
	err_warn_bit_map = (u64)
	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & (u64)((u64)1 << i)) {
			err_warn = i;
			break;
		}
	}

	/* Check if REC TOV expired if this is a tape device */
	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		if (err_warn ==
		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
				io_req->rx_buf_off =
				    cqe->cqe_info.err_info.rx_buf_off;
				io_req->tx_buf_off =
				    cqe->cqe_info.err_info.tx_buf_off;
				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
				rval = qedf_send_rec(io_req);
				/*
				 * We only want to abort the io_req if we
				 * can't queue the REC command as we want to
				 * keep the exchange open for recovery.
				 */
				if (rval)
					goto send_abort;
			}
			return;
		}
	}

send_abort:
	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}
/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval;

	if (!cqe) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "cqe is NULL for io_req %p\n", io_req);
		return;
	}

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
		  "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
		  "err_warn_bitmap=%08x:%08x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
		  "rx_buff_off=%08x, rx_id=%04x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);
		return;
	}

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}
static void qedf_flush_els_req(struct qedf_ctx *qedf,
	struct qedf_ioreq *els_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
	    kref_read(&els_req->refcount));

	/*
	 * Need to distinguish this from a timeout when calling the
	 * els_req's callback function.
	 */
	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

	/* Cancel the timer */
	cancel_delayed_work_sync(&els_req->timeout_work);

	/* Call callback function to complete command */
	if (els_req->cb_func && els_req->cb_arg) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	/* Release kref for original initiate_els */
	kref_put(&els_req->refcount, qedf_release_cmd);
}
/* A value of -1 for lun is a wild card that means flush all
 * active SCSI I/Os for the target.
 */
void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
{
	struct qedf_ioreq *io_req;
	struct qedf_ctx *qedf;
	struct qedf_cmd_mgr *cmd_mgr;
	int i, rc;
	unsigned long flags;
	int flush_cnt = 0;
	int wait_cnt = 100;
	int refcount = 0;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL\n");
		return;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return;
	}

	/* Only wait for all commands to be queued in the Upload context */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
	    (lun == -1)) {
		while (atomic_read(&fcport->ios_to_queue)) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Waiting for %d I/Os to be queued\n",
				  atomic_read(&fcport->ios_to_queue));
			if (wait_cnt == 0) {
				QEDF_ERR(NULL,
					 "%d IOs request could not be queued\n",
					 atomic_read(&fcport->ios_to_queue));
			}
			msleep(20);
			wait_cnt--;
		}
	}

	cmd_mgr = qedf->cmd_mgr;

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
		  atomic_read(&fcport->num_active_ios), fcport,
		  fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");

	mutex_lock(&qedf->flush_mutex);
	if (lun == -1) {
		set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
	} else {
		set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
		fcport->lun_reset_lun = lun;
	}

	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[i];

		if (!io_req->fcport)
			continue;

		spin_lock_irqsave(&cmd_mgr->lock, flags);

		if (io_req->alloc) {
			if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
				if (io_req->cmd_type == QEDF_SCSI_CMD)
					QEDF_ERR(&qedf->dbg_ctx,
						 "Allocated but not queued, xid=0x%x\n",
						 io_req->xid);
			}
			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		} else {
			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
			continue;
		}

		if (io_req->fcport != fcport)
			continue;

		/* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
		 * but RRQ is still pending.
		 * Workaround: Within qedf_send_rrq, we check if the fcport is
		 * NULL, and we drop the ref on the io_req to clean it up.
		 */
		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
				  io_req->xid, io_req->cmd_type, refcount);
			/* If RRQ work has been queued, try to cancel it and
			 * free the io_req.
			 */
			if (atomic_read(&io_req->state) ==
			    QEDFC_CMD_ST_RRQ_WAIT) {
				if (cancel_delayed_work_sync
				    (&io_req->rrq_work)) {
					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
						  "Putting reference for pending RRQ work xid=0x%x.\n",
						  io_req->xid);
					kref_put(&io_req->refcount,
						 qedf_release_cmd);
				}
			}
			continue;
		}

		/* Only consider flushing ELS during target reset */
		if (io_req->cmd_type == QEDF_ELS &&
		    lun == -1) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);
				continue;
			}
			flush_cnt++;
			qedf_flush_els_req(qedf, io_req);
			/*
			 * Release the kref and go back to the top of the
			 * loop.
			 */
			goto free_cmd;
		}

		if (io_req->cmd_type == QEDF_ABTS) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);
				continue;
			}
			if (lun != -1 && io_req->lun != lun)
				goto free_cmd;

			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			    "Flushing abort xid=0x%x.\n", io_req->xid);

			if (cancel_delayed_work_sync(&io_req->rrq_work)) {
				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
					  "Putting ref for cancelled RRQ work xid=0x%x.\n",
					  io_req->xid);
				kref_put(&io_req->refcount, qedf_release_cmd);
			}

			if (cancel_delayed_work_sync(&io_req->timeout_work)) {
				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
					  "Putting ref for cancelled tmo work xid=0x%x.\n",
					  io_req->xid);
				qedf_initiate_cleanup(io_req, true);
				/* Notify eh_abort handler that ABTS is
				 * complete.
				 */
				complete(&io_req->abts_done);
				clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
				kref_put(&io_req->refcount, qedf_release_cmd);
			}
			flush_cnt++;
			goto free_cmd;
		}

		if (!io_req->sc_cmd)
			continue;
		if (!io_req->sc_cmd->device) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Device backpointer NULL for sc_cmd=%p.\n",
				  io_req->sc_cmd);
			/* Put reference for non-existent scsi_cmnd */
			io_req->sc_cmd = NULL;
			qedf_initiate_cleanup(io_req, false);
			kref_put(&io_req->refcount, qedf_release_cmd);
			continue;
		}
		if (lun > -1) {
			if (io_req->lun != lun)
				continue;
		}

		/*
		 * Use kref_get_unless_zero in the unlikely case the command
		 * we're about to flush was completed in the normal SCSI path
		 */
		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
			continue;
		}

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Cleanup xid=0x%x.\n", io_req->xid);
		flush_cnt++;

		/* Cleanup task and return I/O mid-layer */
		qedf_initiate_cleanup(io_req, true);

free_cmd:
		kref_put(&io_req->refcount, qedf_release_cmd);	/* ID: 004 */
	}

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Flushed 0x%x I/Os, active=0x%x.\n",
		  flush_cnt, atomic_read(&fcport->num_active_ios));
	/* Only wait for all commands to complete in the Upload context */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
	    (lun == -1)) {
		while (atomic_read(&fcport->num_active_ios)) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
				  flush_cnt,
				  atomic_read(&fcport->num_active_ios),
				  wait_cnt);
			if (wait_cnt == 0) {
				QEDF_ERR(&qedf->dbg_ctx,
					 "Flushed %d I/Os, active=%d.\n",
					 flush_cnt,
					 atomic_read(&fcport->num_active_ios));
				for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
					io_req = &cmd_mgr->cmds[i];
					if (io_req->fcport &&
					    io_req->fcport == fcport) {
						refcount =
						kref_read(&io_req->refcount);
						set_bit(QEDF_CMD_DIRTY,
							&io_req->flags);
						QEDF_ERR(&qedf->dbg_ctx,
							 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
							 io_req, io_req->xid,
							 io_req->flags,
							 io_req->sc_cmd,
							 refcount,
							 io_req->cmd_type);
					}
				}
				break;
			}
			msleep(500);
			wait_cnt--;
		}
	}

	clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
	clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
	mutex_unlock(&qedf->flush_mutex);
}
/*
 * Initiate an ABTS middle path command. Note that we don't have to initialize
 * the task context for an ABTS task.
 */
int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
	struct fc_lport *lport;
	struct qedf_rport *fcport = io_req->fcport;
	struct fc_rport_priv *rdata;
	struct qedf_ctx *qedf;
	u32 r_a_tov = 0;
	int rc = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;
	int refcount = 0;

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		rc = 1;
		goto out;
	}

	qedf = fcport->qedf;
	rdata = fcport->rdata;

	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
		rc = 1;
		goto out;
	}

	r_a_tov = rdata->r_a_tov;
	lport = qedf->lport;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
			 io_req->xid, io_req->sc_cmd);
		rc = 1;
		goto drop_rdata_kref;
	}

	kref_get(&io_req->refcount);

	qedf->control_requests++;
	qedf->packet_aborts++;

	/* Set the command type to abort */
	io_req->cmd_type = QEDF_ABTS;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
		  "ABTS io_req xid = 0x%x refcount=%d\n",
		  io_req->xid, refcount);

	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

drop_rdata_kref:
	kref_put(&rdata->kref, fc_rport_destroy);
out:
	return rc;
}
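
/*
 * Handle the completion CQE for an ABTS.  On a BA_ACC the command is kept
 * alive and an RRQ is scheduled after R_A_TOV; on a BA_RJT or unknown
 * response the cleanup path returns the command.
 */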
void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	uint32_t r_ctl;
	int rc;
	struct qedf_rport *fcport = io_req->fcport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
		   "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);

	r_ctl = cqe->cqe_info.abts_info.r_ctl;

	/* This was added at a point when we were scheduling abts_compl &
	 * cleanup_compl on different CPUs and there was a possibility of
	 * the io_req to be freed from the other context before we got here.
	 */
	if (!fcport) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping ABTS completion xid=0x%x as fcport is NULL",
			  io_req->xid);
		return;
	}

	/*
	 * When flush is active, let the cmds be completed from the cleanup
	 * context
	 */
	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping ABTS completion xid=0x%x as fcport is flushing",
			  io_req->xid);
		return;
	}

	if (!cancel_delayed_work(&io_req->timeout_work)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Wasn't able to cancel abts timeout work.\n");
	}

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
		rc = kref_get_unless_zero(&io_req->refcount);	/* ID: 003 */
		if (!rc) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
				  "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
				  io_req->xid);
			return;
		}
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response.
		 */
		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
		    msecs_to_jiffies(qedf->lport->r_a_tov));
		atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
		break;
	/* For error cases let the cleanup return the command */
	case FC_RCTL_BA_RJT:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - RJT\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
		break;
	default:
		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
		break;
	}

	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

	if (io_req->sc_cmd) {
		if (!io_req->return_scsi_cmd_on_abts)
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
				  "Not call scsi_done for xid=0x%x.\n",
				  io_req->xid);
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	/* Notify eh_abort handler that ABTS is complete */
	complete(&io_req->abts_done);

	kref_put(&io_req->refcount, qedf_release_cmd);
}
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req;
	struct scsi_sge *mp_req_bd;
	struct scsi_sge *mp_resp_bd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	dma_addr_t addr;
	uint64_t sz;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");

	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct qedf_mp_req));

	if (io_req->cmd_type != QEDF_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else {
		mp_req->req_len = io_req->data_xfer_len;
	}

	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
	    &mp_req->req_buf_dma, GFP_KERNEL);
	if (!mp_req->req_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
	if (!mp_req->resp_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
			  "buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct scsi_sge);
	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_req_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_resp_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Fill in the request BD */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->sge_addr.lo = U64_LO(addr);
	mp_req_bd->sge_addr.hi = U64_HI(addr);
	mp_req_bd->sge_len = QEDF_PAGE_SIZE;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->sge_addr.lo = U64_LO(addr);
	mp_resp_bd->sge_addr.hi = U64_HI(addr);
	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;

	return 0;
}
/*
 * Last ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
		return;
	}

	/* Set bit to return all queuecommand requests as busy */
	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	/* Call qed drain request for function. Should be synchronous */
	qed_ops->common->drain(qedf->cdev);

	/* Settle time for CQEs to be returned */
	msleep(100);

	/* Unplug and continue */
	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
}
/*
 * Returns SUCCESS if the cleanup task does not time out, otherwise returns
 * FAILED.
 */
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	uint16_t xid;
	struct e4_fcoe_task_context *task;
	int tmo = 0;
	int rc = SUCCESS;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;
	int refcount = 0;

	fcport = io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return SUCCESS;
	}

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		return SUCCESS;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return SUCCESS;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
			  "cleanup processing or already completed.\n",
			  io_req->xid);
		return SUCCESS;
	}
	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		/* Need to make sure we clear the flag since it was set */
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return FAILED;
	}

	if (io_req->cmd_type == QEDF_CLEANUP) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
			 io_req->xid, io_req->cmd_type);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return SUCCESS;
	}

	refcount = kref_read(&io_req->refcount);

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
		  io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
		  refcount, fcport, fcport->rdata->ids.port_id);

	/* Cleanup cmds re-use the same TID as the original I/O */
	xid = io_req->xid;
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->cleanup_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
					  QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
			  "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	/* If this is a TASK MGMT command, handle it here; the reference will
	 * be decreased in qedf_execute_tmf().
	 */
	if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
		complete(&io_req->tm_done);
	}

	if (io_req->sc_cmd) {
		if (!io_req->return_scsi_cmd_on_abts)
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
				  "Not call scsi_done for xid=0x%x.\n",
				  io_req->xid);
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}
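
/*
 * Completion handler for a cleanup task: clear the in-cleanup flag and wake
 * the waiter in qedf_initiate_cleanup().
 */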
void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
		   io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->cleanup_done);
}
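
/*
 * Build and post a task management request (LUN or target reset) for the
 * given rport, wait for its completion and then flush any I/Os still active
 * on the affected LUN or target.
 */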
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct e4_fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	int lun = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
		return FAILED;
	}

	lun = (int)sc_cmd->device->lun;
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		rc = FAILED;
		goto no_flush;
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
		rc = FAILED;
		goto no_flush;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf->lun_resets++;
	else if (tm_flags == FCP_TMF_TGT_RESET)
		qedf->target_resets++;

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	/* Set TM flags */
	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = false;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
		   "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	tmo = wait_for_completion_timeout(&io_req->tm_done,
					  QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
		/* Clear outstanding bit since command timed out */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}

	/*
	 * Double check that fcport has not gone into an uploading state before
	 * executing the command flush for the LUN/target.
	 */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "fcport is uploading, not executing flush.\n");
		goto no_flush;
	}

	/* We do not need this io_req any more */
	kref_put(&io_req->refcount, qedf_release_cmd);

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, lun);
	else
		qedf_flush_active_ios(fcport, -1);

no_flush:
	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
	return rc;
}
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
	int rc = SUCCESS;
	int rval;
	struct qedf_ioreq *io_req = NULL;
	int ref_cnt = 0;
	struct fc_rport_priv *rdata = fcport->rdata;

	QEDF_ERR(NULL,
		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
		 tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
		 rport->scsi_target_id, (int)sc_cmd->device->lun);

	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(NULL, "stale rport\n");
		return FAILED;
	}

	QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
		 "LUN RESET");

	if (sc_cmd->SCp.ptr) {
		io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
		ref_cnt = kref_read(&io_req->refcount);
		QEDF_ERR(NULL,
			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
			 io_req, io_req->xid, ref_cnt);
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		goto tmf_err;

	if (!fcport) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	qedf = fcport->qedf;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
		rc = SUCCESS;
		goto tmf_err;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		if (!fcport->rdata)
			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
				 fcport);
		else
			QEDF_ERR(&qedf->dbg_ctx,
				 "fcport %p port_id=%06x is uploading.\n",
				 fcport, fcport->rdata->ids.port_id);
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	kref_put(&rdata->kref, fc_rport_destroy);
	return rc;
}
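
/*
 * Completion handler for a task management request: parse the FCP response
 * and wake the waiter in qedf_execute_tmf().
 */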
void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}
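
/*
 * Handle an unsolicited frame received into a BDQ buffer: copy it into a
 * libfc frame, defer processing to the I/O workqueue, then return the buffer
 * to the firmware by advancing the BDQ producer index.
 */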
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t tmp;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;
	struct scsi_bd *p_bd_info;

	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
		  le32_to_cpu(p_bd_info->address.hi),
		  le32_to_cpu(p_bd_info->address.lo),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
		  qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
			  bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
			  "unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
			  "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
			       (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	QEDF_WARN(&qedf->dbg_ctx,
		  "Processing Unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
		  ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
		  fh->fh_type, fc_frame_payload_op(fp));

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context.
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
			   "work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);

increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	tmp = readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	tmp = readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);
}