/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>

void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}

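/*
 * Delayed-work handler for per-command timeouts. Which recovery path runs
 * depends on the command type that armed the timer (ABTS, ELS or sequence
 * cleanup); see the switch below.
 */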
static void qedf_cmd_timeout(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct qedf_rport *fcport = io_req->fcport;
	u8 op = 0;

	switch (io_req->cmd_type) {
	case QEDF_ABTS:
		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
		    io_req->xid);
		/* Cleanup timed out ABTS */
		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);

		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
		kref_put(&io_req->refcount, qedf_release_cmd);

		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
		qedf_restart_rport(fcport);
		break;
	case QEDF_ELS:
		kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang.  For now just free
		 * the resources of the original I/O and the RRQ.
		 */
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
		    io_req->xid);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		/* Call callback function to complete command */
		if (io_req->cb_func && io_req->cb_arg) {
			op = io_req->cb_arg->op;
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		qedf_initiate_cleanup(io_req, true);
		kref_put(&io_req->refcount, qedf_release_cmd);
		break;
	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
		break;
	default:
		break;
	}
}

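/* Tear down everything allocated by qedf_cmd_mgr_alloc(). */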
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	size_t bd_tbl_sz;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	int num_ios;
	int i;
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		kfree(io_req->sgl_task_params);
		kfree(io_req->task_params);
		/* Make sure we free per command sense buffer */
		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	/* Free command manager itself */
	vfree(cmgr);
}

static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	qedf_send_rrq(io_req);
}

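/*
 * Allocate the per-adapter command manager: one qedf_ioreq per firmware
 * task (FCOE_PARAMS_NUM_TASKS), each with a DMA sense buffer, task
 * parameters and an SGL descriptor, plus the io_bdt pool that backs the
 * scatter/gather tables.  On any allocation failure everything set up so
 * far is torn down through qedf_cmd_mgr_free().
 *
 * A minimal usage sketch (the probe/remove pairing is assumed, not shown
 * in this file):
 *
 *	qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
 *	if (!qedf->cmd_mgr)
 *		return -ENOMEM;
 *	...
 *	qedf_cmd_mgr_free(qedf->cmd_mgr);
 */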
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	int i;
	int num_ios;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	/* Make sure num_queues is already set before calling this function */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
		return NULL;
	}

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
		    "max_xid 0x%x.\n", min_xid, max_xid);
		return NULL;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
	    "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
	if (!cmgr) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
		return NULL;
	}

	cmgr->qedf = qedf;
	spin_lock_init(&cmgr->lock);

	/*
	 * Initialize I/O request fields.
	 */
	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		/* Allocate DMA memory to hold sense buffer */
		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		    GFP_KERNEL);
		if (!io_req->sense_buffer)
			goto mem_err;

		/* Allocate task parameters to pass to f/w init functions */
		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
					      GFP_KERNEL);
		if (!io_req->task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}

		/*
		 * Allocate scatter/gather list info to pass to f/w init
		 * functions.
		 */
		io_req->sgl_task_params = kzalloc(
		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
		if (!io_req->sgl_task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate sgl_task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}
	}

	/* Allocate pool of io_bdts - one for each qedf_ioreq */
	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
	    GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
		goto mem_err;
	}

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
		    GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx),
			    "Failed to alloc io_bdt_pool[%d].\n", i);
			goto mem_err;
		}
	}

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx),
			    "Failed to alloc bdt_tbl[%d].\n", i);
			goto mem_err;
		}
	}

	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "cmgr->free_list_cnt=%d.\n",
	    atomic_read(&cmgr->free_list_cnt));

	return cmgr;

mem_err:
	qedf_cmd_mgr_free(cmgr);
	return NULL;
}

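/*
 * Reserve a free qedf_ioreq from the command manager for this fcport.  The
 * allocation is throttled before the free list is scanned: available SQEs
 * on the connection, outstanding R/W tasks per connection, and a global
 * reserve of TIDs (GBL_RSVD_TASKS).  Returns NULL if no command can be
 * allocated; qedf->alloc_failures counts those events.
 */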
struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;
	u16 xid;
	uint32_t free_sqes;
	int i;
	unsigned long flags;

	free_sqes = atomic_read(&fcport->free_sqes);
	if (!free_sqes) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_sqes=%d.\n ",
		    free_sqes);
		goto out_failed;
	}

	/* Limit the number of outstanding R/W tasks */
	if ((atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, num_active_ios=%d.\n",
		    atomic_read(&fcport->num_active_ios));
		goto out_failed;
	}

	/* Limit global TIDs for certain tasks */
	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_list_cnt=%d.\n",
		    atomic_read(&cmd_mgr->free_list_cnt));
		goto out_failed;
	}

	spin_lock_irqsave(&cmd_mgr->lock, flags);
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
		cmd_mgr->idx++;
		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
			cmd_mgr->idx = 0;

		/* Check to make sure command was previously freed */
		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
			break;
	}

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		goto out_failed;
	}

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);
	xid = io_req->xid;
	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto out_failed;
	}
	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;
	io_req->tm_flags = 0;

	/* Reset sequence offset data */
	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff;	/* No OX_ID */

	return io_req;

out_failed:
	/* Record failure for stats and return NULL to caller */
	qedf->alloc_failures++;

	return NULL;
}

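/*
 * Free the middle-path (ELS/TMF) DMA resources attached to an io_req.
 * Safe to call on a partially initialized qedf_mp_req; each buffer is
 * checked for NULL before it is freed.
 */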
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct scsi_sge);

	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}

	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}

	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}

	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	if (atomic_read(&fcport->num_active_ios) < 0)
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

	/* Increment task retry identifier now that the request is released */
	io_req->task_retry_identifier++;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
}

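/*
 * Split one DMA-mapped scatter/gather element that is larger than
 * QEDF_MAX_BD_LEN into multiple buffer descriptors of at most
 * QEDF_BD_SPLIT_SZ bytes, starting at bd_index in the command's BD table.
 * Returns the number of descriptors written.  For example (illustrative
 * sizes only, the real constants live in the driver header): a 192 KiB
 * element with a 64 KiB split size would be emitted as three consecutive
 * 64 KiB descriptors.
 */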
static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
	int bd_index)
{
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len > QEDF_BD_SPLIT_SZ)
			frag_size = QEDF_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
		bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
		bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;

		addr += (u64)frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}

static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr, end_addr;
	int i;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);

	sg = scsi_sglist(sc);

	/*
	 * New condition to send single SGE as cached-SGL with length less
	 * than 64k.
	 */
	if ((sg_count == 1) && (sg_dma_len(sg) <=
	    QEDF_MAX_SGLEN_FOR_CACHESGL)) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);

		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
		bd[bd_count].sge_addr.hi = (addr >> 32);
		bd[bd_count].sge_len = (u16)sg_len;

		return ++bd_count;
	}

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (u64)(addr + sg_len);

		/*
		 * First s/g element in the list so check if the end_addr
		 * is page aligned. Also check to make sure the length is
		 * at least page size.
		 */
		if ((i == 0) && (sg_count > 1) &&
		    ((end_addr % QEDF_PAGE_SIZE) ||
		    sg_len < QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Last s/g element so check if the start address is page
		 * aligned.
		 */
		else if ((i == (sg_count - 1)) && (sg_count > 1) &&
		    (addr % QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Intermediate s/g element so check if start and end address
		 * is page aligned.
		 */
		else if ((i != 0) && (i != (sg_count - 1)) &&
		    ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
			io_req->use_slowpath = true;

		if (sg_len > QEDF_MAX_BD_LEN) {
			sg_frags = qedf_split_bd(io_req, addr, sg_len,
			    bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].sge_addr.lo = U64_LO(addr);
			bd[bd_count].sge_addr.hi = U64_HI(addr);
			bd[bd_count].sge_len = (uint16_t)sg_len;
		}

		bd_count += sg_frags;
		byte_count += sg_len;
	}

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
			   scsi_bufflen(sc), io_req->xid);

	return bd_count;
}

static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
		bd[0].sge_len = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

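/*
 * Build the 32-byte FCP_CMND IU for this request: LUN, task attributes,
 * TM flags, data direction flags, CDB and data length.
 */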
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
	struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/* fcp_cmnd is 32 bytes */
	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	/* 8 bytes: SCSI LUN info */
	int_to_scsilun(sc_cmd->device->lun,
	    (struct scsi_lun *)&fcp_cmnd->fc_lun);

	/* 4 bytes: flag info */
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	/* Populate data direction */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	}

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	/* 16 bytes: CDB information */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	/* 4 bytes: FCP data length */
	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}

static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
	struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
	struct fcoe_wqe *sqe)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	u8 fcp_cmnd[32];
	u32 tmp_fcp_cmnd[8];
	int bd_count = 0;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	struct regpair sense_data_buffer_phys_addr;
	u32 tx_io_size = 0;
	u32 rx_io_size = 0;
	int i, cnt;

	/* Note init_initiator_rw_fcoe_task memsets the task context */
	io_req->task = task_ctx;
	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

	/* Set task type based on DMA direction of command */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
			tx_io_size = io_req->data_xfer_len;
		} else {
			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
			rx_io_size = io_req->data_xfer_len;
		}
	}

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = task_type;
	io_req->task_params->tx_io_size = tx_io_size;
	io_req->task_params->rx_io_size = rx_io_size;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	io_req->task_params->cq_rss_number = cq_idx;
	io_req->task_params->is_tape_device = fcport->dev_type;

	/* Fill in information for scatter/gather list */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
		bd_count = bd_tbl->bd_valid;
		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
		io_req->sgl_task_params->sgl_phys_addr.lo =
			U64_LO(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->sgl_phys_addr.hi =
			U64_HI(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->num_sges = bd_count;
		io_req->sgl_task_params->total_buffer_size =
		    scsi_bufflen(io_req->sc_cmd);
		io_req->sgl_task_params->small_mid_sge =
			io_req->use_slowpath;
	}

	/* Fill in physical address of sense buffer */
	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

	/* fill FCP_CMND IU */
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

	/* Swap fcp_cmnd since FC is big endian */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
	for (i = 0; i < cnt; i++) {
		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
	}
	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

	init_initiator_rw_fcoe_task(io_req->task_params,
				    io_req->sgl_task_params,
				    sense_data_buffer_phys_addr,
				    io_req->task_retry_identifier, fcp_cmnd);

	/* Increment SGL type counters */
	if (bd_count == 1) {
		qedf->single_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
	} else if (io_req->use_slowpath) {
		qedf->slow_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
	} else {
		qedf->fast_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
	}
}

void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	struct fcoe_tx_mid_path_params task_fc_hdr;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
	    "Initializing MP task for cmd_type=%d\n",
	    io_req->cmd_type);

	qedf->control_requests++;

	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

	/* Setup the task from io_req for easy reference */
	io_req->task = task_ctx;

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
	io_req->task_params->tx_io_size = io_req->data_xfer_len;
	/* rx_io_size tells the f/w how large a response buffer we have */
	io_req->task_params->rx_io_size = PAGE_SIZE;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	/* Return middle path commands on CQ 0 */
	io_req->task_params->cq_rss_number = 0;
	io_req->task_params->is_tape_device = fcport->dev_type;

	fc_hdr = &(mp_req->req_fc_hdr);
	/* Set OX_ID and RX_ID based on driver task id */
	fc_hdr->fh_ox_id = io_req->xid;
	fc_hdr->fh_rx_id = htons(0xffff);

	/* Set up FC header information */
	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	/* Set up s/g list parameters for request buffer */
	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
	tx_sgl_task_params.small_mid_sge = 0;

	/* Set up s/g list parameters for response buffer */
	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
	rx_sgl_task_params.small_mid_sge = 0;

	/*
	 * Last arg is 0 as previous code did not set that we wanted the
	 * fc header information.
	 */
	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);

	/* Midpath requests always consume 1 SGE */
	qedf->single_sge_ios++;
}

/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
	u16 rval;

	rval = fcport->sq_prod_idx;

	/* Adjust ring index */
	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	return rval;
}

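/*
 * Tell the firmware that new SQEs are available by writing the producer
 * index to the connection doorbell.  The callers in this file all follow
 * the same posting sequence under fcport->rport_lock:
 *
 *	spin_lock_irqsave(&fcport->rport_lock, flags);
 *	sqe_idx = qedf_get_sqe_idx(fcport);
 *	sqe = &fcport->sq[sqe_idx];
 *	memset(sqe, 0, sizeof(struct fcoe_wqe));
 *	... fill the SQE / init the task ...
 *	qedf_ring_doorbell(fcport);
 *	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 */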
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
	struct fcoe_db_data dbell = { 0 };

	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = fcport->fw_sq_prod_idx;
	writel(*(u32 *)&dbell, fcport->p_doorbell);
	/* Make sure SQ index is updated so f/w processes requests in order */
	wmb();
}

static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
	int direction)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_io_log *io_log;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	unsigned long flags;
	uint8_t op;

	spin_lock_irqsave(&qedf->io_trace_lock, flags);

	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = io_req->xid;
	io_log->port_id = fcport->rdata->ids.port_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = op = sc_cmd->cmnd[0];
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->refcount = kref_read(&io_req->refcount);

	if (direction == QEDF_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
		io_log->req_cpu = io_req->cpu;
	} else if (direction == QEDF_IO_TRACE_RSP) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = io_req->int_cpu;
		io_log->rsp_cpu = smp_processor_id();
	}

	io_log->sge_type = io_req->sge_type;

	qedf->io_trace_idx++;
	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
		qedf->io_trace_idx = 0;

	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}

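/*
 * Build the BD list for a SCSI command, initialize its task context and
 * post it to the connection send queue.  Called from qedf_queuecommand()
 * with fcport->rport_lock held.  A non-zero return means the command was
 * not posted and the caller must return the SQE to the pool.
 */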
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fcoe_task_context *task_ctx;
	u16 xid;
	enum fcoe_task_type req_type = 0;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	/* Initialize rest of io_req fields */
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;
	io_req->use_slowpath = false;	/* Assume fast SGL by default */

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
		io_req->io_req_flags = QEDF_READ;
		qedf->input_requests++;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->io_req_flags = QEDF_WRITE;
		qedf->output_requests++;
	} else {
		io_req->io_req_flags = 0;
		qedf->control_requests++;
	}

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (qedf_build_bd_list_from_sg(io_req)) {
		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EAGAIN;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Obtain free SQE */
	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Get the task context */
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	if (!task_ctx) {
		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
		    xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);

	if (qedf_io_tracing && io_req->sc_cmd)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

	return 0;
}

static int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = rport->dd_data;
	struct qedf_ioreq *io_req;
	int rc = 0;
	int rval;
	unsigned long flags = 0;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	/* Retry command if we are doing a qed drain operation */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	if (lport->state != LPORT_ST_READY ||
	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	fcport = (struct qedf_rport *)&rp[1];

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}
	if (fcport->retry_delay_timestamp) {
		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
			fcport->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			goto exit_qcmd;
		}
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	io_req->sc_cmd = sc_cmd;

	/* Take fcport->rport_lock for posting to fcport send queue */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (qedf_post_io_req(fcport, io_req)) {
		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

exit_qcmd:
	return rc;
}

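/*
 * Decode the FCP_RSP IU carried in the completion: residual, SCSI status,
 * optional response-code bytes and sense data, copying the (possibly
 * truncated) sense data into the midlayer's sense buffer.
 */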
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
	struct fcoe_cqe_rsp_info *fcp_rsp)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;
	uint8_t *rsp_info, *sense_data;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
	    fcp_rsp->scsi_status_code;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
		fcp_rsp_len = fcp_rsp->fcp_rsp_len;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
		fcp_sns_len = fcp_rsp->fcp_sns_len;

	io_req->fcp_rsp_len = fcp_rsp_len;
	io_req->fcp_sns_len = fcp_sns_len;
	rsp_info = sense_data = io_req->sense_buffer;

	/* fetch fcp_rsp_code */
	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
		/* Only for task management function */
		io_req->fcp_rsp_code = rsp_info[3];
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
		/* Adjust sense-data location. */
		sense_data += fcp_rsp_len;
	}

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Truncating sense buffer\n");
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
	}

	/* The sense buffer can be NULL for TMF commands */
	if (sc_cmd->sense_buffer) {
		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, sense_data,
			    fcp_sns_len);
	}
}

static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}

void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	u16 xid;
	struct fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct qedf_rport *fcport;
	int refcount;
	u16 scope, qualifier = 0;
	u8 fw_residual_flag = 0;
	int rval;

	xid = io_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = io_req->sc_cmd;
	fcp_rsp = &cqe->cqe_info.rsp_info;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->request) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
		    "sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->special) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
		    "request not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		    "is not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	fcport = io_req->fcport;

	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	qedf_unmap_sg_list(qedf, io_req);

	/* Check for FCP transport error */
	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
		    io_req->fcp_rsp_code);
		sc_cmd->result = DID_BUS_BUSY << 16;
		goto out;
	}

	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
	if (fw_residual_flag) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
		    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
		    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
		    cqe->cqe_info.rsp_info.fw_residual);

		if (io_req->cdb_status == 0)
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		else
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

		/* Abort the command since we did not get all the data */
		init_completion(&io_req->abts_done);
		rval = qedf_initiate_abts(io_req, true);
		if (rval) {
			QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		}

		/*
		 * Set resid to the whole buffer length so we won't try to
		 * reuse any previously read data.
		 */
		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
		goto out;
	}

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good I/O completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
			    "lba=%02x%02x%02x%02x cdb_status=%d "
			    "fcp_resid=0x%x refcount=%d.\n",
			    qedf->lport->host->host_no, sc_cmd->device->id,
			    sc_cmd->device->lun, io_req->xid,
			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
			    io_req->cdb_status, io_req->fcp_resid,
			    refcount);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Check whether we need to set retry_delay at
				 * all based on retry_delay module parameter
				 * and the status qualifier.
				 */
				scope = fcp_rsp->retry_delay_timer & 0xC000;
				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

				if (qedf_retry_delay &&
				    scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					/* Check we don't go over the max */
					if (qualifier > QEDF_RETRY_DELAY_MAX)
						qualifier =
						    QEDF_RETRY_DELAY_MAX;
					fcport->retry_delay_timestamp =
					    jiffies + (qualifier * HZ / 10);
				}
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
		    io_req->fcp_status);
		break;
	}

out:
	if (qedf_io_tracing)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result)
{
	struct scsi_cmnd *sc_cmd;
	int refcount;

	sc_cmd = io_req->sc_cmd;
	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	qedf_unmap_sg_list(qedf, io_req);

	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	    refcount);

	/*
	 * Set resid to the whole buffer length so we won't try to reuse any
	 * previously read data.
	 */
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	if (qedf_io_tracing)
		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * popping.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval, i;
	struct qedf_rport *fcport = io_req->fcport;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	/* Normalize the error bitmap value to just an unsigned int */
	err_warn_bit_map = (u64)
	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & (u64)((u64)1 << i)) {
			err_warn = i;
			break;
		}
	}

	/* Check if REC TOV expired if this is a tape device */
	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		if (err_warn ==
		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
				io_req->rx_buf_off =
				    cqe->cqe_info.err_info.rx_buf_off;
				io_req->tx_buf_off =
				    cqe->cqe_info.err_info.tx_buf_off;
				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
				rval = qedf_send_rec(io_req);
				/*
				 * We only want to abort the io_req if we
				 * can't queue the REC command as we want to
				 * keep the exchange open for recovery.
				 */
				if (rval)
					goto send_abort;
			}
			return;
		}
	}

send_abort:
	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);
		return;
	}

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

static void qedf_flush_els_req(struct qedf_ctx *qedf,
	struct qedf_ioreq *els_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
	    kref_read(&els_req->refcount));

	/*
	 * Need to distinguish this from a timeout when calling the
	 * els_req's callback.
	 */
	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

	/* Cancel the timer */
	cancel_delayed_work_sync(&els_req->timeout_work);

	/* Call callback function to complete command */
	if (els_req->cb_func && els_req->cb_arg) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	/* Release kref for original initiate_els */
	kref_put(&els_req->refcount, qedf_release_cmd);
}

/* A value of -1 for lun is a wild card that means flush all
 * active SCSI I/Os for the target.
 */
void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
{
	struct qedf_ioreq *io_req;
	struct qedf_ctx *qedf;
	struct qedf_cmd_mgr *cmd_mgr;
	int i, rc;

	if (!fcport)
		return;

	qedf = fcport->qedf;
	cmd_mgr = qedf->cmd_mgr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");

	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[i];

		if (!io_req)
			continue;
		if (io_req->fcport != fcport)
			continue;
		if (io_req->cmd_type == QEDF_ELS) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for io_req=0x%p.\n",
				    io_req);
				continue;
			}
			qedf_flush_els_req(qedf, io_req);
			/*
			 * Release the kref and go back to the top of the
			 * loop.
			 */
			goto free_cmd;
		}

		if (!io_req->sc_cmd)
			continue;
		if (lun > 0) {
			if (io_req->sc_cmd->device->lun !=
			    (u64)lun)
				continue;
		}

		/*
		 * Use kref_get_unless_zero in the unlikely case the command
		 * we're about to flush was completed in the normal SCSI path.
		 */
		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
			    "io_req=0x%p\n", io_req);
			continue;
		}
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Cleanup xid=0x%x.\n", io_req->xid);

		/* Cleanup task and return I/O mid-layer */
		qedf_initiate_cleanup(io_req, true);

free_cmd:
		kref_put(&io_req->refcount, qedf_release_cmd);
	}
}

/*
 * Initiate an ABTS middle path command. Note that we don't have to initialize
 * the task context for an ABTS task.
 */
int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
	struct fc_lport *lport;
	struct qedf_rport *fcport = io_req->fcport;
	struct fc_rport_priv *rdata;
	struct qedf_ctx *qedf;
	u32 r_a_tov = 0;
	int rc = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		rc = 1;
		goto abts_err;
	}

	rdata = fcport->rdata;
	r_a_tov = rdata->r_a_tov;
	qedf = fcport->qedf;
	lport = qedf->lport;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = 1;
		goto abts_err;
	}

	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
		rc = 1;
		goto abts_err;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		rc = 1;
		goto abts_err;
	}

	kref_get(&io_req->refcount);

	qedf->control_requests++;
	qedf->packet_aborts++;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	/* Set the command type to abort */
	io_req->cmd_type = QEDF_ABTS;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
	    "0x%x\n", io_req->xid);

	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	return rc;

abts_err:
	/*
	 * If the ABTS task fails to queue then we need to cleanup the
	 * task at the firmware.
	 */
	qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
	return rc;
}

void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	uint32_t r_ctl;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
	    "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);

	cancel_delayed_work(&io_req->timeout_work);

	r_ctl = cqe->cqe_info.abts_info.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get the RRQ response.
		 */
		kref_get(&io_req->refcount);
		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
		    msecs_to_jiffies(qedf->lport->r_a_tov));
		break;
	/* For error cases let the cleanup return the command */
	case FC_RCTL_BA_RJT:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - RJT\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
		break;
	default:
		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
		break;
	}

	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	/* Notify eh_abort handler that ABTS is complete */
	complete(&io_req->abts_done);

	kref_put(&io_req->refcount, qedf_release_cmd);
}

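/*
 * Allocate the DMA buffers and single-entry BD tables used by middle-path
 * requests (ELS and task management).  Request and response buffers are
 * one QEDF_PAGE_SIZE page each; qedf_free_mp_resc() undoes this on failure
 * and again when the command is released.
 */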
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req;
	struct scsi_sge *mp_req_bd;
	struct scsi_sge *mp_resp_bd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	dma_addr_t addr;
	uint64_t sz;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");

	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct qedf_mp_req));

	if (io_req->cmd_type != QEDF_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
	    &mp_req->req_buf_dma, GFP_KERNEL);
	if (!mp_req->req_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
	if (!mp_req->resp_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
		    "buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct scsi_sge);
	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_req_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_resp_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->sge_addr.lo = U64_LO(addr);
	mp_req_bd->sge_addr.hi = U64_HI(addr);
	mp_req_bd->sge_len = QEDF_PAGE_SIZE;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table.
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->sge_addr.lo = U64_LO(addr);
	mp_resp_bd->sge_addr.hi = U64_HI(addr);
	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;

	return 0;
}

/*
 * Last ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
		return;
	}

	/* Set bit to return all queuecommand requests as busy */
	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	/* Call qed drain request for function. Should be synchronous */
	qed_ops->common->drain(qedf->cdev);

	/* Settle time for CQEs to be returned */
	msleep(100);

	/* Unplug and continue */
	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
}

/*
 * Returns SUCCESS if the cleanup task does not timeout, otherwise return
 * FAILED.
 */
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	uint16_t xid;
	struct fcoe_task_context *task;
	int tmo = 0;
	int rc = SUCCESS;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return SUCCESS;
	}

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		return SUCCESS;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return SUCCESS;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
		    "cleanup processing or already completed.\n",
		    io_req->xid);
		return SUCCESS;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
	    io_req->xid);

	/* Cleanup cmds re-use the same TID as the original I/O */
	xid = io_req->xid;
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		/* Timeout case */
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
		    "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}

void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
	    io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->tm_done);
}

static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
		return FAILED;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
	    "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
		return FAILED;
	}

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	/* Set TM flags */
	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = true;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
	    "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
	else
		qedf_flush_active_ios(fcport, -1);

	kref_put(&io_req->refcount, qedf_release_cmd);

	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}

	return rc;
}

int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	int rc = SUCCESS;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		return FAILED;
	}

	if (fcport == NULL) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		return FAILED;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags))
		return SUCCESS;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		return FAILED;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

	return rc;
}

void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}

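/*
 * Handle an unsolicited frame delivered through the BDQ: copy it out of the
 * BDQ buffer into a newly allocated fc_frame and hand it to libfc from a
 * worker (qedf_fp_io_handler), since this path runs in atomic context.  The
 * BDQ producer index is advanced even on error so the firmware can reuse
 * the buffer.
 */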
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t tmp;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
	    "address.hi=%x address.lo=%x opaque_data.hi=%x "
	    "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
	    qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
		    bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
		    "unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		    "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
		    (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context.
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
		    "work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);

increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	tmp = readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	tmp = readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);
}