// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include "qedf.h"
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
        void *data, uint32_t data_len,
        void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
        struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
        struct qedf_ctx *qedf;
        struct fc_lport *lport;
        struct qedf_ioreq *els_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *fc_hdr;
        struct e4_fcoe_task_context *task;
        int rc = 0;
        uint32_t did, sid;
        uint16_t xid;
        struct fcoe_wqe *sqe;
        unsigned long flags;
        u16 sqe_idx;

        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL");
                rc = -EINVAL;
                goto els_err;
        }

        qedf = fcport->qedf;
        lport = qedf->lport;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

        rc = fc_remote_port_chkready(fcport->rport);
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
                rc = -EAGAIN;
                goto els_err;
        }
        if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
                          op);
                rc = -EAGAIN;
                goto els_err;
        }

        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
                rc = -EINVAL;
                goto els_err;
        }

        els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
        if (!els_req) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
                          "Failed to alloc ELS request 0x%x\n", op);
                rc = -ENOMEM;
                goto els_err;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
                   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
                   els_req->xid);
        els_req->sc_cmd = NULL;
        els_req->cmd_type = QEDF_ELS;
        els_req->fcport = fcport;
        els_req->cb_func = cb_func;
        cb_arg->io_req = els_req;
        cb_arg->op = op;
        els_req->cb_arg = cb_arg;
        els_req->data_xfer_len = data_len;

        /* Record which cpu this request is associated with */
        els_req->cpu = smp_processor_id();

        mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
        rc = qedf_init_mp_req(els_req);
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
                kref_put(&els_req->refcount, qedf_release_cmd);
                goto els_err;
        }

        /* Fill ELS Payload */
        if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
                memcpy(mp_req->req_buf, data, data_len);
        } else {
                QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
                els_req->cb_func = NULL;
                els_req->cb_arg = NULL;
                kref_put(&els_req->refcount, qedf_release_cmd);
                rc = -EINVAL;
                goto els_err;
        }

        /* Fill FC header */
        fc_hdr = &(mp_req->req_fc_hdr);

        did = fcport->rdata->ids.port_id;
        sid = fcport->sid;

        __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
                           FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
                           FC_FC_SEQ_INIT, 0);

        /* Obtain exchange id */
        xid = els_req->xid;

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));

        /* Initialize task context for this IO request */
        task = qedf_get_task_mem(&qedf->tasks, xid);
        qedf_init_mp_task(els_req, task, sqe);

        /* Put timer on els request */
        if (timer_msec)
                qedf_cmd_timer_set(qedf, els_req, timer_msec);

        /* Ring doorbell */
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
                   "req\n");
        qedf_ring_doorbell(fcport);
        set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
        return rc;
}
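/*
 * Usage note (a sketch based on the callers in this file, e.g.
 * qedf_send_rrq() and qedf_send_rec() below): the caller allocates a
 * struct qedf_els_cb_arg, builds the ELS payload on the stack, and hands
 * both to qedf_initiate_els() together with a completion callback and a
 * timeout, typically the lport's R_A_TOV. Error handling omitted:
 *
 *      cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
 *      cb_arg->aborted_io_req = orig_io_req;
 *      rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
 *          qedf_rrq_compl, cb_arg, lport->r_a_tov);
 */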
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *els_req)
{
        struct fcoe_cqe_midpath_info *mp_info;
        struct qedf_rport *fcport;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
                   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

        if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
                || (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS)
                || (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
                          "ELS completion xid=0x%x after flush event=0x%x",
                          els_req->xid, els_req->event);
                return;
        }

        fcport = els_req->fcport;

        /* When flush is active,
         * let the cmds be completed from the cleanup context
         */
        if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
            test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
                          "Dropping ELS completion xid=0x%x as fcport is flushing",
                          els_req->xid);
                return;
        }

        clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

        /* Kill the ELS timer */
        cancel_delayed_work(&els_req->timeout_work);

        /* Get ELS response length from CQE */
        mp_info = &cqe->cqe_info.midpath_info;
        els_req->mp_req.resp_len = mp_info->data_placement_size;

        /* Parse ELS response */
        if ((els_req->cb_func) && (els_req->cb_arg)) {
                els_req->cb_func(els_req->cb_arg);
                els_req->cb_arg = NULL;
        }

        kref_put(&els_req->refcount, qedf_release_cmd);
}
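/*
 * Summary: this is the entry point invoked when the firmware posts a
 * midpath CQE for an ELS request. It drops completions that race with a
 * flush, stops the ELS timer, records the response length reported by the
 * firmware, and then invokes the callback registered in qedf_initiate_els().
 */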
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *rrq_req;
        struct qedf_ctx *qedf;
        int refcount;

        rrq_req = cb_arg->io_req;
        qedf = rrq_req->fcport->qedf;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
                goto out_free;
        }

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
                   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
                   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

        /*
         * This should return the aborted io_req to the command pool. Note that
         * we need to check the refcount in case the original request was
         * flushed but we get a completion on this xid.
         */
        if (orig_io_req && refcount > 0)
                kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
        /*
         * Release a reference to the rrq request if we timed out as the
         * rrq completion handler is called directly from the timeout handler
         * and not from els_compl where the reference would have normally been
         * released.
         */
        if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
                kref_put(&rrq_req->refcount, qedf_release_cmd);
        kfree(cb_arg);
}
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
        struct fc_els_rrq rrq;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t sid;
        uint32_t r_a_tov;
        int rc;
        int refcount;

        if (!aborted_io_req) {
                QEDF_ERR(NULL, "aborted_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = aborted_io_req->fcport;

        if (!fcport) {
                refcount = kref_read(&aborted_io_req->refcount);
                QEDF_ERR(NULL,
                         "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
                         aborted_io_req->xid, refcount);
                kref_put(&aborted_io_req->refcount, qedf_release_cmd);
                return -EINVAL;
        }

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        qedf = fcport->qedf;

        /*
         * Sanity check that we can send a RRQ to make sure that refcount isn't
         * 0.
         */
        refcount = kref_read(&aborted_io_req->refcount);
        if (refcount != 1) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
                          "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
                          aborted_io_req->xid, aborted_io_req, refcount);
                return -EINVAL;
        }

        lport = qedf->lport;
        sid = fcport->sid;
        r_a_tov = lport->r_a_tov;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
                   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
                   aborted_io_req->xid);
        memset(&rrq, 0, sizeof(rrq));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                          "RRQ\n");
                rc = -ENOMEM;
                goto rrq_err;
        }

        cb_arg->aborted_io_req = aborted_io_req;

        rrq.rrq_cmd = ELS_RRQ;
        hton24(rrq.rrq_s_id, sid);
        rrq.rrq_ox_id = htons(aborted_io_req->xid);
        rrq.rrq_rx_id =
            htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

        rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
            qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
                          "req 0x%x\n", aborted_io_req->xid);
                kfree(cb_arg);
                kref_put(&aborted_io_req->refcount, qedf_release_cmd);
        }
        return rc;
}
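/*
 * Background (summary, not from the original source): RRQ (Reinstate
 * Recovery Qualifier) is sent after an exchange has been aborted so the
 * target can release the recovery qualifier for the old OX_ID/RX_ID pair.
 * That is why qedf_send_rrq() reuses the xid and rx_id of the aborted
 * request and why the ELS is given a timeout of R_A_TOV, the interval for
 * which the qualifier remains valid.
 */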
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
                                         struct fc_frame *fp,
                                         u16 l2_oxid)
{
        struct fc_lport *lport = fcport->qedf->lport;
        struct fc_frame_header *fh;
        u32 crc;

        fh = (struct fc_frame_header *)fc_frame_header_get(fp);

        /* Set the OXID we return to what libfc used */
        if (l2_oxid != FC_XID_UNKNOWN)
                fh->fh_ox_id = htons(l2_oxid);

        /* Setup header fields */
        fh->fh_r_ctl = FC_RCTL_ELS_REP;
        fh->fh_type = FC_TYPE_ELS;
        /* Last sequence, end sequence */
        fh->fh_f_ctl[0] = 0x98;
        hton24(fh->fh_d_id, lport->port_id);
        hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
        fh->fh_rx_id = 0xffff;

        /* Set frame attributes */
        crc = fcoe_fc_crc(fp);
        fc_frame_init(fp);
        fr_dev(fp) = lport;
        fr_sof(fp) = FC_SOF_I3;
        fr_eof(fp) = FC_EOF_T;
        fr_crc(fp) = cpu_to_le32(~crc);

        /* Send completed request to libfc */
        fc_exch_recv(lport, fp);
}
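/*
 * The 0x98 written to fh_f_ctl[0] above is the upper byte of
 * FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, i.e. the frame is marked
 * as sent by the exchange responder and as the last frame of the last
 * sequence. fc_exch_recv() then processes the frame exactly as if it had
 * arrived from the wire, which is how the offloaded ELS response is handed
 * back to libfc.
 */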
/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
        struct fc_lport *lport;
        struct fc_rport_priv *rdata;
        u32 port_id;
        unsigned long flags;

        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL.\n");
                return;
        }

        spin_lock_irqsave(&fcport->rport_lock, flags);
        if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
            !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
            test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
                         fcport);
                spin_unlock_irqrestore(&fcport->rport_lock, flags);
                return;
        }

        /* Set that we are now in reset */
        set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
        spin_unlock_irqrestore(&fcport->rport_lock, flags);

        rdata = fcport->rdata;
        if (rdata && !kref_get_unless_zero(&rdata->kref)) {
                fcport->rdata = NULL;
                rdata = NULL;
        }

        if (rdata && rdata->rp_state == RPORT_ST_READY) {
                lport = fcport->qedf->lport;
                port_id = rdata->ids.port_id;
                QEDF_ERR(&(fcport->qedf->dbg_ctx),
                         "LOGO port_id=%x.\n", port_id);
                fc_rport_logoff(rdata);
                kref_put(&rdata->kref, fc_rport_destroy);
                mutex_lock(&lport->disc.disc_mutex);
                /* Recreate the rport and log back in */
                rdata = fc_rport_create(lport, port_id);
                mutex_unlock(&lport->disc.disc_mutex);
                if (rdata)
                        fc_rport_login(rdata);
                fcport->rdata = rdata;
        }
        clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
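/*
 * qedf_restart_rport() is the heavy hammer used by the ADISC timeout path
 * in qedf_l2_els_compl() below: when an ELS times out on an offloaded
 * session, logging the rport out and back in forces libfc to rebuild the
 * session so that the subsequent PLOGI traffic can flow over the LL2
 * (non-offloaded) path.
 */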
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *els_req;
        struct qedf_rport *fcport;
        struct qedf_mp_req *mp_req;
        struct fc_frame *fp;
        struct fc_frame_header *fh, *mp_fc_hdr;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        u16 l2_oxid;

        l2_oxid = cb_arg->l2_oxid;
        els_req = cb_arg->io_req;

        if (!els_req) {
                QEDF_ERR(NULL, "els_req is NULL.\n");
                goto free_arg;
        }

        /*
         * If we are flushing the command just free the cb_arg as none of the
         * response data will be valid.
         */
        if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
                QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
                         els_req->xid);
                goto free_arg;
        }

        fcport = els_req->fcport;
        mp_req = &(els_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        /*
         * If a middle path ELS command times out, don't try to return
         * the command but rather do any internal cleanup and then let libfc
         * time out the command and clean up its internal resources.
         */
        if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
                /*
                 * If ADISC times out, libfc will timeout the exchange and then
                 * try to send a PLOGI which will timeout since the session is
                 * still offloaded. Force libfc to logout the session which
                 * will offload the connection and allow the PLOGI response to
                 * flow over the LL2 path.
                 */
                if (cb_arg->op == ELS_ADISC)
                        qedf_restart_rport(fcport);
                goto free_arg;
        }

        if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
                          "beyond page size.\n");
                goto free_arg;
        }

        fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx),
                         "fc_frame_alloc failure.\n");
                goto free_arg;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
                  "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
        qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
        kfree(cb_arg);
}
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
        struct fc_els_adisc *adisc;
        struct fc_frame_header *fh;
        struct fc_lport *lport = fcport->qedf->lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t r_a_tov = lport->r_a_tov;
        int rc;

        qedf = fcport->qedf;
        fh = fc_frame_header_get(fp);

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                          "ADISC\n");
                rc = -ENOMEM;
                goto adisc_err;
        }
        cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                  "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

        adisc = fc_frame_payload_get(fp, sizeof(*adisc));

        rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
            qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
                kfree(cb_arg);
        }
        return rc;
}
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *srr_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *mp_fc_hdr, *fh;
        struct fc_frame *fp;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        int refcount;
        u8 opcode;

        srr_req = cb_arg->io_req;
        qedf = srr_req->fcport->qedf;
        lport = qedf->lport;

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                goto out_free;
        }

        clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

        if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
                   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
                   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

        /* If a SRR times out, simply free resources */
        if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "ELS timeout rec_xid=0x%x.\n", srr_req->xid);
                goto out_put;
        }

        /* Normalize response data into struct fc_frame */
        mp_req = &(srr_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        fp = fc_frame_alloc(lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                         "fc_frame_alloc failure.\n");
                goto out_put;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        opcode = fc_frame_payload_op(fp);
        switch (opcode) {
        case ELS_LS_ACC:
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                          "SRR success.\n");
                break;
        case ELS_LS_RJT:
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
                          "SRR rejected.\n");
                qedf_initiate_abts(orig_io_req, true);
                break;
        }

        fc_frame_free(fp);
out_put:
        /* Put reference for original command since SRR completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
        kfree(cb_arg);
}
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
        struct fcp_srr srr;
        struct qedf_ctx *qedf;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        u32 r_a_tov;
        int rc;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = orig_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        /* Take reference until SRR command completion */
        kref_get(&orig_io_req->refcount);

        qedf = fcport->qedf;
        lport = qedf->lport;
        r_a_tov = lport->r_a_tov;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
                   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
        memset(&srr, 0, sizeof(srr));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                          "SRR\n");
                rc = -ENOMEM;
                goto srr_err;
        }

        cb_arg->aborted_io_req = orig_io_req;

        srr.srr_op = ELS_SRR;
        srr.srr_ox_id = htons(orig_io_req->xid);
        srr.srr_rx_id = htons(orig_io_req->rx_id);
        srr.srr_rel_off = htonl(offset);
        srr.srr_r_ctl = r_ctl;

        rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
            qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
                          "=0x%x\n", orig_io_req->xid);
                kfree(cb_arg);
                /* If we fail to queue SRR, send ABTS to orig_io */
                qedf_initiate_abts(orig_io_req, true);
                kref_put(&orig_io_req->refcount, qedf_release_cmd);
        } else
                /* Tell other threads that SRR is in progress */
                set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

        return rc;
}
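/*
 * SRR (Sequence Retransmission Request, defined by FCP) asks the target to
 * resend or resume a sequence of the original exchange. The payload built
 * above names the exchange (OX_ID/RX_ID), the relative offset at which to
 * resume, and the R_CTL of the information unit that was lost;
 * qedf_rec_compl() below chooses those values based on the REC accept data.
 */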
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
        u32 offset, u8 r_ctl)
{
        struct qedf_rport *fcport;
        unsigned long flags;
        struct qedf_els_cb_arg *cb_arg;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;

        fcport = orig_io_req->fcport;

        QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
                  "Doing sequence cleanup for xid=0x%x offset=%u.\n",
                  orig_io_req->xid, offset);

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
                          "for sequence cleanup\n");
                return;
        }

        /* Get reference for cleanup request */
        kref_get(&orig_io_req->refcount);

        orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
        cb_arg->offset = offset;
        cb_arg->r_ctl = r_ctl;
        orig_io_req->cb_arg = cb_arg;

        qedf_cmd_timer_set(fcport->qedf, orig_io_req,
            QEDF_CLEANUP_TIMEOUT * HZ);

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));
        orig_io_req->task_params->sqe = sqe;

        init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
                                                   offset);
        qedf_ring_doorbell(fcport);

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
        struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
        int rc;
        struct qedf_els_cb_arg *cb_arg;

        cb_arg = io_req->cb_arg;

        /* If we timed out just free resources */
        if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "cqe is NULL or timeout event (0x%x)", io_req->event);
                goto free;
        }

        /* Kill the timer we put on the request */
        cancel_delayed_work_sync(&io_req->timeout_work);

        rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
        if (rc)
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
                          "abort, xid=0x%x.\n", io_req->xid);
free:
        kfree(cb_arg);
        kref_put(&io_req->refcount, qedf_release_cmd);
}
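/*
 * Recovery chain for lost data frames: the firmware sequence cleanup task
 * (qedf_initiate_seq_cleanup() above) quiesces the exchange, its completion
 * lands here, and a SRR is then sent to request retransmission. If the SRR
 * cannot be sent, the I/O is left to be aborted instead.
 */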
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
        struct qedf_rport *fcport;
        struct qedf_ioreq *new_io_req;
        unsigned long flags;

        fcport = orig_io_req->fcport;
        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL.\n");
                return false;
        }

        if (!orig_io_req->sc_cmd) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
                          "xid=0x%x.\n", orig_io_req->xid);
                return false;
        }

        new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
        if (!new_io_req) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
                          "io_req.\n");
                return false;
        }

        new_io_req->sc_cmd = orig_io_req->sc_cmd;

        /*
         * This keeps the sc_cmd struct from being returned to the tape
         * driver and being requeued twice. We do need to put a reference
         * for the original I/O request since we will not do a SCSI completion
         * for it.
         */
        orig_io_req->sc_cmd = NULL;
        kref_put(&orig_io_req->refcount, qedf_release_cmd);

        spin_lock_irqsave(&fcport->rport_lock, flags);

        /* kref for new command released in qedf_post_io_req on error */
        if (qedf_post_io_req(fcport, new_io_req)) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
                /* Return SQE to pool */
                atomic_inc(&fcport->free_sqes);
        } else {
                QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
                          "Reissued SCSI command from orig_xid=0x%x on "
                          "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
                /*
                 * Abort the original I/O but do not return SCSI command as
                 * it has been reissued on another OX_ID.
                 */
                spin_unlock_irqrestore(&fcport->rport_lock, flags);
                qedf_initiate_abts(orig_io_req, false);
                return true;
        }

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
        return false;
}
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *rec_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *mp_fc_hdr, *fh;
        struct fc_frame *fp;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        int refcount;
        enum fc_rctl r_ctl;
        struct fc_els_ls_rjt *rjt;
        struct fc_els_rec_acc *acc;
        u8 opcode;
        u32 offset, e_stat;
        struct scsi_cmnd *sc_cmd;
        bool srr_needed = false;

        rec_req = cb_arg->io_req;
        qedf = rec_req->fcport->qedf;
        lport = qedf->lport;

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                goto out_free;
        }

        if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
                   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
                   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

        /* If a REC times out, free resources */
        if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
                QEDF_ERR(&qedf->dbg_ctx,
                         "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
                         orig_io_req, orig_io_req->xid);
                goto out_put;
        }

        /* Normalize response data into struct fc_frame */
        mp_req = &(rec_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        acc = resp_buf = mp_req->resp_buf;

        fp = fc_frame_alloc(lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                         "fc_frame_alloc failure.\n");
                goto out_put;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        opcode = fc_frame_payload_op(fp);
        if (opcode == ELS_LS_RJT) {
                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
                if (!rjt) {
                        QEDF_ERR(&qedf->dbg_ctx, "payload get failed");
                        goto out_free_frame;
                }

                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                          "Received LS_RJT for REC: er_reason=0x%x, "
                          "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
                /*
                 * The following response(s) mean that we need to reissue the
                 * request on another exchange. We need to do this without
                 * informing the upper layers lest it cause an application
                 * error.
                 */
                if ((rjt->er_reason == ELS_RJT_LOGIC ||
                    rjt->er_reason == ELS_RJT_UNAB) &&
                    rjt->er_explan == ELS_EXPL_OXID_RXID) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                  "Handle CMD LOST case.\n");
                        qedf_requeue_io_req(orig_io_req);
                }
        } else if (opcode == ELS_LS_ACC) {
                offset = ntohl(acc->reca_fc4value);
                e_stat = ntohl(acc->reca_e_stat);
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                          "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
                          offset, e_stat);
                if (e_stat & ESB_ST_SEQ_INIT) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                  "Target has the seq init\n");
                        goto out_free_frame;
                }
                sc_cmd = orig_io_req->sc_cmd;
                if (!sc_cmd) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                  "sc_cmd is NULL for xid=0x%x.\n",
                                  orig_io_req->xid);
                        goto out_free_frame;
                }
                /* SCSI write case */
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                        if (offset == orig_io_req->data_xfer_len) {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                          "WRITE - response lost.\n");
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                srr_needed = true;
                                offset = 0;
                        } else {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                          "WRITE - XFER_RDY/DATA lost.\n");
                                r_ctl = FC_RCTL_DD_DATA_DESC;
                                /* Use data from warning CQE instead of REC */
                                offset = orig_io_req->tx_buf_off;
                        }
                /* SCSI read case */
                } else {
                        if (orig_io_req->rx_buf_off ==
                            orig_io_req->data_xfer_len) {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                          "READ - response lost.\n");
                                srr_needed = true;
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                offset = 0;
                        } else {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                          "READ - DATA lost.\n");
                                /*
                                 * For read case we always set the offset to 0
                                 * for sequence recovery task.
                                 */
                                offset = 0;
                                r_ctl = FC_RCTL_DD_SOL_DATA;
                        }
                }

                if (srr_needed)
                        qedf_send_srr(orig_io_req, offset, r_ctl);
                else
                        qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
        }

out_free_frame:
        fc_frame_free(fp);
out_put:
        /* Put reference for original command since REC completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
        kfree(cb_arg);
}
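/*
 * Summary of the REC decision tree above: an LS_RJT with reason LOGIC/UNAB
 * and explanation "invalid OX_ID-RX_ID" means the target never saw the
 * command, so the I/O is silently reissued on a new exchange via
 * qedf_requeue_io_req(). An LS_ACC is examined to decide whether the
 * response frame or a data/XFER_RDY sequence was lost, and the exchange is
 * then recovered with either an SRR or a firmware sequence cleanup followed
 * by an SRR.
 */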
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
        struct fc_els_rec rec;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t sid;
        uint32_t r_a_tov;
        int rc;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = orig_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        /* Take reference until REC command completion */
        kref_get(&orig_io_req->refcount);

        qedf = fcport->qedf;
        lport = qedf->lport;
        sid = fcport->sid;
        r_a_tov = lport->r_a_tov;

        memset(&rec, 0, sizeof(rec));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                          "REC\n");
                rc = -ENOMEM;
                goto rec_err;
        }

        cb_arg->aborted_io_req = orig_io_req;

        rec.rec_cmd = ELS_REC;
        hton24(rec.rec_s_id, sid);
        rec.rec_ox_id = htons(orig_io_req->xid);
        rec.rec_rx_id =
            htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
                   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
                   orig_io_req->xid, rec.rec_rx_id);
        rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
            qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
                          "=0x%x\n", orig_io_req->xid);
                kfree(cb_arg);
                kref_put(&orig_io_req->refcount, qedf_release_cmd);
        }
        return rc;
}