// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include "qedf.h"
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;

		QEDF_ERR(NULL, "fcport is NULL");

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		   els_req->xid);

	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);

	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	qedf_cmd_timer_set(qedf, els_req, timer_msec);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		   "request\n");
	qedf_ring_doorbell(fcport);
	set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
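
/*
 * ELS completion handler called from the CQE processing path: clear the
 * outstanding flag, cancel the ELS timer, record the response length from
 * the midpath CQE info, then invoke the per-command callback (if any)
 * before dropping the command reference.
 */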
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;

	kref_put(&els_req->refcount, qedf_release_cmd);
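
/*
 * Completion callback for the RRQ ELS. RRQ (Reinstate Recovery Qualifier)
 * is sent after an exchange has been aborted so the target can release the
 * recovery qualifier; on completion the reference held on the original
 * (aborted) io_req is dropped.
 */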
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

		QEDF_ERR(&qedf->dbg_ctx,
			 "Original io_req is NULL, rrq_req = %p.\n", rrq_req);

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/*
	 * This should return the aborted io_req to the command pool. Note that
	 * we need to check the refcount in case the original request was
	 * flushed but we get a completion on this xid.
	 */
	if (orig_io_req && refcount > 0)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
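
/*
 * Build and send an RRQ ELS for an exchange that was just aborted. The
 * aborted io_req supplies the OX_ID/RX_ID for the RRQ payload and is only
 * released once the RRQ completes (see qedf_rrq_compl() above).
 */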
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "abort_io_req is NULL.\n");

	fcport = aborted_io_req->fcport;

	refcount = kref_read(&aborted_io_req->refcount);
			 "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
			 aborted_io_req->xid, refcount);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");

		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");

	/*
	 * Sanity check that we can send a RRQ to make sure that refcount isn't
	 */
	refcount = kref_read(&aborted_io_req->refcount);
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
			  aborted_io_req->xid, aborted_io_req, refcount);

	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			  "req 0x%x\n", aborted_io_req->xid);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
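
/*
 * Hand a firmware-completed ELS response back to libfc as if it had arrived
 * on the L2 path: rebuild the FC header fields, restore the OX_ID libfc used,
 * set the frame SOF/EOF/CRC attributes, and pass the frame to fc_exch_recv().
 */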
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp, u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);

	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;

		QEDF_ERR(NULL, "fcport is NULL.\n");

	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
			 fcport);
		spin_unlock_irqrestore(&fcport->rport_lock, flags);

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	rdata = fcport->rdata;
	if (rdata && !kref_get_unless_zero(&rdata->kref)) {
		fcport->rdata = NULL;

	if (rdata && rdata->rp_state == RPORT_ST_READY) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			  "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		kref_put(&rdata->kref, fc_rport_destroy);
		mutex_lock(&lport->disc.disc_mutex);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
			mutex_unlock(&lport->disc.disc_mutex);
			fc_rport_login(rdata);
			fcport->rdata = rdata;
			mutex_unlock(&lport->disc.disc_mutex);
			fcport->rdata = NULL;

	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
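
/*
 * Completion callback used for ELS commands issued on behalf of libfc (for
 * example ADISC): copy the firmware response header and payload into a newly
 * allocated fc_frame and complete it back to libfc on the original OX_ID.
 */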
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

		QEDF_ERR(NULL, "els_req is NULL.\n");

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
		QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
			 els_req->xid);

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then libfc
	 * timeout the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded. Force libfc to logout the session which
		 * will offload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
		    "beyond page size.\n");

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);
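
/*
 * Send an ADISC (Address Discovery) ELS on the offloaded session. The OX_ID
 * from the libfc-built frame is saved in the callback argument so the
 * response can later be returned to libfc via qedf_l2_els_compl().
 */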
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;

	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	void *resp_buf, *fc_payload;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;

	orig_io_req = cb_arg->aborted_io_req;

		QEDF_ERR(NULL, "orig_io_req is NULL.\n");

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "ELS timeout rec_xid=0x%x.\n", srr_req->xid);

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		qedf_initiate_abts(orig_io_req, true);

	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
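
/*
 * Send an SRR (Sequence Retransmission Request) ELS asking the target to
 * restart a sequence of the original exchange at the given relative offset
 * and R_CTL. A reference on orig_io_req is held until the SRR completes;
 * if the send fails, the original I/O is aborted instead.
 */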
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;

		QEDF_ERR(NULL, "orig_io_req is NULL.\n");

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");

		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);

		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

	/* Tell other threads that SRR is in progress */
	set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
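
/*
 * Post a firmware sequence-cleanup task for the original exchange before a
 * sequence recovery (SRR) is attempted. The offset and R_CTL are stashed in
 * the callback argument so qedf_process_seq_cleanup_compl() can issue the
 * SRR once the cleanup completes.
 */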
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		   "Doing sequence cleanup for xid=0x%x offset=%u.\n",
		   orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			  "for sequence cleanup\n");

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
			   QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
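
/*
 * Completion handler for the sequence-cleanup task: on success, send the SRR
 * that was deferred in qedf_initiate_seq_cleanup(); on timeout or a missing
 * CQE, just release the resources.
 */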
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "cqe is NULL or timeout event (0x%x)", io_req->event);

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
			  "abort, xid=0x%x.\n", io_req->xid);

	kref_put(&io_req->refcount, qedf_release_cmd);
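
/*
 * Re-issue a SCSI command on a new exchange when REC handling indicates the
 * command itself was lost. The sc_cmd is moved to a freshly allocated io_req
 * so it is not requeued twice, and the original exchange is aborted without
 * completing the command back to the SCSI layer.
 */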
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;

	fcport = orig_io_req->fcport;
		QEDF_ERR(NULL, "fcport is NULL.\n");

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
		    "xid=0x%x.\n", orig_io_req->xid);

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);

		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
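
/*
 * Completion callback for the REC ELS: examine the LS_RJT/LS_ACC response to
 * decide whether the command must be reissued on a new exchange, whether a
 * sequence cleanup and SRR are needed to recover lost frames, and at what
 * relative offset recovery should resume.
 */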
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	void *resp_buf, *fc_payload;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;

	orig_io_req = cb_arg->aborted_io_req;

		QEDF_ERR(NULL, "orig_io_req is NULL.\n");

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
			 orig_io_req, orig_io_req->xid);

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange. We need to do this without
		 * informing the upper layers lest it cause an application
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);

	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");

		sc_cmd = orig_io_req->sc_cmd;
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);

		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;

				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;

			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;

				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				r_ctl = FC_RCTL_DD_SOL_DATA;

			qedf_send_srr(orig_io_req, offset, r_ctl);
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);

	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
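
/*
 * Send a REC (Read Exchange Concise) ELS to query the state of the original
 * exchange after an error has been detected; the response is handled by
 * qedf_rec_compl() above.
 */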
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;

		QEDF_ERR(NULL, "orig_io_req is NULL.\n");

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;

	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
		   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
		   orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);

		kref_put(&orig_io_req->refcount, qedf_release_cmd);