// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#include "qedf.h"

/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);
		rc = -ENOMEM;
		goto els_err;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		  "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		  els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	} else {
		rc = 0;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		  "req\n");
	qedf_ring_doorbell(fcport);
	set_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}
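
/*
 * Handle the firmware CQE for a completed ELS request: stop the ELS
 * timer, record the response length from the midpath CQE, invoke the
 * per-command callback, and drop the command reference.
 */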
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		  " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}
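
/*
 * RRQ completion handler.  Drops the reference held on the aborted I/O
 * request so its exchange ID can be reused, then frees the callback arg.
 */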
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
		goto out_free;
	}

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		  " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/*
	 * This should return the aborted io_req to the command pool. Note that
	 * we need to check the refcount in case the original request was
	 * flushed but we get a completion on this xid.
	 */
	if (orig_io_req && refcount > 0)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;
	int refcount;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "abort_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	if (!fcport) {
		refcount = kref_read(&aborted_io_req->refcount);
		QEDF_ERR(NULL,
			 "RRQ work was queued prior to a flush xid=0x%x, refcount=%d.\n",
			 aborted_io_req->xid, refcount);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;

	/*
	 * Sanity check that we can send a RRQ to make sure that refcount isn't
	 * at zero.
	 */
	refcount = kref_read(&aborted_io_req->refcount);
	if (refcount != 1) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
			  aborted_io_req->xid, aborted_io_req, refcount);
		return -EINVAL;
	}

	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		  "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		  aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			 "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}
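
/*
 * Convert an offloaded ELS response into a frame that libfc can consume
 * and hand it to fc_exch_recv() so libfc completes its own exchange.
 */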
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;
	unsigned long flags;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return;
	}

	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
			 fcport);
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	rdata = fcport->rdata;
	if (rdata && !kref_get_unless_zero(&rdata->kref)) {
		fcport->rdata = NULL;
		rdata = NULL;
	}

	if (rdata && rdata->rp_state == RPORT_ST_READY) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		kref_put(&rdata->kref, fc_rport_destroy);
		mutex_lock(&lport->disc.disc_mutex);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata) {
			mutex_unlock(&lport->disc.disc_mutex);
			fc_rport_login(rdata);
			fcport->rdata = rdata;
		} else {
			mutex_unlock(&lport->disc.disc_mutex);
			fcport->rdata = NULL;
		}
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
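
/*
 * Completion handler for ELS commands originated by libfc (e.g. ADISC).
 * Copies the firmware response into a newly allocated fc_frame and
 * returns it to libfc via qedf_process_l2_frame_compl().
 */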
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
		QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
			 els_req->xid);
		goto free_arg;
	}

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then let libfc
	 * time out the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded. Force libfc to logout the session which
		 * will offload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
			 "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		return;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}
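
/*
 * Send a libfc-built ADISC over the offloaded midpath, saving libfc's
 * OX_ID so the response can be completed on the original exchange.
 */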
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}
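
/*
 * SRR completion handler.  LS_ACC means the target accepted the
 * retransmission request; on LS_RJT the original I/O is aborted instead.
 */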
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		goto out_free;
	}

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "ELS timeout rec_xid=0x%x.\n", srr_req->xid);
		goto out_put;
	}

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
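
/*
 * Send a Sequence Retransmission Request for the original I/O.  Holds a
 * reference on orig_io_req that is released in qedf_srr_compl(), or here
 * on failure after falling back to an ABTS.
 */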
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		  "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			 "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	return rc;
}
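
/*
 * Post a firmware sequence recovery task so a partially transferred
 * sequence for the original I/O can be restarted from 'offset'.
 */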
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Doing sequence cleanup for xid=0x%x offset=%u.\n",
		  orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			 "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
			   QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
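
/*
 * Completion handler for the sequence recovery task posted above.  On
 * success, follow up with an SRR so the target retransmits from the
 * recovered offset.
 */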
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "cqe is NULL or timeout event (0x%x)", io_req->event);
		goto free;
	}

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
			 "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
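
/*
 * Reissue a SCSI command on a new exchange when REC indicates the
 * original command was lost.  The original io_req gives up its sc_cmd
 * and is aborted without a SCSI completion.  Note that rc is never set
 * to true, so this currently always returns false; the caller ignores
 * the return value.
 */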
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
			 "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
			 "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Reissued SCSI command from orig_xid=0x%x on "
			  "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}
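
/*
 * REC completion handler.  Uses the exchange status in the LS_ACC to
 * decide whether to send an SRR or a sequence cleanup, and requeues the
 * command on the LS_RJT "lost command" case.
 */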
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		goto out_free;
	}

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
			 orig_io_req, orig_io_req->xid);
		goto out_put;
	}

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Received LS_RJT for REC: er_reason=0x%x, "
			  "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange. We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				  "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			  "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
			  offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				  "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				  "sc_cmd is NULL for xid=0x%x.\n",
				  orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
					  "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
		  "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
		  orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			 "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}