Linux 4.19.133 - drivers/scsi/qedf/qedf_els.c
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include "qedf.h"
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
		    op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);
		rc = -ENOMEM;
		goto els_err;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		  "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		  els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	} else {
		rc = 0;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		  "req\n");
	qedf_ring_doorbell(fcport);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}
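
/*
 * Process the firmware completion CQE for an ELS request: stop the ELS
 * timer, record the response length from the midpath info, invoke the
 * registered callback, and drop the request's reference.
 */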
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	uint16_t xid;
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		  " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	xid = els_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = els_req->sc_cmd;

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}
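
/*
 * Completion handler for an RRQ ELS request: drops the reference that was
 * held on the aborted I/O request so it can return to the command pool.
 */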
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		  " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/* This should return the aborted io_req to the command pool */
	if (orig_io_req)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{

	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "abort_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		  "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		  aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			  "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}
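
/*
 * Rebuild the FC header and frame attributes on a firmware-completed ELS
 * response and hand the frame to libfc via fc_exch_recv().
 */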
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;

	if (!fcport)
		return;

	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
			  fcport);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);

	rdata = fcport->rdata;
	if (rdata) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			  "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata)
			fc_rport_login(rdata);
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
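
/*
 * Completion handler for ELS requests issued on behalf of libfc (e.g. ADISC):
 * copy the firmware response into a newly allocated fc_frame and complete it
 * back to libfc on the saved OX_ID.
 */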
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then libfc
	 * timeout the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded. Force libfc to logout the session which
		 * will offload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
			  "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			  "fc_frame_alloc failure.\n");
		return;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}
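
/*
 * Issue an ADISC from libfc as an offloaded ELS request, saving the libfc
 * OX_ID so the response can be returned on the original exchange.
 */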
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}
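
/*
 * Completion handler for an SRR (Sequence Retransmission Request): on an
 * LS_RJT response abort the original I/O, then drop the reference taken
 * on the original request when the SRR was sent.
 */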
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
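
/*
 * Send an SRR for the original I/O request. A reference is held on the
 * original request until the SRR completes; if the SRR cannot be queued,
 * the original I/O is aborted instead.
 */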
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 sid, r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		  "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	return rc;
}
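
/*
 * Post a sequence-recovery (cleanup) task to the firmware for the given
 * offset of the original I/O request and arm a cleanup timer on it.
 */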
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Doing sequence cleanup for xid=0x%x offset=%u.\n",
	    orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			  "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
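
/*
 * Handle the firmware completion of a sequence cleanup task by following
 * up with an SRR for the recovered offset.
 */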
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
		goto free;

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
		    "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}
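
/*
 * Reissue the SCSI command from the original I/O request on a newly
 * allocated io_req (new OX_ID) and abort the original exchange without
 * completing the command back to the SCSI layer.
 */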
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
		    "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
		    "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}
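
/*
 * Completion handler for a REC (Read Exchange Concise) request: based on the
 * LS_ACC exchange status, decide whether to send an SRR, initiate sequence
 * cleanup, or requeue a lost command on a new exchange.
 */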
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange. We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{

	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
		  "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
		  orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}