/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include "qedf.h"
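
/*
 * This file builds ELS (Extended Link Service) requests - RRQ, SRR, REC and
 * ADISC - as firmware middle-path commands and parses their completions.
 */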
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
        void *data, uint32_t data_len,
        void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
        struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
        struct qedf_ctx *qedf;
        struct fc_lport *lport;
        struct qedf_ioreq *els_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *fc_hdr;
        struct e4_fcoe_task_context *task;
        int rc = 0;
        uint32_t did, sid;
        uint16_t xid;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;
        unsigned long flags;
        uint32_t start_time = jiffies / HZ;
        uint32_t current_time;

        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL");
                rc = -EINVAL;
                goto els_err;
        }

        qedf = fcport->qedf;
        lport = qedf->lport;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

        rc = fc_remote_port_chkready(fcport->rport);
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
                rc = -EAGAIN;
                goto els_err;
        }
        if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
                    op);
                rc = -EAGAIN;
                goto els_err;
        }

        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
                rc = -EINVAL;
                goto els_err;
        }
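
        /*
         * Command slots can be transiently exhausted; retry the ELS
         * allocation for up to 10 seconds before failing the request.
         */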
retry_els:
        els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
        if (!els_req) {
                current_time = jiffies / HZ;
                if ((current_time - start_time) > 10) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                            "els: Failed els 0x%x\n", op);
                        rc = -ENOMEM;
                        goto els_err;
                }
                /* Wait 20ms before retrying the allocation */
                mdelay(20);
                goto retry_els;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
            "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
            els_req->xid);
        els_req->sc_cmd = NULL;
        els_req->cmd_type = QEDF_ELS;
        els_req->fcport = fcport;
        els_req->cb_func = cb_func;
        cb_arg->io_req = els_req;
        cb_arg->op = op;
        els_req->cb_arg = cb_arg;
        els_req->data_xfer_len = data_len;

        /* Record which cpu this request is associated with */
        els_req->cpu = smp_processor_id();

        mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
        rc = qedf_init_mp_req(els_req);
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
                kref_put(&els_req->refcount, qedf_release_cmd);
                goto els_err;
        }
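
        /*
         * The ELS payload travels as a firmware middle-path (MP) request;
         * qedf_init_mp_req() above prepared the request/response buffers
         * that the payload is copied into below and that the completion
         * handlers parse.
         */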
        /* Fill ELS Payload */
        if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
                memcpy(mp_req->req_buf, data, data_len);
        } else {
                QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
                els_req->cb_func = NULL;
                els_req->cb_arg = NULL;
                kref_put(&els_req->refcount, qedf_release_cmd);
                rc = -EINVAL;
        }

        if (rc)
                goto els_err;

        /* Fill FC header */
        fc_hdr = &(mp_req->req_fc_hdr);
        did = fcport->rdata->ids.port_id;
        sid = fcport->sid;
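
        /*
         * R_CTL marks an extended link services request, D_ID is the remote
         * port and S_ID our local port ID; F_CTL opens and closes the
         * sequence in one frame while transferring sequence initiative to
         * the responder.
         */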
        __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
            FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
            FC_FC_SEQ_INIT, 0);

        /* Obtain exchange id */
        xid = els_req->xid;

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));

        /* Initialize task context for this IO request */
        task = qedf_get_task_mem(&qedf->tasks, xid);
        qedf_init_mp_task(els_req, task, sqe);

        /* Put timer on original I/O request */
        if (timer_msec)
                qedf_cmd_timer_set(qedf, els_req, timer_msec);

        /* Ring doorbell */
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
            "req\n");
        qedf_ring_doorbell(fcport);
        spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
        return rc;
}
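
/*
 * Called from the CQE handler when an ELS request completes: stop the ELS
 * timer, record the response length reported in the midpath CQE and run
 * the callback registered in qedf_initiate_els().
 */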
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *els_req)
{
        struct fcoe_task_context *task_ctx;
        struct scsi_cmnd *sc_cmd;
        uint16_t xid;
        struct fcoe_cqe_midpath_info *mp_info;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
            " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

        /* Kill the ELS timer */
        cancel_delayed_work(&els_req->timeout_work);

        xid = els_req->xid;
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        sc_cmd = els_req->sc_cmd;

        /* Get ELS response length from CQE */
        mp_info = &cqe->cqe_info.midpath_info;
        els_req->mp_req.resp_len = mp_info->data_placement_size;

        /* Parse ELS response */
        if ((els_req->cb_func) && (els_req->cb_arg)) {
                els_req->cb_func(els_req->cb_arg);
                els_req->cb_arg = NULL;
        }

        kref_put(&els_req->refcount, qedf_release_cmd);
}

static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *rrq_req;
        struct qedf_ctx *qedf;
        int refcount;

        rrq_req = cb_arg->io_req;
        qedf = rrq_req->fcport->qedf;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req)
                goto out_free;

        if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
            " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
            orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

        /* This should return the aborted io_req to the command pool */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
        /*
         * Release a reference to the rrq request if we timed out, as the
         * rrq completion handler is called directly from the timeout handler
         * and not from els_compl, where the reference would normally be
         * released.
         */
        if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
                kref_put(&rrq_req->refcount, qedf_release_cmd);

        kfree(cb_arg);
}
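
/*
 * RRQ (Reinstate Recovery Qualifier) is sent after an exchange has been
 * aborted so the target can retire the recovery qualifier and the exchange
 * IDs can eventually be reused; the reply is awaited for R_A_TOV.
 */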
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
        struct fc_els_rrq rrq;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t sid;
        uint32_t r_a_tov;
        int rc;

        if (!aborted_io_req) {
                QEDF_ERR(NULL, "abort_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = aborted_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        qedf = fcport->qedf;
        lport = qedf->lport;
        sid = fcport->sid;
        r_a_tov = lport->r_a_tov;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
            "io = %p, orig_xid = 0x%x\n", aborted_io_req,
            aborted_io_req->xid);
        memset(&rrq, 0, sizeof(rrq));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                    "RRQ\n");
                rc = -ENOMEM;
                goto rrq_err;
        }

        cb_arg->aborted_io_req = aborted_io_req;

        rrq.rrq_cmd = ELS_RRQ;
        hton24(rrq.rrq_s_id, sid);
        rrq.rrq_ox_id = htons(aborted_io_req->xid);
        rrq.rrq_rx_id =
            htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

        rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
            qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
                    "req 0x%x\n", aborted_io_req->xid);
                kfree(cb_arg);
                kref_put(&aborted_io_req->refcount, qedf_release_cmd);
        }
        return rc;
}
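
/*
 * Return a firmware ELS response to libfc as a regular L2 frame: rewrite
 * the header so it reads as an ELS reply on the exchange libfc originated,
 * then hand it to fc_exch_recv().
 */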
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
        struct fc_frame *fp, u16 l2_oxid)
{
        struct fc_lport *lport = fcport->qedf->lport;
        struct fc_frame_header *fh;
        u32 crc;

        fh = (struct fc_frame_header *)fc_frame_header_get(fp);

        /* Set the OXID we return to what libfc used */
        if (l2_oxid != FC_XID_UNKNOWN)
                fh->fh_ox_id = htons(l2_oxid);

        /* Setup header fields */
        fh->fh_r_ctl = FC_RCTL_ELS_REP;
        fh->fh_type = FC_TYPE_ELS;
        /* Last sequence, end sequence */
        fh->fh_f_ctl[0] = 0x98;
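        /*
         * In the top F_CTL byte, 0x98 sets the exchange-responder,
         * last-sequence and end-sequence bits.
         */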
        hton24(fh->fh_d_id, lport->port_id);
        hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
        fh->fh_rx_id = 0xffff;

        /* Set frame attributes */
        crc = fcoe_fc_crc(fp);
        fc_frame_init(fp);
        fr_dev(fp) = lport;
        fr_sof(fp) = FC_SOF_I3;
        fr_eof(fp) = FC_EOF_T;
        fr_crc(fp) = cpu_to_le32(~crc);

        /* Send completed request to libfc */
        fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
        struct fc_lport *lport;
        struct fc_rport_priv *rdata;
        u32 port_id;

        if (!fcport)
                return;

        if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
            !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
            test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
                    fcport);
                return;
        }

        /* Set that we are now in reset */
        set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);

        rdata = fcport->rdata;
        if (rdata) {
                lport = fcport->qedf->lport;
                port_id = rdata->ids.port_id;
                QEDF_ERR(&(fcport->qedf->dbg_ctx),
                    "LOGO port_id=%x.\n", port_id);
                fc_rport_logoff(rdata);
                /* Recreate the rport and log back in */
                rdata = fc_rport_create(lport, port_id);
                if (rdata)
                        fc_rport_login(rdata);
        }
        clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
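
/*
 * Completion for libfc-originated ELS requests (e.g. ADISC): rebuild the
 * firmware's middle-path response into a struct fc_frame and feed it back
 * to libfc via qedf_process_l2_frame_compl().
 */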
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *els_req;
        struct qedf_rport *fcport;
        struct qedf_mp_req *mp_req;
        struct fc_frame *fp;
        struct fc_frame_header *fh, *mp_fc_hdr;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        u16 l2_oxid;

        l2_oxid = cb_arg->l2_oxid;
        els_req = cb_arg->io_req;

        if (!els_req) {
                QEDF_ERR(NULL, "els_req is NULL.\n");
                kfree(cb_arg);
                return;
        }

        /*
         * If we are flushing the command just free the cb_arg as none of the
         * response data will be valid.
         */
        if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
                goto free_arg;

        fcport = els_req->fcport;
        mp_req = &(els_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        /*
         * If a middle path ELS command times out, don't try to return
         * the command but rather do any internal cleanup and then let
         * libfc time out the command and clean up its internal resources.
         */
        if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
                /*
                 * If ADISC times out, libfc will timeout the exchange and then
                 * try to send a PLOGI which will timeout since the session is
                 * still offloaded. Force libfc to logout the session which
                 * will offload the connection and allow the PLOGI response to
                 * flow over the LL2 path.
                 */
                if (cb_arg->op == ELS_ADISC)
                        qedf_restart_rport(fcport);
                goto free_arg;
        }

        if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
                    "beyond page size.\n");
                goto free_arg;
        }
= fc_frame_alloc(fcport
->qedf
->lport
, resp_len
);
426 QEDF_ERR(&(fcport
->qedf
->dbg_ctx
),
427 "fc_frame_alloc failure.\n");

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
            "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
        qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
        kfree(cb_arg);
}

int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
        struct fc_els_adisc *adisc;
        struct fc_frame_header *fh;
        struct fc_lport *lport = fcport->qedf->lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t r_a_tov = lport->r_a_tov;
        int rc;

        qedf = fcport->qedf;
        fh = fc_frame_header_get(fp);

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                    "ADISC\n");
                rc = -ENOMEM;
                goto adisc_err;
        }

        cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

        adisc = fc_frame_payload_get(fp, sizeof(*adisc));

        rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
            qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
                kfree(cb_arg);
        }
        return rc;
}
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *srr_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *mp_fc_hdr, *fh;
        struct fc_frame *fp;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        int refcount;
        u8 opcode;

        srr_req = cb_arg->io_req;
        qedf = srr_req->fcport->qedf;
        lport = qedf->lport;

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req)
                goto out_free;

        clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

        if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
            " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
            orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

        /* If a SRR times out, simply free resources */
        if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
                goto out_put;

        /* Normalize response data into struct fc_frame */
        mp_req = &(srr_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        fp = fc_frame_alloc(lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "fc_frame_alloc failure.\n");
                goto out_put;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        opcode = fc_frame_payload_op(fp);
        switch (opcode) {
        case ELS_LS_ACC:
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                    "SRR success.\n");
                break;
        case ELS_LS_RJT:
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
                    "SRR rejected.\n");
                qedf_initiate_abts(orig_io_req, true);
                break;
        }

        fc_frame_free(fp);
out_put:
        /* Put reference for original command since SRR completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
        kfree(cb_arg);
}

static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
        struct fcp_srr srr;
        struct qedf_ctx *qedf;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        u32 r_a_tov;
        int rc;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = orig_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        /* Take reference until SRR command completion */
        kref_get(&orig_io_req->refcount);

        qedf = fcport->qedf;
        lport = qedf->lport;
        r_a_tov = lport->r_a_tov;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
            "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
        memset(&srr, 0, sizeof(srr));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                    "SRR\n");
                rc = -ENOMEM;
                goto srr_err;
        }

        cb_arg->aborted_io_req = orig_io_req;

        srr.srr_op = ELS_SRR;
        srr.srr_ox_id = htons(orig_io_req->xid);
        srr.srr_rx_id = htons(orig_io_req->rx_id);
        srr.srr_rel_off = htonl(offset);
        srr.srr_r_ctl = r_ctl;

        rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
            qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
                    "=0x%x\n", orig_io_req->xid);
                kfree(cb_arg);
                /* If we fail to queue SRR, send ABTS to orig_io */
                qedf_initiate_abts(orig_io_req, true);
                kref_put(&orig_io_req->refcount, qedf_release_cmd);
        } else
                /* Tell other threads that SRR is in progress */
                set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

        return rc;
}
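
/*
 * Before SRR can be sent for a partially received sequence, a firmware
 * sequence-recovery task is queued to clean up hardware state for the
 * exchange; the SRR itself is issued from the cleanup completion below.
 */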
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
        u32 offset, u8 r_ctl)
{
        struct qedf_rport *fcport;
        unsigned long flags;
        struct qedf_els_cb_arg *cb_arg;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;

        fcport = orig_io_req->fcport;

        QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
            "Doing sequence cleanup for xid=0x%x offset=%u.\n",
            orig_io_req->xid, offset);

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
                    "for sequence cleanup\n");
                return;
        }

        /* Get reference for cleanup request */
        kref_get(&orig_io_req->refcount);

        orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
        cb_arg->offset = offset;
        cb_arg->r_ctl = r_ctl;
        orig_io_req->cb_arg = cb_arg;

        qedf_cmd_timer_set(fcport->qedf, orig_io_req,
            QEDF_CLEANUP_TIMEOUT * HZ);

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));
        orig_io_req->task_params->sqe = sqe;

        init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
            offset);
        qedf_ring_doorbell(fcport);

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
}

void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
        struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
        int rc;
        struct qedf_els_cb_arg *cb_arg;

        cb_arg = io_req->cb_arg;

        /* If we timed out just free resources */
        if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
                goto free;

        /* Kill the timer we put on the request */
        cancel_delayed_work_sync(&io_req->timeout_work);

        rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
        if (rc)
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
                    "abort, xid=0x%x.\n", io_req->xid);

free:
        kfree(cb_arg);
        kref_put(&io_req->refcount, qedf_release_cmd);
}
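
/*
 * A command the target never saw (REC reported the exchange unknown) must
 * be reissued on a fresh exchange without completing it to the SCSI
 * midlayer; the original request is then aborted without returning the
 * scsi_cmnd.
 */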
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
        struct qedf_rport *fcport;
        struct qedf_ioreq *new_io_req;
        unsigned long flags;
        bool rc = false;

        fcport = orig_io_req->fcport;
        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL.\n");
                goto out;
        }

        if (!orig_io_req->sc_cmd) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
                    "xid=0x%x.\n", orig_io_req->xid);
                goto out;
        }

        new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
        if (!new_io_req) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
                    "io_req.\n");
                goto out;
        }
->sc_cmd
= orig_io_req
->sc_cmd
;

        /*
         * This keeps the sc_cmd struct from being returned to the tape
         * driver and being requeued twice. We do need to put a reference
         * for the original I/O request since we will not do a SCSI completion
         * for it.
         */
        orig_io_req->sc_cmd = NULL;
        kref_put(&orig_io_req->refcount, qedf_release_cmd);

        spin_lock_irqsave(&fcport->rport_lock, flags);

        /* kref for new command released in qedf_post_io_req on error */
        if (qedf_post_io_req(fcport, new_io_req)) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
                /* Return SQE to pool */
                atomic_inc(&fcport->free_sqes);
        } else {
                QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
                    "Reissued SCSI command from orig_xid=0x%x on "
                    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
                rc = true;
                /*
                 * Abort the original I/O but do not return SCSI command as
                 * it has been reissued on another OX_ID.
                 */
                spin_unlock_irqrestore(&fcport->rport_lock, flags);
                qedf_initiate_abts(orig_io_req, false);
                goto out;
        }

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
        return rc;
}
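
/*
 * REC (Read Exchange Concise) returns the target's view of an exchange -
 * bytes transferred and who holds sequence initiative - which determines
 * the recovery below: requeue the command, send SRR, or clean up the
 * sequence first.
 */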
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *rec_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *mp_fc_hdr, *fh;
        struct fc_frame *fp;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        int refcount;
        enum fc_rctl r_ctl;
        struct fc_els_ls_rjt *rjt;
        struct fc_els_rec_acc *acc;
        u8 opcode;
        u32 offset, e_stat;
        struct scsi_cmnd *sc_cmd;
        bool srr_needed = false;

        rec_req = cb_arg->io_req;
        qedf = rec_req->fcport->qedf;
        lport = qedf->lport;

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req)
                goto out_free;

        if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
            " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
            orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

        /* If a REC times out, free resources */
        if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
                goto out_put;

        /* Normalize response data into struct fc_frame */
        mp_req = &(rec_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        acc = resp_buf = mp_req->resp_buf;

        fp = fc_frame_alloc(lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "fc_frame_alloc failure.\n");
                goto out_put;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        opcode = fc_frame_payload_op(fp);
        if (opcode == ELS_LS_RJT) {
                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                    "Received LS_RJT for REC: er_reason=0x%x, "
                    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
                /*
                 * The following response(s) mean that we need to reissue the
                 * request on another exchange. We need to do this without
                 * informing the upper layers lest it cause an application
                 * error.
                 */
                if ((rjt->er_reason == ELS_RJT_LOGIC ||
                    rjt->er_reason == ELS_RJT_UNAB) &&
                    rjt->er_explan == ELS_EXPL_OXID_RXID) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                            "Handle CMD LOST case.\n");
                        qedf_requeue_io_req(orig_io_req);
                }
        } else if (opcode == ELS_LS_ACC) {
                offset = ntohl(acc->reca_fc4value);
                e_stat = ntohl(acc->reca_e_stat);
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
                    offset, e_stat);
                if (e_stat & ESB_ST_SEQ_INIT) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                            "Target has the seq init\n");
                        goto out_free_frame;
                }
                sc_cmd = orig_io_req->sc_cmd;
                if (!sc_cmd) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                            "sc_cmd is NULL for xid=0x%x.\n",
                            orig_io_req->xid);
                        goto out_free_frame;
                }

                /* SCSI write case */
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                        if (offset == orig_io_req->data_xfer_len) {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                    "WRITE - response lost.\n");
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                srr_needed = true;
                                offset = 0;
                        } else {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                    "WRITE - XFER_RDY/DATA lost.\n");
                                r_ctl = FC_RCTL_DD_DATA_DESC;
                                /* Use data from warning CQE instead of REC */
                                offset = orig_io_req->tx_buf_off;
                        }
                /* SCSI read case */
                } else {
                        if (orig_io_req->rx_buf_off ==
                            orig_io_req->data_xfer_len) {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                    "READ - response lost.\n");
                                srr_needed = true;
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                offset = 0;
                        } else {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                    "READ - DATA lost.\n");
                                /*
                                 * For read case we always set the offset to 0
                                 * for sequence recovery task.
                                 */
                                offset = 0;
                                r_ctl = FC_RCTL_DD_SOL_DATA;
                        }
                }

                if (srr_needed)
                        qedf_send_srr(orig_io_req, offset, r_ctl);
                else
                        qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
        }

out_free_frame:
        fc_frame_free(fp);
out_put:
        /* Put reference for original command since REC completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
        kfree(cb_arg);
}
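
/*
 * The REC payload identifies the exchange being queried by our S_ID and
 * its OX_ID/RX_ID; the reply is awaited for R_A_TOV.
 */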
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
        struct fc_els_rec rec;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t sid;
        uint32_t r_a_tov;
        int rc;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = orig_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        /* Take reference until REC command completion */
        kref_get(&orig_io_req->refcount);

        qedf = fcport->qedf;
        lport = qedf->lport;
        sid = fcport->sid;
        r_a_tov = lport->r_a_tov;

        memset(&rec, 0, sizeof(rec));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                    "REC\n");
                rc = -ENOMEM;
                goto rec_err;
        }

        cb_arg->aborted_io_req = orig_io_req;

        rec.rec_cmd = ELS_REC;
        hton24(rec.rec_s_id, sid);
        rec.rec_ox_id = htons(orig_io_req->xid);
        rec.rec_rx_id =
            htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
            "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
            orig_io_req->xid, rec.rec_rx_id);
        rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
            qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
                    "=0x%x\n", orig_io_req->xid);
                kfree(cb_arg);
                kref_put(&orig_io_req->refcount, qedf_release_cmd);
        }
        return rc;
}