/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 Cavium Inc.
 *
 *  This software is available under the terms of the GNU General Public License
 *  (GPL) Version 2, available from the file COPYING in the main directory of
 *  this source tree.
 */
#include "qedf.h"
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
        void *data, uint32_t data_len,
        void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
        struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
        struct qedf_ctx *qedf;
        struct fc_lport *lport;
        struct qedf_ioreq *els_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *fc_hdr;
        struct e4_fcoe_task_context *task;
        int rc = 0;
        uint32_t did, sid;
        uint16_t xid;
        struct fcoe_wqe *sqe;
        unsigned long flags;
        u16 sqe_idx;

        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL");
                rc = -EINVAL;
                goto els_err;
        }

        qedf = fcport->qedf;
        lport = qedf->lport;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

        rc = fc_remote_port_chkready(fcport->rport);
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
                rc = -EAGAIN;
                goto els_err;
        }
        if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
                    op);
                rc = -EAGAIN;
                goto els_err;
        }

        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
                rc = -EINVAL;
                goto els_err;
        }

        els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
        if (!els_req) {
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
                    "Failed to alloc ELS request 0x%x\n", op);
                rc = -ENOMEM;
                goto els_err;
        }

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "initiate_els els_req = 0x%p cb_arg = %p xid = %x\n",
            els_req, cb_arg, els_req->xid);
        els_req->sc_cmd = NULL;
        els_req->cmd_type = QEDF_ELS;
        els_req->fcport = fcport;
        els_req->cb_func = cb_func;
        cb_arg->io_req = els_req;
        cb_arg->op = op;
        els_req->cb_arg = cb_arg;
        els_req->data_xfer_len = data_len;

        /* Record which cpu this request is associated with */
        els_req->cpu = smp_processor_id();

        mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
        rc = qedf_init_mp_req(els_req);
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
                kref_put(&els_req->refcount, qedf_release_cmd);
                goto els_err;
        }

        /* Fill ELS Payload */
        if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
                memcpy(mp_req->req_buf, data, data_len);
        } else {
                QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
                els_req->cb_func = NULL;
                els_req->cb_arg = NULL;
                kref_put(&els_req->refcount, qedf_release_cmd);
                rc = -EINVAL;
        }

        if (rc)
                goto els_err;

        /* Fill FC header */
        fc_hdr = &(mp_req->req_fc_hdr);

        did = fcport->rdata->ids.port_id;
        sid = fcport->sid;

        __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
            FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
            FC_FC_SEQ_INIT, 0);

        /* Obtain exchange id */
        xid = els_req->xid;

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));

        /* Initialize task context for this IO request */
        task = qedf_get_task_mem(&qedf->tasks, xid);
        qedf_init_mp_task(els_req, task, sqe);

        /* Put timer on original I/O request */
        if (timer_msec)
                qedf_cmd_timer_set(qedf, els_req, timer_msec);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Ringing doorbell for ELS request\n");
        qedf_ring_doorbell(fcport);
        spin_unlock_irqrestore(&fcport->rport_lock, flags);

els_err:
        return rc;
}
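/*
 * ELS completion handler called from CQE processing: cancels the ELS
 * timer, records the response length reported in the midpath CQE, runs
 * the per-command callback, and drops the command reference.
 */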
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *els_req)
{
        struct fcoe_task_context *task_ctx;
        struct scsi_cmnd *sc_cmd;
        uint16_t xid;
        struct fcoe_cqe_midpath_info *mp_info;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Entered with xid = 0x%x cmd_type = %d.\n",
            els_req->xid, els_req->cmd_type);

        /* Kill the ELS timer */
        cancel_delayed_work(&els_req->timeout_work);

        xid = els_req->xid;
        task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
        sc_cmd = els_req->sc_cmd;

        /* Get ELS response length from CQE */
        mp_info = &cqe->cqe_info.midpath_info;
        els_req->mp_req.resp_len = mp_info->data_placement_size;

        /* Parse ELS response */
        if ((els_req->cb_func) && (els_req->cb_arg)) {
                els_req->cb_func(els_req->cb_arg);
                els_req->cb_arg = NULL;
        }

        kref_put(&els_req->refcount, qedf_release_cmd);
}
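/*
 * Completion callback for an RRQ ELS: drops the reference that
 * qedf_send_rrq() held on the aborted I/O request so it can return to
 * the command pool.
 */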
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *rrq_req;
        struct qedf_ctx *qedf;
        int refcount;

        rrq_req = cb_arg->io_req;
        qedf = rrq_req->fcport->qedf;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req)
                goto out_free;

        if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "rrq_compl: orig io = %p, orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
            orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

        /* This should return the aborted io_req to the command pool */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
        /*
         * Release a reference to the rrq request if we timed out, as the
         * rrq completion handler is called directly from the timeout handler
         * and not from els_compl, where the reference would have normally been
         * released.
         */
        if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
                kref_put(&rrq_req->refcount, qedf_release_cmd);
        kfree(cb_arg);
}
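/*
 * Send an RRQ (Reinstate Recovery Qualifier) ELS for an exchange that was
 * aborted, so the recovery qualifier for its OX_ID/RX_ID pair can be
 * retired once R_A_TOV expires. qedf_rrq_compl() drops the reference held
 * on the aborted request.
 */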
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
        struct fc_els_rrq rrq;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t sid;
        uint32_t r_a_tov;
        int rc;

        if (!aborted_io_req) {
                QEDF_ERR(NULL, "aborted_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = aborted_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        qedf = fcport->qedf;
        lport = qedf->lport;
        sid = fcport->sid;
        r_a_tov = lport->r_a_tov;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Sending RRQ orig io = %p, orig_xid = 0x%x\n",
            aborted_io_req, aborted_io_req->xid);
        memset(&rrq, 0, sizeof(rrq));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                    "RRQ\n");
                rc = -ENOMEM;
                goto rrq_err;
        }

        cb_arg->aborted_io_req = aborted_io_req;

        rrq.rrq_cmd = ELS_RRQ;
        hton24(rrq.rrq_s_id, sid);
        rrq.rrq_ox_id = htons(aborted_io_req->xid);
        rrq.rrq_rx_id =
            htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

        rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
            qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
                    "req 0x%x\n", aborted_io_req->xid);
                kfree(cb_arg);
                kref_put(&aborted_io_req->refcount, qedf_release_cmd);
        }
        return rc;
}
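/*
 * Complete an ELS exchange back to libfc as if the response had been
 * received as an L2 frame: rewrite the header fields libfc expects and
 * hand the frame to fc_exch_recv().
 */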
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
                                        struct fc_frame *fp,
                                        u16 l2_oxid)
{
        struct fc_lport *lport = fcport->qedf->lport;
        struct fc_frame_header *fh;
        u32 crc;

        fh = (struct fc_frame_header *)fc_frame_header_get(fp);

        /* Set the OXID we return to what libfc used */
        if (l2_oxid != FC_XID_UNKNOWN)
                fh->fh_ox_id = htons(l2_oxid);

        /* Setup header fields */
        fh->fh_r_ctl = FC_RCTL_ELS_REP;
        fh->fh_type = FC_TYPE_ELS;
        /* Last sequence, end sequence */
        fh->fh_f_ctl[0] = 0x98;
        hton24(fh->fh_d_id, lport->port_id);
        hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
        fh->fh_rx_id = 0xffff;

        /* Set frame attributes */
        crc = fcoe_fc_crc(fp);
        fc_frame_init(fp);
        fr_dev(fp) = lport;
        fr_sof(fp) = FC_SOF_I3;
        fr_eof(fp) = FC_EOF_T;
        fr_crc(fp) = cpu_to_le32(~crc);

        /* Send completed request to libfc */
        fc_exch_recv(lport, fp);
}
/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
        struct fc_lport *lport;
        struct fc_rport_priv *rdata;
        u32 port_id;

        if (!fcport)
                return;

        if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
            !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
            test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
                    fcport);
                return;
        }

        /* Set that we are now in reset */
        set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);

        rdata = fcport->rdata;
        if (rdata) {
                lport = fcport->qedf->lport;
                port_id = rdata->ids.port_id;
                QEDF_ERR(&(fcport->qedf->dbg_ctx),
                    "LOGO port_id=%x.\n", port_id);
                fc_rport_logoff(rdata);
                /* Recreate the rport and log back in */
                rdata = fc_rport_create(lport, port_id);
                if (rdata)
                        fc_rport_login(rdata);
        }
        clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
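/*
 * Completion callback for ELS requests sent on behalf of libfc (for
 * example ADISC): copies the firmware response into a new fc_frame and
 * returns it to libfc under the OX_ID libfc originally used.
 */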
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *els_req;
        struct qedf_rport *fcport;
        struct qedf_mp_req *mp_req;
        struct fc_frame *fp;
        struct fc_frame_header *fh, *mp_fc_hdr;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        u16 l2_oxid;

        l2_oxid = cb_arg->l2_oxid;
        els_req = cb_arg->io_req;

        if (!els_req) {
                QEDF_ERR(NULL, "els_req is NULL.\n");
                goto free_arg;
        }

        /*
         * If we are flushing the command just free the cb_arg as none of the
         * response data will be valid.
         */
        if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
                goto free_arg;

        fcport = els_req->fcport;
        mp_req = &(els_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        /*
         * If a middle path ELS command times out, don't try to return
         * the command; just do any internal cleanup and then let libfc
         * time out the command and clean up its internal resources.
         */
        if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
                /*
                 * If ADISC times out, libfc will timeout the exchange and then
                 * try to send a PLOGI which will timeout since the session is
                 * still offloaded. Force libfc to logout the session which
                 * will offload the connection and allow the PLOGI response to
                 * flow over the LL2 path.
                 */
                if (cb_arg->op == ELS_ADISC)
                        qedf_restart_rport(fcport);
                goto free_arg;
        }

        if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
                    "beyond page size.\n");
                goto free_arg;
        }

        fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx),
                    "fc_frame_alloc failure.\n");
                goto free_arg;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
            "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
        qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
        kfree(cb_arg);
}
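/*
 * Issue an ADISC over the offloaded session on behalf of libfc, saving
 * libfc's OX_ID in the callback argument so qedf_l2_els_compl() can
 * complete the response on the exchange libfc is waiting on.
 */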
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
        struct fc_els_adisc *adisc;
        struct fc_frame_header *fh;
        struct fc_lport *lport = fcport->qedf->lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t r_a_tov = lport->r_a_tov;
        int rc;

        qedf = fcport->qedf;
        fh = fc_frame_header_get(fp);

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                    "ADISC\n");
                rc = -ENOMEM;
                goto adisc_err;
        }
        cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

        adisc = fc_frame_payload_get(fp, sizeof(*adisc));

        rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
            qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
                kfree(cb_arg);
        }
        return rc;
}
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *srr_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *mp_fc_hdr, *fh;
        struct fc_frame *fp;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        int refcount;
        u8 opcode;

        srr_req = cb_arg->io_req;
        qedf = srr_req->fcport->qedf;
        lport = qedf->lport;

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req)
                goto out_free;

        clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

        if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Entered: orig_io=%p, orig_io_xid=0x%x, srr_xid=0x%x, refcount=%d\n",
            orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

        /* If a SRR times out, simply free resources */
        if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
                goto out_put;

        /* Normalize response data into struct fc_frame */
        mp_req = &(srr_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        fp = fc_frame_alloc(lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "fc_frame_alloc failure.\n");
                goto out_put;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        opcode = fc_frame_payload_op(fp);
        switch (opcode) {
        case ELS_LS_ACC:
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                    "SRR success.\n");
                break;
        case ELS_LS_RJT:
                QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
                    "SRR rejected.\n");
                qedf_initiate_abts(orig_io_req, true);
                break;
        }

        fc_frame_free(fp);
out_put:
        /* Put reference for original command since SRR completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
        kfree(cb_arg);
}
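/*
 * Send an SRR (Sequence Retransmission Request) asking the target to
 * retransmit the sequence identified by r_ctl starting at the given
 * relative offset. Holds a reference on orig_io_req until
 * qedf_srr_compl() runs.
 */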
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
        struct fcp_srr srr;
        struct qedf_ctx *qedf;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        u32 r_a_tov;
        int rc;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = orig_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        /* Take reference until SRR command completion */
        kref_get(&orig_io_req->refcount);

        qedf = fcport->qedf;
        lport = qedf->lport;
        r_a_tov = lport->r_a_tov;

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Sending SRR orig_io=%p, orig_xid=0x%x\n",
            orig_io_req, orig_io_req->xid);
        memset(&srr, 0, sizeof(srr));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                    "SRR\n");
                rc = -ENOMEM;
                goto srr_err;
        }

        cb_arg->aborted_io_req = orig_io_req;

        srr.srr_op = ELS_SRR;
        srr.srr_ox_id = htons(orig_io_req->xid);
        srr.srr_rx_id = htons(orig_io_req->rx_id);
        srr.srr_rel_off = htonl(offset);
        srr.srr_r_ctl = r_ctl;

        rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
            qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
                    "=0x%x\n", orig_io_req->xid);
                kfree(cb_arg);
                /* If we fail to queue SRR, send ABTS to orig_io */
                qedf_initiate_abts(orig_io_req, true);
                kref_put(&orig_io_req->refcount, qedf_release_cmd);
        } else
                /* Tell other threads that SRR is in progress */
                set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

        return rc;
}
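/*
 * Post a firmware sequence-cleanup task for the exchange; once it
 * completes, qedf_process_seq_cleanup_compl() sends the deferred SRR.
 */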
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
        u32 offset, u8 r_ctl)
{
        struct qedf_rport *fcport;
        unsigned long flags;
        struct qedf_els_cb_arg *cb_arg;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;

        fcport = orig_io_req->fcport;

        QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
            "Doing sequence cleanup for xid=0x%x offset=%u.\n",
            orig_io_req->xid, offset);

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
                    "for sequence cleanup\n");
                return;
        }

        /* Get reference for cleanup request */
        kref_get(&orig_io_req->refcount);

        orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
        cb_arg->offset = offset;
        cb_arg->r_ctl = r_ctl;
        orig_io_req->cb_arg = cb_arg;

        qedf_cmd_timer_set(fcport->qedf, orig_io_req,
            QEDF_CLEANUP_TIMEOUT * HZ);

        spin_lock_irqsave(&fcport->rport_lock, flags);

        sqe_idx = qedf_get_sqe_idx(fcport);
        sqe = &fcport->sq[sqe_idx];
        memset(sqe, 0, sizeof(struct fcoe_wqe));
        orig_io_req->task_params->sqe = sqe;

        init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
            offset);
        qedf_ring_doorbell(fcport);

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
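/*
 * Completion handler for the sequence-cleanup task: cancels the cleanup
 * timer and issues the SRR that was deferred in
 * qedf_initiate_seq_cleanup().
 */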
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
        struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
        int rc;
        struct qedf_els_cb_arg *cb_arg;

        cb_arg = io_req->cb_arg;

        /* If we timed out just free resources */
        if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
                goto free;

        /* Kill the timer we put on the request */
        cancel_delayed_work_sync(&io_req->timeout_work);

        rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
        if (rc)
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
                    "abort, xid=0x%x.\n", io_req->xid);
free:
        kfree(cb_arg);
        kref_put(&io_req->refcount, qedf_release_cmd);
}
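/*
 * Handle the REC "command lost" case by reissuing the SCSI command on a
 * new exchange and aborting the original one, without completing the
 * command back to the SCSI layer twice.
 */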
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
        struct qedf_rport *fcport;
        struct qedf_ioreq *new_io_req;
        unsigned long flags;
        bool rc = false;

        fcport = orig_io_req->fcport;
        if (!fcport) {
                QEDF_ERR(NULL, "fcport is NULL.\n");
                goto out;
        }

        if (!orig_io_req->sc_cmd) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
                    "xid=0x%x.\n", orig_io_req->xid);
                goto out;
        }

        new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
        if (!new_io_req) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
                    "io_req.\n");
                goto out;
        }

        new_io_req->sc_cmd = orig_io_req->sc_cmd;

        /*
         * This keeps the sc_cmd struct from being returned to the tape
         * driver and being requeued twice. We do need to put a reference
         * for the original I/O request since we will not do a SCSI completion
         * for it.
         */
        orig_io_req->sc_cmd = NULL;
        kref_put(&orig_io_req->refcount, qedf_release_cmd);

        spin_lock_irqsave(&fcport->rport_lock, flags);

        /* kref for new command released in qedf_post_io_req on error */
        if (qedf_post_io_req(fcport, new_io_req)) {
                QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
                /* Return SQE to pool */
                atomic_inc(&fcport->free_sqes);
        } else {
                QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
                    "Reissued SCSI command from orig_xid=0x%x on "
                    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
                /*
                 * Abort the original I/O but do not return SCSI command as
                 * it has been reissued on another OX_ID.
                 */
                spin_unlock_irqrestore(&fcport->rport_lock, flags);
                qedf_initiate_abts(orig_io_req, false);
                rc = true;
                goto out;
        }

        spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
        return rc;
}
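/*
 * Completion callback for a REC ELS: an LS_RJT with an OX_ID/RX_ID
 * explanation means the command was lost and is requeued; an LS_ACC is
 * parsed to decide whether to send an SRR directly or to run a firmware
 * sequence cleanup first.
 */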
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
        struct qedf_ioreq *orig_io_req;
        struct qedf_ioreq *rec_req;
        struct qedf_mp_req *mp_req;
        struct fc_frame_header *mp_fc_hdr, *fh;
        struct fc_frame *fp;
        void *resp_buf, *fc_payload;
        u32 resp_len;
        struct fc_lport *lport;
        struct qedf_ctx *qedf;
        int refcount;
        enum fc_rctl r_ctl;
        struct fc_els_ls_rjt *rjt;
        struct fc_els_rec_acc *acc;
        u8 opcode;
        u32 offset, e_stat;
        struct scsi_cmnd *sc_cmd;
        bool srr_needed = false;

        rec_req = cb_arg->io_req;
        qedf = rec_req->fcport->qedf;
        lport = qedf->lport;

        orig_io_req = cb_arg->aborted_io_req;

        if (!orig_io_req)
                goto out_free;

        if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
            rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
                cancel_delayed_work_sync(&orig_io_req->timeout_work);

        refcount = kref_read(&orig_io_req->refcount);
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Entered: orig_io=%p, orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
            orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

        /* If a REC times out, free resources */
        if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
                goto out_put;

        /* Normalize response data into struct fc_frame */
        mp_req = &(rec_req->mp_req);
        mp_fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        acc = resp_buf = mp_req->resp_buf;

        fp = fc_frame_alloc(lport, resp_len);
        if (!fp) {
                QEDF_ERR(&(qedf->dbg_ctx),
                    "fc_frame_alloc failure.\n");
                goto out_put;
        }

        /* Copy frame header from firmware into fp */
        fh = (struct fc_frame_header *)fc_frame_header_get(fp);
        memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

        /* Copy payload from firmware into fp */
        fc_payload = fc_frame_payload_get(fp, resp_len);
        memcpy(fc_payload, resp_buf, resp_len);

        opcode = fc_frame_payload_op(fp);
        if (opcode == ELS_LS_RJT) {
                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                    "Received LS_RJT for REC: er_reason=0x%x, "
                    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
                /*
                 * The following response(s) mean that we need to reissue the
                 * request on another exchange. We need to do this without
                 * informing the upper layers lest it cause an application
                 * error.
                 */
                if ((rjt->er_reason == ELS_RJT_LOGIC ||
                    rjt->er_reason == ELS_RJT_UNAB) &&
                    rjt->er_explan == ELS_EXPL_OXID_RXID) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                            "Handle CMD LOST case.\n");
                        qedf_requeue_io_req(orig_io_req);
                }
        } else if (opcode == ELS_LS_ACC) {
                offset = ntohl(acc->reca_fc4value);
                e_stat = ntohl(acc->reca_e_stat);
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
                    offset, e_stat);
                if (e_stat & ESB_ST_SEQ_INIT) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                            "Target has the seq init\n");
                        goto out_free_frame;
                }
                sc_cmd = orig_io_req->sc_cmd;
                if (!sc_cmd) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                            "sc_cmd is NULL for xid=0x%x.\n",
                            orig_io_req->xid);
                        goto out_free_frame;
                }
                /* SCSI write case */
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                        if (offset == orig_io_req->data_xfer_len) {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                    "WRITE - response lost.\n");
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                srr_needed = true;
                                offset = 0;
                        } else {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                    "WRITE - XFER_RDY/DATA lost.\n");
                                r_ctl = FC_RCTL_DD_DATA_DESC;
                                /* Use data from warning CQE instead of REC */
                                offset = orig_io_req->tx_buf_off;
                        }
                /* SCSI read case */
                } else {
                        if (orig_io_req->rx_buf_off ==
                            orig_io_req->data_xfer_len) {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                    "READ - response lost.\n");
                                srr_needed = true;
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                offset = 0;
                        } else {
                                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
                                    "READ - DATA lost.\n");
                                /*
                                 * For read case we always set the offset to 0
                                 * for sequence recovery task.
                                 */
                                offset = 0;
                                r_ctl = FC_RCTL_DD_SOL_DATA;
                        }
                }

                if (srr_needed)
                        qedf_send_srr(orig_io_req, offset, r_ctl);
                else
                        qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
        }

out_free_frame:
        fc_frame_free(fp);
out_put:
        /* Put reference for original command since REC completed */
        kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
        kfree(cb_arg);
}
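/*
 * Send a REC (Read Exchange Concise) ELS to query the target's state for
 * the given exchange; qedf_rec_compl() chooses the recovery action based
 * on the response.
 */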
/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
        struct fc_els_rec rec;
        struct qedf_rport *fcport;
        struct fc_lport *lport;
        struct qedf_els_cb_arg *cb_arg = NULL;
        struct qedf_ctx *qedf;
        uint32_t sid;
        uint32_t r_a_tov;
        int rc;

        if (!orig_io_req) {
                QEDF_ERR(NULL, "orig_io_req is NULL.\n");
                return -EINVAL;
        }

        fcport = orig_io_req->fcport;

        /* Check that fcport is still offloaded */
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
                return -EINVAL;
        }

        if (!fcport->qedf) {
                QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
                return -EINVAL;
        }

        /* Take reference until REC command completion */
        kref_get(&orig_io_req->refcount);

        qedf = fcport->qedf;
        lport = qedf->lport;
        sid = fcport->sid;
        r_a_tov = lport->r_a_tov;

        memset(&rec, 0, sizeof(rec));

        cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
                    "REC\n");
                rc = -ENOMEM;
                goto rec_err;
        }

        cb_arg->aborted_io_req = orig_io_req;

        rec.rec_cmd = ELS_REC;
        hton24(rec.rec_s_id, sid);
        rec.rec_ox_id = htons(orig_io_req->xid);
        rec.rec_rx_id =
            htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
            "Sending REC orig_io=%p, orig_xid=0x%x rx_id=0x%x\n",
            orig_io_req, orig_io_req->xid, rec.rec_rx_id);
        rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
            qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
        if (rc) {
                QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
                    "=0x%x\n", orig_io_req->xid);
                kfree(cb_arg);
                kref_put(&orig_io_req->refcount, qedf_release_cmd);
        }
        return rc;
}
);