/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2017 Cavium Inc.
 *
 *  This software is available under the terms of the GNU General Public License
 *  (GPL) Version 2, available from the file COPYING in the main directory of
 *  this source tree.
 */
#include "qedf.h"
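/*
 * ELS requests are issued to the firmware as "middle path" (MP) requests:
 * the driver builds the FC header and payload in the ioreq's qedf_mp_req,
 * initializes an MP task context, and posts a work queue entry to the
 * rport's send queue.  The supplied cb_func is invoked later from the CQE
 * completion path (qedf_process_els_compl).
 */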
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	uint32_t start_time = jiffies / HZ;
	uint32_t current_time;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

retry_els:
	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		current_time = jiffies / HZ;
		if ((current_time - start_time) > 10) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				   "els: Failed els 0x%x\n", op);
			rc = -ENOMEM;
			goto els_err;
		}
		/* Wait 20 ms before retrying the allocation */
		mdelay(20);
		goto retry_els;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "initiate_els els_req = 0x%p cb_arg = %p xid = %x\n",
		   els_req, cb_arg, els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
		goto els_err;
	}

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on the ELS request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "Ringing doorbell for ELS req\n");
	qedf_ring_doorbell(fcport);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}

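/*
 * Called from the CQE handler when a middle path ELS exchange completes.
 * Records the response length reported by the firmware and hands the
 * response off to the per-command callback.
 */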
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct e4_fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	uint16_t xid;
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "Entered with xid = 0x%x cmd_type = %d.\n",
		   els_req->xid, els_req->cmd_type);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	xid = els_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = els_req->sc_cmd;

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}

static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "rrq_compl: orig io = %p, orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/* This should return the aborted io_req to the command pool */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	kfree(cb_arg);
}

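/*
 * RRQ (Reinstate Recovery Qualifier) is sent after an exchange has been
 * aborted so that the OX_ID/RX_ID pair can be reused once the remote port
 * releases the recovery qualifier for the failed exchange.
 */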
/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "aborted_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "Sending RRQ orig io = %p, orig_xid = 0x%x\n",
		   aborted_io_req, aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx),
			  "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx),
			  "RRQ failed - release orig io req 0x%x\n",
			  aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}

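/*
 * Hand an ELS response received over the offloaded path back to libfc as
 * if it had arrived as a regular L2 frame: rewrite the header fields libfc
 * expects, recompute the CRC, and feed the frame to fc_exch_recv().
 */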
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;

	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;

	if (!fcport)
		return;

	rdata = fcport->rdata;
	if (rdata) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata)
			fc_rport_login(rdata);
	}
}

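/*
 * Completion handler for ELS requests that libfc originated but were sent
 * over the offloaded path (in this file, ADISC); reconstructs a struct
 * fc_frame from the firmware response so libfc can consume it.
 */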
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		kfree(cb_arg);
		return;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the command but rather do any internal cleanup and then let libfc
	 * time out the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded.  Force libfc to logout the session which
		 * will offload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			  "resp_len is beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto free_arg;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}

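/*
 * Send a libfc-built ADISC over the offloaded path, remembering the OX_ID
 * libfc assigned so the response can be completed on that exchange.
 */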
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx),
			  "Unable to allocate cb_arg for ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}

static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "Entered: orig_io=%p, orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

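/*
 * offset is the relative offset at which the target should resume the
 * information unit identified by r_ctl (data, transfer ready, or command
 * status).
 */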
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 sid, r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "Sending SRR orig_io=%p, orig_xid=0x%x\n",
		   orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx),
			  "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx),
			  "SRR failed - release orig_io_req=0x%x\n",
			  orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else {
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
	}

	return rc;
}

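/*
 * Stage a firmware sequence-recovery (cleanup) task for the original I/O.
 * The SRR itself is sent from qedf_process_seq_cleanup_compl() once the
 * firmware confirms the sequence cleanup has completed.
 */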
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Doing sequence cleanup for xid=0x%x offset=%u.\n",
	    orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			  "Unable to allocate cb_arg for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}

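/* Sequence cleanup completed: send the SRR the cleanup was staged for. */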
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
		goto free;

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx),
			  "Unable to send SRR, I/O will abort, xid=0x%x.\n",
			  io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			  "sc_cmd is NULL for xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
			  "Could not allocate new io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}

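/*
 * REC (Read Exchange Concise) reports how far an exchange progressed at
 * the target.  The LS_ACC exchange status bits and byte counts determine
 * whether to retransmit via SRR or to clean up the firmware sequence state
 * before resuming the exchange.
 */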
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "Entered: orig_io=%p, orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, er_explan=0x%x.\n",
		    rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange.  We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For the read case we always set the offset
				 * to 0 for the sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx),
			  "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		   "Sending REC orig_io=%p, orig_xid=0x%x rx_id=0x%x\n",
		   orig_io_req, orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx),
			  "REC failed - release orig_io_req=0x%x\n",
			  orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}