/*
 * bnx2fc_els.c: QLogic Linux FCoE offload driver.
 * This file contains helper routines that handle ELS requests
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);

static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req;
	struct bnx2fc_cmd *rrq_req;
	int rc = 0;

	rrq_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;

	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
		       orig_io_req->xid, rrq_req->xid);

	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. Remove from active_cmd_queue.
		 */
		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
			       rrq_req->xid);

		if (rrq_req->on_active_queue) {
			list_del_init(&rrq_req->link);
			rrq_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(rrq_req);
			BUG_ON(rc);
		}
	}
	kfree(cb_arg);
}
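
/**
 * bnx2fc_send_rrq - Send an RRQ ELS to reclaim the exchange of an aborted I/O
 *
 * @aborted_io_req:	I/O request whose OX_ID/RX_ID pair is being reclaimed
 *
 * Retries the request for up to ten seconds while the firmware send queue
 * reports it is out of resources.
 */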
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = NULL;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = 0;
	unsigned long start = jiffies;
	int rc;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))
		return -EINVAL;

	lport = tgt->rdata->local_port;
	r_a_tov = lport->r_a_tov;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		       aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("rrq Failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	if (rc) {
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			       aborted_io_req->xid);
		kfree(cb_arg);
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}
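
/*
 * bnx2fc_l2_els_compl - completion handler for ELS requests issued on behalf
 * of libfc (ADISC, LOGO, RLS). Copies the response header and payload into a
 * single buffer and hands it up via bnx2fc_process_l2_frame_compl(), or
 * cleans up the exchange with the firmware if the request timed out.
 */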
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u32 frame_len;
	u16 l2_oxid;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * els req is timed out. cleanup the IO with FW and
		 * drop the completion. libfc will handle the els timeout
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}
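
/**
 * bnx2fc_send_adisc - Issue a libfc-built ADISC through the offload path,
 *	remembering libfc's OX_ID so the response can be returned as an L2
 *	frame.
 *
 * @tgt:	offloaded rport the ADISC is sent to
 * @fp:		frame containing the ADISC payload initialized by libfc
 */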
int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	/* adisc is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}
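
/**
 * bnx2fc_send_logo - Issue a libfc-built LOGO through the offload path.
 *
 * @tgt:	offloaded rport being logged out
 * @fp:		frame containing the LOGO payload initialized by libfc
 */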
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_logo *logo;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	logo = fc_frame_payload_get(fp, sizeof(*logo));
	/* logo is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}
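
/**
 * bnx2fc_send_rls - Issue a libfc-built RLS (Read Link Status) through the
 *	offload path.
 *
 * @tgt:	offloaded rport the RLS is sent to
 * @fp:		frame containing the RLS payload initialized by libfc
 */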
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_rls *rls;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	rls = fc_frame_payload_get(fp, sizeof(*rls));
	/* rls is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}
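
/*
 * bnx2fc_srr_compl - completion handler for an SRR ELS. Retries the SRR or
 * aborts the original I/O on timeout, and aborts the original I/O if the
 * target rejected the SRR.
 */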
static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timedout */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;

			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			      opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}
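
/*
 * bnx2fc_rec_compl - completion handler for a REC ELS. On timeout the REC is
 * retried or the original I/O is aborted; on LS_RJT (exchange not found) the
 * command is reposted; on LS_ACC the sequence is recovered with a sequence
 * cleanup or an SRR depending on who holds sequence initiative.
 */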
static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		/* els req is timed out. send abts for els */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* REC timedout. send ABTS to the orig IO req */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed"
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		goto rec_compl_done;
	}

	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		     rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			      e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc)
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					      " IO will abort\n");
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}
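
/**
 * bnx2fc_send_rec - Send a REC ELS to query the state of an exchange that
 *	reported an error.
 *
 * @orig_io_req:	I/O request whose exchange is being queried
 */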
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	struct fc_els_rec rec;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
				 bnx2fc_rec_compl, cb_arg,
				 r_a_tov);
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	}
rec_err:
	return rc;
}
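
/**
 * bnx2fc_send_srr - Send an SRR ELS asking the target to retransmit or
 *	restart a sequence of the original exchange.
 *
 * @orig_io_req:	I/O request whose sequence is being recovered
 * @offset:		relative offset at which retransmission should resume
 * @r_ctl:		R_CTL of the information unit being requested
 */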
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;
	orig_io_req->srr_offset = offset;
	orig_io_req->srr_rctl = r_ctl;

	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
				 bnx2fc_srr_compl, cb_arg,
				 r_a_tov);
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	} else
		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

srr_err:
	return rc;
}
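
/*
 * bnx2fc_initiate_els - build and post an ELS request of type @op to the
 * firmware send queue for @tgt. @cb_func is invoked with @cb_arg when the
 * response is processed, or when the request times out after @timer_msec.
 */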
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}
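
/*
 * bnx2fc_process_els_compl - handle firmware completion of an ELS request:
 * copy the response FC header and payload length out of the task context,
 * then invoke the per-request callback registered by bnx2fc_initiate_els().
 */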
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
		       "cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			       "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}
#define BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC	1
#define BNX2FC_FCOE_MAC_METHOD_FCF_MAP		2
#define BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC	3
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	struct fc_frame_header *fh;
	u8 *granted_mac;
	u8 fcoe_mac[6];
	u8 fc_map[3];
	int method;

	if (IS_ERR(fp))
		goto done;

	fh = fc_frame_header_get(fp);
	granted_mac = fr_cb(fp)->granted_mac;

	/*
	 * We set the source MAC for FCoE traffic based on the Granted MAC
	 * address from the switch.
	 *
	 * If granted_mac is non-zero, we use that.
	 * If the granted_mac is zeroed out, create the FCoE MAC based on
	 * the sel_fcf->fc_map and the d_id of the FLOGI frame.
	 * If sel_fcf->fc_map is 0, then we use the default FCF-MAC plus the
	 * d_id of the FLOGI frame.
	 */
	if (!is_zero_ether_addr(granted_mac)) {
		ether_addr_copy(fcoe_mac, granted_mac);
		method = BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC;
	} else if (fip->sel_fcf && fip->sel_fcf->fc_map != 0) {
		hton24(fc_map, fip->sel_fcf->fc_map);
		fcoe_mac[0] = fc_map[0];
		fcoe_mac[1] = fc_map[1];
		fcoe_mac[2] = fc_map[2];
		fcoe_mac[3] = fh->fh_d_id[0];
		fcoe_mac[4] = fh->fh_d_id[1];
		fcoe_mac[5] = fh->fh_d_id[2];
		method = BNX2FC_FCOE_MAC_METHOD_FCF_MAP;
	} else {
		fc_fcoe_set_mac(fcoe_mac, fh->fh_d_id);
		method = BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC;
	}

	BNX2FC_HBA_DBG(lport, "fcoe_mac=%pM method=%d\n", fcoe_mac, method);
	fip->update_mac(lport, fcoe_mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}
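
/*
 * bnx2fc_logo_resp - on a fabric LOGO response, clear the FCoE source MAC
 * programmed at FLOGI time before handing the frame back to libfc.
 */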
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	static u8 zero_mac[ETH_ALEN] = { 0 };

	if (!IS_ERR(fp))
		fip->update_mac(lport, zero_mac);
	fc_lport_logo_resp(seq, fp, lport);
}
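
/*
 * bnx2fc_elsct_send - wrapper around fc_elsct_send() that substitutes local
 * response handlers for FLOGI/FDISC and fabric LOGO so the FCoE controller
 * MAC can be updated before libfc processes the reply.
 */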
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
				 struct fc_frame *fp, unsigned int op,
				 void (*resp)(struct fc_seq *,
					      struct fc_frame *,
					      void *),
				 void *arg, u32 timeout)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (op) {
	case ELS_FLOGI:
	case ELS_FDISC:
		return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
				     fip, timeout);
	case ELS_LOGO:
		/* only hook onto fabric logouts, not port logouts */
		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
			break;
		return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
				     fip, timeout);
	}
	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}