/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
static void csio_rnode_exit(struct csio_rnode *);

/* State machine forward declarations */
static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
/* RNF event mapping */
static enum csio_rn_ev fwevt_to_rnevt[] = {
	CSIO_RNFE_NONE,		/* None */
	CSIO_RNFE_LOGGED_IN,	/* PLOGI_ACC_RCVD  */
	CSIO_RNFE_NONE,		/* PLOGI_RJT_RCVD  */
	CSIO_RNFE_PLOGI_RECV,	/* PLOGI_RCVD	   */
	CSIO_RNFE_LOGO_RECV,	/* PLOGO_RCVD	   */
	CSIO_RNFE_PRLI_DONE,	/* PRLI_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* PRLI_RJT_RCVD   */
	CSIO_RNFE_PRLI_RECV,	/* PRLI_RCVD	   */
	CSIO_RNFE_PRLO_RECV,	/* PRLO_RCVD	   */
	CSIO_RNFE_NONE,		/* NPORT_ID_CHGD   */
	CSIO_RNFE_LOGO_RECV,	/* FLOGO_RCVD	   */
	CSIO_RNFE_NONE,		/* CLR_VIRT_LNK_RCVD */
	CSIO_RNFE_LOGGED_IN,	/* FLOGI_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* FLOGI_RJT_RCVD   */
	CSIO_RNFE_LOGGED_IN,	/* FDISC_ACC_RCVD   */
	CSIO_RNFE_NONE,		/* FDISC_RJT_RCVD   */
	CSIO_RNFE_NONE,		/* FLOGI_TMO_MAX_RETRY */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_ACC */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_RJT */
	CSIO_RNFE_NONE,		/* IMPL_LOGO_ADISC_CNFLT */
	CSIO_RNFE_NONE,		/* PRLI_TMO */
	CSIO_RNFE_NONE,		/* ADISC_TMO */
	CSIO_RNFE_NAME_MISSING,	/* RSCN_DEV_LOST */
	CSIO_RNFE_NONE,		/* SCR_ACC_RCVD */
	CSIO_RNFE_NONE,		/* ADISC_RJT_RCVD */
	CSIO_RNFE_NONE,		/* LOGO_SNT */
	CSIO_RNFE_LOGO_RECV,	/* PROTO_ERR_IMPL_LOGO */
};
#define CSIO_FWE_TO_RNFE(_evt)	((_evt > PROTO_ERR_IMPL_LOGO) ?		\
				 CSIO_RNFE_NONE : fwevt_to_rnevt[_evt])
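
/* Returns non-zero if the rnode state machine is in the READY state. */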
int
csio_is_rnode_ready(struct csio_rnode *rn)
{
	return csio_match_state(rn, csio_rns_ready);
}
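
/* Returns non-zero if the rnode state machine is in the UNINIT state. */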
static int
csio_is_rnode_uninit(struct csio_rnode *rn)
{
	return csio_match_state(rn, csio_rns_uninit);
}
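
/*
 * csio_is_rnode_wka - Checks whether the rport type is a well-known
 * address (fabric services) port.
 */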
static int
csio_is_rnode_wka(uint8_t rport_type)
{
	if ((rport_type == FLOGI_VFPORT) ||
	    (rport_type == FDISC_VFPORT) ||
	    (rport_type == NS_VNPORT) ||
	    (rport_type == FDMI_VNPORT))
		return 1;

	return 0;
}
/*
 * csio_rn_lookup - Finds the rnode with the given flowid
 * @ln: lnode
 * @flowid: flowid
 *
 * Does the rnode lookup on the given lnode and flowid. If no matching entry
 * is found, NULL is returned.
 */
static struct csio_rnode *
csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp;
	struct csio_rnode *rn;

	list_for_each(tmp, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		if (rn->flowid == flowid)
			return rn;
	}

	return NULL;
}
/*
 * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
 * @ln: lnode
 * @wwpn: wwpn
 *
 * Does the rnode lookup on the given lnode and wwpn. If no matching entry
 * is found, NULL is returned.
 */
static struct csio_rnode *
csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp;
	struct csio_rnode *rn;

	list_for_each(tmp, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
			return rn;
	}

	return NULL;
}
/*
 * csio_rnode_lookup_portid - Finds the rnode with the given portid
 * @ln: lnode
 * @portid: port id
 *
 * Lookup the rnode list for a given portid. If no matching entry
 * is found, NULL is returned.
 */
struct csio_rnode *
csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp;
	struct csio_rnode *rn;

	list_for_each(tmp, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		if (rn->nport_id == portid)
			return rn;
	}

	return NULL;
}
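
/*
 * csio_rn_dup_flowid - Checks whether the given flowid is already in use by
 * a ready rnode on any other lnode of this hw. If so, returns 1 and passes
 * back that lnode's vnp flowid through @vnp_flowid.
 */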
static int
csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
		   uint32_t *vnp_flowid)
{
	struct csio_rnode *rnhead;
	struct list_head *tmp, *tmp1;
	struct csio_rnode *rn;
	struct csio_lnode *ln_tmp;
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	list_for_each(tmp1, &hw->sln_head) {
		ln_tmp = (struct csio_lnode *) tmp1;
		if (ln_tmp == ln)
			continue;

		rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
		list_for_each(tmp, &rnhead->sm.sm_list) {

			rn = (struct csio_rnode *) tmp;
			if (csio_is_rnode_ready(rn)) {
				if (rn->flowid == rdev_flowid) {
					*vnp_flowid = csio_ln_flowid(ln_tmp);
					return 1;
				}
			}
		}
	}

	return 0;
}
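
/*
 * csio_alloc_rnode - Allocates and initializes an rnode from the HW rnode
 * mempool. Returns NULL on allocation or init failure.
 */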
static struct csio_rnode *
csio_alloc_rnode(struct csio_lnode *ln)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
	if (!rn)
		goto err;

	memset(rn, 0, sizeof(struct csio_rnode));
	if (csio_rnode_init(rn, ln))
		goto err_free;

	CSIO_INC_STATS(ln, n_rnode_alloc);

	return rn;

err_free:
	mempool_free(rn, hw->rnode_mempool);
err:
	CSIO_INC_STATS(ln, n_rnode_nomem);
	return NULL;
}
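
/* csio_free_rnode - Uninitializes the rnode and returns it to the mempool. */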
static void
csio_free_rnode(struct csio_rnode *rn)
{
	struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));

	csio_rnode_exit(rn);
	CSIO_INC_STATS(rn->lnp, n_rnode_free);
	mempool_free(rn, hw->rnode_mempool);
}
/*
 * csio_get_rnode - Gets rnode with the given flowid
 * @ln: lnode
 * @flowid: flow id
 *
 * Does the rnode lookup on the given lnode and flowid. If no matching
 * rnode is found, a new rnode is allocated for the given flowid and returned.
 */
static struct csio_rnode *
csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
{
	struct csio_rnode *rn;

	rn = csio_rn_lookup(ln, flowid);
	if (!rn) {
		rn = csio_alloc_rnode(ln);
		if (!rn)
			return NULL;

		csio_rn_flowid(rn) = flowid;
	}

	return rn;
}
/*
 * csio_put_rnode - Frees the given rnode
 * @ln: lnode
 * @rn: rnode
 *
 * Asserts that the rnode is in the uninit state before returning it
 * to the rnode mempool.
 */
void
csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
{
	CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
	csio_free_rnode(rn);
}
/*
 * csio_confirm_rnode - confirms rnode based on wwpn.
 * @ln: lnode
 * @rdev_flowid: remote device flowid
 * @rdevp: remote device params
 *
 * This routine searches the rnode list for an rnode with the same wwpn as
 * the new rnode. If there is a match, the matched rnode is returned;
 * otherwise a new rnode is allocated and returned.
 */
struct csio_rnode *
csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
		   struct fcoe_rdev_entry *rdevp)
{
	uint8_t rport_type;
	struct csio_rnode *rn, *match_rn;
	uint32_t vnp_flowid = 0;
	__be32 *port_id;

	port_id = (__be32 *)&rdevp->r_id[0];
	rport_type =
		FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);

	/* Drop rdev event for cntrl port */
	if (rport_type == FAB_CTLR_VNPORT) {
		csio_ln_dbg(ln,
			    "Unhandled rport_type:%d recv in rdev evt "
			    "ssni:x%x\n", rport_type, rdev_flowid);
		return NULL;
	}

	/* Lookup on flowid */
	rn = csio_rn_lookup(ln, rdev_flowid);
	if (!rn) {

		/* Drop events with duplicate flowid */
		if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
			csio_ln_warn(ln,
				     "ssni:%x already active on vnpi:%x",
				     rdev_flowid, vnp_flowid);
			return NULL;
		}

		/* Lookup on wwpn for NPORTs */
		rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
		if (!rn)
			goto alloc_rnode;

	} else {
		/* Lookup well-known ports with nport id */
		if (csio_is_rnode_wka(rport_type)) {
			match_rn = csio_rnode_lookup_portid(ln,
				((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
			if (match_rn == NULL) {
				csio_rn_flowid(rn) = CSIO_INVALID_IDX;
				goto alloc_rnode;
			}

			/*
			 * Now compare the wwpn to confirm that
			 * same port relogged in. If so update the matched rn.
			 * Else, go ahead and alloc a new rnode.
			 */
			if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
				csio_ln_dbg(ln,
					    "nport_id:x%x and wwpn:%llx"
					    " match for ssni:x%x\n",
					    rn->nport_id,
					    wwn_to_u64(rdevp->wwpn),
					    rdev_flowid);
				if (csio_is_rnode_ready(rn)) {
					csio_ln_warn(ln,
						     "rnode is already active "
						     "ssni:x%x\n", rdev_flowid);
					CSIO_ASSERT(0);
				}
				csio_rn_flowid(rn) = CSIO_INVALID_IDX;
				rn = match_rn;

				/* Update rn */
				goto found_rnode;
			}
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			goto alloc_rnode;
		}

		/* wwpn match */
		if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
			goto found_rnode;

		/* Search for an rnode that has the same wwpn */
		match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
		if (match_rn != NULL) {
			csio_ln_dbg(ln,
				"ssni:x%x changed for rport name(wwpn):%llx "
				"did:x%x\n", rdev_flowid,
				wwn_to_u64(rdevp->wwpn),
				rn->nport_id);
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			rn = match_rn;
		} else {
			csio_ln_dbg(ln,
				"rnode wwpn mismatch found ssni:x%x "
				"name(wwpn):%llx\n",
				rdev_flowid,
				wwn_to_u64(csio_rn_wwpn(rn)));
			if (csio_is_rnode_ready(rn)) {
				csio_ln_warn(ln,
					     "rnode is already active "
					     "wwpn:%llx ssni:x%x\n",
					     wwn_to_u64(csio_rn_wwpn(rn)),
					     rdev_flowid);
				CSIO_ASSERT(0);
			}
			csio_rn_flowid(rn) = CSIO_INVALID_IDX;
			goto alloc_rnode;
		}
	}

found_rnode:
	csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
		    rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

	/* Update flowid */
	csio_rn_flowid(rn) = rdev_flowid;

	/* update rdev entry */
	rn->rdev_entry = rdevp;
	CSIO_INC_STATS(ln, n_rnode_match);
	return rn;

alloc_rnode:
	rn = csio_get_rnode(ln, rdev_flowid);
	if (!rn)
		return NULL;

	csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
		    rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));

	/* update rdev entry */
	rn->rdev_entry = rdevp;
	return rn;
}
/*
 * csio_rn_verify_rparams - verify rparams.
 * @ln: lnode
 * @rn: rnode
 * @rdevp: remote device params
 *
 * Returns 0 if the rparams are verified, -EINVAL otherwise.
 */
static int
csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
		       struct fcoe_rdev_entry *rdevp)
{
	uint8_t null[8];
	uint8_t rport_type;
	uint8_t fc_class;
	__be32 *did;

	did = (__be32 *) &rdevp->r_id[0];
	rport_type =
		FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
	switch (rport_type) {
	case FLOGI_VFPORT:
		rn->role = CSIO_RNFR_FABRIC;
		if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
			csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
				    csio_rn_flowid(rn));
			return -EINVAL;
		}
		/* NPIV support */
		if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
			ln->flags |= CSIO_LNF_NPIVSUPP;

		break;

	case NS_VNPORT:
		rn->role = CSIO_RNFR_NS;
		if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
			csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
				    csio_rn_flowid(rn));
			return -EINVAL;
		}
		break;

	case REG_FC4_VNPORT:
	case REG_VNPORT:
		rn->role = CSIO_RNFR_NPORT;
		if (rdevp->event_cause == PRLI_ACC_RCVD ||
		    rdevp->event_cause == PRLI_RCVD) {
			if (FW_RDEV_WR_TASK_RETRY_ID_GET(
						rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;

			if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_RETRY;

			if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
				rn->fcp_flags |= FCP_SPPF_CONF_COMPL;

			if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
				rn->role |= CSIO_RNFR_TARGET;

			if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
				rn->role |= CSIO_RNFR_INITIATOR;
		}

		break;

	case FDMI_VNPORT:
	case FAB_CTLR_VNPORT:
		rn->role = 0;
		break;

	default:
		csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
			    csio_rn_flowid(rn), rport_type);
		return -EINVAL;
	}

	/* validate wwpn/wwnn for Name server/remote port */
	if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
		memset(null, 0, 8);
		if (!memcmp(rdevp->wwnn, null, 8)) {
			csio_ln_err(ln,
				    "ssni:x%x invalid wwnn received from"
				    " rport did:x%x\n",
				    csio_rn_flowid(rn),
				    (ntohl(*did) & CSIO_DID_MASK));
			return -EINVAL;
		}

		if (!memcmp(rdevp->wwpn, null, 8)) {
			csio_ln_err(ln,
				    "ssni:x%x invalid wwpn received from"
				    " rport did:x%x\n",
				    csio_rn_flowid(rn),
				    (ntohl(*did) & CSIO_DID_MASK));
			return -EINVAL;
		}
	}

	/* Copy wwnn, wwpn and nport id */
	rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
	memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
	memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
	rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
	fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
	rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);

	return 0;
}
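
/*
 * __csio_reg_rnode - Registers the rnode with the upper layers, dropping
 * the hw lock across the registration call.
 */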
static void
__csio_reg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	spin_unlock_irq(&hw->lock);
	csio_reg_rnode(rn);
	spin_lock_irq(&hw->lock);

	if (rn->role & CSIO_RNFR_TARGET)
		ln->n_scsi_tgts++;

	if (rn->nport_id == FC_FID_MGMT_SERV)
		csio_ln_fdmi_start(ln, (void *) rn);
}
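
/*
 * __csio_unreg_rnode - Unregisters the rnode from the upper layers and
 * completes any I/Os held on its host completion queue.
 */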
static void
__csio_unreg_rnode(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	LIST_HEAD(tmp_q);
	int cmpl = 0;

	if (!list_empty(&rn->host_cmpl_q)) {
		csio_dbg(hw, "Returning completion queue I/Os\n");
		list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
		cmpl = 1;
	}

	if (rn->role & CSIO_RNFR_TARGET) {
		ln->n_scsi_tgts--;
		ln->last_scan_ntgts--;
	}

	spin_unlock_irq(&hw->lock);
	csio_unreg_rnode(rn);
	spin_lock_irq(&hw->lock);

	/* Cleanup I/Os that were waiting for the rnode to unregister */
	if (cmpl)
		csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
}
/*****************************************************************************/
/* START: Rnode SM */
/*****************************************************************************/
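
/*
 * csio_rns_uninit - SM entry for the rnode UNINIT state.
 * @rn: rnode
 * @evt: SM event
 */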
static void
csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
		}
		break;
	case CSIO_RNFE_LOGO_RECV:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv "
			    "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;
	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv "
			    "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}
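
/*
 * csio_rns_ready - SM entry for the rnode READY state.
 * @rn: rnode
 * @evt: SM event
 */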
static void
csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv from did:x%x "
			    "in rn state[ready]\n", csio_rn_flowid(rn), evt,
			    rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;

	case CSIO_RNFE_PRLI_DONE:
	case CSIO_RNFE_PRLI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret)
			__csio_reg_rnode(rn);
		else
			CSIO_INC_STATS(rn, n_err_inval);

		break;
	case CSIO_RNFE_DOWN:
		csio_set_state(&rn->sm, csio_rns_offline);
		__csio_unreg_rnode(rn);

		/* FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
		 */
		break;

	case CSIO_RNFE_LOGO_RECV:
		csio_set_state(&rn->sm, csio_rns_offline);

		__csio_unreg_rnode(rn);

		/* FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
		 */
		break;

	case CSIO_RNFE_CLOSE:
		/*
		 * Each rnode receives a CLOSE event when the driver is
		 * removed or the device is reset.
		 * Note: All outstanding IOs on the remote port need to be
		 * returned to the upper layer with an appropriate error
		 * before sending the CLOSE event.
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		__csio_unreg_rnode(rn);
		break;

	case CSIO_RNFE_NAME_MISSING:
		csio_set_state(&rn->sm, csio_rns_disappeared);
		__csio_unreg_rnode(rn);

		/*
		 * FW is expected to internally abort outstanding SCSI WRs
		 * and return all SCSI WRs to the host with status "ABORTED".
		 */
		break;

	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv from did:x%x "
			    "in rn state[ready]\n", csio_rn_flowid(rn), evt,
			    rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}
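
/*
 * csio_rns_offline - SM entry for the rnode OFFLINE state.
 * @rn: rnode
 * @evt: SM event
 */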
static void
csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
			csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
		}
		break;

	case CSIO_RNFE_DOWN:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv from did:x%x "
			    "in rn state[offline]\n", csio_rn_flowid(rn), evt,
			    rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_drop);
		break;

	case CSIO_RNFE_CLOSE:
		/* Each rnode receives a CLOSE event when the driver is
		 * removed or the device is reset.
		 * Note: All outstanding IOs on the remote port need to be
		 * returned to the upper layer with an appropriate error
		 * before sending the CLOSE event.
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		break;

	case CSIO_RNFE_NAME_MISSING:
		csio_set_state(&rn->sm, csio_rns_disappeared);
		break;

	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv from did:x%x "
			    "in rn state[offline]\n", csio_rn_flowid(rn), evt,
			    rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}
/*
 * csio_rns_disappeared - SM entry for remote ports that have disappeared.
 * @rn: rnode
 * @evt: SM event
 */
static void
csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	int ret = 0;

	CSIO_INC_STATS(rn, n_evt_sm[evt]);

	switch (evt) {
	case CSIO_RNFE_LOGGED_IN:
	case CSIO_RNFE_PLOGI_RECV:
		ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
		if (!ret) {
			csio_set_state(&rn->sm, csio_rns_ready);
			__csio_reg_rnode(rn);
		} else {
			CSIO_INC_STATS(rn, n_err_inval);
			csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
		}
		break;

	case CSIO_RNFE_CLOSE:
		/* Each rnode receives a CLOSE event when the driver is
		 * removed or the device is reset.
		 * Note: All outstanding IOs on the remote port need to be
		 * returned to the upper layer with an appropriate error
		 * before sending the CLOSE event.
		 */
		csio_set_state(&rn->sm, csio_rns_uninit);
		break;

	case CSIO_RNFE_DOWN:
	case CSIO_RNFE_NAME_MISSING:
		csio_ln_dbg(ln,
			    "ssni:x%x Ignoring event %d recv from did x%x "
			    "in rn state[disappeared]\n", csio_rn_flowid(rn),
			    evt, rn->nport_id);
		break;

	default:
		csio_ln_dbg(ln,
			    "ssni:x%x unexp event %d recv from did x%x "
			    "in rn state[disappeared]\n", csio_rn_flowid(rn),
			    evt, rn->nport_id);
		CSIO_INC_STATS(rn, n_evt_unexp);
		break;
	}
}
/*****************************************************************************/
/* END: Rnode SM */
/*****************************************************************************/
/*
 * csio_rnode_devloss_handler - Device loss event handler
 * @rn: rnode
 *
 * Post event to close rnode SM and free rnode.
 */
void
csio_rnode_devloss_handler(struct csio_rnode *rn)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);

	/* ignore if the same rnode came back as online */
	if (csio_is_rnode_ready(rn))
		return;

	csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);

	/* Free rn if in uninit state */
	if (csio_is_rnode_uninit(rn))
		csio_put_rnode(ln, rn);
}
/*
 * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
 * @rn: rnode
 * @fwevt: firmware event to handle
 */
void
csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
{
	struct csio_lnode *ln = csio_rnode_to_lnode(rn);
	enum csio_rn_ev evt;

	evt = CSIO_FWE_TO_RNFE(fwevt);
	if (evt == CSIO_RNFE_NONE) {
		csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
			    csio_rn_flowid(rn), fwevt);
		CSIO_INC_STATS(rn, n_evt_unexp);
		return;
	}
	CSIO_INC_STATS(rn, n_evt_fw[fwevt]);

	/* Track previous & current events for debugging */
	rn->prev_evt = rn->cur_evt;
	rn->cur_evt = fwevt;

	/* Post event to rnode SM */
	csio_post_event(&rn->sm, evt);

	/* Free rn if in uninit state */
	if (csio_is_rnode_uninit(rn))
		csio_put_rnode(ln, rn);
}
/*
 * csio_rnode_init - Initialize rnode.
 * @rn: rnode
 * @ln: Associated lnode
 *
 * Caller is responsible for holding the lock. The lock is required
 * to be held for inserting the rnode in ln->rnhead list.
 */
static int
csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
{
	csio_rnode_to_lnode(rn) = ln;
	csio_init_state(&rn->sm, csio_rns_uninit);
	INIT_LIST_HEAD(&rn->host_cmpl_q);
	csio_rn_flowid(rn) = CSIO_INVALID_IDX;

	/* Add rnode to the lnode's rnode list (ln->rnhead) */
	list_add_tail(&rn->sm.sm_list, &ln->rnhead);

	return 0;
}
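
/* csio_rnode_exit - Removes the rnode from the lnode's rnode list. */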
static void
csio_rnode_exit(struct csio_rnode *rn)
{
	list_del_init(&rn->sm.sm_list);
	CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
}