1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term *
5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
7 * This program is free software; you can redistribute it and/or *
8 * modify it under the terms of version 2 of the GNU General *
9 * Public License as published by the Free Software Foundation. *
10 * This program is distributed in the hope that it will be useful. *
11 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
12 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
13 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
14 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
15 * TO BE LEGALLY INVALID. See the GNU General Public License for *
16 * more details, a copy of which can be found in the file COPYING *
17 * included with this package. *
18 ********************************************************************/
21 * This file implements remote node state machines for:
23 * - Fabric controller events.
24 * - Name/directory services interaction.
25 * - Point-to-point logins.
29 * fabric_sm Node State Machine: Fabric States
30 * ns_sm Node State Machine: Name/Directory Services States
31 * p2p_sm Node State Machine: Point-to-Point Node States
35 #include "efc_fabric.h"
36 #include "efc_device.h"
38 static void efc_fabric_initiate_shutdown(struct efc_node_s
*node
);
39 static void *__efc_fabric_common(const char *funcname
,
40 struct efc_sm_ctx_s
*ctx
,
41 enum efc_sm_event_e evt
, void *arg
);
42 static int efc_start_ns_node(struct efc_sli_port_s
*sport
);
43 static int efc_start_fabctl_node(struct efc_sli_port_s
*sport
);
44 static int efc_process_gidpt_payload(struct efc_node_s
*node
,
45 struct fcct_gidpt_acc_s
*gidpt
,
47 static void efc_process_rscn(struct efc_node_s
*node
,
48 struct efc_node_cb_s
*cbdata
);
49 static uint64_t efc_get_wwpn(struct fc_plogi_payload_s
*sp
);
50 static void gidpt_delay_timer_cb(struct timer_list
*t
);
54 * @brief Fabric node state machine: Initial state.
57 * Send an FLOGI to a well-known fabric.
59 * @param ctx Remote node sm context.
60 * @param evt Event to process.
61 * @param arg Per event optional argument.
63 * @return Returns NULL.
66 __efc_fabric_init(struct efc_sm_ctx_s
*ctx
, enum efc_sm_event_e evt
,
69 struct efc_node_s
*node
= ctx
->app
;
70 struct efc_lport
*efc
= node
->efc
;
72 efc_node_evt_set(ctx
, evt
, __func__
);
77 case EFC_EVT_REENTER
: /* not sure why we're getting these ... */
78 efc_log_debug(efc
, ">>> reenter !!\n");
81 /* sm: / send FLOGI */
82 efc
->tt
.els_send(efc
, node
, ELS_FLOGI
,
83 EFC_FC_FLOGI_TIMEOUT_SEC
,
84 EFC_FC_ELS_DEFAULT_RETRIES
);
85 efc_node_transition(node
, __efc_fabric_flogi_wait_rsp
, NULL
);
89 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
98 * @brief Set sport topology.
101 * Set sport topology.
103 * @param node Pointer to the node for which the topology is set.
104 * @param topology Topology to set.
106 * @return Returns NULL.
109 efc_fabric_set_topology(struct efc_node_s
*node
,
110 enum efc_sport_topology_e topology
)
112 node
->sport
->topology
= topology
;
117 * @brief Set sport topology.
120 * Nofity sport topology.
122 * @param node Pointer to the node for which the topology is set.
123 * @param topology Topology to set.
125 * @return Returns NULL.
128 efc_fabric_notify_topology(struct efc_node_s
*node
)
130 struct efc_node_s
*tmp_node
;
131 struct efc_node_s
*next
;
133 enum efc_sport_topology_e topology
= node
->sport
->topology
;
135 * now loop through the nodes in the sport
136 * and send topology notification
138 efc_sport_lock(node
->sport
);
139 list_for_each_entry_safe(tmp_node
, next
, &node
->sport
->node_list
,
141 if (tmp_node
!= node
) {
142 efc_node_post_event(tmp_node
,
143 EFC_EVT_SPORT_TOPOLOGY_NOTIFY
,
147 efc_sport_unlock(node
->sport
);
152 * @brief Fabric node state machine: Wait for an FLOGI response.
155 * Wait for an FLOGI response event.
157 * @param ctx Remote node state machine context.
158 * @param evt Event to process.
159 * @param arg Per event optional argument.
161 * @return Returns NULL.
165 __efc_fabric_flogi_wait_rsp(struct efc_sm_ctx_s
*ctx
,
166 enum efc_sm_event_e evt
, void *arg
)
168 struct efc_node_cb_s
*cbdata
= arg
;
169 struct efc_node_s
*node
= ctx
->app
;
171 efc_node_evt_set(ctx
, evt
, __func__
);
176 case EFC_EVT_SRRS_ELS_REQ_OK
: {
177 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_FLOGI
,
178 __efc_fabric_common
, __func__
)) {
181 efc_assert(node
->els_req_cnt
, NULL
);
184 memcpy(node
->sport
->domain
->flogi_service_params
,
185 cbdata
->els_rsp
.virt
,
186 sizeof(struct fc_plogi_payload_s
));
188 /* Check to see if the fabric is an F_PORT or and N_PORT */
189 if (!efc_rnode_is_nport(cbdata
->els_rsp
.virt
)) {
190 /* sm: if not nport / efc_domain_attach */
191 /* ext_status has the fc_id, attach domain */
192 if (efc_rnode_is_npiv_capable(cbdata
->els_rsp
.virt
)) {
193 efc_log_debug(node
->efc
,
194 " NPIV is enabled at switch side\n");
195 //node->efc->sw_feature_cap |= 1<<10;
197 efc_fabric_set_topology(node
,
198 EFC_SPORT_TOPOLOGY_FABRIC
);
199 efc_fabric_notify_topology(node
);
200 efc_assert(!node
->sport
->domain
->attached
, NULL
);
201 efc_domain_attach(node
->sport
->domain
,
203 efc_node_transition(node
,
204 __efc_fabric_wait_domain_attach
,
209 /* sm: if nport and p2p_winner / efc_domain_attach */
210 efc_fabric_set_topology(node
, EFC_SPORT_TOPOLOGY_P2P
);
211 if (efc_p2p_setup(node
->sport
)) {
213 "p2p setup failed, shutting down node\n");
214 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
215 efc_fabric_initiate_shutdown(node
);
219 if (node
->sport
->p2p_winner
) {
220 efc_node_transition(node
,
221 __efc_p2p_wait_domain_attach
,
223 if (node
->sport
->domain
->attached
&&
224 !node
->sport
->domain
->domain_notify_pend
) {
227 * just send ATTACH_OK
230 "p2p winner, domain already attached\n");
231 efc_node_post_event(node
,
232 EFC_EVT_DOMAIN_ATTACH_OK
,
237 * peer is p2p winner;
238 * PLOGI will be received on the
240 * this node has served its purpose
242 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
243 efc_fabric_initiate_shutdown(node
);
249 case EFC_EVT_ELS_REQ_ABORTED
:
250 case EFC_EVT_SRRS_ELS_REQ_RJT
:
251 case EFC_EVT_SRRS_ELS_REQ_FAIL
: {
252 struct efc_sli_port_s
*sport
= node
->sport
;
254 * with these errors, we have no recovery,
255 * so shutdown the sport, leave the link
256 * up and the domain ready
258 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_FLOGI
,
259 __efc_fabric_common
, __func__
)) {
263 "FLOGI failed evt=%s, shutting down sport [%s]\n",
264 efc_sm_event_name(evt
), sport
->display_name
);
265 efc_assert(node
->els_req_cnt
, NULL
);
267 efc_sm_post_event(&sport
->sm
, EFC_EVT_SHUTDOWN
, NULL
);
272 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
281 * @brief Fabric node state machine: Initial state for a virtual port.
284 * State entered when a virtual port is created. Send FDISC.
286 * @param ctx Remote node state machine context.
287 * @param evt Event to process.
288 * @param arg Per event optional argument.
290 * @return Returns NULL.
293 __efc_vport_fabric_init(struct efc_sm_ctx_s
*ctx
,
294 enum efc_sm_event_e evt
, void *arg
)
296 struct efc_node_s
*node
= ctx
->app
;
297 struct efc_lport
*efc
= node
->efc
;
299 efc_node_evt_set(ctx
, evt
, __func__
);
305 /* sm: / send FDISC */
306 efc
->tt
.els_send(efc
, node
, ELS_FDISC
,
307 EFC_FC_FLOGI_TIMEOUT_SEC
,
308 EFC_FC_ELS_DEFAULT_RETRIES
);
310 efc_node_transition(node
, __efc_fabric_fdisc_wait_rsp
, NULL
);
314 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
323 * @brief Fabric node state machine: Wait for an FDISC response
326 * Used for a virtual port. Waits for an FDISC response.
327 * If OK, issue a HW port attach.
329 * @param ctx Remote node state machine context.
330 * @param evt Event to process.
331 * @param arg Per event optional argument.
333 * @return Returns NULL.
336 __efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx_s
*ctx
,
337 enum efc_sm_event_e evt
, void *arg
)
339 struct efc_node_cb_s
*cbdata
= arg
;
340 struct efc_node_s
*node
= ctx
->app
;
342 efc_node_evt_set(ctx
, evt
, __func__
);
347 case EFC_EVT_SRRS_ELS_REQ_OK
: {
348 /* fc_id is in ext_status */
349 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_FDISC
,
350 __efc_fabric_common
, __func__
)) {
354 efc_assert(node
->els_req_cnt
, NULL
);
356 /* sm: / efc_sport_attach */
357 efc_sport_attach(node
->sport
, cbdata
->ext_status
);
358 efc_node_transition(node
, __efc_fabric_wait_domain_attach
,
363 case EFC_EVT_SRRS_ELS_REQ_RJT
:
364 case EFC_EVT_SRRS_ELS_REQ_FAIL
: {
365 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_FDISC
,
366 __efc_fabric_common
, __func__
)) {
369 efc_assert(node
->els_req_cnt
, NULL
);
371 efc_log_err(node
->efc
, "FDISC failed, shutting down sport\n");
372 /* sm: / shutdown sport */
373 efc_sm_post_event(&node
->sport
->sm
, EFC_EVT_SHUTDOWN
, NULL
);
378 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
387 * @brief Fabric node state machine: Wait for a domain/sport attach event.
390 * Waits for a domain/sport attach event.
392 * @param ctx Remote node state machine context.
393 * @param evt Event to process.
394 * @param arg Per event optional argument.
396 * @return Returns NULL.
399 __efc_fabric_wait_domain_attach(struct efc_sm_ctx_s
*ctx
,
400 enum efc_sm_event_e evt
, void *arg
)
402 struct efc_node_s
*node
= ctx
->app
;
404 efc_node_evt_set(ctx
, evt
, __func__
);
410 efc_node_hold_frames(node
);
414 efc_node_accept_frames(node
);
416 case EFC_EVT_DOMAIN_ATTACH_OK
:
417 case EFC_EVT_SPORT_ATTACH_OK
: {
420 rc
= efc_start_ns_node(node
->sport
);
424 /* sm: if enable_ini / start fabctl node */
425 /* Instantiate the fabric controller (sends SCR) */
426 if (node
->sport
->enable_rscn
) {
427 rc
= efc_start_fabctl_node(node
->sport
);
431 efc_node_transition(node
, __efc_fabric_idle
, NULL
);
435 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
444 * @brief Fabric node state machine: Fabric node is idle.
447 * Wait for fabric node events.
449 * @param ctx Remote node state machine context.
450 * @param evt Event to process.
451 * @param arg Per event optional argument.
453 * @return Returns NULL.
456 __efc_fabric_idle(struct efc_sm_ctx_s
*ctx
, enum efc_sm_event_e evt
,
459 struct efc_node_s
*node
= ctx
->app
;
461 efc_node_evt_set(ctx
, evt
, __func__
);
466 case EFC_EVT_DOMAIN_ATTACH_OK
:
469 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
478 * @brief Name services node state machine: Initialize.
481 * A PLOGI is sent to the well-known name/directory services node.
483 * @param ctx Remote node state machine context.
484 * @param evt Event to process.
485 * @param arg Per event optional argument.
487 * @return Returns NULL.
490 __efc_ns_init(struct efc_sm_ctx_s
*ctx
, enum efc_sm_event_e evt
, void *arg
)
492 struct efc_node_s
*node
= ctx
->app
;
493 struct efc_lport
*efc
= node
->efc
;
495 efc_node_evt_set(ctx
, evt
, __func__
);
501 /* sm: / send PLOGI */
502 efc
->tt
.els_send(efc
, node
, ELS_PLOGI
,
503 EFC_FC_FLOGI_TIMEOUT_SEC
,
504 EFC_FC_ELS_DEFAULT_RETRIES
);
505 efc_node_transition(node
, __efc_ns_plogi_wait_rsp
, NULL
);
508 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
517 * @brief Name services node state machine: Wait for a PLOGI response.
520 * Waits for a response from PLOGI to name services node, then issues a
521 * node attach request to the HW.
523 * @param ctx Remote node state machine context.
524 * @param evt Event to process.
525 * @param arg Per event optional argument.
527 * @return Returns NULL.
530 __efc_ns_plogi_wait_rsp(struct efc_sm_ctx_s
*ctx
,
531 enum efc_sm_event_e evt
, void *arg
)
534 struct efc_node_cb_s
*cbdata
= arg
;
535 struct efc_node_s
*node
= ctx
->app
;
537 efc_node_evt_set(ctx
, evt
, __func__
);
542 case EFC_EVT_SRRS_ELS_REQ_OK
: {
543 /* Save service parameters */
544 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_PLOGI
,
545 __efc_fabric_common
, __func__
)) {
548 efc_assert(node
->els_req_cnt
, NULL
);
550 /* sm: / save sparams, efc_node_attach */
551 efc_node_save_sparms(node
, cbdata
->els_rsp
.virt
);
552 rc
= efc_node_attach(node
);
553 efc_node_transition(node
, __efc_ns_wait_node_attach
, NULL
);
554 if (rc
== EFC_HW_RTN_SUCCESS_SYNC
)
555 efc_node_post_event(node
, EFC_EVT_NODE_ATTACH_OK
,
560 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
569 * @brief Name services node state machine: Wait for a node attach completion.
572 * Waits for a node attach completion, then issues an RFTID name services
575 * @param ctx Remote node state machine context.
576 * @param evt Event to process.
577 * @param arg Per event optional argument.
579 * @return Returns NULL.
582 __efc_ns_wait_node_attach(struct efc_sm_ctx_s
*ctx
,
583 enum efc_sm_event_e evt
, void *arg
)
585 struct efc_node_s
*node
= ctx
->app
;
586 struct efc_lport
*efc
= node
->efc
;
588 efc_node_evt_set(ctx
, evt
, __func__
);
594 efc_node_hold_frames(node
);
598 efc_node_accept_frames(node
);
601 case EFC_EVT_NODE_ATTACH_OK
:
602 node
->attached
= true;
603 /* sm: / send RFTID */
604 efc
->tt
.els_send_ct(efc
, node
, FC_RCTL_ELS
,
605 EFC_FC_ELS_SEND_DEFAULT_TIMEOUT
,
606 EFC_FC_ELS_DEFAULT_RETRIES
);
607 efc_node_transition(node
, __efc_ns_rftid_wait_rsp
, NULL
);
610 case EFC_EVT_NODE_ATTACH_FAIL
:
611 /* node attach failed, shutdown the node */
612 node
->attached
= false;
613 node_printf(node
, "Node attach failed\n");
614 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
615 efc_fabric_initiate_shutdown(node
);
618 case EFC_EVT_SHUTDOWN
:
619 node_printf(node
, "Shutdown event received\n");
620 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
621 efc_node_transition(node
,
622 __efc_fabric_wait_attach_evt_shutdown
,
627 * if receive RSCN just ignore,
628 * we haven't sent GID_PT yet (ACC sent by fabctl node)
630 case EFC_EVT_RSCN_RCVD
:
634 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
643 * @brief Wait for a domain/sport/node attach completion, then
647 * Waits for a domain/sport/node attach completion, then shuts
650 * @param ctx Remote node state machine context.
651 * @param evt Event to process.
652 * @param arg Per event optional argument.
654 * @return Returns NULL.
657 __efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx_s
*ctx
,
658 enum efc_sm_event_e evt
, void *arg
)
660 struct efc_node_s
*node
= ctx
->app
;
662 efc_node_evt_set(ctx
, evt
, __func__
);
668 efc_node_hold_frames(node
);
672 efc_node_accept_frames(node
);
675 /* wait for any of these attach events and then shutdown */
676 case EFC_EVT_NODE_ATTACH_OK
:
677 node
->attached
= true;
678 node_printf(node
, "Attach evt=%s, proceed to shutdown\n",
679 efc_sm_event_name(evt
));
680 efc_fabric_initiate_shutdown(node
);
683 case EFC_EVT_NODE_ATTACH_FAIL
:
684 node
->attached
= false;
685 node_printf(node
, "Attach evt=%s, proceed to shutdown\n",
686 efc_sm_event_name(evt
));
687 efc_fabric_initiate_shutdown(node
);
690 /* ignore shutdown event as we're already in shutdown path */
691 case EFC_EVT_SHUTDOWN
:
692 node_printf(node
, "Shutdown event received\n");
696 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
705 * @brief Name services node state machine: Wait for an RFTID response event.
708 * Waits for an RFTID response event; if configured for an initiator operation,
709 * a GIDPT name services request is issued.
711 * @param ctx Remote node state machine context.
712 * @param evt Event to process.
713 * @param arg Per event optional argument.
715 * @return Returns NULL.
718 __efc_ns_rftid_wait_rsp(struct efc_sm_ctx_s
*ctx
,
719 enum efc_sm_event_e evt
, void *arg
)
721 struct efc_node_s
*node
= ctx
->app
;
722 struct efc_lport
*efc
= node
->efc
;
724 efc_node_evt_set(ctx
, evt
, __func__
);
729 case EFC_EVT_SRRS_ELS_REQ_OK
:
730 if (efc_node_check_ns_req(ctx
, evt
, arg
, FC_NS_RFT_ID
,
731 __efc_fabric_common
, __func__
)) {
734 efc_assert(node
->els_req_cnt
, NULL
);
736 /* sm: / send RFFID */
737 efc
->tt
.els_send_ct(efc
, node
, FC_NS_RFF_ID
,
738 EFC_FC_ELS_SEND_DEFAULT_TIMEOUT
,
739 EFC_FC_ELS_DEFAULT_RETRIES
);
740 efc_node_transition(node
, __efc_ns_rffid_wait_rsp
, NULL
);
744 * if receive RSCN just ignore,
745 * we haven't sent GID_PT yet (ACC sent by fabctl node)
747 case EFC_EVT_RSCN_RCVD
:
751 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
760 * @brief Fabric node state machine: Wait for RFFID response event.
763 * Waits for an RFFID response event; if configured for an initiator operation,
764 * a GIDPT name services request is issued.
766 * @param ctx Remote node state machine context.
767 * @param evt Event to process.
768 * @param arg Per event optional argument.
770 * @return Returns NULL.
773 __efc_ns_rffid_wait_rsp(struct efc_sm_ctx_s
*ctx
,
774 enum efc_sm_event_e evt
, void *arg
)
776 struct efc_node_s
*node
= ctx
->app
;
777 struct efc_lport
*efc
= node
->efc
;
779 efc_node_evt_set(ctx
, evt
, __func__
);
784 case EFC_EVT_SRRS_ELS_REQ_OK
: {
785 if (efc_node_check_ns_req(ctx
, evt
, arg
, FC_NS_RFF_ID
,
786 __efc_fabric_common
, __func__
)) {
789 efc_assert(node
->els_req_cnt
, NULL
);
791 if (node
->sport
->enable_rscn
) {
792 /* sm: if enable_rscn / send GIDPT */
793 efc
->tt
.els_send_ct(efc
, node
, FC_NS_GID_PT
,
794 EFC_FC_ELS_SEND_DEFAULT_TIMEOUT
,
795 EFC_FC_ELS_DEFAULT_RETRIES
);
797 efc_node_transition(node
, __efc_ns_gidpt_wait_rsp
,
800 /* if 'T' only, we're done, go to idle */
801 efc_node_transition(node
, __efc_ns_idle
, NULL
);
806 * if receive RSCN just ignore,
807 * we haven't sent GID_PT yet (ACC sent by fabctl node)
809 case EFC_EVT_RSCN_RCVD
:
813 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
822 * @brief Name services node state machine: Wait for a GIDPT response.
825 * Wait for a GIDPT response from the name server. Process the FC_IDs that are
826 * reported by creating new remote ports, as needed.
828 * @param ctx Remote node state machine context.
829 * @param evt Event to process.
830 * @param arg Per event optional argument.
832 * @return Returns NULL.
835 __efc_ns_gidpt_wait_rsp(struct efc_sm_ctx_s
*ctx
,
836 enum efc_sm_event_e evt
, void *arg
)
838 struct efc_node_cb_s
*cbdata
= arg
;
839 struct efc_node_s
*node
= ctx
->app
;
841 efc_node_evt_set(ctx
, evt
, __func__
);
846 case EFC_EVT_SRRS_ELS_REQ_OK
: {
847 if (efc_node_check_ns_req(ctx
, evt
, arg
, FC_NS_GID_PT
,
848 __efc_fabric_common
, __func__
)) {
851 efc_assert(node
->els_req_cnt
, NULL
);
853 /* sm: / process GIDPT payload */
854 efc_process_gidpt_payload(node
, cbdata
->els_rsp
.virt
,
855 cbdata
->els_rsp
.len
);
856 efc_node_transition(node
, __efc_ns_idle
, NULL
);
860 case EFC_EVT_SRRS_ELS_REQ_FAIL
: {
861 /* not much we can do; will retry with the next RSCN */
862 node_printf(node
, "GID_PT failed to complete\n");
863 efc_assert(node
->els_req_cnt
, NULL
);
865 efc_node_transition(node
, __efc_ns_idle
, NULL
);
869 /* if receive RSCN here, queue up another discovery processing */
870 case EFC_EVT_RSCN_RCVD
: {
871 node_printf(node
, "RSCN received during GID_PT processing\n");
872 node
->rscn_pending
= true;
877 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
886 * @brief Name services node state machine: Idle state.
889 * Idle. Waiting for RSCN received events
890 * (posted from the fabric controller), and
891 * restarts the GIDPT name services query and processing.
893 * @param ctx Remote node state machine context.
894 * @param evt Event to process.
895 * @param arg Per event optional argument.
897 * @return Returns NULL.
900 __efc_ns_idle(struct efc_sm_ctx_s
*ctx
, enum efc_sm_event_e evt
, void *arg
)
902 struct efc_node_s
*node
= ctx
->app
;
903 struct efc_lport
*efc
= node
->efc
;
905 efc_node_evt_set(ctx
, evt
, __func__
);
911 if (!node
->rscn_pending
)
914 node_printf(node
, "RSCN pending, restart discovery\n");
915 node
->rscn_pending
= false;
919 case EFC_EVT_RSCN_RCVD
: {
920 /* sm: / send GIDPT */
922 * If target RSCN processing is enabled,
923 * and this is target only (not initiator),
924 * and tgt_rscn_delay is non-zero,
925 * then we delay issuing the GID_PT
927 if (efc
->tgt_rscn_delay_msec
!= 0 &&
928 !node
->sport
->enable_ini
&& node
->sport
->enable_tgt
&&
929 enable_target_rscn(efc
)) {
930 efc_node_transition(node
, __efc_ns_gidpt_delay
, NULL
);
932 efc
->tt
.els_send_ct(efc
, node
, FC_NS_GID_PT
,
933 EFC_FC_ELS_SEND_DEFAULT_TIMEOUT
,
934 EFC_FC_ELS_DEFAULT_RETRIES
);
935 efc_node_transition(node
, __efc_ns_gidpt_wait_rsp
,
942 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
950 * @brief Handle GIDPT delay timer callback
953 * Post an EFC_EVT_GIDPT_DEIALY_EXPIRED event to the passed in node.
955 * @param arg Pointer to node.
960 gidpt_delay_timer_cb(struct timer_list
*t
)
962 struct efc_node_s
*node
= from_timer(node
, t
, gidpt_delay_timer
);
964 del_timer(&node
->gidpt_delay_timer
);
966 efc_node_post_event(node
, EFC_EVT_GIDPT_DELAY_EXPIRED
, NULL
);
971 * @brief Name services node state machine: Delayed GIDPT.
974 * Waiting for GIDPT delay to expire before submitting GIDPT to name server.
976 * @param ctx Remote node state machine context.
977 * @param evt Event to process.
978 * @param arg Per event optional argument.
980 * @return Returns NULL.
983 __efc_ns_gidpt_delay(struct efc_sm_ctx_s
*ctx
,
984 enum efc_sm_event_e evt
, void *arg
)
986 struct efc_node_s
*node
= ctx
->app
;
987 struct efc_lport
*efc
= node
->efc
;
989 efc_node_evt_set(ctx
, evt
, __func__
);
994 case EFC_EVT_ENTER
: {
997 efc_assert(efc
->tgt_rscn_delay_msec
!= 0, NULL
);
1000 * Compute the delay time.
1001 * Set to tgt_rscn_delay, if the time since last GIDPT
1002 * is less than tgt_rscn_period, then use tgt_rscn_period.
1004 delay_msec
= efc
->tgt_rscn_delay_msec
;
1005 if ((jiffies_to_msecs(jiffies
) - node
->time_last_gidpt_msec
)
1006 < efc
->tgt_rscn_period_msec
) {
1007 delay_msec
= efc
->tgt_rscn_period_msec
;
1009 timer_setup(&node
->gidpt_delay_timer
, &gidpt_delay_timer_cb
,
1011 mod_timer(&node
->gidpt_delay_timer
,
1012 jiffies
+ msecs_to_jiffies(delay_msec
));
1017 case EFC_EVT_GIDPT_DELAY_EXPIRED
:
1018 node
->time_last_gidpt_msec
= jiffies_to_msecs(jiffies
);
1020 efc
->tt
.els_send_ct(efc
, node
, FC_NS_GID_PT
,
1021 EFC_FC_ELS_SEND_DEFAULT_TIMEOUT
,
1022 EFC_FC_ELS_DEFAULT_RETRIES
);
1023 efc_node_transition(node
, __efc_ns_gidpt_wait_rsp
, NULL
);
1026 case EFC_EVT_RSCN_RCVD
: {
1028 "RSCN received while in GIDPT delay - no action\n");
1033 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1041 * @ingroup fabric_sm
1042 * @brief Fabric controller node state machine: Initial state.
1045 * Issue a PLOGI to a well-known fabric controller address.
1047 * @param ctx Remote node state machine context.
1048 * @param evt Event to process.
1049 * @param arg Per event optional argument.
1051 * @return Returns NULL.
1054 __efc_fabctl_init(struct efc_sm_ctx_s
*ctx
,
1055 enum efc_sm_event_e evt
, void *arg
)
1057 struct efc_node_s
*node
= ctx
->app
;
1058 struct efc_lport
*efc
= node
->efc
;
1064 /* no need to login to fabric controller, just send SCR */
1065 efc
->tt
.els_send(efc
, node
, ELS_SCR
,
1066 EFC_FC_FLOGI_TIMEOUT_SEC
,
1067 EFC_FC_ELS_DEFAULT_RETRIES
);
1068 efc_node_transition(node
, __efc_fabctl_wait_scr_rsp
, NULL
);
1071 case EFC_EVT_NODE_ATTACH_OK
:
1072 node
->attached
= true;
1076 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1084 * @ingroup fabric_sm
1085 * @brief Fabric controller node state machine: Wait for a node attach request
1089 * Wait for a node attach to complete. If successful, issue an SCR
1090 * to the fabric controller, subscribing to all RSCN.
1092 * @param ctx Remote node state machine context.
1093 * @param evt Event to process.
1094 * @param arg Per event optional argument.
1096 * @return Returns NULL.
1100 __efc_fabctl_wait_node_attach(struct efc_sm_ctx_s
*ctx
,
1101 enum efc_sm_event_e evt
, void *arg
)
1103 struct efc_node_s
*node
= ctx
->app
;
1104 struct efc_lport
*efc
= node
->efc
;
1106 efc_node_evt_set(ctx
, evt
, __func__
);
1112 efc_node_hold_frames(node
);
1116 efc_node_accept_frames(node
);
1119 case EFC_EVT_NODE_ATTACH_OK
:
1120 node
->attached
= true;
1121 /* sm: / send SCR */
1122 efc
->tt
.els_send(efc
, node
, ELS_SCR
,
1123 EFC_FC_ELS_SEND_DEFAULT_TIMEOUT
,
1124 EFC_FC_ELS_DEFAULT_RETRIES
);
1125 efc_node_transition(node
, __efc_fabctl_wait_scr_rsp
, NULL
);
1128 case EFC_EVT_NODE_ATTACH_FAIL
:
1129 /* node attach failed, shutdown the node */
1130 node
->attached
= false;
1131 node_printf(node
, "Node attach failed\n");
1132 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
1133 efc_fabric_initiate_shutdown(node
);
1136 case EFC_EVT_SHUTDOWN
:
1137 node_printf(node
, "Shutdown event received\n");
1138 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
1139 efc_node_transition(node
,
1140 __efc_fabric_wait_attach_evt_shutdown
,
1145 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1153 * @ingroup fabric_sm
1154 * @brief Fabric controller node state machine:
1155 * Wait for an SCR response from the
1156 * fabric controller.
1159 * Waits for an SCR response from the fabric controller.
1161 * @param ctx Remote node state machine context.
1162 * @param evt Event to process.
1163 * @param arg Per event optional argument.
1165 * @return Returns NULL.
1168 __efc_fabctl_wait_scr_rsp(struct efc_sm_ctx_s
*ctx
,
1169 enum efc_sm_event_e evt
, void *arg
)
1171 struct efc_node_s
*node
= ctx
->app
;
1173 efc_node_evt_set(ctx
, evt
, __func__
);
1178 case EFC_EVT_SRRS_ELS_REQ_OK
:
1179 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_SCR
,
1180 __efc_fabric_common
, __func__
)) {
1183 efc_assert(node
->els_req_cnt
, NULL
);
1184 node
->els_req_cnt
--;
1185 efc_node_transition(node
, __efc_fabctl_ready
, NULL
);
1189 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1197 * @ingroup fabric_sm
1198 * @brief Fabric controller node state machine: Ready.
1201 * In this state, the fabric controller sends a RSCN, which is received
1202 * by this node and is forwarded to the name services node object; and
1203 * the RSCN LS_ACC is sent.
1205 * @param ctx Remote node state machine context.
1206 * @param evt Event to process.
1207 * @param arg Per event optional argument.
1209 * @return Returns NULL.
1213 __efc_fabctl_ready(struct efc_sm_ctx_s
*ctx
,
1214 enum efc_sm_event_e evt
, void *arg
)
1216 struct efc_node_cb_s
*cbdata
= arg
;
1217 struct efc_node_s
*node
= ctx
->app
;
1218 struct efc_lport
*efc
= node
->efc
;
1220 efc_node_evt_set(ctx
, evt
, __func__
);
1225 case EFC_EVT_RSCN_RCVD
: {
1226 struct fc_frame_header
*hdr
= cbdata
->header
->dma
.virt
;
1229 * sm: / process RSCN (forward to name services node),
1232 efc_process_rscn(node
, cbdata
);
1233 efc
->tt
.els_send_resp(efc
, node
, ELS_LS_ACC
,
1234 be16_to_cpu(hdr
->fh_ox_id
));
1235 efc_node_transition(node
, __efc_fabctl_wait_ls_acc_cmpl
,
1241 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1249 * @ingroup fabric_sm
1250 * @brief Fabric controller node state machine: Wait for LS_ACC.
1253 * Waits for the LS_ACC from the fabric controller.
1255 * @param ctx Remote node state machine context.
1256 * @param evt Event to process.
1257 * @param arg Per event optional argument.
1259 * @return Returns NULL.
1263 __efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx_s
*ctx
,
1264 enum efc_sm_event_e evt
, void *arg
)
1266 struct efc_node_s
*node
= ctx
->app
;
1268 efc_node_evt_set(ctx
, evt
, __func__
);
1274 efc_node_hold_frames(node
);
1278 efc_node_accept_frames(node
);
1281 case EFC_EVT_SRRS_ELS_CMPL_OK
:
1282 efc_assert(node
->els_cmpl_cnt
, NULL
);
1283 node
->els_cmpl_cnt
--;
1284 efc_node_transition(node
, __efc_fabctl_ready
, NULL
);
1288 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1296 * @ingroup fabric_sm
1297 * @brief Initiate fabric node shutdown.
1299 * @param node Node for which shutdown is initiated.
1301 * @return Returns None.
1305 efc_fabric_initiate_shutdown(struct efc_node_s
*node
)
1308 struct efc_lport
*efc
= node
->efc
;
1310 efc
->tt
.scsi_io_alloc_disable(efc
, node
);
1312 if (node
->attached
) {
1313 /* issue hw node free; don't care if succeeds right away
1314 * or sometime later, will check node->attached later in
1317 rc
= efc
->tt
.hw_node_detach(efc
, &node
->rnode
);
1318 if (rc
!= EFC_HW_RTN_SUCCESS
&&
1319 rc
!= EFC_HW_RTN_SUCCESS_SYNC
) {
1320 node_printf(node
, "Failed freeing HW node, rc=%d\n",
1325 * node has either been detached or is in the process of being detached,
1326 * call common node's initiate cleanup function
1328 efc_node_initiate_cleanup(node
);
1332 * @ingroup fabric_sm
1333 * @brief Fabric node state machine: Handle the common fabric node events.
1335 * @param funcname Function name text.
1336 * @param ctx Remote node state machine context.
1337 * @param evt Event to process.
1338 * @param arg Per event optional argument.
1340 * @return Returns NULL.
1344 __efc_fabric_common(const char *funcname
, struct efc_sm_ctx_s
*ctx
,
1345 enum efc_sm_event_e evt
, void *arg
)
1347 struct efc_node_s
*node
= NULL
;
1349 efc_assert(ctx
, NULL
);
1350 efc_assert(ctx
->app
, NULL
);
1354 case EFC_EVT_DOMAIN_ATTACH_OK
:
1356 case EFC_EVT_SHUTDOWN
:
1357 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
1358 efc_fabric_initiate_shutdown(node
);
1362 /* call default event handler common to all nodes */
1363 __efc_node_common(funcname
, ctx
, evt
, arg
);
1370 * @brief Return TRUE if the remote node is an NPORT.
1373 * Examines the service parameters. Returns TRUE if the node reports itself as
1376 * @param remote_sparms Remote node service parameters.
1378 * @return Returns TRUE if NPORT.
1382 efc_rnode_is_nport(struct fc_plogi_payload_s
*remote_sparms
)
1384 int rc
= be32_to_cpu(remote_sparms
->common_service_parameters
[1]);
1386 return (rc
& (1U << 28)) == 0;
1390 * @brief Return TRUE if the remote node is NPIV capable.
1393 * Examines the service parameters. Returns TRUE if the node reports itself as
1394 * an NPIV feature capable.
1396 * @param remote_sparms Remote node service parameters.
1398 * @return Returns TRUE if NPIV supported..
1401 efc_rnode_is_npiv_capable(struct fc_plogi_payload_s
*remote_sparms
)
1403 int rc
= be32_to_cpu(remote_sparms
->common_service_parameters
[1]);
1405 return (rc
& (1U << 29)) == 0;
1409 * @brief Return the node's WWPN as an uint64_t.
1412 * The WWPN is computed from service parameters, and returned as a uint64_t.
1414 * @param sp Pointer to service parameters.
1416 * @return Returns WWPN.
1421 efc_get_wwpn(struct fc_plogi_payload_s
*sp
)
1425 rc
= ((uint64_t)be32_to_cpu(sp
->port_name_hi
) << 32ll) |
1426 (be32_to_cpu(sp
->port_name_lo
));
1431 * @brief Return TRUE if the remote node is the point-to-point winner.
1434 * Compares WWPNs. Returns TRUE if the remote node's WWPN is numerically
1435 * higher than the local node's WWPN.
1437 * @param sport Pointer to the sport object.
1440 * - 0, if the remote node is the loser.
1441 * - 1, if the remote node is the winner.
1442 * - (-1), if remote node is neither the loser nor the winner
1447 efc_rnode_is_winner(struct efc_sli_port_s
*sport
)
1449 struct fc_plogi_payload_s
*remote_sp
;
1451 u64 local_wwpn
= sport
->wwpn
;
1452 //char prop_buf[32];
1456 (struct fc_plogi_payload_s
*)sport
->domain
->flogi_service_params
;
1457 remote_wwpn
= efc_get_wwpn(remote_sp
);
1459 local_wwpn
^= wwn_bump
;
1461 remote_wwpn
= efc_get_wwpn(remote_sp
);
1463 efc_log_debug(sport
->efc
, "r: %08x %08x\n",
1464 be32_to_cpu(remote_sp
->port_name_hi
),
1465 be32_to_cpu(remote_sp
->port_name_lo
));
1466 efc_log_debug(sport
->efc
, "l: %08x %08x\n",
1467 (u32
)(local_wwpn
>> 32ll), (u32
)local_wwpn
);
1469 if (remote_wwpn
== local_wwpn
) {
1470 efc_log_warn(sport
->efc
,
1471 "WWPN of remote node [%08x %08x] matches local WWPN\n",
1472 (u32
)(local_wwpn
>> 32ll),
1477 return (remote_wwpn
> local_wwpn
);
1482 * @brief Point-to-point state machine: Wait for the domain attach to complete.
1485 * Once the domain attach has completed, a PLOGI is sent (if we're the
1486 * winning point-to-point node).
1488 * @param ctx Remote node state machine context.
1489 * @param evt Event to process.
1490 * @param arg Per event optional argument.
1492 * @return Returns NULL.
1496 __efc_p2p_wait_domain_attach(struct efc_sm_ctx_s
*ctx
,
1497 enum efc_sm_event_e evt
, void *arg
)
1499 struct efc_node_s
*node
= ctx
->app
;
1500 struct efc_lport
*efc
= node
->efc
;
1502 efc_node_evt_set(ctx
, evt
, __func__
);
1508 efc_node_hold_frames(node
);
1512 efc_node_accept_frames(node
);
1515 case EFC_EVT_DOMAIN_ATTACH_OK
: {
1516 struct efc_sli_port_s
*sport
= node
->sport
;
1517 struct efc_node_s
*rnode
;
1520 * this transient node (SID=0 (recv'd FLOGI)
1521 * or DID=fabric (sent FLOGI))
1522 * is the p2p winner, will use a separate node
1523 * to send PLOGI to peer
1525 efc_assert(node
->sport
->p2p_winner
, NULL
);
1527 rnode
= efc_node_find(sport
, node
->sport
->p2p_remote_port_id
);
1530 * the "other" transient p2p node has
1531 * already kicked off the
1532 * new node from which PLOGI is sent
1535 "Node with fc_id x%x already exists\n",
1536 rnode
->rnode
.fc_id
);
1537 efc_assert(rnode
!= node
, NULL
);
1540 * create new node (SID=1, DID=2)
1541 * from which to send PLOGI
1543 rnode
= efc_node_alloc(sport
,
1544 sport
->p2p_remote_port_id
,
1547 efc_log_err(efc
, "node alloc failed\n");
1551 efc_fabric_notify_topology(node
);
1552 /* sm: / allocate p2p remote node */
1553 efc_node_transition(rnode
, __efc_p2p_rnode_init
,
1558 * the transient node (SID=0 or DID=fabric)
1559 * has served its purpose
1561 if (node
->rnode
.fc_id
== 0) {
1563 * if this is the SID=0 node,
1564 * move to the init state in case peer
1565 * has restarted FLOGI discovery and FLOGI is pending
1567 /* don't send PLOGI on efc_d_init entry */
1568 efc_node_init_device(node
, false);
1571 * if this is the DID=fabric node
1572 * (we initiated FLOGI), shut it down
1574 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
1575 efc_fabric_initiate_shutdown(node
);
1581 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1590 * @brief Point-to-point state machine: Remote node initialization state.
1593 * This state is entered after winning point-to-point, and the remote node
1596 * @param ctx Remote node state machine context.
1597 * @param evt Event to process.
1598 * @param arg Per event optional argument.
1600 * @return Returns NULL.
1604 __efc_p2p_rnode_init(struct efc_sm_ctx_s
*ctx
,
1605 enum efc_sm_event_e evt
, void *arg
)
1607 struct efc_node_cb_s
*cbdata
= arg
;
1608 struct efc_node_s
*node
= ctx
->app
;
1609 struct efc_lport
*efc
= node
->efc
;
1611 efc_node_evt_set(ctx
, evt
, __func__
);
1617 /* sm: / send PLOGI */
1618 efc
->tt
.els_send(efc
, node
, ELS_PLOGI
,
1619 EFC_FC_FLOGI_TIMEOUT_SEC
,
1620 EFC_FC_ELS_DEFAULT_RETRIES
);
1621 efc_node_transition(node
, __efc_p2p_wait_plogi_rsp
, NULL
);
1624 case EFC_EVT_ABTS_RCVD
:
1625 /* sm: send BA_ACC */
1626 efc
->tt
.bls_send_acc_hdr(efc
, node
, cbdata
->header
->dma
.virt
);
1630 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1639 * @brief Point-to-point node state machine:
1640 * Wait for the FLOGI accept completion.
1643 * Wait for the FLOGI accept completion.
1645 * @param ctx Remote node state machine context.
1646 * @param evt Event to process.
1647 * @param arg Per event optional argument.
1649 * @return Returns NULL.
1653 __efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx_s
*ctx
,
1654 enum efc_sm_event_e evt
, void *arg
)
1656 struct efc_node_cb_s
*cbdata
= arg
;
1657 struct efc_node_s
*node
= ctx
->app
;
1658 struct efc_lport
*efc
= node
->efc
;
1660 efc_node_evt_set(ctx
, evt
, __func__
);
1666 efc_node_hold_frames(node
);
1670 efc_node_accept_frames(node
);
1673 case EFC_EVT_SRRS_ELS_CMPL_OK
:
1674 efc_assert(node
->els_cmpl_cnt
, NULL
);
1675 node
->els_cmpl_cnt
--;
1677 /* sm: if p2p_winner / domain_attach */
1678 if (node
->sport
->p2p_winner
) {
1679 efc_node_transition(node
,
1680 __efc_p2p_wait_domain_attach
,
1682 if (!node
->sport
->domain
->attached
) {
1683 node_printf(node
, "Domain not attached\n");
1684 efc_domain_attach(node
->sport
->domain
,
1685 node
->sport
->p2p_port_id
);
1687 node_printf(node
, "Domain already attached\n");
1688 efc_node_post_event(node
,
1689 EFC_EVT_DOMAIN_ATTACH_OK
,
1693 /* this node has served its purpose;
1694 * we'll expect a PLOGI on a separate
1695 * node (remote SID=0x1); return this node
1696 * to init state in case peer
1697 * restarts discovery -- it may already
1698 * have (pending frames may exist).
1700 /* don't send PLOGI on efc_d_init entry */
1701 efc_node_init_device(node
, false);
1705 case EFC_EVT_SRRS_ELS_CMPL_FAIL
:
1707 * LS_ACC failed, possibly due to link down;
1708 * shutdown node and wait
1709 * for FLOGI discovery to restart
1711 node_printf(node
, "FLOGI LS_ACC failed, shutting down\n");
1712 efc_assert(node
->els_cmpl_cnt
, NULL
);
1713 node
->els_cmpl_cnt
--;
1714 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
1715 efc_fabric_initiate_shutdown(node
);
1718 case EFC_EVT_ABTS_RCVD
: {
1719 /* sm: / send BA_ACC */
1720 //efc_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
1721 efc
->tt
.bls_send_acc_hdr(efc
, node
,
1722 cbdata
->header
->dma
.virt
);
1727 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1736 * @brief Point-to-point node state machine: Wait for a PLOGI response
1737 * as a point-to-point winner.
1740 * Wait for a PLOGI response from the remote node as a point-to-point winner.
1741 * Submit node attach request to the HW.
1743 * @param ctx Remote node state machine context.
1744 * @param evt Event to process.
1745 * @param arg Per event optional argument.
1747 * @return Returns NULL.
1751 __efc_p2p_wait_plogi_rsp(struct efc_sm_ctx_s
*ctx
,
1752 enum efc_sm_event_e evt
, void *arg
)
1755 struct efc_node_cb_s
*cbdata
= arg
;
1756 struct efc_node_s
*node
= ctx
->app
;
1757 struct efc_lport
*efc
= node
->efc
;
1759 efc_node_evt_set(ctx
, evt
, __func__
);
1764 case EFC_EVT_SRRS_ELS_REQ_OK
: {
1765 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_PLOGI
,
1766 __efc_fabric_common
, __func__
)) {
1769 efc_assert(node
->els_req_cnt
, NULL
);
1770 node
->els_req_cnt
--;
1771 /* sm: / save sparams, efc_node_attach */
1772 efc_node_save_sparms(node
, cbdata
->els_rsp
.virt
);
1773 rc
= efc_node_attach(node
);
1774 efc_node_transition(node
, __efc_p2p_wait_node_attach
, NULL
);
1775 if (rc
== EFC_HW_RTN_SUCCESS_SYNC
)
1776 efc_node_post_event(node
, EFC_EVT_NODE_ATTACH_OK
,
1780 case EFC_EVT_SRRS_ELS_REQ_FAIL
: {
1781 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_PLOGI
,
1782 __efc_fabric_common
, __func__
)) {
1785 node_printf(node
, "PLOGI failed, shutting down\n");
1786 efc_assert(node
->els_req_cnt
, NULL
);
1787 node
->els_req_cnt
--;
1788 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
1789 efc_fabric_initiate_shutdown(node
);
1793 case EFC_EVT_PLOGI_RCVD
: {
1794 struct fc_frame_header
*hdr
= cbdata
->header
->dma
.virt
;
1795 /* if we're in external loopback mode, just send LS_ACC */
1796 if (node
->efc
->external_loopback
) {
1797 efc
->tt
.els_send_resp(efc
, node
, ELS_PLOGI
,
1798 be16_to_cpu(hdr
->fh_ox_id
));
1801 * if this isn't external loopback,
1802 * pass to default handler
1804 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1808 case EFC_EVT_PRLI_RCVD
:
1810 /* sent PLOGI and before completion was seen, received the
1811 * PRLI from the remote node (WCQEs and RCQEs come in on
1812 * different queues and order of processing cannot be assumed)
1813 * Save OXID so PRLI can be sent after the attach and continue
1814 * to wait for PLOGI response
1816 efc_process_prli_payload(node
, cbdata
->payload
->dma
.virt
);
1817 efc_send_ls_acc_after_attach(node
,
1818 cbdata
->header
->dma
.virt
,
1819 EFC_NODE_SEND_LS_ACC_PRLI
);
1820 efc_node_transition(node
, __efc_p2p_wait_plogi_rsp_recvd_prli
,
1824 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1833 * @brief Point-to-point node state machine:
1834 * Waiting on a response for a sent PLOGI.
1837 * State is entered when the point-to-point winner has sent
1838 * a PLOGI and is waiting for a response. Before receiving the
1839 * response, a PRLI was received, implying that the PLOGI was
1842 * @param ctx Remote node state machine context.
1843 * @param evt Event to process.
1844 * @param arg Per event optional argument.
1846 * @return Returns NULL.
1850 __efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx_s
*ctx
,
1851 enum efc_sm_event_e evt
, void *arg
)
1854 struct efc_node_cb_s
*cbdata
= arg
;
1855 struct efc_node_s
*node
= ctx
->app
;
1857 efc_node_evt_set(ctx
, evt
, __func__
);
1864 * Since we've received a PRLI, we have a port login and will
1865 * just need to wait for the PLOGI response to do the node
1866 * attach and then we can send the LS_ACC for the PRLI. If,
1867 * during this time, we receive FCP_CMNDs (which is possible
1868 * since we've already sent a PRLI and our peer may have
1870 * At this time, we are not waiting on any other unsolicited
1871 * frames to continue with the login process. Thus, it will not
1872 * hurt to hold frames here.
1874 efc_node_hold_frames(node
);
1878 efc_node_accept_frames(node
);
1881 case EFC_EVT_SRRS_ELS_REQ_OK
: /* PLOGI response received */
1882 /* Completion from PLOGI sent */
1883 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_PLOGI
,
1884 __efc_fabric_common
, __func__
)) {
1887 efc_assert(node
->els_req_cnt
, NULL
);
1888 node
->els_req_cnt
--;
1889 /* sm: / save sparams, efc_node_attach */
1890 efc_node_save_sparms(node
, cbdata
->els_rsp
.virt
);
1891 rc
= efc_node_attach(node
);
1892 efc_node_transition(node
, __efc_p2p_wait_node_attach
, NULL
);
1893 if (rc
== EFC_HW_RTN_SUCCESS_SYNC
)
1894 efc_node_post_event(node
, EFC_EVT_NODE_ATTACH_OK
,
1898 case EFC_EVT_SRRS_ELS_REQ_FAIL
: /* PLOGI response received */
1899 case EFC_EVT_SRRS_ELS_REQ_RJT
:
1900 /* PLOGI failed, shutdown the node */
1901 if (efc_node_check_els_req(ctx
, evt
, arg
, ELS_PLOGI
,
1902 __efc_fabric_common
, __func__
)) {
1905 efc_assert(node
->els_req_cnt
, NULL
);
1906 node
->els_req_cnt
--;
1907 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
1908 efc_fabric_initiate_shutdown(node
);
1912 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
1921 * @brief Point-to-point node state machine:
1922 * Wait for a point-to-point node attach
1926 * Waits for the point-to-point node attach to complete.
1928 * @param ctx Remote node state machine context.
1929 * @param evt Event to process.
1930 * @param arg Per event optional argument.
1932 * @return Returns NULL.
1936 __efc_p2p_wait_node_attach(struct efc_sm_ctx_s
*ctx
,
1937 enum efc_sm_event_e evt
, void *arg
)
1939 struct efc_node_cb_s
*cbdata
= arg
;
1940 struct efc_node_s
*node
= ctx
->app
;
1942 efc_node_evt_set(ctx
, evt
, __func__
);
1948 efc_node_hold_frames(node
);
1952 efc_node_accept_frames(node
);
1955 case EFC_EVT_NODE_ATTACH_OK
:
1956 node
->attached
= true;
1957 switch (node
->send_ls_acc
) {
1958 case EFC_NODE_SEND_LS_ACC_PRLI
: {
1959 efc_d_send_prli_rsp(node
->ls_acc_io
,
1961 node
->send_ls_acc
= EFC_NODE_SEND_LS_ACC_NONE
;
1962 node
->ls_acc_io
= NULL
;
1965 case EFC_NODE_SEND_LS_ACC_PLOGI
: /* Can't happen in P2P */
1966 case EFC_NODE_SEND_LS_ACC_NONE
:
1968 /* Normal case for I */
1969 /* sm: send_plogi_acc is not set / send PLOGI acc */
1970 efc_node_transition(node
, __efc_d_port_logged_in
,
1976 case EFC_EVT_NODE_ATTACH_FAIL
:
1977 /* node attach failed, shutdown the node */
1978 node
->attached
= false;
1979 node_printf(node
, "Node attach failed\n");
1980 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
1981 efc_fabric_initiate_shutdown(node
);
1984 case EFC_EVT_SHUTDOWN
:
1985 node_printf(node
, "%s received\n", efc_sm_event_name(evt
));
1986 node
->shutdown_reason
= EFC_NODE_SHUTDOWN_DEFAULT
;
1987 efc_node_transition(node
,
1988 __efc_fabric_wait_attach_evt_shutdown
,
1991 case EFC_EVT_PRLI_RCVD
:
1992 node_printf(node
, "%s: PRLI received before node is attached\n",
1993 efc_sm_event_name(evt
));
1994 efc_process_prli_payload(node
, cbdata
->payload
->dma
.virt
);
1995 efc_send_ls_acc_after_attach(node
,
1996 cbdata
->header
->dma
.virt
,
1997 EFC_NODE_SEND_LS_ACC_PRLI
);
2001 __efc_fabric_common(__func__
, ctx
, evt
, arg
);
2009 * @brief Start up the name services node.
2012 * Allocates and starts up the name services node.
2014 * @param sport Pointer to the sport structure.
2016 * @return Returns 0 on success, or a negative error value on failure.
2020 efc_start_ns_node(struct efc_sli_port_s
*sport
)
2022 struct efc_node_s
*ns
;
2024 /* Instantiate a name services node */
2025 ns
= efc_node_find(sport
, FC_FID_DIR_SERV
);
2027 ns
= efc_node_alloc(sport
, FC_FID_DIR_SERV
, false, false);
2032 * for found ns, should we be transitioning from here?
2033 * breaks transition only
2034 * 1. from within state machine or
2037 if (ns
->efc
->nodedb_mask
& EFC_NODEDB_PAUSE_NAMESERVER
)
2038 efc_node_pause(ns
, __efc_ns_init
);
2040 efc_node_transition(ns
, __efc_ns_init
, NULL
);
2045 * @brief Start up the fabric controller node.
2048 * Allocates and starts up the fabric controller node.
2050 * @param sport Pointer to the sport structure.
2052 * @return Returns 0 on success, or a negative error value on failure.
2056 efc_start_fabctl_node(struct efc_sli_port_s
*sport
)
2058 struct efc_node_s
*fabctl
;
2060 fabctl
= efc_node_find(sport
, FC_FID_FCTRL
);
2062 fabctl
= efc_node_alloc(sport
, FC_FID_FCTRL
,
2068 * for found ns, should we be transitioning from here?
2069 * breaks transition only
2070 * 1. from within state machine or
2073 efc_node_transition(fabctl
, __efc_fabctl_init
, NULL
);
2078 * @brief Process the GIDPT payload.
2081 * The GIDPT payload is parsed, and new nodes are created, as needed.
2083 * @param node Pointer to the node structure.
2084 * @param gidpt Pointer to the GIDPT payload.
2085 * @param gidpt_len Payload length
2087 * @return Returns 0 on success, or a negative error value on failure.
2091 efc_process_gidpt_payload(struct efc_node_s
*node
,
2092 struct fcct_gidpt_acc_s
*gidpt
, u32 gidpt_len
)
2096 struct efc_node_s
*newnode
;
2097 struct efc_sli_port_s
*sport
= node
->sport
;
2098 struct efc_lport
*efc
= node
->efc
;
2101 struct efc_node_s
*n
;
2102 struct efc_node_s
**active_nodes
;
2106 residual
= be16_to_cpu(gidpt
->hdr
.max_residual_size
);
2109 efc_log_debug(node
->efc
, "residual is %u words\n", residual
);
2111 if (be16_to_cpu(gidpt
->hdr
.cmd_rsp_code
) == FCCT_HDR_CMDRSP_REJECT
) {
2113 "GIDPT request failed: rsn x%x rsn_expl x%x\n",
2114 gidpt
->hdr
.reason_code
,
2115 gidpt
->hdr
.reason_code_explanation
);
2119 portlist_count
= (gidpt_len
- sizeof(struct fcct_iu_header_s
)) /
2120 sizeof(gidpt
->port_list
);
2122 /* Count the number of nodes */
2124 efc_sport_lock(sport
);
2125 list_for_each_entry(n
, &sport
->node_list
, list_entry
) {
2129 /* Allocate a buffer for all nodes */
2130 active_nodes
= kzalloc(port_count
* sizeof(*active_nodes
), GFP_ATOMIC
);
2131 if (!active_nodes
) {
2132 node_printf(node
, "efc_malloc failed\n");
2133 efc_sport_unlock(sport
);
2137 /* Fill buffer with fc_id of active nodes */
2139 list_for_each_entry(n
, &sport
->node_list
, list_entry
) {
2140 port_id
= n
->rnode
.fc_id
;
2144 case FC_FID_DIR_SERV
:
2147 if (!FC_ADDR_IS_DOMAIN_CTRL(port_id
))
2148 active_nodes
[i
++] = n
;
2153 /* update the active nodes buffer */
2154 for (i
= 0; i
< portlist_count
; i
++) {
2155 port_id
= fc_be24toh(gidpt
->port_list
[i
].port_id
);
2157 for (j
= 0; j
< port_count
; j
++) {
2158 if (active_nodes
[j
] &&
2159 port_id
== active_nodes
[j
]->rnode
.fc_id
) {
2160 active_nodes
[j
] = NULL
;
2164 if (gidpt
->port_list
[i
].ctl
& FCCT_GID_PT_LAST_ID
)
2168 /* Those remaining in the active_nodes[] are now gone ! */
2169 for (i
= 0; i
< port_count
; i
++) {
2171 * if we're an initiator and the remote node
2172 * is a target, then post the node missing event.
2173 * if we're target and we have enabled
2174 * target RSCN, then post the node missing event.
2176 if (active_nodes
[i
]) {
2177 if ((node
->sport
->enable_ini
&&
2178 active_nodes
[i
]->targ
) ||
2179 (node
->sport
->enable_tgt
&&
2180 enable_target_rscn(efc
))) {
2181 efc_node_post_event(active_nodes
[i
],
2182 EFC_EVT_NODE_MISSING
,
2186 "GID_PT: skipping non-tgt port_id x%06x\n",
2187 active_nodes
[i
]->rnode
.fc_id
);
2191 kfree(active_nodes
);
2193 for (i
= 0; i
< portlist_count
; i
++) {
2194 u32 port_id
= fc_be24toh(gidpt
->port_list
[i
].port_id
);
2196 /* node_printf(node, "GID_PT: port_id x%06x\n", port_id); */
2198 /* Don't create node for ourselves */
2199 if (port_id
!= node
->rnode
.sport
->fc_id
) {
2200 newnode
= efc_node_find(sport
, port_id
);
2202 if (node
->sport
->enable_ini
) {
2203 newnode
= efc_node_alloc(sport
,
2209 "efc_node_alloc() failed\n");
2210 efc_sport_unlock(sport
);
2214 * send PLOGI automatically
2217 efc_node_init_device(newnode
, true);
2222 if (node
->sport
->enable_ini
&& newnode
->targ
) {
2223 efc_node_post_event(newnode
,
2224 EFC_EVT_NODE_REFOUND
,
2228 * original code sends ADISC,
2229 * has notion of "refound"
2233 if (gidpt
->port_list
[i
].ctl
& FCCT_GID_PT_LAST_ID
)
2236 efc_sport_unlock(sport
);
2241 * @brief Set up the domain point-to-point parameters.
2244 * The remote node service parameters are examined, and various point-to-point
2245 * variables are set.
2247 * @param sport Pointer to the sport object.
2249 * @return Returns 0 on success, or a negative error value on failure.
2253 efc_p2p_setup(struct efc_sli_port_s
*sport
)
2255 struct efc_lport
*efc
= sport
->efc
;
2258 rnode_winner
= efc_rnode_is_winner(sport
);
2260 /* set sport flags to indicate p2p "winner" */
2261 if (rnode_winner
== 1) {
2262 sport
->p2p_remote_port_id
= 0;
2263 sport
->p2p_port_id
= 0;
2264 sport
->p2p_winner
= false;
2265 } else if (rnode_winner
== 0) {
2266 sport
->p2p_remote_port_id
= 2;
2267 sport
->p2p_port_id
= 1;
2268 sport
->p2p_winner
= true;
2270 /* no winner; only okay if external loopback enabled */
2271 if (sport
->efc
->external_loopback
) {
2273 * External loopback mode enabled;
2274 * local sport and remote node
2275 * will be registered with an NPortID = 1;
2278 "External loopback mode enabled\n");
2279 sport
->p2p_remote_port_id
= 1;
2280 sport
->p2p_port_id
= 1;
2281 sport
->p2p_winner
= true;
2284 "failed to determine p2p winner\n");
2285 return rnode_winner
;
2292 * @brief Process the FABCTL node RSCN.
2294 * <h3 class="desc">Description</h3>
2295 * Processes the FABCTL node RSCN payload,
2296 * simply passes the event to the name server.
2298 * @param node Pointer to the node structure.
2299 * @param cbdata Callback data to pass forward.
2305 efc_process_rscn(struct efc_node_s
*node
, struct efc_node_cb_s
*cbdata
)
2307 struct efc_lport
*efc
= node
->efc
;
2308 struct efc_sli_port_s
*sport
= node
->sport
;
2309 struct efc_node_s
*ns
;
2311 /* Forward this event to the name-services node */
2312 ns
= efc_node_find(sport
, FC_FID_DIR_SERV
);
2314 efc_node_post_event(ns
, EFC_EVT_RSCN_RCVD
, cbdata
);
2316 efc_log_warn(efc
, "can't find name server node\n");