Update supported PCI ids of efct driver.
[efct-Emulex_FC_Target.git] / libefc / efc_fabric.c
blob87023137cf9bbf487a392755449596e904b8d081
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * *
7 * This program is free software; you can redistribute it and/or *
8 * modify it under the terms of version 2 of the GNU General *
9 * Public License as published by the Free Software Foundation. *
10 * This program is distributed in the hope that it will be useful. *
11 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
12 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
13 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
14 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
15 * TO BE LEGALLY INVALID. See the GNU General Public License for *
16 * more details, a copy of which can be found in the file COPYING *
17 * included with this package. *
18 ********************************************************************/
21 * This file implements remote node state machines for:
22 * - Fabric logins.
23 * - Fabric controller events.
24 * - Name/directory services interaction.
25 * - Point-to-point logins.
29 * fabric_sm Node State Machine: Fabric States
30 * ns_sm Node State Machine: Name/Directory Services States
31 * p2p_sm Node State Machine: Point-to-Point Node States
34 #include "efc.h"
35 #include "efc_fabric.h"
36 #include "efc_device.h"
/* Forward declarations for the fabric, name-services and fabric-controller
 * node state-machine helpers defined later in this file.
 */
static void efc_fabric_initiate_shutdown(struct efc_node_s *node);
static void *__efc_fabric_common(const char *funcname,
				 struct efc_sm_ctx_s *ctx,
				 enum efc_sm_event_e evt, void *arg);
static int efc_start_ns_node(struct efc_sli_port_s *sport);
static int efc_start_fabctl_node(struct efc_sli_port_s *sport);
static int efc_process_gidpt_payload(struct efc_node_s *node,
				     struct fcct_gidpt_acc_s *gidpt,
				     u32 gidpt_len);
static void efc_process_rscn(struct efc_node_s *node,
			     struct efc_node_cb_s *cbdata);
static uint64_t efc_get_wwpn(struct fc_plogi_payload_s *sp);
static void gidpt_delay_timer_cb(struct timer_list *t);
52 /**
53 * @ingroup fabric_sm
54 * @brief Fabric node state machine: Initial state.
56 * @par Description
57 * Send an FLOGI to a well-known fabric.
59 * @param ctx Remote node sm context.
60 * @param evt Event to process.
61 * @param arg Per event optional argument.
63 * @return Returns NULL.
65 void *
66 __efc_fabric_init(struct efc_sm_ctx_s *ctx, enum efc_sm_event_e evt,
67 void *arg)
69 struct efc_node_s *node = ctx->app;
70 struct efc_lport *efc = node->efc;
72 efc_node_evt_set(ctx, evt, __func__);
74 node_sm_trace();
76 switch (evt) {
77 case EFC_EVT_REENTER: /* not sure why we're getting these ... */
78 efc_log_debug(efc, ">>> reenter !!\n");
79 /* fall through */
80 case EFC_EVT_ENTER:
81 /* sm: / send FLOGI */
82 efc->tt.els_send(efc, node, ELS_FLOGI,
83 EFC_FC_FLOGI_TIMEOUT_SEC,
84 EFC_FC_ELS_DEFAULT_RETRIES);
85 efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
86 break;
88 default:
89 __efc_fabric_common(__func__, ctx, evt, arg);
90 break;
93 return NULL;
96 /**
97 * @ingroup fabric_sm
98 * @brief Set sport topology.
100 * @par Description
101 * Set sport topology.
103 * @param node Pointer to the node for which the topology is set.
104 * @param topology Topology to set.
106 * @return Returns NULL.
108 void
109 efc_fabric_set_topology(struct efc_node_s *node,
110 enum efc_sport_topology_e topology)
112 node->sport->topology = topology;
116 * @ingroup fabric_sm
117 * @brief Set sport topology.
119 * @par Description
120 * Nofity sport topology.
122 * @param node Pointer to the node for which the topology is set.
123 * @param topology Topology to set.
125 * @return Returns NULL.
127 void
128 efc_fabric_notify_topology(struct efc_node_s *node)
130 struct efc_node_s *tmp_node;
131 struct efc_node_s *next;
133 enum efc_sport_topology_e topology = node->sport->topology;
135 * now loop through the nodes in the sport
136 * and send topology notification
138 efc_sport_lock(node->sport);
139 list_for_each_entry_safe(tmp_node, next, &node->sport->node_list,
140 list_entry) {
141 if (tmp_node != node) {
142 efc_node_post_event(tmp_node,
143 EFC_EVT_SPORT_TOPOLOGY_NOTIFY,
144 (void *)topology);
147 efc_sport_unlock(node->sport);
/**
 * @ingroup fabric_sm
 * @brief Fabric node state machine: Wait for an FLOGI response.
 *
 * @par Description
 * Wait for an FLOGI response event.  On success, either attaches the
 * domain (F_PORT fabric) or sets up point-to-point; on failure, shuts
 * the sport down.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx_s *ctx,
			    enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_cb_s *cbdata = arg;
	struct efc_node_s *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
					   __efc_fabric_common, __func__)) {
			return NULL;
		}
		efc_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;

		/* Save the fabric's service parameters on the domain */
		memcpy(node->sport->domain->flogi_service_params,
		       cbdata->els_rsp.virt,
		       sizeof(struct fc_plogi_payload_s));

		/* Check to see if the fabric is an F_PORT or and N_PORT */
		if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
			/* sm: if not nport / efc_domain_attach */
			/* ext_status has the fc_id, attach domain */
			if (efc_rnode_is_npiv_capable(cbdata->els_rsp.virt)) {
				efc_log_debug(node->efc,
					      " NPIV is enabled at switch side\n");
				//node->efc->sw_feature_cap |= 1<<10;
			}
			efc_fabric_set_topology(node,
						EFC_SPORT_TOPOLOGY_FABRIC);
			efc_fabric_notify_topology(node);
			efc_assert(!node->sport->domain->attached, NULL);
			efc_domain_attach(node->sport->domain,
					  cbdata->ext_status);
			efc_node_transition(node,
					    __efc_fabric_wait_domain_attach,
					    NULL);
			break;
		}

		/* sm: if nport and p2p_winner / efc_domain_attach */
		efc_fabric_set_topology(node, EFC_SPORT_TOPOLOGY_P2P);
		if (efc_p2p_setup(node->sport)) {
			node_printf(node,
				    "p2p setup failed, shutting down node\n");
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
			break;
		}

		if (node->sport->p2p_winner) {
			efc_node_transition(node,
					    __efc_p2p_wait_domain_attach,
					    NULL);
			if (node->sport->domain->attached &&
			    !node->sport->domain->domain_notify_pend) {
				/*
				 * already attached,
				 * just send ATTACH_OK
				 */
				node_printf(node,
					    "p2p winner, domain already attached\n");
				efc_node_post_event(node,
						    EFC_EVT_DOMAIN_ATTACH_OK,
						    NULL);
			}
		} else {
			/*
			 * peer is p2p winner;
			 * PLOGI will be received on the
			 * remote SID=1 node;
			 * this node has served its purpose
			 */
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
		}
		break;
	}

	case EFC_EVT_ELS_REQ_ABORTED:
	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		struct efc_sli_port_s *sport = node->sport;

		/*
		 * with these errors, we have no recovery,
		 * so shutdown the sport, leave the link
		 * up and the domain ready
		 */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
					   __efc_fabric_common, __func__)) {
			return NULL;
		}
		node_printf(node,
			    "FLOGI failed evt=%s, shutting down sport [%s]\n",
			    efc_sm_event_name(evt), sport->display_name);
		efc_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		efc_sm_post_event(&sport->sm, EFC_EVT_SHUTDOWN, NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		break;
	}

	return NULL;
}
280 * @ingroup fabric_sm
281 * @brief Fabric node state machine: Initial state for a virtual port.
283 * @par Description
284 * State entered when a virtual port is created. Send FDISC.
286 * @param ctx Remote node state machine context.
287 * @param evt Event to process.
288 * @param arg Per event optional argument.
290 * @return Returns NULL.
292 void *
293 __efc_vport_fabric_init(struct efc_sm_ctx_s *ctx,
294 enum efc_sm_event_e evt, void *arg)
296 struct efc_node_s *node = ctx->app;
297 struct efc_lport *efc = node->efc;
299 efc_node_evt_set(ctx, evt, __func__);
301 node_sm_trace();
303 switch (evt) {
304 case EFC_EVT_ENTER:
305 /* sm: / send FDISC */
306 efc->tt.els_send(efc, node, ELS_FDISC,
307 EFC_FC_FLOGI_TIMEOUT_SEC,
308 EFC_FC_ELS_DEFAULT_RETRIES);
310 efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
311 break;
313 default:
314 __efc_fabric_common(__func__, ctx, evt, arg);
315 break;
318 return NULL;
/**
 * @ingroup fabric_sm
 * @brief Fabric node state machine: Wait for an FDISC response
 *
 * @par Description
 * Used for a virtual port. Waits for an FDISC response.
 * If OK, issue a HW port attach.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx_s *ctx,
			    enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_cb_s *cbdata = arg;
	struct efc_node_s *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		/* fc_id is in ext_status */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
					   __efc_fabric_common, __func__)) {
			return NULL;
		}
		efc_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: / efc_sport_attach */
		efc_sport_attach(node->sport, cbdata->ext_status);
		efc_node_transition(node, __efc_fabric_wait_domain_attach,
				    NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		/* no recovery for a failed FDISC: shut the sport down */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
					   __efc_fabric_common, __func__)) {
			return NULL;
		}
		efc_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		efc_log_err(node->efc, "FDISC failed, shutting down sport\n");
		/* sm: / shutdown sport */
		efc_sm_post_event(&node->sport->sm, EFC_EVT_SHUTDOWN, NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		break;
	}

	return NULL;
}
/**
 * @ingroup fabric_sm
 * @brief Fabric node state machine: Wait for a domain/sport attach event.
 *
 * @par Description
 * Waits for a domain/sport attach event.  Frames are held while waiting.
 * On attach, starts the name-services node and, if RSCN is enabled,
 * the fabric-controller node, then idles.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_fabric_wait_domain_attach(struct efc_sm_ctx_s *ctx,
				enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_s *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK:
	case EFC_EVT_SPORT_ATTACH_OK: {
		int rc;

		rc = efc_start_ns_node(node->sport);
		if (rc)
			return NULL;

		/* sm: if enable_ini / start fabctl node */
		/* Instantiate the fabric controller (sends SCR) */
		if (node->sport->enable_rscn) {
			rc = efc_start_fabctl_node(node->sport);
			if (rc)
				return NULL;
		}
		efc_node_transition(node, __efc_fabric_idle, NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
443 * @ingroup fabric_sm
444 * @brief Fabric node state machine: Fabric node is idle.
446 * @par Description
447 * Wait for fabric node events.
449 * @param ctx Remote node state machine context.
450 * @param evt Event to process.
451 * @param arg Per event optional argument.
453 * @return Returns NULL.
455 void *
456 __efc_fabric_idle(struct efc_sm_ctx_s *ctx, enum efc_sm_event_e evt,
457 void *arg)
459 struct efc_node_s *node = ctx->app;
461 efc_node_evt_set(ctx, evt, __func__);
463 node_sm_trace();
465 switch (evt) {
466 case EFC_EVT_DOMAIN_ATTACH_OK:
467 break;
468 default:
469 __efc_fabric_common(__func__, ctx, evt, arg);
470 return NULL;
473 return NULL;
477 * @ingroup ns_sm
478 * @brief Name services node state machine: Initialize.
480 * @par Description
481 * A PLOGI is sent to the well-known name/directory services node.
483 * @param ctx Remote node state machine context.
484 * @param evt Event to process.
485 * @param arg Per event optional argument.
487 * @return Returns NULL.
489 void *
490 __efc_ns_init(struct efc_sm_ctx_s *ctx, enum efc_sm_event_e evt, void *arg)
492 struct efc_node_s *node = ctx->app;
493 struct efc_lport *efc = node->efc;
495 efc_node_evt_set(ctx, evt, __func__);
497 node_sm_trace();
499 switch (evt) {
500 case EFC_EVT_ENTER:
501 /* sm: / send PLOGI */
502 efc->tt.els_send(efc, node, ELS_PLOGI,
503 EFC_FC_FLOGI_TIMEOUT_SEC,
504 EFC_FC_ELS_DEFAULT_RETRIES);
505 efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
506 break;
507 default:
508 __efc_fabric_common(__func__, ctx, evt, arg);
509 break;
512 return NULL;
/**
 * @ingroup ns_sm
 * @brief Name services node state machine: Wait for a PLOGI response.
 *
 * @par Description
 * Waits for a response from PLOGI to name services node, then issues a
 * node attach request to the HW.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_ns_plogi_wait_rsp(struct efc_sm_ctx_s *ctx,
			enum efc_sm_event_e evt, void *arg)
{
	int rc;
	struct efc_node_cb_s *cbdata = arg;
	struct efc_node_s *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		/* Save service parameters */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return NULL;
		}
		efc_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
		/* synchronous attach completion: post ATTACH_OK ourselves */
		if (rc == EFC_HW_RTN_SUCCESS_SYNC)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_OK,
					    NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
568 * @ingroup ns_sm
569 * @brief Name services node state machine: Wait for a node attach completion.
571 * @par Description
572 * Waits for a node attach completion, then issues an RFTID name services
573 * request.
575 * @param ctx Remote node state machine context.
576 * @param evt Event to process.
577 * @param arg Per event optional argument.
579 * @return Returns NULL.
581 void *
582 __efc_ns_wait_node_attach(struct efc_sm_ctx_s *ctx,
583 enum efc_sm_event_e evt, void *arg)
585 struct efc_node_s *node = ctx->app;
586 struct efc_lport *efc = node->efc;
588 efc_node_evt_set(ctx, evt, __func__);
590 node_sm_trace();
592 switch (evt) {
593 case EFC_EVT_ENTER:
594 efc_node_hold_frames(node);
595 break;
597 case EFC_EVT_EXIT:
598 efc_node_accept_frames(node);
599 break;
601 case EFC_EVT_NODE_ATTACH_OK:
602 node->attached = true;
603 /* sm: / send RFTID */
604 efc->tt.els_send_ct(efc, node, FC_RCTL_ELS,
605 EFC_FC_ELS_SEND_DEFAULT_TIMEOUT,
606 EFC_FC_ELS_DEFAULT_RETRIES);
607 efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
608 break;
610 case EFC_EVT_NODE_ATTACH_FAIL:
611 /* node attach failed, shutdown the node */
612 node->attached = false;
613 node_printf(node, "Node attach failed\n");
614 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
615 efc_fabric_initiate_shutdown(node);
616 break;
618 case EFC_EVT_SHUTDOWN:
619 node_printf(node, "Shutdown event received\n");
620 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
621 efc_node_transition(node,
622 __efc_fabric_wait_attach_evt_shutdown,
623 NULL);
624 break;
627 * if receive RSCN just ignore,
628 * we haven't sent GID_PT yet (ACC sent by fabctl node)
630 case EFC_EVT_RSCN_RCVD:
631 break;
633 default:
634 __efc_fabric_common(__func__, ctx, evt, arg);
635 return NULL;
638 return NULL;
642 * @ingroup ns_sm
643 * @brief Wait for a domain/sport/node attach completion, then
644 * shutdown.
646 * @par Description
647 * Waits for a domain/sport/node attach completion, then shuts
648 * node down.
650 * @param ctx Remote node state machine context.
651 * @param evt Event to process.
652 * @param arg Per event optional argument.
654 * @return Returns NULL.
656 void *
657 __efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx_s *ctx,
658 enum efc_sm_event_e evt, void *arg)
660 struct efc_node_s *node = ctx->app;
662 efc_node_evt_set(ctx, evt, __func__);
664 node_sm_trace();
666 switch (evt) {
667 case EFC_EVT_ENTER:
668 efc_node_hold_frames(node);
669 break;
671 case EFC_EVT_EXIT:
672 efc_node_accept_frames(node);
673 break;
675 /* wait for any of these attach events and then shutdown */
676 case EFC_EVT_NODE_ATTACH_OK:
677 node->attached = true;
678 node_printf(node, "Attach evt=%s, proceed to shutdown\n",
679 efc_sm_event_name(evt));
680 efc_fabric_initiate_shutdown(node);
681 break;
683 case EFC_EVT_NODE_ATTACH_FAIL:
684 node->attached = false;
685 node_printf(node, "Attach evt=%s, proceed to shutdown\n",
686 efc_sm_event_name(evt));
687 efc_fabric_initiate_shutdown(node);
688 break;
690 /* ignore shutdown event as we're already in shutdown path */
691 case EFC_EVT_SHUTDOWN:
692 node_printf(node, "Shutdown event received\n");
693 break;
695 default:
696 __efc_fabric_common(__func__, ctx, evt, arg);
697 return NULL;
700 return NULL;
/**
 * @ingroup ns_sm
 * @brief Name services node state machine: Wait for an RFTID response event.
 *
 * @par Description
 * Waits for an RFTID response event; on success an RFFID name services
 * request is issued.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_ns_rftid_wait_rsp(struct efc_sm_ctx_s *ctx,
			enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_s *node = ctx->app;
	struct efc_lport *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:
		if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
					  __efc_fabric_common, __func__)) {
			return NULL;
		}
		efc_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: / send RFFID */
		efc->tt.els_send_ct(efc, node, FC_NS_RFF_ID,
				    EFC_FC_ELS_SEND_DEFAULT_TIMEOUT,
				    EFC_FC_ELS_DEFAULT_RETRIES);
		efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
		break;

	/*
	 * if receive RSCN just ignore,
	 * we haven't sent GID_PT yet (ACC sent by fabctl node)
	 */
	case EFC_EVT_RSCN_RCVD:
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
 * @ingroup ns_sm
 * @brief Fabric node state machine: Wait for RFFID response event.
 *
 * @par Description
 * Waits for an RFFID response event; if RSCN handling is enabled on the
 * sport, a GIDPT name services request is issued, otherwise the node
 * goes idle.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_ns_rffid_wait_rsp(struct efc_sm_ctx_s *ctx,
			enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_s *node = ctx->app;
	struct efc_lport *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
					  __efc_fabric_common, __func__)) {
			return NULL;
		}
		efc_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		if (node->sport->enable_rscn) {
			/* sm: if enable_rscn / send GIDPT */
			efc->tt.els_send_ct(efc, node, FC_NS_GID_PT,
					    EFC_FC_ELS_SEND_DEFAULT_TIMEOUT,
					    EFC_FC_ELS_DEFAULT_RETRIES);
			efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
					    NULL);
		} else {
			/* if 'T' only, we're done, go to idle */
			efc_node_transition(node, __efc_ns_idle, NULL);
		}
		break;
	}

	/*
	 * if receive RSCN just ignore,
	 * we haven't sent GID_PT yet (ACC sent by fabctl node)
	 */
	case EFC_EVT_RSCN_RCVD:
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
 * @ingroup ns_sm
 * @brief Name services node state machine: Wait for a GIDPT response.
 *
 * @par Description
 * Wait for a GIDPT response from the name server. Process the FC_IDs that are
 * reported by creating new remote ports, as needed.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx_s *ctx,
			enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_cb_s *cbdata = arg;
	struct efc_node_s *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
					  __efc_fabric_common, __func__)) {
			return NULL;
		}
		efc_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		/* sm: / process GIDPT payload */
		efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
					  cbdata->els_rsp.len);
		efc_node_transition(node, __efc_ns_idle, NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		/* not much we can do; will retry with the next RSCN */
		node_printf(node, "GID_PT failed to complete\n");
		efc_assert(node->els_req_cnt, NULL);
		node->els_req_cnt--;
		efc_node_transition(node, __efc_ns_idle, NULL);
		break;
	}

	/* if receive RSCN here, queue up another discovery processing */
	case EFC_EVT_RSCN_RCVD: {
		node_printf(node, "RSCN received during GID_PT processing\n");
		node->rscn_pending = true;
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
/**
 * @ingroup ns_sm
 * @brief Name services node state machine: Idle state.
 *
 * @par Description
 * Idle. Waiting for RSCN received events
 * (posted from the fabric controller), and
 * restarts the GIDPT name services query and processing.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_ns_idle(struct efc_sm_ctx_s *ctx, enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_s *node = ctx->app;
	struct efc_lport *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* on entry, only restart discovery if an RSCN arrived
		 * while a previous GID_PT was still in flight
		 */
		if (!node->rscn_pending)
			break;

		node_printf(node, "RSCN pending, restart discovery\n");
		node->rscn_pending = false;
		/* fall through */

	case EFC_EVT_RSCN_RCVD: {
		/* sm: / send GIDPT */
		/*
		 * If target RSCN processing is enabled,
		 * and this is target only (not initiator),
		 * and tgt_rscn_delay is non-zero,
		 * then we delay issuing the GID_PT
		 */
		if (efc->tgt_rscn_delay_msec != 0 &&
		    !node->sport->enable_ini && node->sport->enable_tgt &&
		    enable_target_rscn(efc)) {
			efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
		} else {
			efc->tt.els_send_ct(efc, node, FC_NS_GID_PT,
					    EFC_FC_ELS_SEND_DEFAULT_TIMEOUT,
					    EFC_FC_ELS_DEFAULT_RETRIES);
			efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
					    NULL);
		}
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		break;
	}

	return NULL;
}
950 * @brief Handle GIDPT delay timer callback
952 * @par Description
953 * Post an EFC_EVT_GIDPT_DEIALY_EXPIRED event to the passed in node.
955 * @param arg Pointer to node.
957 * @return None.
959 static void
960 gidpt_delay_timer_cb(struct timer_list *t)
962 struct efc_node_s *node = from_timer(node, t, gidpt_delay_timer);
964 del_timer(&node->gidpt_delay_timer);
966 efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
/**
 * @ingroup ns_sm
 * @brief Name services node state machine: Delayed GIDPT.
 *
 * @par Description
 * Waiting for GIDPT delay to expire before submitting GIDPT to name server.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_ns_gidpt_delay(struct efc_sm_ctx_s *ctx,
		     enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_s *node = ctx->app;
	struct efc_lport *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER: {
		time_t delay_msec;

		efc_assert(efc->tgt_rscn_delay_msec != 0, NULL);

		/*
		 * Compute the delay time.
		 * Set to tgt_rscn_delay, if the time since last GIDPT
		 * is less than tgt_rscn_period, then use tgt_rscn_period.
		 */
		delay_msec = efc->tgt_rscn_delay_msec;
		if ((jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec)
		    < efc->tgt_rscn_period_msec) {
			delay_msec = efc->tgt_rscn_period_msec;
		}
		/* NOTE(review): timer flags argument lost in extraction;
		 * restored as 0 per timer_setup() convention - confirm
		 * against original source.
		 */
		timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
			    0);
		mod_timer(&node->gidpt_delay_timer,
			  jiffies + msecs_to_jiffies(delay_msec));
		break;
	}

	case EFC_EVT_GIDPT_DELAY_EXPIRED:
		node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);

		efc->tt.els_send_ct(efc, node, FC_NS_GID_PT,
				    EFC_FC_ELS_SEND_DEFAULT_TIMEOUT,
				    EFC_FC_ELS_DEFAULT_RETRIES);
		efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
		break;

	case EFC_EVT_RSCN_RCVD: {
		efc_log_debug(efc,
			      "RSCN received while in GIDPT delay - no action\n");
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		break;
	}

	return NULL;
}
1041 * @ingroup fabric_sm
1042 * @brief Fabric controller node state machine: Initial state.
1044 * @par Description
1045 * Issue a PLOGI to a well-known fabric controller address.
1047 * @param ctx Remote node state machine context.
1048 * @param evt Event to process.
1049 * @param arg Per event optional argument.
1051 * @return Returns NULL.
1053 void *
1054 __efc_fabctl_init(struct efc_sm_ctx_s *ctx,
1055 enum efc_sm_event_e evt, void *arg)
1057 struct efc_node_s *node = ctx->app;
1058 struct efc_lport *efc = node->efc;
1060 node_sm_trace();
1062 switch (evt) {
1063 case EFC_EVT_ENTER:
1064 /* no need to login to fabric controller, just send SCR */
1065 efc->tt.els_send(efc, node, ELS_SCR,
1066 EFC_FC_FLOGI_TIMEOUT_SEC,
1067 EFC_FC_ELS_DEFAULT_RETRIES);
1068 efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
1069 break;
1071 case EFC_EVT_NODE_ATTACH_OK:
1072 node->attached = true;
1073 break;
1075 default:
1076 __efc_fabric_common(__func__, ctx, evt, arg);
1077 return NULL;
1080 return NULL;
/**
 * @ingroup fabric_sm
 * @brief Fabric controller node state machine: Wait for a node attach request
 * to complete.
 *
 * @par Description
 * Wait for a node attach to complete. If successful, issue an SCR
 * to the fabric controller, subscribing to all RSCN.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_fabctl_wait_node_attach(struct efc_sm_ctx_s *ctx,
			      enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_s *node = ctx->app;
	struct efc_lport *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		/* sm: / send SCR */
		efc->tt.els_send(efc, node, ELS_SCR,
				 EFC_FC_ELS_SEND_DEFAULT_TIMEOUT,
				 EFC_FC_ELS_DEFAULT_RETRIES);
		efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "Node attach failed\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	case EFC_EVT_SHUTDOWN:
		node_printf(node, "Shutdown event received\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node,
				    __efc_fabric_wait_attach_evt_shutdown,
				    NULL);
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
1153 * @ingroup fabric_sm
1154 * @brief Fabric controller node state machine:
1155 * Wait for an SCR response from the
1156 * fabric controller.
1158 * @par Description
1159 * Waits for an SCR response from the fabric controller.
1161 * @param ctx Remote node state machine context.
1162 * @param evt Event to process.
1163 * @param arg Per event optional argument.
1165 * @return Returns NULL.
1167 void *
1168 __efc_fabctl_wait_scr_rsp(struct efc_sm_ctx_s *ctx,
1169 enum efc_sm_event_e evt, void *arg)
1171 struct efc_node_s *node = ctx->app;
1173 efc_node_evt_set(ctx, evt, __func__);
1175 node_sm_trace();
1177 switch (evt) {
1178 case EFC_EVT_SRRS_ELS_REQ_OK:
1179 if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
1180 __efc_fabric_common, __func__)) {
1181 return NULL;
1183 efc_assert(node->els_req_cnt, NULL);
1184 node->els_req_cnt--;
1185 efc_node_transition(node, __efc_fabctl_ready, NULL);
1186 break;
1188 default:
1189 __efc_fabric_common(__func__, ctx, evt, arg);
1190 return NULL;
1193 return NULL;
/**
 * @ingroup fabric_sm
 * @brief Fabric controller node state machine: Ready.
 *
 * @par Description
 * In this state, the fabric controller sends a RSCN, which is received
 * by this node and is forwarded to the name services node object; and
 * the RSCN LS_ACC is sent.
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */
void *
__efc_fabctl_ready(struct efc_sm_ctx_s *ctx,
		   enum efc_sm_event_e evt, void *arg)
{
	struct efc_node_cb_s *cbdata = arg;
	struct efc_node_s *node = ctx->app;
	struct efc_lport *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_RSCN_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		/*
		 * sm: / process RSCN (forward to name services node),
		 * send LS_ACC
		 */
		efc_process_rscn(node, cbdata);
		/* echo the received OX_ID back in the LS_ACC */
		efc->tt.els_send_resp(efc, node, ELS_LS_ACC,
				      be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
				    NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
		return NULL;
	}

	return NULL;
}
1249 * @ingroup fabric_sm
1250 * @brief Fabric controller node state machine: Wait for LS_ACC.
1252 * @par Description
1253 * Waits for the LS_ACC from the fabric controller.
1255 * @param ctx Remote node state machine context.
1256 * @param evt Event to process.
1257 * @param arg Per event optional argument.
1259 * @return Returns NULL.
1262 void *
1263 __efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx_s *ctx,
1264 enum efc_sm_event_e evt, void *arg)
1266 struct efc_node_s *node = ctx->app;
1268 efc_node_evt_set(ctx, evt, __func__);
1270 node_sm_trace();
1272 switch (evt) {
1273 case EFC_EVT_ENTER:
1274 efc_node_hold_frames(node);
1275 break;
1277 case EFC_EVT_EXIT:
1278 efc_node_accept_frames(node);
1279 break;
1281 case EFC_EVT_SRRS_ELS_CMPL_OK:
1282 efc_assert(node->els_cmpl_cnt, NULL);
1283 node->els_cmpl_cnt--;
1284 efc_node_transition(node, __efc_fabctl_ready, NULL);
1285 break;
1287 default:
1288 __efc_fabric_common(__func__, ctx, evt, arg);
1289 return NULL;
1292 return NULL;
1296 * @ingroup fabric_sm
1297 * @brief Initiate fabric node shutdown.
1299 * @param node Node for which shutdown is initiated.
1301 * @return Returns None.
1304 static void
1305 efc_fabric_initiate_shutdown(struct efc_node_s *node)
1307 int rc;
1308 struct efc_lport *efc = node->efc;
1310 efc->tt.scsi_io_alloc_disable(efc, node);
1312 if (node->attached) {
1313 /* issue hw node free; don't care if succeeds right away
1314 * or sometime later, will check node->attached later in
1315 * shutdown process
1317 rc = efc->tt.hw_node_detach(efc, &node->rnode);
1318 if (rc != EFC_HW_RTN_SUCCESS &&
1319 rc != EFC_HW_RTN_SUCCESS_SYNC) {
1320 node_printf(node, "Failed freeing HW node, rc=%d\n",
1321 rc);
1325 * node has either been detached or is in the process of being detached,
1326 * call common node's initiate cleanup function
1328 efc_node_initiate_cleanup(node);
1332 * @ingroup fabric_sm
1333 * @brief Fabric node state machine: Handle the common fabric node events.
1335 * @param funcname Function name text.
1336 * @param ctx Remote node state machine context.
1337 * @param evt Event to process.
1338 * @param arg Per event optional argument.
1340 * @return Returns NULL.
1343 static void *
1344 __efc_fabric_common(const char *funcname, struct efc_sm_ctx_s *ctx,
1345 enum efc_sm_event_e evt, void *arg)
1347 struct efc_node_s *node = NULL;
1349 efc_assert(ctx, NULL);
1350 efc_assert(ctx->app, NULL);
1351 node = ctx->app;
1353 switch (evt) {
1354 case EFC_EVT_DOMAIN_ATTACH_OK:
1355 break;
1356 case EFC_EVT_SHUTDOWN:
1357 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1358 efc_fabric_initiate_shutdown(node);
1359 break;
1361 default:
1362 /* call default event handler common to all nodes */
1363 __efc_node_common(funcname, ctx, evt, arg);
1364 break;
1366 return NULL;
1370 * @brief Return TRUE if the remote node is an NPORT.
1372 * @par Description
1373 * Examines the service parameters. Returns TRUE if the node reports itself as
1374 * an NPORT.
1376 * @param remote_sparms Remote node service parameters.
1378 * @return Returns TRUE if NPORT.
efc_rnode_is_nport(struct fc_plogi_payload_s *remote_sparms)
	/* Word 1 of the common service parameters; bit 28 is presumably the
	 * F_Port/N_Port indicator (set = F_Port) per FC-LS -- TODO confirm
	 * against the spec revision this driver targets.
	 */
	int rc = be32_to_cpu(remote_sparms->common_service_parameters[1]);
	/* Bit 28 clear => remote reports itself as an N_Port. */
	return (rc & (1U << 28)) == 0;
1390 * @brief Return TRUE if the remote node is NPIV capable.
1392 * @par Description
1393 * Examines the service parameters. Returns TRUE if the node reports itself as
1394 * an NPIV feature capable.
1396 * @param remote_sparms Remote node service parameters.
 * @return Returns TRUE if NPIV supported.
efc_rnode_is_npiv_capable(struct fc_plogi_payload_s *remote_sparms)
	/* Word 1 of the common service parameters; bit 29 is presumably the
	 * Multiple N_Port_ID (NPIV) support flag -- TODO confirm.
	 */
	int rc = be32_to_cpu(remote_sparms->common_service_parameters[1]);
	/* NOTE(review): this returns TRUE when bit 29 is CLEAR, which looks
	 * inverted relative to the documented "TRUE if NPIV capable" --
	 * verify the intended polarity against FC-LS and the callers.
	 */
	return (rc & (1U << 29)) == 0;
1409 * @brief Return the node's WWPN as an uint64_t.
1411 * @par Description
1412 * The WWPN is computed from service parameters, and returned as a uint64_t.
1414 * @param sp Pointer to service parameters.
1416 * @return Returns WWPN.
1420 static uint64_t
1421 efc_get_wwpn(struct fc_plogi_payload_s *sp)
1423 u64 rc;
1425 rc = ((uint64_t)be32_to_cpu(sp->port_name_hi) << 32ll) |
1426 (be32_to_cpu(sp->port_name_lo));
1427 return rc;
1431 * @brief Return TRUE if the remote node is the point-to-point winner.
1433 * @par Description
1434 * Compares WWPNs. Returns TRUE if the remote node's WWPN is numerically
1435 * higher than the local node's WWPN.
1437 * @param sport Pointer to the sport object.
1439 * @return
1440 * - 0, if the remote node is the loser.
1441 * - 1, if the remote node is the winner.
1442 * - (-1), if remote node is neither the loser nor the winner
1443 * (WWPNs match)
1446 static int
1447 efc_rnode_is_winner(struct efc_sli_port_s *sport)
1449 struct fc_plogi_payload_s *remote_sp;
1450 u64 remote_wwpn;
1451 u64 local_wwpn = sport->wwpn;
1452 //char prop_buf[32];
1453 u64 wwn_bump = 0;
1455 remote_sp =
1456 (struct fc_plogi_payload_s *)sport->domain->flogi_service_params;
1457 remote_wwpn = efc_get_wwpn(remote_sp);
1459 local_wwpn ^= wwn_bump;
1461 remote_wwpn = efc_get_wwpn(remote_sp);
1463 efc_log_debug(sport->efc, "r: %08x %08x\n",
1464 be32_to_cpu(remote_sp->port_name_hi),
1465 be32_to_cpu(remote_sp->port_name_lo));
1466 efc_log_debug(sport->efc, "l: %08x %08x\n",
1467 (u32)(local_wwpn >> 32ll), (u32)local_wwpn);
1469 if (remote_wwpn == local_wwpn) {
1470 efc_log_warn(sport->efc,
1471 "WWPN of remote node [%08x %08x] matches local WWPN\n",
1472 (u32)(local_wwpn >> 32ll),
1473 (u32)local_wwpn);
1474 return -1;
1477 return (remote_wwpn > local_wwpn);
1481 * @ingroup p2p_sm
1482 * @brief Point-to-point state machine: Wait for the domain attach to complete.
1484 * @par Description
1485 * Once the domain attach has completed, a PLOGI is sent (if we're the
1486 * winning point-to-point node).
1488 * @param ctx Remote node state machine context.
1489 * @param evt Event to process.
1490 * @param arg Per event optional argument.
1492 * @return Returns NULL.
1495 void *
1496 __efc_p2p_wait_domain_attach(struct efc_sm_ctx_s *ctx,
1497 enum efc_sm_event_e evt, void *arg)
1499 struct efc_node_s *node = ctx->app;
1500 struct efc_lport *efc = node->efc;
1502 efc_node_evt_set(ctx, evt, __func__);
1504 node_sm_trace();
1506 switch (evt) {
1507 case EFC_EVT_ENTER:
1508 efc_node_hold_frames(node);
1509 break;
1511 case EFC_EVT_EXIT:
1512 efc_node_accept_frames(node);
1513 break;
1515 case EFC_EVT_DOMAIN_ATTACH_OK: {
1516 struct efc_sli_port_s *sport = node->sport;
1517 struct efc_node_s *rnode;
1520 * this transient node (SID=0 (recv'd FLOGI)
1521 * or DID=fabric (sent FLOGI))
1522 * is the p2p winner, will use a separate node
1523 * to send PLOGI to peer
1525 efc_assert(node->sport->p2p_winner, NULL);
1527 rnode = efc_node_find(sport, node->sport->p2p_remote_port_id);
1528 if (rnode) {
1530 * the "other" transient p2p node has
1531 * already kicked off the
1532 * new node from which PLOGI is sent
1534 node_printf(node,
1535 "Node with fc_id x%x already exists\n",
1536 rnode->rnode.fc_id);
1537 efc_assert(rnode != node, NULL);
1538 } else {
1540 * create new node (SID=1, DID=2)
1541 * from which to send PLOGI
1543 rnode = efc_node_alloc(sport,
1544 sport->p2p_remote_port_id,
1545 false, false);
1546 if (!rnode) {
1547 efc_log_err(efc, "node alloc failed\n");
1548 return NULL;
1551 efc_fabric_notify_topology(node);
1552 /* sm: / allocate p2p remote node */
1553 efc_node_transition(rnode, __efc_p2p_rnode_init,
1554 NULL);
1558 * the transient node (SID=0 or DID=fabric)
1559 * has served its purpose
1561 if (node->rnode.fc_id == 0) {
1563 * if this is the SID=0 node,
1564 * move to the init state in case peer
1565 * has restarted FLOGI discovery and FLOGI is pending
1567 /* don't send PLOGI on efc_d_init entry */
1568 efc_node_init_device(node, false);
1569 } else {
1571 * if this is the DID=fabric node
1572 * (we initiated FLOGI), shut it down
1574 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1575 efc_fabric_initiate_shutdown(node);
1577 break;
1580 default:
1581 __efc_fabric_common(__func__, ctx, evt, arg);
1582 return NULL;
1585 return NULL;
1589 * @ingroup p2p_sm
1590 * @brief Point-to-point state machine: Remote node initialization state.
1592 * @par Description
1593 * This state is entered after winning point-to-point, and the remote node
1594 * is instantiated.
1596 * @param ctx Remote node state machine context.
1597 * @param evt Event to process.
1598 * @param arg Per event optional argument.
1600 * @return Returns NULL.
1603 void *
1604 __efc_p2p_rnode_init(struct efc_sm_ctx_s *ctx,
1605 enum efc_sm_event_e evt, void *arg)
1607 struct efc_node_cb_s *cbdata = arg;
1608 struct efc_node_s *node = ctx->app;
1609 struct efc_lport *efc = node->efc;
1611 efc_node_evt_set(ctx, evt, __func__);
1613 node_sm_trace();
1615 switch (evt) {
1616 case EFC_EVT_ENTER:
1617 /* sm: / send PLOGI */
1618 efc->tt.els_send(efc, node, ELS_PLOGI,
1619 EFC_FC_FLOGI_TIMEOUT_SEC,
1620 EFC_FC_ELS_DEFAULT_RETRIES);
1621 efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
1622 break;
1624 case EFC_EVT_ABTS_RCVD:
1625 /* sm: send BA_ACC */
1626 efc->tt.bls_send_acc_hdr(efc, node, cbdata->header->dma.virt);
1627 break;
1629 default:
1630 __efc_fabric_common(__func__, ctx, evt, arg);
1631 return NULL;
1634 return NULL;
1638 * @ingroup p2p_sm
1639 * @brief Point-to-point node state machine:
1640 * Wait for the FLOGI accept completion.
1642 * @par Description
1643 * Wait for the FLOGI accept completion.
1645 * @param ctx Remote node state machine context.
1646 * @param evt Event to process.
1647 * @param arg Per event optional argument.
1649 * @return Returns NULL.
1652 void *
1653 __efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx_s *ctx,
1654 enum efc_sm_event_e evt, void *arg)
1656 struct efc_node_cb_s *cbdata = arg;
1657 struct efc_node_s *node = ctx->app;
1658 struct efc_lport *efc = node->efc;
1660 efc_node_evt_set(ctx, evt, __func__);
1662 node_sm_trace();
1664 switch (evt) {
1665 case EFC_EVT_ENTER:
1666 efc_node_hold_frames(node);
1667 break;
1669 case EFC_EVT_EXIT:
1670 efc_node_accept_frames(node);
1671 break;
1673 case EFC_EVT_SRRS_ELS_CMPL_OK:
1674 efc_assert(node->els_cmpl_cnt, NULL);
1675 node->els_cmpl_cnt--;
1677 /* sm: if p2p_winner / domain_attach */
1678 if (node->sport->p2p_winner) {
1679 efc_node_transition(node,
1680 __efc_p2p_wait_domain_attach,
1681 NULL);
1682 if (!node->sport->domain->attached) {
1683 node_printf(node, "Domain not attached\n");
1684 efc_domain_attach(node->sport->domain,
1685 node->sport->p2p_port_id);
1686 } else {
1687 node_printf(node, "Domain already attached\n");
1688 efc_node_post_event(node,
1689 EFC_EVT_DOMAIN_ATTACH_OK,
1690 NULL);
1692 } else {
1693 /* this node has served its purpose;
1694 * we'll expect a PLOGI on a separate
1695 * node (remote SID=0x1); return this node
1696 * to init state in case peer
1697 * restarts discovery -- it may already
1698 * have (pending frames may exist).
1700 /* don't send PLOGI on efc_d_init entry */
1701 efc_node_init_device(node, false);
1703 break;
1705 case EFC_EVT_SRRS_ELS_CMPL_FAIL:
1707 * LS_ACC failed, possibly due to link down;
1708 * shutdown node and wait
1709 * for FLOGI discovery to restart
1711 node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
1712 efc_assert(node->els_cmpl_cnt, NULL);
1713 node->els_cmpl_cnt--;
1714 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1715 efc_fabric_initiate_shutdown(node);
1716 break;
1718 case EFC_EVT_ABTS_RCVD: {
1719 /* sm: / send BA_ACC */
1720 //efc_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
1721 efc->tt.bls_send_acc_hdr(efc, node,
1722 cbdata->header->dma.virt);
1723 break;
1726 default:
1727 __efc_fabric_common(__func__, ctx, evt, arg);
1728 return NULL;
1731 return NULL;
1735 * @ingroup p2p_sm
1736 * @brief Point-to-point node state machine: Wait for a PLOGI response
1737 * as a point-to-point winner.
1739 * @par Description
1740 * Wait for a PLOGI response from the remote node as a point-to-point winner.
1741 * Submit node attach request to the HW.
1743 * @param ctx Remote node state machine context.
1744 * @param evt Event to process.
1745 * @param arg Per event optional argument.
1747 * @return Returns NULL.
1750 void *
1751 __efc_p2p_wait_plogi_rsp(struct efc_sm_ctx_s *ctx,
1752 enum efc_sm_event_e evt, void *arg)
1754 int rc;
1755 struct efc_node_cb_s *cbdata = arg;
1756 struct efc_node_s *node = ctx->app;
1757 struct efc_lport *efc = node->efc;
1759 efc_node_evt_set(ctx, evt, __func__);
1761 node_sm_trace();
1763 switch (evt) {
1764 case EFC_EVT_SRRS_ELS_REQ_OK: {
1765 if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
1766 __efc_fabric_common, __func__)) {
1767 return NULL;
1769 efc_assert(node->els_req_cnt, NULL);
1770 node->els_req_cnt--;
1771 /* sm: / save sparams, efc_node_attach */
1772 efc_node_save_sparms(node, cbdata->els_rsp.virt);
1773 rc = efc_node_attach(node);
1774 efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
1775 if (rc == EFC_HW_RTN_SUCCESS_SYNC)
1776 efc_node_post_event(node, EFC_EVT_NODE_ATTACH_OK,
1777 NULL);
1778 break;
1780 case EFC_EVT_SRRS_ELS_REQ_FAIL: {
1781 if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
1782 __efc_fabric_common, __func__)) {
1783 return NULL;
1785 node_printf(node, "PLOGI failed, shutting down\n");
1786 efc_assert(node->els_req_cnt, NULL);
1787 node->els_req_cnt--;
1788 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1789 efc_fabric_initiate_shutdown(node);
1790 break;
1793 case EFC_EVT_PLOGI_RCVD: {
1794 struct fc_frame_header *hdr = cbdata->header->dma.virt;
1795 /* if we're in external loopback mode, just send LS_ACC */
1796 if (node->efc->external_loopback) {
1797 efc->tt.els_send_resp(efc, node, ELS_PLOGI,
1798 be16_to_cpu(hdr->fh_ox_id));
1799 } else {
1801 * if this isn't external loopback,
1802 * pass to default handler
1804 __efc_fabric_common(__func__, ctx, evt, arg);
1806 break;
1808 case EFC_EVT_PRLI_RCVD:
1809 /* I, or I+T */
1810 /* sent PLOGI and before completion was seen, received the
1811 * PRLI from the remote node (WCQEs and RCQEs come in on
1812 * different queues and order of processing cannot be assumed)
1813 * Save OXID so PRLI can be sent after the attach and continue
1814 * to wait for PLOGI response
1816 efc_process_prli_payload(node, cbdata->payload->dma.virt);
1817 efc_send_ls_acc_after_attach(node,
1818 cbdata->header->dma.virt,
1819 EFC_NODE_SEND_LS_ACC_PRLI);
1820 efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
1821 NULL);
1822 break;
1823 default:
1824 __efc_fabric_common(__func__, ctx, evt, arg);
1825 return NULL;
1828 return NULL;
1832 * @ingroup p2p_sm
1833 * @brief Point-to-point node state machine:
1834 * Waiting on a response for a sent PLOGI.
1836 * @par Description
1837 * State is entered when the point-to-point winner has sent
1838 * a PLOGI and is waiting for a response. Before receiving the
1839 * response, a PRLI was received, implying that the PLOGI was
1840 * successful.
1842 * @param ctx Remote node state machine context.
1843 * @param evt Event to process.
1844 * @param arg Per event optional argument.
1846 * @return Returns NULL.
1849 void *
1850 __efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx_s *ctx,
1851 enum efc_sm_event_e evt, void *arg)
1853 int rc;
1854 struct efc_node_cb_s *cbdata = arg;
1855 struct efc_node_s *node = ctx->app;
1857 efc_node_evt_set(ctx, evt, __func__);
1859 node_sm_trace();
1861 switch (evt) {
1862 case EFC_EVT_ENTER:
1864 * Since we've received a PRLI, we have a port login and will
1865 * just need to wait for the PLOGI response to do the node
1866 * attach and then we can send the LS_ACC for the PRLI. If,
1867 * during this time, we receive FCP_CMNDs (which is possible
1868 * since we've already sent a PRLI and our peer may have
1869 * accepted).
1870 * At this time, we are not waiting on any other unsolicited
1871 * frames to continue with the login process. Thus, it will not
1872 * hurt to hold frames here.
1874 efc_node_hold_frames(node);
1875 break;
1877 case EFC_EVT_EXIT:
1878 efc_node_accept_frames(node);
1879 break;
1881 case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
1882 /* Completion from PLOGI sent */
1883 if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
1884 __efc_fabric_common, __func__)) {
1885 return NULL;
1887 efc_assert(node->els_req_cnt, NULL);
1888 node->els_req_cnt--;
1889 /* sm: / save sparams, efc_node_attach */
1890 efc_node_save_sparms(node, cbdata->els_rsp.virt);
1891 rc = efc_node_attach(node);
1892 efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
1893 if (rc == EFC_HW_RTN_SUCCESS_SYNC)
1894 efc_node_post_event(node, EFC_EVT_NODE_ATTACH_OK,
1895 NULL);
1896 break;
1898 case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
1899 case EFC_EVT_SRRS_ELS_REQ_RJT:
1900 /* PLOGI failed, shutdown the node */
1901 if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
1902 __efc_fabric_common, __func__)) {
1903 return NULL;
1905 efc_assert(node->els_req_cnt, NULL);
1906 node->els_req_cnt--;
1907 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1908 efc_fabric_initiate_shutdown(node);
1909 break;
1911 default:
1912 __efc_fabric_common(__func__, ctx, evt, arg);
1913 return NULL;
1916 return NULL;
1920 * @ingroup p2p_sm
1921 * @brief Point-to-point node state machine:
1922 * Wait for a point-to-point node attach
1923 * to complete.
1925 * @par Description
1926 * Waits for the point-to-point node attach to complete.
1928 * @param ctx Remote node state machine context.
1929 * @param evt Event to process.
1930 * @param arg Per event optional argument.
1932 * @return Returns NULL.
1935 void *
1936 __efc_p2p_wait_node_attach(struct efc_sm_ctx_s *ctx,
1937 enum efc_sm_event_e evt, void *arg)
1939 struct efc_node_cb_s *cbdata = arg;
1940 struct efc_node_s *node = ctx->app;
1942 efc_node_evt_set(ctx, evt, __func__);
1944 node_sm_trace();
1946 switch (evt) {
1947 case EFC_EVT_ENTER:
1948 efc_node_hold_frames(node);
1949 break;
1951 case EFC_EVT_EXIT:
1952 efc_node_accept_frames(node);
1953 break;
1955 case EFC_EVT_NODE_ATTACH_OK:
1956 node->attached = true;
1957 switch (node->send_ls_acc) {
1958 case EFC_NODE_SEND_LS_ACC_PRLI: {
1959 efc_d_send_prli_rsp(node->ls_acc_io,
1960 node->ls_acc_oxid);
1961 node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
1962 node->ls_acc_io = NULL;
1963 break;
1965 case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
1966 case EFC_NODE_SEND_LS_ACC_NONE:
1967 default:
1968 /* Normal case for I */
1969 /* sm: send_plogi_acc is not set / send PLOGI acc */
1970 efc_node_transition(node, __efc_d_port_logged_in,
1971 NULL);
1972 break;
1974 break;
1976 case EFC_EVT_NODE_ATTACH_FAIL:
1977 /* node attach failed, shutdown the node */
1978 node->attached = false;
1979 node_printf(node, "Node attach failed\n");
1980 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1981 efc_fabric_initiate_shutdown(node);
1982 break;
1984 case EFC_EVT_SHUTDOWN:
1985 node_printf(node, "%s received\n", efc_sm_event_name(evt));
1986 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1987 efc_node_transition(node,
1988 __efc_fabric_wait_attach_evt_shutdown,
1989 NULL);
1990 break;
1991 case EFC_EVT_PRLI_RCVD:
1992 node_printf(node, "%s: PRLI received before node is attached\n",
1993 efc_sm_event_name(evt));
1994 efc_process_prli_payload(node, cbdata->payload->dma.virt);
1995 efc_send_ls_acc_after_attach(node,
1996 cbdata->header->dma.virt,
1997 EFC_NODE_SEND_LS_ACC_PRLI);
1998 break;
2000 default:
2001 __efc_fabric_common(__func__, ctx, evt, arg);
2002 return NULL;
2005 return NULL;
2009 * @brief Start up the name services node.
2011 * @par Description
2012 * Allocates and starts up the name services node.
2014 * @param sport Pointer to the sport structure.
2016 * @return Returns 0 on success, or a negative error value on failure.
2019 static int
2020 efc_start_ns_node(struct efc_sli_port_s *sport)
2022 struct efc_node_s *ns;
2024 /* Instantiate a name services node */
2025 ns = efc_node_find(sport, FC_FID_DIR_SERV);
2026 if (!ns) {
2027 ns = efc_node_alloc(sport, FC_FID_DIR_SERV, false, false);
2028 if (!ns)
2029 return -1;
2032 * for found ns, should we be transitioning from here?
2033 * breaks transition only
2034 * 1. from within state machine or
2035 * 2. if after alloc
2037 if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
2038 efc_node_pause(ns, __efc_ns_init);
2039 else
2040 efc_node_transition(ns, __efc_ns_init, NULL);
2041 return 0;
2045 * @brief Start up the fabric controller node.
2047 * @par Description
2048 * Allocates and starts up the fabric controller node.
2050 * @param sport Pointer to the sport structure.
2052 * @return Returns 0 on success, or a negative error value on failure.
2055 static int
2056 efc_start_fabctl_node(struct efc_sli_port_s *sport)
2058 struct efc_node_s *fabctl;
2060 fabctl = efc_node_find(sport, FC_FID_FCTRL);
2061 if (!fabctl) {
2062 fabctl = efc_node_alloc(sport, FC_FID_FCTRL,
2063 false, false);
2064 if (!fabctl)
2065 return -1;
2068 * for found ns, should we be transitioning from here?
2069 * breaks transition only
2070 * 1. from within state machine or
2071 * 2. if after alloc
2073 efc_node_transition(fabctl, __efc_fabctl_init, NULL);
2074 return 0;
2078 * @brief Process the GIDPT payload.
2080 * @par Description
2081 * The GIDPT payload is parsed, and new nodes are created, as needed.
2083 * @param node Pointer to the node structure.
2084 * @param gidpt Pointer to the GIDPT payload.
2085 * @param gidpt_len Payload length
2087 * @return Returns 0 on success, or a negative error value on failure.
2090 static int
2091 efc_process_gidpt_payload(struct efc_node_s *node,
2092 struct fcct_gidpt_acc_s *gidpt, u32 gidpt_len)
2094 u32 i;
2095 u32 j;
2096 struct efc_node_s *newnode;
2097 struct efc_sli_port_s *sport = node->sport;
2098 struct efc_lport *efc = node->efc;
2099 u32 port_id;
2100 u32 port_count;
2101 struct efc_node_s *n;
2102 struct efc_node_s **active_nodes;
2103 u32 portlist_count;
2104 int residual;
2106 residual = be16_to_cpu(gidpt->hdr.max_residual_size);
2108 if (residual != 0)
2109 efc_log_debug(node->efc, "residual is %u words\n", residual);
2111 if (be16_to_cpu(gidpt->hdr.cmd_rsp_code) == FCCT_HDR_CMDRSP_REJECT) {
2112 node_printf(node,
2113 "GIDPT request failed: rsn x%x rsn_expl x%x\n",
2114 gidpt->hdr.reason_code,
2115 gidpt->hdr.reason_code_explanation);
2116 return -1;
2119 portlist_count = (gidpt_len - sizeof(struct fcct_iu_header_s)) /
2120 sizeof(gidpt->port_list);
2122 /* Count the number of nodes */
2123 port_count = 0;
2124 efc_sport_lock(sport);
2125 list_for_each_entry(n, &sport->node_list, list_entry) {
2126 port_count++;
2129 /* Allocate a buffer for all nodes */
2130 active_nodes = kzalloc(port_count * sizeof(*active_nodes), GFP_ATOMIC);
2131 if (!active_nodes) {
2132 node_printf(node, "efc_malloc failed\n");
2133 efc_sport_unlock(sport);
2134 return -1;
2137 /* Fill buffer with fc_id of active nodes */
2138 i = 0;
2139 list_for_each_entry(n, &sport->node_list, list_entry) {
2140 port_id = n->rnode.fc_id;
2141 switch (port_id) {
2142 case FC_FID_FLOGI:
2143 case FC_FID_FCTRL:
2144 case FC_FID_DIR_SERV:
2145 break;
2146 default:
2147 if (!FC_ADDR_IS_DOMAIN_CTRL(port_id))
2148 active_nodes[i++] = n;
2149 break;
2153 /* update the active nodes buffer */
2154 for (i = 0; i < portlist_count; i++) {
2155 port_id = fc_be24toh(gidpt->port_list[i].port_id);
2157 for (j = 0; j < port_count; j++) {
2158 if (active_nodes[j] &&
2159 port_id == active_nodes[j]->rnode.fc_id) {
2160 active_nodes[j] = NULL;
2164 if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
2165 break;
2168 /* Those remaining in the active_nodes[] are now gone ! */
2169 for (i = 0; i < port_count; i++) {
2171 * if we're an initiator and the remote node
2172 * is a target, then post the node missing event.
2173 * if we're target and we have enabled
2174 * target RSCN, then post the node missing event.
2176 if (active_nodes[i]) {
2177 if ((node->sport->enable_ini &&
2178 active_nodes[i]->targ) ||
2179 (node->sport->enable_tgt &&
2180 enable_target_rscn(efc))) {
2181 efc_node_post_event(active_nodes[i],
2182 EFC_EVT_NODE_MISSING,
2183 NULL);
2184 } else {
2185 node_printf(node,
2186 "GID_PT: skipping non-tgt port_id x%06x\n",
2187 active_nodes[i]->rnode.fc_id);
2191 kfree(active_nodes);
2193 for (i = 0; i < portlist_count; i++) {
2194 u32 port_id = fc_be24toh(gidpt->port_list[i].port_id);
2196 /* node_printf(node, "GID_PT: port_id x%06x\n", port_id); */
2198 /* Don't create node for ourselves */
2199 if (port_id != node->rnode.sport->fc_id) {
2200 newnode = efc_node_find(sport, port_id);
2201 if (!newnode) {
2202 if (node->sport->enable_ini) {
2203 newnode = efc_node_alloc(sport,
2204 port_id,
2205 false,
2206 false);
2207 if (!newnode) {
2208 efc_log_err(efc,
2209 "efc_node_alloc() failed\n");
2210 efc_sport_unlock(sport);
2211 return -1;
2214 * send PLOGI automatically
2215 * if initiator
2217 efc_node_init_device(newnode, true);
2219 continue;
2222 if (node->sport->enable_ini && newnode->targ) {
2223 efc_node_post_event(newnode,
2224 EFC_EVT_NODE_REFOUND,
2225 NULL);
2228 * original code sends ADISC,
2229 * has notion of "refound"
2233 if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
2234 break;
2236 efc_sport_unlock(sport);
2237 return 0;
2241 * @brief Set up the domain point-to-point parameters.
2243 * @par Description
2244 * The remote node service parameters are examined, and various point-to-point
2245 * variables are set.
2247 * @param sport Pointer to the sport object.
2249 * @return Returns 0 on success, or a negative error value on failure.
2253 efc_p2p_setup(struct efc_sli_port_s *sport)
2255 struct efc_lport *efc = sport->efc;
2256 int rnode_winner;
2258 rnode_winner = efc_rnode_is_winner(sport);
2260 /* set sport flags to indicate p2p "winner" */
2261 if (rnode_winner == 1) {
2262 sport->p2p_remote_port_id = 0;
2263 sport->p2p_port_id = 0;
2264 sport->p2p_winner = false;
2265 } else if (rnode_winner == 0) {
2266 sport->p2p_remote_port_id = 2;
2267 sport->p2p_port_id = 1;
2268 sport->p2p_winner = true;
2269 } else {
2270 /* no winner; only okay if external loopback enabled */
2271 if (sport->efc->external_loopback) {
2273 * External loopback mode enabled;
2274 * local sport and remote node
2275 * will be registered with an NPortID = 1;
2277 efc_log_debug(efc,
2278 "External loopback mode enabled\n");
2279 sport->p2p_remote_port_id = 1;
2280 sport->p2p_port_id = 1;
2281 sport->p2p_winner = true;
2282 } else {
2283 efc_log_warn(efc,
2284 "failed to determine p2p winner\n");
2285 return rnode_winner;
2288 return 0;
2292 * @brief Process the FABCTL node RSCN.
 * @par Description
2295 * Processes the FABCTL node RSCN payload,
2296 * simply passes the event to the name server.
2298 * @param node Pointer to the node structure.
2299 * @param cbdata Callback data to pass forward.
2301 * @return None.
2304 static void
2305 efc_process_rscn(struct efc_node_s *node, struct efc_node_cb_s *cbdata)
2307 struct efc_lport *efc = node->efc;
2308 struct efc_sli_port_s *sport = node->sport;
2309 struct efc_node_s *ns;
2311 /* Forward this event to the name-services node */
2312 ns = efc_node_find(sport, FC_FID_DIR_SERV);
2313 if (ns)
2314 efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
2315 else
2316 efc_log_warn(efc, "can't find name server node\n");