// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * This file implements remote node state machines for:
 * - Fabric logins.
 * - Fabric controller events.
 * - Name/directory services interaction.
 * - Point-to-point logins.
 */

/*
 * fabric_sm Node State Machine: Fabric States
 * ns_sm Node State Machine: Name/Directory Services States
 * p2p_sm Node State Machine: Point-to-Point Node States
 */

#include "efc.h"

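/*
 * Initiate shutdown of a fabric-class node: disable further ELS I/O,
 * detach the HW node if it is attached (the result is re-checked later
 * in the shutdown path), and hand off to the common node cleanup.
 */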
static void
efc_fabric_initiate_shutdown(struct efc_node *node)
{
	struct efc *efc = node->efc;

	node->els_io_enabled = false;

	if (node->attached) {
		int rc;

		/* issue hw node free; don't care if succeeds right away
		 * or sometime later, will check node->attached later in
		 * shutdown process
		 */
		rc = efc_cmd_node_detach(efc, &node->rnode);
		if (rc < 0) {
			node_printf(node, "Failed freeing HW node, rc=%d\n",
				    rc);
		}
	}
	/*
	 * node has either been detached or is in the process of being detached,
	 * call common node's initiate cleanup function
	 */
	efc_node_initiate_cleanup(node);
}

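/*
 * Default event handler shared by the fabric, name services and p2p
 * states: absorb EFC_EVT_DOMAIN_ATTACH_OK, turn EFC_EVT_SHUTDOWN into
 * a fabric shutdown, and forward everything else to the handler that
 * is common to all nodes.
 */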
static void
__efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx,
		    enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = NULL;

	node = ctx->app;

	switch (evt) {
	case EFC_EVT_DOMAIN_ATTACH_OK:
		break;
	case EFC_EVT_SHUTDOWN:
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	default:
		/* call default event handler common to all nodes */
		__efc_node_common(funcname, ctx, evt, arg);
	}
}

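/*
 * fabric_sm initial state: on entry (or re-entry), send an FLOGI and
 * wait for the response.
 */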
void
__efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
		  void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_REENTER:
		efc_log_debug(efc, ">>> reenter !!\n");
		fallthrough;

	case EFC_EVT_ENTER:
		/* send FLOGI */
		efc_send_flogi(node);
		efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
efc_fabric_set_topology(struct efc_node *node,
			enum efc_nport_topology topology)
{
	node->nport->topology = topology;
}

void
efc_fabric_notify_topology(struct efc_node *node)
{
	struct efc_node *tmp_node;
	unsigned long index;

	/*
	 * now loop through the nodes in the nport
	 * and send topology notification
	 */
	xa_for_each(&node->nport->lookup, index, tmp_node) {
		if (tmp_node != node) {
			efc_node_post_event(tmp_node,
					    EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
					    &node->nport->topology);
		}
	}
}

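/*
 * Inspect the common service parameters of the FLOGI LS_ACC: a fabric
 * F_Port sets the FC_SP_FT_FPORT bit; if the bit is clear, the
 * responder is another N_Port and the link is point-to-point.
 */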
static bool efc_rnode_is_nport(struct fc_els_flogi *rsp)
{
	return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT);
}

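/*
 * Wait for the FLOGI response. On success, the service parameters tell
 * us whether we are on a fabric (attach the domain) or directly
 * connected to another N_Port (run the p2p winner negotiation). On
 * rejection, failure or abort there is no recovery: the nport is shut
 * down while the link is left up.
 */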
void
__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;

		memcpy(node->nport->domain->flogi_service_params,
		       cbdata->els_rsp.virt,
		       sizeof(struct fc_els_flogi));

		/* Check to see if the fabric is an F_PORT or an N_PORT */
		if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
			/* sm: if not nport / efc_domain_attach */
			/* ext_status has the fc_id, attach domain */
			efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
			efc_fabric_notify_topology(node);
			WARN_ON(node->nport->domain->attached);
			efc_domain_attach(node->nport->domain,
					  cbdata->ext_status);
			efc_node_transition(node,
					    __efc_fabric_wait_domain_attach,
					    NULL);
			break;
		}

		/* sm: if nport and p2p_winner / efc_domain_attach */
		efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
		if (efc_p2p_setup(node->nport)) {
			node_printf(node,
				    "p2p setup failed, shutting down node\n");
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
			break;
		}

		if (node->nport->p2p_winner) {
			efc_node_transition(node,
					    __efc_p2p_wait_domain_attach,
					    NULL);
			if (node->nport->domain->attached &&
			    !node->nport->domain->domain_notify_pend) {
				/*
				 * already attached,
				 * just send ATTACH_OK
				 */
				node_printf(node,
					    "p2p winner, domain already attached\n");
				efc_node_post_event(node,
						    EFC_EVT_DOMAIN_ATTACH_OK,
						    NULL);
			}
		} else {
			/*
			 * peer is p2p winner;
			 * PLOGI will be received on the
			 * remote SID=1 node;
			 * this node has served its purpose
			 */
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
		}

		break;
	}

	case EFC_EVT_ELS_REQ_ABORTED:
	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		struct efc_nport *nport = node->nport;
		/*
		 * with these errors, we have no recovery,
		 * so shutdown the nport, leave the link
		 * up and the domain ready
		 */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		node_printf(node,
			    "FLOGI failed evt=%s, shutting down nport [%s]\n",
			    efc_sm_event_name(evt), nport->display_name);
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

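/*
 * Initial state for a vport fabric node: a virtual port (e.g. NPIV)
 * logs into the fabric with FDISC rather than FLOGI.
 */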
void
__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* sm: / send FDISC */
		efc_send_fdisc(node);
		efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		/* fc_id is in ext_status */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
					   __efc_fabric_common, __func__)) {
			return;
		}

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / efc_nport_attach */
		efc_nport_attach(node->nport, cbdata->ext_status);
		efc_node_transition(node, __efc_fabric_wait_domain_attach,
				    NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
		/* sm: / shutdown nport */
		efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

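/*
 * Find or allocate the well-known directory/name services node
 * (FC_FID_DIR_SERV) and start its login state machine.
 */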
static int
efc_start_ns_node(struct efc_nport *nport)
{
	struct efc_node *ns;

	/* Instantiate a name services node */
	ns = efc_node_find(nport, FC_FID_DIR_SERV);
	if (!ns) {
		ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false);
		if (!ns)
			return -EIO;
	}
	/*
	 * for a found ns node, should we be transitioning from here?
	 * this breaks the rule that transitions happen only
	 *  1. from within the state machine, or
	 *  2. right after alloc
	 */
	if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
		efc_node_pause(ns, __efc_ns_init);
	else
		efc_node_transition(ns, __efc_ns_init, NULL);
	return 0;
}

static int
efc_start_fabctl_node(struct efc_nport *nport)
{
	struct efc_node *fabctl;

	fabctl = efc_node_find(nport, FC_FID_FCTRL);
	if (!fabctl) {
		fabctl = efc_node_alloc(nport, FC_FID_FCTRL,
					false, false);
		if (!fabctl)
			return -EIO;
	}
	/*
	 * for a found fabctl node, should we be transitioning from here?
	 * this breaks the rule that transitions happen only
	 *  1. from within the state machine, or
	 *  2. right after alloc
	 */
	efc_node_transition(fabctl, __efc_fabctl_init, NULL);
	return 0;
}

void
__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
				enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;
	case EFC_EVT_DOMAIN_ATTACH_OK:
	case EFC_EVT_NPORT_ATTACH_OK: {
		int rc;

		rc = efc_start_ns_node(node->nport);
		if (rc)
			return;

		/* sm: if enable_rscn / start fabctl node */
		/* Instantiate the fabric controller (sends SCR) */
		if (node->nport->enable_rscn) {
			rc = efc_start_fabctl_node(node->nport);
			if (rc)
				return;
		}
		efc_node_transition(node, __efc_fabric_idle, NULL);
		break;
	}
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
		  void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_DOMAIN_ATTACH_OK:
		break;
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

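/*
 * ns_sm initial state: PLOGI to the name server, then walk the
 * registration sequence (RFT_ID, RFF_ID) and, if RSCN is enabled,
 * query the fabric with GID_PT.
 */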
void
__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* sm: / send PLOGI */
		efc_send_plogi(node);
		efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
		break;
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		int rc;

		/* Save service parameters */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);
		break;
	}
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		/* sm: / send RFTID */
		efc_ns_send_rftid(node);
		efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "Node attach failed\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	case EFC_EVT_SHUTDOWN:
		node_printf(node, "Shutdown event received\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node,
				    __efc_fabric_wait_attach_evt_shutdown,
				     NULL);
		break;

	/*
	 * if an RSCN is received, just ignore it;
	 * we haven't sent GID_PT yet (the ACC is sent by the fabctl node)
	 */
	case EFC_EVT_RSCN_RCVD:
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

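/*
 * A shutdown was requested while a node attach was still outstanding:
 * hold frames and wait for the attach to succeed or fail before
 * actually initiating the shutdown.
 */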
void
__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
				      enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	/* wait for any of these attach events and then shutdown */
	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n",
			    efc_sm_event_name(evt));
		efc_fabric_initiate_shutdown(node);
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		node->attached = false;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n",
			    efc_sm_event_name(evt));
		efc_fabric_initiate_shutdown(node);
		break;

	/* ignore shutdown event as we're already in shutdown path */
	case EFC_EVT_SHUTDOWN:
		node_printf(node, "Shutdown event received\n");
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

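/*
 * Wait for the RFT_ID (register FC-4 types) response, then register
 * FC-4 features with RFF_ID.
 */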
void
__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:
		if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
					  __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / send RFFID */
		efc_ns_send_rffid(node);
		efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
		break;

	/*
	 * if an RSCN is received, just ignore it;
	 * we haven't sent GID_PT yet (the ACC is sent by the fabctl node)
	 */
	case EFC_EVT_RSCN_RCVD:
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * Wait for an RFF_ID response event;
	 * if RSCN is enabled, a GID_PT name services request is issued.
	 */
	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:	{
		if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
					  __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		if (node->nport->enable_rscn) {
			/* sm: if enable_rscn / send GIDPT */
			efc_ns_send_gidpt(node);

			efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
					    NULL);
		} else {
			/* if 'T' only, we're done, go to idle */
			efc_node_transition(node, __efc_ns_idle, NULL);
		}
		break;
	}
	/*
	 * if an RSCN is received, just ignore it;
	 * we haven't sent GID_PT yet (the ACC is sent by the fabctl node)
	 */
	case EFC_EVT_RSCN_RCVD:
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

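/*
 * Process a GID_PT response: a CT header followed by an array of
 * 4-byte entries, each a control byte (FC_NS_FID_LAST flags the final
 * entry) plus a 24-bit port ID. Known ports that are absent from the
 * list are posted EFC_EVT_NODE_MISSING; listed ports we don't know
 * about yet are allocated, and rediscovered targets are posted
 * EFC_EVT_NODE_REFOUND.
 */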
static int
efc_process_gidpt_payload(struct efc_node *node,
			  void *data, u32 gidpt_len)
{
	u32 i, j;
	struct efc_node *newnode;
	struct efc_nport *nport = node->nport;
	struct efc *efc = node->efc;
	u32 port_id = 0, port_count, plist_count;
	struct efc_node *n;
	struct efc_node **active_nodes;
	int residual;
	struct {
		struct fc_ct_hdr hdr;
		struct fc_gid_pn_resp pn_rsp;
	} *rsp;
	struct fc_gid_pn_resp *gidpt;
	unsigned long index;

	rsp = data;
	gidpt = &rsp->pn_rsp;
	residual = be16_to_cpu(rsp->hdr.ct_mr_size);

	if (residual != 0)
		efc_log_debug(node->efc, "residual is %u words\n", residual);

	if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) {
		node_printf(node,
			    "GIDPT request failed: rsn x%x rsn_expl x%x\n",
			    rsp->hdr.ct_reason, rsp->hdr.ct_explan);
		return -EIO;
	}

	plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt);

	/* Count the number of nodes */
	port_count = 0;
	xa_for_each(&nport->lookup, index, n) {
		port_count++;
	}

	/* Allocate a buffer for all nodes */
	active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
	if (!active_nodes) {
		node_printf(node, "kcalloc failed\n");
		return -EIO;
	}

	/* Fill buffer with fc_id of active nodes */
	i = 0;
	xa_for_each(&nport->lookup, index, n) {
		port_id = n->rnode.fc_id;
		switch (port_id) {
		case FC_FID_FLOGI:
		case FC_FID_FCTRL:
		case FC_FID_DIR_SERV:
			break;
		default:
			if (port_id != FC_FID_DOM_MGR)
				active_nodes[i++] = n;
			break;
		}
	}

	/* update the active nodes buffer */
	for (i = 0; i < plist_count; i++) {
		port_id = ntoh24(gidpt[i].fp_fid);

		for (j = 0; j < port_count; j++) {
			if (active_nodes[j] &&
			    port_id == active_nodes[j]->rnode.fc_id) {
				active_nodes[j] = NULL;
			}
		}

		if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
			break;
	}

	/* Those remaining in the active_nodes[] are now gone! */
	for (i = 0; i < port_count; i++) {
		/*
		 * if we're an initiator and the remote node
		 * is a target, then post the node missing event.
		 * if we're a target and we have enabled
		 * target RSCN, then post the node missing event.
		 */
		if (!active_nodes[i])
			continue;

		if ((node->nport->enable_ini && active_nodes[i]->targ) ||
		    (node->nport->enable_tgt && enable_target_rscn(efc))) {
			efc_node_post_event(active_nodes[i],
					    EFC_EVT_NODE_MISSING, NULL);
		} else {
			node_printf(node,
				    "GID_PT: skipping non-tgt port_id x%06x\n",
				    active_nodes[i]->rnode.fc_id);
		}
	}
	kfree(active_nodes);

	for (i = 0; i < plist_count; i++) {
		port_id = ntoh24(gidpt[i].fp_fid);

		/* Don't create node for ourselves */
		if (port_id == node->rnode.nport->fc_id) {
			if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
				break;
			continue;
		}

		newnode = efc_node_find(nport, port_id);
		if (!newnode) {
			if (!node->nport->enable_ini)
				continue;

			newnode = efc_node_alloc(nport, port_id, false, false);
			if (!newnode) {
				efc_log_err(efc, "efc_node_alloc() failed\n");
				return -EIO;
			}
			/*
			 * send PLOGI automatically
			 * if initiator
			 */
			efc_node_init_device(newnode, true);
		}

		if (node->nport->enable_ini && newnode->targ) {
			efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND,
					    NULL);
		}

		if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
			break;
	}
	return 0;
}

void
__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();
	/*
	 * Wait for a GIDPT response from the name server. Process the FC_IDs
	 * that are reported by creating new remote ports, as needed.
	 */

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:	{
		if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
					  __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / process GIDPT payload */
		efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
					  cbdata->els_rsp.len);
		efc_node_transition(node, __efc_ns_idle, NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_FAIL:	{
		/* not much we can do; will retry with the next RSCN */
		node_printf(node, "GID_PT failed to complete\n");
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_transition(node, __efc_ns_idle, NULL);
		break;
	}

	/* if an RSCN is received here, queue up another discovery pass */
	case EFC_EVT_RSCN_RCVD: {
		node_printf(node, "RSCN received during GID_PT processing\n");
		node->rscn_pending = true;
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * Wait for RSCN received events (posted from the fabric controller)
	 * and restart the GIDPT name services query and processing.
	 */

	switch (evt) {
	case EFC_EVT_ENTER:
		if (!node->rscn_pending)
			break;

		node_printf(node, "RSCN pending, restart discovery\n");
		node->rscn_pending = false;
		fallthrough;

	case EFC_EVT_RSCN_RCVD: {
		/* sm: / send GIDPT */
		/*
		 * If target RSCN processing is enabled,
		 * and this is target only (not initiator),
		 * and tgt_rscn_delay is non-zero,
		 * then we delay issuing the GID_PT
		 */
		if (efc->tgt_rscn_delay_msec != 0 &&
		    !node->nport->enable_ini && node->nport->enable_tgt &&
		    enable_target_rscn(efc)) {
			efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
		} else {
			efc_ns_send_gidpt(node);
			efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
					    NULL);
		}
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

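/*
 * Deferred GID_PT timer callback: post EFC_EVT_GIDPT_DELAY_EXPIRED
 * back into the node state machine.
 */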
static void
gidpt_delay_timer_cb(struct timer_list *t)
{
	struct efc_node *node = from_timer(node, t, gidpt_delay_timer);

	del_timer(&node->gidpt_delay_timer);

	efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
}

void
__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER: {
		u64 delay_msec, tmp;

		/*
		 * Compute the delay time: start with tgt_rscn_delay; if
		 * the time since the last GID_PT is less than
		 * tgt_rscn_period, use tgt_rscn_period instead.
		 */
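		/*
		 * e.g. with tgt_rscn_delay_msec = 2000 and
		 * tgt_rscn_period_msec = 10000, a GID_PT triggered 3s
		 * after the previous one waits the full 10s.
		 */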
		delay_msec = efc->tgt_rscn_delay_msec;
		tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
		if (tmp < efc->tgt_rscn_period_msec)
			delay_msec = efc->tgt_rscn_period_msec;

		timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
			    0);
		mod_timer(&node->gidpt_delay_timer,
			  jiffies + msecs_to_jiffies(delay_msec));

		break;
	}

	case EFC_EVT_GIDPT_DELAY_EXPIRED:
		node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);

		efc_ns_send_gidpt(node);
		efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
		break;

	case EFC_EVT_RSCN_RCVD: {
		efc_log_debug(efc,
			      "RSCN received while in GIDPT delay - no action\n");
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

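/*
 * fabctl node initial state: no PLOGI is required for the well-known
 * fabric controller address, so immediately send an SCR (state change
 * registration) to subscribe to RSCN events.
 */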
void
__efc_fabctl_init(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* no need to login to fabric controller, just send SCR */
		efc_send_scr(node);
		efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
		break;

	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * Fabric controller node state machine:
	 * Wait for an SCR response from the fabric controller.
	 */
	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:
		if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_transition(node, __efc_fabctl_ready, NULL);
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

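/*
 * An RSCN arrived on the fabric controller node; forward it to the
 * name services node, which owns GID_PT-based rediscovery.
 */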
static void
efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
{
	struct efc *efc = node->efc;
	struct efc_nport *nport = node->nport;
	struct efc_node *ns;

	/* Forward this event to the name-services node */
	ns = efc_node_find(nport, FC_FID_DIR_SERV);
	if (ns)
		efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
	else
		efc_log_warn(efc, "can't find name server node\n");
}

void
__efc_fabctl_ready(struct efc_sm_ctx *ctx,
		   enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * Fabric controller node state machine: Ready.
	 * In this state, the fabric controller sends an RSCN, which this
	 * node receives and forwards to the name services node object;
	 * an LS_ACC for the RSCN is then sent.
	 */
	switch (evt) {
	case EFC_EVT_RSCN_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		/*
		 * sm: / process RSCN (forward to name services node),
		 * send LS_ACC
		 */
		efc_process_rscn(node, cbdata);
		efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
				    NULL);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
			      enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		efc_node_transition(node, __efc_fabctl_ready, NULL);
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

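/* Extract the remote port's WWPN from saved FLOGI service parameters */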
static uint64_t
efc_get_wwpn(struct fc_els_flogi *sp)
{
	return be64_to_cpu(sp->fl_wwpn);
}

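/*
 * Decide the point-to-point "winner" by comparing the remote WWPN from
 * the received FLOGI with the local WWPN. Returns 1 if the remote port
 * wins, 0 if the local port wins, and -1 if the WWPNs match (which is
 * only legal in external loopback).
 */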
static int
efc_rnode_is_winner(struct efc_nport *nport)
{
	struct fc_els_flogi *remote_sp;
	u64 remote_wwpn;
	u64 local_wwpn = nport->wwpn;
	u64 wwn_bump = 0;

	remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params;
	remote_wwpn = efc_get_wwpn(remote_sp);

	local_wwpn ^= wwn_bump;

	efc_log_debug(nport->efc, "r: %llx\n",
		      be64_to_cpu(remote_sp->fl_wwpn));
	efc_log_debug(nport->efc, "l: %llx\n", local_wwpn);

	if (remote_wwpn == local_wwpn) {
		efc_log_warn(nport->efc,
			     "WWPN of remote node [%08x %08x] matches local WWPN\n",
			     (u32)(local_wwpn >> 32ll),
			     (u32)local_wwpn);
		return -1;
	}

	return (remote_wwpn > local_wwpn);
}

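/*
 * p2p winner path: once the domain attach completes, use a separate
 * node (local SID=1, remote DID=2) to originate the PLOGI to the peer,
 * then re-init or shut down this transient FLOGI node.
 */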
void
__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK: {
		struct efc_nport *nport = node->nport;
		struct efc_node *rnode;

		/*
		 * this transient node (SID=0 (recv'd FLOGI)
		 * or DID=fabric (sent FLOGI))
		 * is the p2p winner, will use a separate node
		 * to send PLOGI to peer
		 */
		WARN_ON(!node->nport->p2p_winner);

		rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
		if (rnode) {
			/*
			 * the "other" transient p2p node has
			 * already kicked off the
			 * new node from which PLOGI is sent
			 */
			node_printf(node,
				    "Node with fc_id x%x already exists\n",
				    rnode->rnode.fc_id);
		} else {
			/*
			 * create new node (SID=1, DID=2)
			 * from which to send PLOGI
			 */
			rnode = efc_node_alloc(nport,
					       nport->p2p_remote_port_id,
					       false, false);
			if (!rnode) {
				efc_log_err(efc, "node alloc failed\n");
				return;
			}

			efc_fabric_notify_topology(node);
			/* sm: / allocate p2p remote node */
			efc_node_transition(rnode, __efc_p2p_rnode_init,
					    NULL);
		}

		/*
		 * the transient node (SID=0 or DID=fabric)
		 * has served its purpose
		 */
		if (node->rnode.fc_id == 0) {
			/*
			 * if this is the SID=0 node,
			 * move to the init state in case peer
			 * has restarted FLOGI discovery and FLOGI is pending
			 */
			/* don't send PLOGI on efc_d_init entry */
			efc_node_init_device(node, false);
		} else {
			/*
			 * if this is the DID=fabric node
			 * (we initiated FLOGI), shut it down
			 */
			node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
			efc_fabric_initiate_shutdown(node);
		}
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

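/*
 * Newly allocated p2p remote node: open the port login by sending
 * PLOGI to the peer.
 */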
void
__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* sm: / send PLOGI */
		efc_send_plogi(node);
		efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
		break;

	case EFC_EVT_ABTS_RCVD:
		/* sm: / send BA_ACC */
		efc_send_bls_acc(node, cbdata->header->dma.virt);

		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
			      enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;

		/* sm: if p2p_winner / domain_attach */
		if (node->nport->p2p_winner) {
			efc_node_transition(node,
					    __efc_p2p_wait_domain_attach,
					    NULL);
			if (!node->nport->domain->attached) {
				node_printf(node, "Domain not attached\n");
				efc_domain_attach(node->nport->domain,
						  node->nport->p2p_port_id);
			} else {
				node_printf(node, "Domain already attached\n");
				efc_node_post_event(node,
						    EFC_EVT_DOMAIN_ATTACH_OK,
						    NULL);
			}
		} else {
			/* this node has served its purpose;
			 * we'll expect a PLOGI on a separate
			 * node (remote SID=0x1); return this node
			 * to init state in case peer
			 * restarts discovery -- it may already
			 * have (pending frames may exist).
			 */
			/* don't send PLOGI on efc_d_init entry */
			efc_node_init_device(node, false);
		}
		break;

	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		/*
		 * LS_ACC failed, possibly due to link down;
		 * shutdown node and wait
		 * for FLOGI discovery to restart
		 */
		node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	case EFC_EVT_ABTS_RCVD: {
		/* sm: / send BA_ACC */
		efc_send_bls_acc(node, cbdata->header->dma.virt);
		break;
	}

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

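/*
 * Wait for the p2p PLOGI response. A PRLI may arrive first (WCQE/RCQE
 * ordering is not guaranteed); if so, stash the OX_ID so the LS_ACC
 * can be sent once the node attach completes.
 */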
void
__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
			 enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK: {
		int rc;

		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);
		break;
	}
	case EFC_EVT_SRRS_ELS_REQ_FAIL: {
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		node_printf(node, "PLOGI failed, shutting down\n");
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;
	}

	case EFC_EVT_PLOGI_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		/* if we're in external loopback mode, just send LS_ACC */
		if (node->efc->external_loopback) {
			efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
		} else {
			/*
			 * if this isn't external loopback,
			 * pass to default handler
			 */
			__efc_fabric_common(__func__, ctx, evt, arg);
		}
		break;
	}
	case EFC_EVT_PRLI_RCVD:
		/* I, or I+T */
		/* sent PLOGI and before completion was seen, received the
		 * PRLI from the remote node (WCQEs and RCQEs come in on
		 * different queues and order of processing cannot be assumed)
		 * Save OXID so PRLI can be sent after the attach and continue
		 * to wait for PLOGI response
		 */
		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PRLI);
		efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
				    NULL);
		break;
	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
				    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/*
		 * Since we've received a PRLI, we have a port login and just
		 * need to wait for the PLOGI response to do the node attach;
		 * then we can send the LS_ACC for the PRLI. During this time
		 * we may receive FCP_CMNDs (possible since we've already sent
		 * a PRLI and our peer may have accepted it), so hold them.
		 * We are not waiting on any other unsolicited frames to
		 * continue with the login process, so it does not hurt to
		 * hold frames here.
		 */
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_OK: {	/* PLOGI response received */
		int rc;

		/* Completion from PLOGI sent */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);
		break;
	}
	case EFC_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
	case EFC_EVT_SRRS_ELS_REQ_RJT:
		/* PLOGI failed, shutdown the node */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_fabric_common, __func__)) {
			return;
		}
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

void
__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		switch (node->send_ls_acc) {
		case EFC_NODE_SEND_LS_ACC_PRLI: {
			efc_d_send_prli_rsp(node->ls_acc_io,
					    node->ls_acc_oxid);
			node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
		case EFC_NODE_SEND_LS_ACC_NONE:
		default:
			/* Normal case for I */
			/* sm: send_plogi_acc is not set / send PLOGI acc */
			efc_node_transition(node, __efc_d_port_logged_in,
					    NULL);
			break;
		}
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "Node attach failed\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_fabric_initiate_shutdown(node);
		break;

	case EFC_EVT_SHUTDOWN:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node,
				    __efc_fabric_wait_attach_evt_shutdown,
				     NULL);
		break;
	case EFC_EVT_PRLI_RCVD:
		node_printf(node, "%s: PRLI received before node is attached\n",
			    efc_sm_event_name(evt));
		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PRLI);
		break;

	default:
		__efc_fabric_common(__func__, ctx, evt, arg);
	}
}

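/*
 * Run the p2p winner negotiation and record the resulting addressing
 * on the nport: the winner takes port ID 1 and assigns 2 to the peer
 * (both sides use 1 in external loopback). Returns 0 on success, or
 * the negotiation result if no winner could be determined.
 */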
int
efc_p2p_setup(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	int rnode_winner;

	rnode_winner = efc_rnode_is_winner(nport);

	/* set nport flags to indicate p2p "winner" */
	if (rnode_winner == 1) {
		nport->p2p_remote_port_id = 0;
		nport->p2p_port_id = 0;
		nport->p2p_winner = false;
	} else if (rnode_winner == 0) {
		nport->p2p_remote_port_id = 2;
		nport->p2p_port_id = 1;
		nport->p2p_winner = true;
	} else {
		/* no winner; only okay if external loopback enabled */
		if (nport->efc->external_loopback) {
			/*
			 * External loopback mode enabled;
			 * local nport and remote node
			 * will be registered with an NPortID = 1;
			 */
			efc_log_debug(efc,
				      "External loopback mode enabled\n");
			nport->p2p_remote_port_id = 1;
			nport->p2p_port_id = 1;
			nport->p2p_winner = true;
		} else {
			efc_log_warn(efc,
				     "failed to determine p2p winner\n");
			return rnode_winner;
		}
	}
	return 0;
}