// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * device_sm Node State Machine: Remote Device States
 */

#include "efc.h"
#include "efc_device.h"
#include "efc_fabric.h"

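/*
 * Prepare to send the PRLI LS_ACC: save the OX_ID and, for an initiator node
 * on a target-enabled nport, ask the backend to register the session;
 * EFC_EVT_NODE_SESS_REG_OK/FAIL is posted based on the scsi_new_node() result.
 */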
void
efc_d_send_prli_rsp(struct efc_node *node, u16 ox_id)
{
	int rc = EFC_SCSI_CALL_COMPLETE;
	struct efc *efc = node->efc;

	node->ls_acc_oxid = ox_id;
	node->send_ls_acc = EFC_NODE_SEND_LS_ACC_PRLI;

	/*
	 * Wait for backend session registration
	 * to complete before sending PRLI resp
	 */

	if (node->init) {
		efc_log_info(efc, "[%s] found(initiator) WWPN:%s WWNN:%s\n",
			     node->display_name, node->wwpn, node->wwnn);
		if (node->nport->enable_tgt)
			rc = efc->tt.scsi_new_node(efc, node);
	}

	if (rc < 0)
		efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_FAIL, NULL);

	if (rc == EFC_SCSI_CALL_COMPLETE)
		efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_OK, NULL);
}

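/*
 * Event handler common to all device (remote node) states: handles the
 * shutdown events by recording the shutdown reason and transitioning to
 * __efc_d_initiate_shutdown; everything else falls through to
 * __efc_node_common().
 */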
static void
__efc_d_common(const char *funcname, struct efc_sm_ctx *ctx,
	       enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = NULL;
	struct efc *efc = NULL;

	node = ctx->app;
	efc = node->efc;

	switch (evt) {
	/* Handle shutdown events */
	case EFC_EVT_SHUTDOWN:
		efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
			      funcname, efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;
	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
		efc_log_debug(efc, "[%s] %-20s %-20s\n",
			      node->display_name, funcname,
			      efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
			      funcname, efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	default:
		/* call default event handler common to all nodes */
		__efc_node_common(funcname, ctx, evt, arg);
	}
}

static void
__efc_d_wait_del_node(struct efc_sm_ctx *ctx,
		      enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	/*
	 * State is entered when a node sends a delete initiator/target call
	 * to the target-server/initiator-client and needs to wait for that
	 * work to complete.
	 */
	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		fallthrough;

	case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
	case EFC_EVT_ALL_CHILD_NODES_FREE:
		/* These are expected events. */
		break;

	case EFC_EVT_NODE_DEL_INI_COMPLETE:
	case EFC_EVT_NODE_DEL_TGT_COMPLETE:
		/*
		 * node has either been detached or is in the process
		 * of being detached,
		 * call common node's initiate cleanup function
		 */
		efc_node_initiate_cleanup(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as ELS IOs complete */
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case EFC_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		fallthrough;

	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		break;
	case EFC_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

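/*
 * Wait state for an I+T node: both backend delete calls have been issued;
 * when the first completes, move to __efc_d_wait_del_node to wait for the
 * second.
 */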
static void
__efc_d_wait_del_ini_tgt(struct efc_sm_ctx *ctx,
			 enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		fallthrough;

	case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
	case EFC_EVT_ALL_CHILD_NODES_FREE:
		/* These are expected events. */
		break;

	case EFC_EVT_NODE_DEL_INI_COMPLETE:
	case EFC_EVT_NODE_DEL_TGT_COMPLETE:
		efc_node_transition(node, __efc_d_wait_del_node, NULL);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_FAIL:
		/* Can happen as ELS IOs complete */
		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case EFC_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		fallthrough;

	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		break;
	case EFC_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

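/*
 * Entry point for node shutdown: disable ELS I/O, issue the backend delete
 * upcall(s) appropriate to the node's initiator/target role, free the HW
 * node if attached, and start cleanup when neither role is active.
 */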
void
__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER: {
		int rc = EFC_SCSI_CALL_COMPLETE;

		/* assume no wait needed */
		node->els_io_enabled = false;

		/* make necessary delete upcall(s) */
		if (node->init && !node->targ) {
			efc_log_info(node->efc,
				     "[%s] delete (initiator) WWPN %s WWNN %s\n",
				     node->display_name,
				     node->wwpn, node->wwnn);
			efc_node_transition(node,
					    __efc_d_wait_del_node,
					    NULL);
			if (node->nport->enable_tgt)
				rc = efc->tt.scsi_del_node(efc, node,
					EFC_SCSI_INITIATOR_DELETED);

			if (rc == EFC_SCSI_CALL_COMPLETE || rc < 0)
				efc_node_post_event(node,
					EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);

		} else if (node->targ && !node->init) {
			efc_log_info(node->efc,
				     "[%s] delete (target) WWPN %s WWNN %s\n",
				     node->display_name,
				     node->wwpn, node->wwnn);
			efc_node_transition(node,
					    __efc_d_wait_del_node,
					    NULL);
			if (node->nport->enable_ini)
				rc = efc->tt.scsi_del_node(efc, node,
					EFC_SCSI_TARGET_DELETED);

			if (rc == EFC_SCSI_CALL_COMPLETE)
				efc_node_post_event(node,
					EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);

		} else if (node->init && node->targ) {
			efc_log_info(node->efc,
				     "[%s] delete (I+T) WWPN %s WWNN %s\n",
				     node->display_name, node->wwpn, node->wwnn);
			efc_node_transition(node, __efc_d_wait_del_ini_tgt,
					    NULL);
			if (node->nport->enable_tgt)
				rc = efc->tt.scsi_del_node(efc, node,
						EFC_SCSI_INITIATOR_DELETED);

			if (rc == EFC_SCSI_CALL_COMPLETE)
				efc_node_post_event(node,
					EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
			/* assume no wait needed */
			rc = EFC_SCSI_CALL_COMPLETE;
			if (node->nport->enable_ini)
				rc = efc->tt.scsi_del_node(efc, node,
						EFC_SCSI_TARGET_DELETED);

			if (rc == EFC_SCSI_CALL_COMPLETE)
				efc_node_post_event(node,
					EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
		}

		/* we've initiated the upcalls as needed, now kick off the node
		 * detach to precipitate the aborting of outstanding exchanges
		 * associated with said node
		 *
		 * Beware: if we've made upcall(s), we've already transitioned
		 * to a new state by the time we execute this.
		 * consider doing this before the upcalls?
		 */
		if (node->attached) {
			/* issue hw node free; don't care if succeeds right
			 * away or sometime later, will check node->attached
			 * later in shutdown process
			 */
			rc = efc_cmd_node_detach(efc, &node->rnode);
			if (rc < 0)
				node_printf(node,
					    "Failed freeing HW node, rc=%d\n",
					    rc);
		}

		/* if neither initiator nor target, proceed to cleanup */
		if (!node->init && !node->targ) {
			/*
			 * node has either been detached or is in
			 * the process of being detached,
			 * call common node's initiate cleanup function
			 */
			efc_node_initiate_cleanup(node);
		}
		break;
	}
	case EFC_EVT_ALL_CHILD_NODES_FREE:
		/* Ignore, this can happen if an ELS is
		 * aborted while in a delay/retry state
		 */
		break;
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

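/*
 * Hold frames until the domain attach completes, then (for an initiator)
 * kick off discovery of this device with a PLOGI via efc_node_init_device().
 */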
void
__efc_d_wait_loop(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK: {
		/* send PLOGI automatically if initiator */
		efc_node_init_device(node, true);
		break;
	}
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
efc_send_ls_acc_after_attach(struct efc_node *node,
			     struct fc_frame_header *hdr,
			     enum efc_node_send_ls_acc ls)
{
	u16 ox_id = be16_to_cpu(hdr->fh_ox_id);

	/* Save the OX_ID for sending LS_ACC sometime later */
	WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE);

	node->ls_acc_oxid = ox_id;
	node->send_ls_acc = ls;
	node->ls_acc_did = ntoh24(hdr->fh_d_id);
}

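/*
 * Decode the service parameter page of a received PRLI and record whether
 * the remote port claims initiator and/or target function.
 */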
void
efc_process_prli_payload(struct efc_node *node, void *prli)
{
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp sp;
	} *pp;

	pp = prli;
	node->init = (pp->sp.spp_flags & FCP_SPPF_INIT_FCN) != 0;
	node->targ = (pp->sp.spp_flags & FCP_SPPF_TARG_FCN) != 0;
}

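/*
 * Wait for the PLOGI LS_ACC we sent to complete; on success the remote port
 * is logged in and the node moves to __efc_d_port_logged_in.
 */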
void
__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:	/* PLOGI ACC completions */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		efc_node_transition(node, __efc_d_port_logged_in, NULL);
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

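/*
 * Wait for the response to a LOGO we sent; any completion (accept, reject
 * or failure) results in an explicit-logout shutdown of the node.
 */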
void
__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
		      enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_OK:
	case EFC_EVT_SRRS_ELS_REQ_RJT:
	case EFC_EVT_SRRS_ELS_REQ_FAIL:
		/* LOGO response received, send shutdown */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_LOGO,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		node_printf(node,
			    "LOGO sent (evt=%s), shutdown node\n",
			    efc_sm_event_name(evt));
		/* sm: / post explicit logout */
		efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
				    NULL);
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

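/*
 * Start the device state machine for a new node; if the node database is
 * paused for new nodes (and this is not the domain manager), park the node
 * in __efc_node_paused with __efc_d_init as the resume state.
 */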
void
efc_node_init_device(struct efc_node *node, bool send_plogi)
{
	node->send_plogi = send_plogi;
	if ((node->efc->nodedb_mask & EFC_NODEDB_PAUSE_NEW_NODES) &&
	    (node->rnode.fc_id != FC_FID_DOM_MGR)) {
		node->nodedb_state = __efc_d_init;
		efc_node_transition(node, __efc_node_paused, NULL);
	} else {
		efc_node_transition(node, __efc_d_init, NULL);
	}
}

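/*
 * A PLOGI arrived before the domain is attached: decide, based on the nport
 * topology, whether to start the domain attach ourselves (p2p), wait for the
 * fabric state machine to do it, or wait for a topology notification when
 * the topology is still unknown.
 */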
static void
efc_d_check_plogi_topology(struct efc_node *node, u32 d_id)
{
	switch (node->nport->topology) {
	case EFC_NPORT_TOPO_P2P:
		/* we're not attached and nport is p2p,
		 * need to attach
		 */
		efc_domain_attach(node->nport->domain, d_id);
		efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
		break;
	case EFC_NPORT_TOPO_FABRIC:
		/* we're not attached and nport is fabric, domain
		 * attach should have already been requested as part
		 * of the fabric state machine, wait for it
		 */
		efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
		break;
	case EFC_NPORT_TOPO_UNKNOWN:
		/* Two possibilities:
		 * 1. received a PLOGI before our FLOGI has completed
		 *    (possible since completion comes in on another
		 *    CQ), thus we don't know what we're connected to
		 *    yet; transition to a state to wait for the
		 *    fabric node to tell us;
		 * 2. PLOGI received before link went down and we
		 *    haven't performed domain attach yet.
		 * Note: we cannot distinguish between 1. and 2.
		 * so have to assume PLOGI
		 * was received after link back up.
		 */
		node_printf(node, "received PLOGI, unknown topology did=0x%x\n",
			    d_id);
		efc_node_transition(node, __efc_d_wait_topology_notify, NULL);
		break;
	default:
		node_printf(node, "received PLOGI, unexpected topology %d\n",
			    node->nport->topology);
	}
}

void
__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	/*
	 * This state is entered when a node is instantiated,
	 * either having been discovered from a name services query,
	 * or having received a PLOGI/FLOGI.
	 */
	switch (evt) {
	case EFC_EVT_ENTER:
		if (!node->send_plogi)
			break;
		/* only send if we have initiator capability,
		 * and domain is attached
		 */
		if (node->nport->enable_ini &&
		    node->nport->domain->attached) {
			efc_send_plogi(node);

			efc_node_transition(node, __efc_d_wait_plogi_rsp, NULL);
		} else {
			node_printf(node, "not sending plogi nport.ini=%d,",
				    node->nport->enable_ini);
			node_printf(node, "domain attached=%d\n",
				    node->nport->domain->attached);
		}
		break;
	case EFC_EVT_PLOGI_RCVD: {
		/* T, or I+T */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		int rc;

		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);

		/* domain not attached; several possibilities: */
		if (!node->nport->domain->attached) {
			efc_d_check_plogi_topology(node, ntoh24(hdr->fh_d_id));
			break;
		}

		/* domain already attached */
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);

		break;
	}

	case EFC_EVT_FDISC_RCVD: {
		__efc_d_common(__func__, ctx, evt, arg);
		break;
	}

	case EFC_EVT_FLOGI_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		u32 d_id = ntoh24(hdr->fh_d_id);

		/* sm: / save sparams, send FLOGI acc */
		memcpy(node->nport->domain->flogi_service_params,
		       cbdata->payload->dma.virt,
		       sizeof(struct fc_els_flogi));

		/* send FC LS_ACC response, override s_id */
		efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);

		efc_send_flogi_p2p_acc(node, be16_to_cpu(hdr->fh_ox_id), d_id);

		if (efc_p2p_setup(node->nport)) {
			node_printf(node, "p2p failed, shutting down node\n");
			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
			break;
		}

		efc_node_transition(node, __efc_p2p_wait_flogi_acc_cmpl, NULL);
		break;
	}

	case EFC_EVT_LOGO_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		if (!node->nport->domain->attached) {
			/* most likely a frame left over from before a link
			 * down; drop and
			 * shut node down w/ "explicit logout" so pending
			 * frames are processed
			 */
			node_printf(node, "%s domain not attached, dropping\n",
				    efc_sm_event_name(evt));
			efc_node_post_event(node,
					EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
			break;
		}

		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	case EFC_EVT_PRLI_RCVD:
	case EFC_EVT_PRLO_RCVD:
	case EFC_EVT_PDISC_RCVD:
	case EFC_EVT_ADISC_RCVD:
	case EFC_EVT_RSCN_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		if (!node->nport->domain->attached) {
			/* most likely a frame left over from before a link
			 * down; drop and shut node down w/ "explicit logout"
			 * so pending frames are processed
			 */
			node_printf(node, "%s domain not attached, dropping\n",
				    efc_sm_event_name(evt));

			efc_node_post_event(node,
					    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
					    NULL);
			break;
		}
		node_printf(node, "%s received, sending reject\n",
			    efc_sm_event_name(evt));

		efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
				ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);

		break;
	}

	case EFC_EVT_FCP_CMD_RCVD: {
		/* note: problem, we're now expecting an ELS REQ completion
		 * from both the LOGO and PLOGI
		 */
		if (!node->nport->domain->attached) {
			/* most likely a frame left over from before a
			 * link down; drop and
			 * shut node down w/ "explicit logout" so pending
			 * frames are processed
			 */
			node_printf(node, "%s domain not attached, dropping\n",
				    efc_sm_event_name(evt));
			efc_node_post_event(node,
					    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
					    NULL);
			break;
		}

		/* Send LOGO */
		node_printf(node, "FCP_CMND received, send LOGO\n");
		if (efc_send_logo(node)) {
			/*
			 * failed to send LOGO, go ahead and clean up the
			 * node anyway
			 */
			node_printf(node, "Failed to send LOGO\n");
			efc_node_post_event(node,
					    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
					    NULL);
		} else {
			/* sent LOGO, wait for response */
			efc_node_transition(node,
					    __efc_d_wait_logo_rsp, NULL);
		}
		break;
	}
	case EFC_EVT_DOMAIN_ATTACH_OK:
		/* don't care about domain_attach_ok */
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

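/*
 * We have sent a PLOGI and are waiting for its response; unsolicited
 * PLOGI/PRLI from the remote port are handled here as well, since request
 * and response processing can be reordered across queues.
 */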
void
__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg)
{
	int rc;
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_PLOGI_RCVD: {
		/* T, or I+T */
		/* received PLOGI with service parameters; go ahead and attach
		 * the node. When the PLOGI we sent ultimately completes, it
		 * will be a no-op.
		 *
		 * If there is an outstanding PLOGI sent, can we set a flag
		 * to indicate that we don't want to retry it if it times out?
		 */
		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);
		/* sm: domain->attached / efc_node_attach */
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node,
					    EFC_EVT_NODE_ATTACH_FAIL, NULL);

		break;
	}

	case EFC_EVT_PRLI_RCVD:
		/* I, or I+T */
		/* sent PLOGI and before completion was seen, received the
		 * PRLI from the remote node (WCQEs and RCQEs come in on
		 * different queues and order of processing cannot be assumed)
		 * Save OXID so PRLI can be sent after the attach and continue
		 * to wait for PLOGI response
		 */
		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PRLI);
		efc_node_transition(node, __efc_d_wait_plogi_rsp_recvd_prli,
				    NULL);
		break;

	case EFC_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
	case EFC_EVT_PRLO_RCVD:
	case EFC_EVT_PDISC_RCVD:
	case EFC_EVT_FDISC_RCVD:
	case EFC_EVT_ADISC_RCVD:
	case EFC_EVT_RSCN_RCVD:
	case EFC_EVT_SCR_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received, sending reject\n",
			    efc_sm_event_name(evt));

		efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
				ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);

		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
		/* Completion from PLOGI sent */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node,
					    EFC_EVT_NODE_ATTACH_FAIL, NULL);

		break;

	case EFC_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
		/* PLOGI failed, shutdown the node */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
		break;

	case EFC_EVT_SRRS_ELS_REQ_RJT:
		/* Our PLOGI was rejected, this is ok in some cases */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		break;

	case EFC_EVT_FCP_CMD_RCVD: {
		/* not logged in yet and outstanding PLOGI so don't send LOGO,
		 * just drop
		 */
		node_printf(node, "FCP_CMND received, drop\n");
		break;
	}

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

void
__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
				  enum efc_sm_event evt, void *arg)
{
	int rc;
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/*
		 * Since we've received a PRLI, we have a port login and will
		 * just need to wait for the PLOGI response to do the node
		 * attach; then we can send the LS_ACC for the PRLI. During
		 * this time we may receive FCP_CMNDs (possible since we've
		 * already sent a PRLI and our peer may have accepted it).
		 * We are not waiting on any other unsolicited frames to
		 * continue with the login process, so it will not hurt to
		 * hold frames here.
		 */
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
		/* Completion from PLOGI sent */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / save sparams, efc_node_attach */
		efc_node_save_sparms(node, cbdata->els_rsp.virt);
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);

		break;

	case EFC_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
	case EFC_EVT_SRRS_ELS_REQ_RJT:
		/* PLOGI failed, shutdown the node */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

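/*
 * Hold frames until the domain attach completes, then attach the node and
 * move to __efc_d_wait_node_attach.
 */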
void
__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg)
{
	int rc;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_DOMAIN_ATTACH_OK:
		WARN_ON(!node->nport->domain->attached);
		/* sm: / efc_node_attach */
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
					    NULL);

		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

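/*
 * A PLOGI was received while the topology was still unknown; wait for the
 * fabric state machine to report the topology (starting the domain attach
 * ourselves if it turns out to be point-to-point) and then for the domain
 * attach to complete.
 */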
void
__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	int rc;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
		enum efc_nport_topology *topology = arg;

		WARN_ON(node->nport->domain->attached);

		WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);

		node_printf(node, "topology notification, topology=%d\n",
			    *topology);

		/* At the time the PLOGI was received, the topology was unknown,
		 * so we didn't know which node would perform the domain attach:
		 * 1. The node from which the PLOGI was sent (p2p) or
		 * 2. The node to which the FLOGI was sent (fabric).
		 */
		if (*topology == EFC_NPORT_TOPO_P2P) {
			/* if this is p2p, need to attach to the domain using
			 * the d_id from the PLOGI received
			 */
			efc_domain_attach(node->nport->domain,
					  node->ls_acc_did);
		}
		/* else, if this is fabric, the domain attach
		 * should be performed by the fabric node (node sending FLOGI);
		 * just wait for attach to complete
		 */

		efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
		break;
	}
	case EFC_EVT_DOMAIN_ATTACH_OK:
		WARN_ON(!node->nport->domain->attached);
		node_printf(node, "domain attach ok\n");
		/* sm: / efc_node_attach */
		rc = efc_node_attach(node);
		efc_node_transition(node, __efc_d_wait_node_attach, NULL);
		if (rc < 0)
			efc_node_post_event(node,
					    EFC_EVT_NODE_ATTACH_FAIL, NULL);

		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

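/*
 * Wait for the HW node attach to complete; on success send any deferred
 * PLOGI or PRLI accept, on failure (or a shutdown event) start the node
 * shutdown path.
 */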
void
__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
			 enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		switch (node->send_ls_acc) {
		case EFC_NODE_SEND_LS_ACC_PLOGI: {
			/* sm: send_plogi_acc is set / send PLOGI acc */
			/* Normal case for T, or I+T */
			efc_send_plogi_acc(node, node->ls_acc_oxid);
			efc_node_transition(node, __efc_d_wait_plogi_acc_cmpl,
					    NULL);
			node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case EFC_NODE_SEND_LS_ACC_PRLI: {
			efc_d_send_prli_rsp(node, node->ls_acc_oxid);
			node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
			node->ls_acc_io = NULL;
			break;
		}
		case EFC_NODE_SEND_LS_ACC_NONE:
		default:
			/* Normal case for I */
			/* sm: send_plogi_acc is not set / send PLOGI acc */
			efc_node_transition(node,
					    __efc_d_port_logged_in, NULL);
			break;
		}
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "node attach failed\n");
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	/* Handle shutdown events */
	case EFC_EVT_SHUTDOWN:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
				    NULL);
		break;
	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
		efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
				    NULL);
		break;
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
		efc_node_transition(node,
				    __efc_d_wait_attach_evt_shutdown, NULL);
		break;
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

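/*
 * A shutdown was requested while a node attach was still outstanding; wait
 * for the attach to complete (ok or fail) and then proceed with the
 * shutdown.
 */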
void
__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
				 enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	/* wait for any of these attach events and then shutdown */
	case EFC_EVT_NODE_ATTACH_OK:
		node->attached = true;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n",
			    efc_sm_event_name(evt));
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	case EFC_EVT_NODE_ATTACH_FAIL:
		/* node attach failed, shutdown the node */
		node->attached = false;
		node_printf(node, "Attach evt=%s, proceed to shutdown\n",
			    efc_sm_event_name(evt));
		efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
		break;

	/* ignore shutdown events as we're already in shutdown path */
	case EFC_EVT_SHUTDOWN:
		/* have default shutdown event take precedence */
		node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
		fallthrough;

	case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
	case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
		node_printf(node, "%s received\n", efc_sm_event_name(evt));
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

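/*
 * Port login (PLOGI) is complete: send PRLI if we are an initiator, handle a
 * received PRLI/LOGO from the remote port, and wait for backend session
 * registration before moving to __efc_d_device_ready.
 */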
void
__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		/* Normal case for I or I+T */
		if (node->nport->enable_ini &&
		    !(node->rnode.fc_id != FC_FID_DOM_MGR)) {
			/* sm: if enable_ini / send PRLI */
			efc_send_prli(node);
			/* can now expect ELS_REQ_OK/FAIL/RJT */
		}
		break;

	case EFC_EVT_FCP_CMD_RCVD: {
		break;
	}

	case EFC_EVT_PRLI_RCVD: {
		/* Normal case for T or I+T */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		struct {
			struct fc_els_prli prli;
			struct fc_els_spp sp;
		} *pp;

		pp = cbdata->payload->dma.virt;
		if (pp->sp.spp_type != FC_TYPE_FCP) {
			/* Only FCP is supported */
			efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
					ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
			break;
		}

		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_d_send_prli_rsp(node, be16_to_cpu(hdr->fh_ox_id));
		break;
	}

	case EFC_EVT_NODE_SESS_REG_OK:
		if (node->send_ls_acc == EFC_NODE_SEND_LS_ACC_PRLI)
			efc_send_prli_acc(node, node->ls_acc_oxid);

		node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
		efc_node_transition(node, __efc_d_device_ready, NULL);
		break;

	case EFC_EVT_NODE_SESS_REG_FAIL:
		efc_send_ls_rjt(node, node->ls_acc_oxid, ELS_RJT_UNAB,
				ELS_EXPL_UNSUPR, 0);
		node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
		break;

	case EFC_EVT_SRRS_ELS_REQ_OK: {	/* PRLI response */
		/* Normal case for I or I+T */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / process PRLI payload */
		efc_process_prli_payload(node, cbdata->els_rsp.virt);
		efc_node_transition(node, __efc_d_device_ready, NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_FAIL: {	/* PRLI response failed */
		/* I, I+T, assume some link failure, shutdown node */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
		break;
	}

	case EFC_EVT_SRRS_ELS_REQ_RJT: {
		/* PRLI rejected by remote
		 * Normal for I, I+T (connected to an I)
		 * The node doesn't want to be a target, so stay here and wait
		 * for a PRLI from the remote node if it really wants to
		 * connect to us as a target.
		 */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		break;
	}

	case EFC_EVT_SRRS_ELS_CMPL_OK: {
		/* Normal T, I+T, target-server rejected the process login */
		/* This would be received only in the case where we sent
		 * LS_RJT for the PRLI, so do nothing.
		 * (note: as T only we could shut down the node)
		 */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		break;
	}

	case EFC_EVT_PLOGI_RCVD: {
		/* sm: / save sparams, set send_plogi_acc,
		 * post implicit logout
		 * Save plogi parameters
		 */
		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);

		/* Restart node attach with new service parameters,
		 * and send ACC
		 */
		efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
				    NULL);
		break;
	}

	case EFC_EVT_LOGO_RCVD: {
		/* I, T, I+T */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n",
			    efc_sm_event_name(evt), node->attached);
		/* sm: / send LOGO acc */
		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

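/*
 * Wait for the LOGO LS_ACC we sent to complete, then shut the node down
 * with an explicit logout.
 */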
void
__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg)
{
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		efc_node_hold_frames(node);
		break;

	case EFC_EVT_EXIT:
		efc_node_accept_frames(node);
		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:
	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		/* sm: / post explicit logout */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		efc_node_post_event(node,
				    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
		break;
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

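/*
 * The node is fully logged in and FCP-enabled: notify the backend of a new
 * target if applicable, and service PLOGI/PRLI/PRLO/LOGO/ADISC from the
 * remote port while the session is up.
 */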
void
__efc_d_device_ready(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	if (evt != EFC_EVT_FCP_CMD_RCVD)
		node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER:
		node->fcp_enabled = true;
		if (node->targ) {
			efc_log_info(efc,
				     "[%s] found (target) WWPN %s WWNN %s\n",
				     node->display_name,
				     node->wwpn, node->wwnn);
			if (node->nport->enable_ini)
				efc->tt.scsi_new_node(efc, node);
		}
		break;

	case EFC_EVT_EXIT:
		node->fcp_enabled = false;
		break;

	case EFC_EVT_PLOGI_RCVD: {
		/* sm: / save sparams, set send_plogi_acc, post implicit
		 * logout
		 * Save plogi parameters
		 */
		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);

		/*
		 * Restart node attach with new service parameters,
		 * and send ACC
		 */
		efc_node_post_event(node,
				    EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
		break;
	}

	case EFC_EVT_PRLI_RCVD: {
		/* T, I+T: remote initiator is slow to get started */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		struct {
			struct fc_els_prli prli;
			struct fc_els_spp sp;
		} *pp;

		pp = cbdata->payload->dma.virt;
		if (pp->sp.spp_type != FC_TYPE_FCP) {
			/* Only FCP is supported */
			efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
					ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
			break;
		}

		efc_process_prli_payload(node, cbdata->payload->dma.virt);
		efc_send_prli_acc(node, be16_to_cpu(hdr->fh_ox_id));
		break;
	}

	case EFC_EVT_PRLO_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		/* sm: / send PRLO acc */
		efc_send_prlo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		/* need implicit logout? */
		break;
	}

	case EFC_EVT_LOGO_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n",
			    efc_sm_event_name(evt), node->attached);
		/* sm: / send LOGO acc */
		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}

	case EFC_EVT_ADISC_RCVD: {
		struct fc_frame_header *hdr = cbdata->header->dma.virt;
		/* sm: / send ADISC acc */
		efc_send_adisc_acc(node, be16_to_cpu(hdr->fh_ox_id));
		break;
	}

	case EFC_EVT_ABTS_RCVD:
		/* sm: / process ABTS */
		efc_log_err(efc, "Unexpected event:%s\n",
			    efc_sm_event_name(evt));
		break;

	case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
		break;

	case EFC_EVT_NODE_REFOUND:
		break;

	case EFC_EVT_NODE_MISSING:
		if (node->nport->enable_rscn)
			efc_node_transition(node, __efc_d_device_gone, NULL);

		break;

	case EFC_EVT_SRRS_ELS_CMPL_OK:
		/* T, or I+T, PRLI accept completed ok */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		break;

	case EFC_EVT_SRRS_ELS_CMPL_FAIL:
		/* T, or I+T, PRLI accept failed to complete */
		WARN_ON(!node->els_cmpl_cnt);
		node->els_cmpl_cnt--;
		node_printf(node, "Failed to send PRLI LS_ACC\n");
		break;

	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

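/*
 * The remote port has gone missing (e.g. after RSCN processing): issue the
 * backend "missing" upcall(s) for the node's role(s) and either shut the
 * node down or, if it is found again, revalidate it with ADISC.
 */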
void
__efc_d_device_gone(struct efc_sm_ctx *ctx,
		    enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;
	struct efc *efc = node->efc;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_ENTER: {
		int rc = EFC_SCSI_CALL_COMPLETE;
		int rc_2 = EFC_SCSI_CALL_COMPLETE;
		static const char * const labels[] = {
			"none", "initiator", "target", "initiator+target"
		};

		efc_log_info(efc, "[%s] missing (%s)    WWPN %s WWNN %s\n",
			     node->display_name,
			     labels[(node->targ << 1) | (node->init)],
			     node->wwpn, node->wwnn);

		switch (efc_node_get_enable(node)) {
		case EFC_NODE_ENABLE_T_TO_T:
		case EFC_NODE_ENABLE_I_TO_T:
		case EFC_NODE_ENABLE_IT_TO_T:
			rc = efc->tt.scsi_del_node(efc, node,
				EFC_SCSI_TARGET_MISSING);
			break;

		case EFC_NODE_ENABLE_T_TO_I:
		case EFC_NODE_ENABLE_I_TO_I:
		case EFC_NODE_ENABLE_IT_TO_I:
			rc = efc->tt.scsi_del_node(efc, node,
				EFC_SCSI_INITIATOR_MISSING);
			break;

		case EFC_NODE_ENABLE_T_TO_IT:
			rc = efc->tt.scsi_del_node(efc, node,
				EFC_SCSI_INITIATOR_MISSING);
			break;

		case EFC_NODE_ENABLE_I_TO_IT:
			rc = efc->tt.scsi_del_node(efc, node,
						   EFC_SCSI_TARGET_MISSING);
			break;

		case EFC_NODE_ENABLE_IT_TO_IT:
			rc = efc->tt.scsi_del_node(efc, node,
				EFC_SCSI_INITIATOR_MISSING);
			rc_2 = efc->tt.scsi_del_node(efc, node,
				EFC_SCSI_TARGET_MISSING);
			break;

		default:
			rc = EFC_SCSI_CALL_COMPLETE;
			break;
		}

		if (rc == EFC_SCSI_CALL_COMPLETE &&
		    rc_2 == EFC_SCSI_CALL_COMPLETE)
			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);

		break;
	}
	case EFC_EVT_NODE_REFOUND:
		/* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */

		/* reauthenticate with PLOGI/PRLI */
		/* efc_node_transition(node, __efc_d_discovered, NULL); */

		/* reauthenticate with ADISC */
		/* sm: / send ADISC */
		efc_send_adisc(node);
		efc_node_transition(node, __efc_d_wait_adisc_rsp, NULL);
		break;

	case EFC_EVT_PLOGI_RCVD: {
		/* sm: / save sparams, set send_plogi_acc, post implicit
		 * logout
		 * Save plogi parameters
		 */
		efc_node_save_sparms(node, cbdata->payload->dma.virt);
		efc_send_ls_acc_after_attach(node,
					     cbdata->header->dma.virt,
					     EFC_NODE_SEND_LS_ACC_PLOGI);

		/*
		 * Restart node attach with new service parameters, and send
		 * ACC
		 */
		efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
				    NULL);
		break;
	}

	case EFC_EVT_FCP_CMD_RCVD: {
		/* most likely a stale frame (received prior to link down);
		 * if we attempt to send a LOGO it will probably time out and
		 * eat up 20s, so just drop the FCP_CMND
		 */
		node_printf(node, "FCP_CMND received, drop\n");
		break;
	}
	case EFC_EVT_LOGO_RCVD: {
		/* I, T, I+T */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n",
			    efc_sm_event_name(evt), node->attached);
		/* sm: / send LOGO acc */
		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}

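/*
 * Wait for the ADISC response used to revalidate a re-found node: an accept
 * returns the node to __efc_d_device_ready, while a reject or a received
 * LOGO leads to an explicit logout so the login starts over with PLOGI.
 */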
void
__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg)
{
	struct efc_node_cb *cbdata = arg;
	struct efc_node *node = ctx->app;

	efc_node_evt_set(ctx, evt, __func__);

	node_sm_trace();

	switch (evt) {
	case EFC_EVT_SRRS_ELS_REQ_OK:
		if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		efc_node_transition(node, __efc_d_device_ready, NULL);
		break;

	case EFC_EVT_SRRS_ELS_REQ_RJT:
		/* received an LS_RJT, in this case, send shutdown
		 * (explicit logo) event which will unregister the node,
		 * and start over with PLOGI
		 */
		if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
					   __efc_d_common, __func__))
			return;

		WARN_ON(!node->els_req_cnt);
		node->els_req_cnt--;
		/* sm: / post explicit logout */
		efc_node_post_event(node,
				    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
				    NULL);
		break;

	case EFC_EVT_LOGO_RCVD: {
		/* In this case, we have the equivalent of an LS_RJT for
		 * the ADISC, so we need to abort the ADISC, and re-login
		 * with PLOGI
		 */
		/* sm: / request abort, send LOGO acc */
		struct fc_frame_header *hdr = cbdata->header->dma.virt;

		node_printf(node, "%s received attached=%d\n",
			    efc_sm_event_name(evt), node->attached);

		efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
		efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
		break;
	}
	default:
		__efc_d_common(__func__, ctx, evt, arg);
	}
}