1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7 * EMULEX and SLI are trademarks of Emulex.                        *
8 * www.broadcom.com                                                *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10 *                                                                 *
11 * This program is free software; you can redistribute it and/or   *
12 * modify it under the terms of version 2 of the GNU General       *
13 * Public License as published by the Free Software Foundation.    *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20 * more details, a copy of which can be found in the file COPYING  *
21 * included with this package.                                     *
22 *******************************************************************/
23
24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <linux/slab.h>
27#include <linux/pci.h>
28#include <linux/kthread.h>
29#include <linux/interrupt.h>
30#include <linux/lockdep.h>
31#include <linux/utsname.h>
32
33#include <scsi/scsi.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_transport_fc.h>
37#include <scsi/fc/fc_fs.h>
38
39#include "lpfc_hw4.h"
40#include "lpfc_hw.h"
41#include "lpfc_nl.h"
42#include "lpfc_disc.h"
43#include "lpfc_sli.h"
44#include "lpfc_sli4.h"
45#include "lpfc.h"
46#include "lpfc_scsi.h"
47#include "lpfc_nvme.h"
48#include "lpfc_logmsg.h"
49#include "lpfc_crtn.h"
50#include "lpfc_vport.h"
51#include "lpfc_debugfs.h"
52
53/* AlpaArray for assignment of scsid for scan-down and bind_method */
54static uint8_t lpfcAlpaArray[] = {
55	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
56	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
57	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
58	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
59	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
60	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
61	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
62	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
63	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
64	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
65	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
66	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
67	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
68};
69
70static void lpfc_disc_timeout_handler(struct lpfc_vport *);
71static void lpfc_disc_flush_list(struct lpfc_vport *vport);
72static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
73static int lpfc_fcf_inuse(struct lpfc_hba *);
74static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
75
76void
77lpfc_terminate_rport_io(struct fc_rport *rport)
78{
79	struct lpfc_rport_data *rdata;
80	struct lpfc_nodelist * ndlp;
81	struct lpfc_hba *phba;
82
83	rdata = rport->dd_data;
84	ndlp = rdata->pnode;
85
86	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
87		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
88			printk(KERN_ERR "Cannot find remote node"
89			" to terminate I/O Data x%x\n",
90			rport->port_id);
91		return;
92	}
93
94	phba  = ndlp->phba;
95
96	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
97		"rport terminate: sid:x%x did:x%x flg:x%x",
98		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
99
100	if (ndlp->nlp_sid != NLP_NO_SID) {
101		lpfc_sli_abort_iocb(ndlp->vport,
102			&phba->sli.sli3_ring[LPFC_FCP_RING],
103			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
104	}
105}
106
107/*
108 * This function will be called when dev_loss_tmo fire.
109 */
110void
111lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
112{
113	struct lpfc_rport_data *rdata;
114	struct lpfc_nodelist * ndlp;
115	struct lpfc_vport *vport;
116	struct Scsi_Host *shost;
117	struct lpfc_hba   *phba;
118	struct lpfc_work_evt *evtp;
119	int  put_node;
120	int  put_rport;
121	unsigned long iflags;
122
123	rdata = rport->dd_data;
124	ndlp = rdata->pnode;
125	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
126		return;
127
128	vport = ndlp->vport;
129	phba  = vport->phba;
130
131	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
132		"rport devlosscb: sid:x%x did:x%x flg:x%x",
133		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
134
135	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
136			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
137			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
138
139	/* Don't defer this if we are in the process of deleting the vport
140	 * or unloading the driver. The unload will cleanup the node
141	 * appropriately we just need to cleanup the ndlp rport info here.
142	 */
143	if (vport->load_flag & FC_UNLOADING) {
144		put_node = rdata->pnode != NULL;
145		put_rport = ndlp->rport != NULL;
146		rdata->pnode = NULL;
147		ndlp->rport = NULL;
148		if (put_node)
149			lpfc_nlp_put(ndlp);
150		if (put_rport)
151			put_device(&rport->dev);
152		return;
153	}
154
155	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
156		return;
157
158	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
159		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
160				 "6789 rport name %llx != node port name %llx",
161				 rport->port_name,
162				 wwn_to_u64(ndlp->nlp_portname.u.wwn));
163
164	evtp = &ndlp->dev_loss_evt;
165
166	if (!list_empty(&evtp->evt_listp)) {
167		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
168				 "6790 rport name %llx dev_loss_evt pending",
169				 rport->port_name);
170		return;
171	}
172
173	shost = lpfc_shost_from_vport(vport);
174	spin_lock_irqsave(shost->host_lock, iflags);
175	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
176	spin_unlock_irqrestore(shost->host_lock, iflags);
177
178	/* We need to hold the node by incrementing the reference
179	 * count until this queued work is done
180	 */
181	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
182
183	spin_lock_irqsave(&phba->hbalock, iflags);
184	if (evtp->evt_arg1) {
185		evtp->evt = LPFC_EVT_DEV_LOSS;
186		list_add_tail(&evtp->evt_listp, &phba->work_list);
187		lpfc_worker_wake_up(phba);
188	}
189	spin_unlock_irqrestore(&phba->hbalock, iflags);
190
191	return;
192}
193
194/**
195 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
196 * @ndlp: Pointer to remote node object.
197 *
198 * This function is called from the worker thread when devloss timeout timer
199 * expires. For SLI4 host, this routine shall return 1 when at lease one
200 * remote node, including this @ndlp, is still in use of FCF; otherwise, this
201 * routine shall return 0 when there is no remote node is still in use of FCF
202 * when devloss timeout happened to this @ndlp.
203 **/
204static int
205lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
206{
207	struct lpfc_rport_data *rdata;
208	struct fc_rport   *rport;
209	struct lpfc_vport *vport;
210	struct lpfc_hba   *phba;
211	struct Scsi_Host  *shost;
212	uint8_t *name;
213	int  put_node;
214	int warn_on = 0;
215	int fcf_inuse = 0;
216	unsigned long iflags;
217
218	rport = ndlp->rport;
219	vport = ndlp->vport;
220	shost = lpfc_shost_from_vport(vport);
221
222	spin_lock_irqsave(shost->host_lock, iflags);
223	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
224	spin_unlock_irqrestore(shost->host_lock, iflags);
225
226	if (!rport)
227		return fcf_inuse;
228
229	name = (uint8_t *) &ndlp->nlp_portname;
230	phba  = vport->phba;
231
232	if (phba->sli_rev == LPFC_SLI_REV4)
233		fcf_inuse = lpfc_fcf_inuse(phba);
234
235	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
236		"rport devlosstmo:did:x%x type:x%x id:x%x",
237		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
238
239	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
240			 "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
241			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
242
243	/*
244	 * lpfc_nlp_remove if reached with dangling rport drops the
245	 * reference. To make sure that does not happen clear rport
246	 * pointer in ndlp before lpfc_nlp_put.
247	 */
248	rdata = rport->dd_data;
249
250	/* Don't defer this if we are in the process of deleting the vport
251	 * or unloading the driver. The unload will cleanup the node
252	 * appropriately we just need to cleanup the ndlp rport info here.
253	 */
254	if (vport->load_flag & FC_UNLOADING) {
255		if (ndlp->nlp_sid != NLP_NO_SID) {
256			/* flush the target */
257			lpfc_sli_abort_iocb(vport,
258					    &phba->sli.sli3_ring[LPFC_FCP_RING],
259					    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
260		}
261		put_node = rdata->pnode != NULL;
262		rdata->pnode = NULL;
263		ndlp->rport = NULL;
264		if (put_node)
265			lpfc_nlp_put(ndlp);
266		put_device(&rport->dev);
267
268		return fcf_inuse;
269	}
270
271	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
272		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
273				 "0284 Devloss timeout Ignored on "
274				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
275				 "NPort x%x\n",
276				 *name, *(name+1), *(name+2), *(name+3),
277				 *(name+4), *(name+5), *(name+6), *(name+7),
278				 ndlp->nlp_DID);
279		return fcf_inuse;
280	}
281
282	put_node = rdata->pnode != NULL;
283	rdata->pnode = NULL;
284	ndlp->rport = NULL;
285	if (put_node)
286		lpfc_nlp_put(ndlp);
287	put_device(&rport->dev);
288
289	if (ndlp->nlp_type & NLP_FABRIC)
290		return fcf_inuse;
291
292	if (ndlp->nlp_sid != NLP_NO_SID) {
293		warn_on = 1;
294		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
295				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
296	}
297
298	if (warn_on) {
299		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
300				 "0203 Devloss timeout on "
301				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
302				 "NPort x%06x Data: x%x x%x x%x\n",
303				 *name, *(name+1), *(name+2), *(name+3),
304				 *(name+4), *(name+5), *(name+6), *(name+7),
305				 ndlp->nlp_DID, ndlp->nlp_flag,
306				 ndlp->nlp_state, ndlp->nlp_rpi);
307	} else {
308		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
309				 "0204 Devloss timeout on "
310				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
311				 "NPort x%06x Data: x%x x%x x%x\n",
312				 *name, *(name+1), *(name+2), *(name+3),
313				 *(name+4), *(name+5), *(name+6), *(name+7),
314				 ndlp->nlp_DID, ndlp->nlp_flag,
315				 ndlp->nlp_state, ndlp->nlp_rpi);
316	}
317
318	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
319	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
320	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
321	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
322	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
323		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
324
325	return fcf_inuse;
326}
327
328/**
329 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
330 * @phba: Pointer to hba context object.
331 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
332 * @nlp_did: remote node identifer with devloss timeout.
333 *
334 * This function is called from the worker thread after invoking devloss
335 * timeout handler and releasing the reference count for the ndlp with
336 * which the devloss timeout was handled for SLI4 host. For the devloss
337 * timeout of the last remote node which had been in use of FCF, when this
338 * routine is invoked, it shall be guaranteed that none of the remote are
339 * in-use of FCF. When devloss timeout to the last remote using the FCF,
340 * if the FIP engine is neither in FCF table scan process nor roundrobin
341 * failover process, the in-use FCF shall be unregistered. If the FIP
342 * engine is in FCF discovery process, the devloss timeout state shall
343 * be set for either the FCF table scan process or roundrobin failover
344 * process to unregister the in-use FCF.
345 **/
346static void
347lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
348				    uint32_t nlp_did)
349{
350	/* If devloss timeout happened to a remote node when FCF had no
351	 * longer been in-use, do nothing.
352	 */
353	if (!fcf_inuse)
354		return;
355
356	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
357		spin_lock_irq(&phba->hbalock);
358		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
359			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
360				spin_unlock_irq(&phba->hbalock);
361				return;
362			}
363			phba->hba_flag |= HBA_DEVLOSS_TMO;
364			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
365					"2847 Last remote node (x%x) using "
366					"FCF devloss tmo\n", nlp_did);
367		}
368		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
369			spin_unlock_irq(&phba->hbalock);
370			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
371					"2868 Devloss tmo to FCF rediscovery "
372					"in progress\n");
373			return;
374		}
375		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
376			spin_unlock_irq(&phba->hbalock);
377			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
378					"2869 Devloss tmo to idle FIP engine, "
379					"unreg in-use FCF and rescan.\n");
380			/* Unregister in-use FCF and rescan */
381			lpfc_unregister_fcf_rescan(phba);
382			return;
383		}
384		spin_unlock_irq(&phba->hbalock);
385		if (phba->hba_flag & FCF_TS_INPROG)
386			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
387					"2870 FCF table scan in progress\n");
388		if (phba->hba_flag & FCF_RR_INPROG)
389			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
390					"2871 FLOGI roundrobin FCF failover "
391					"in progress\n");
392	}
393	lpfc_unregister_unused_fcf(phba);
394}
395
396/**
397 * lpfc_alloc_fast_evt - Allocates data structure for posting event
398 * @phba: Pointer to hba context object.
399 *
400 * This function is called from the functions which need to post
401 * events from interrupt context. This function allocates data
402 * structure required for posting event. It also keeps track of
403 * number of events pending and prevent event storm when there are
404 * too many events.
405 **/
406struct lpfc_fast_path_event *
407lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
408	struct lpfc_fast_path_event *ret;
409
410	/* If there are lot of fast event do not exhaust memory due to this */
411	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
412		return NULL;
413
414	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
415			GFP_ATOMIC);
416	if (ret) {
417		atomic_inc(&phba->fast_event_count);
418		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
419		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
420	}
421	return ret;
422}
423
424/**
425 * lpfc_free_fast_evt - Frees event data structure
426 * @phba: Pointer to hba context object.
427 * @evt:  Event object which need to be freed.
428 *
429 * This function frees the data structure required for posting
430 * events.
431 **/
432void
433lpfc_free_fast_evt(struct lpfc_hba *phba,
434		struct lpfc_fast_path_event *evt) {
435
436	atomic_dec(&phba->fast_event_count);
437	kfree(evt);
438}
439
440/**
441 * lpfc_send_fastpath_evt - Posts events generated from fast path
442 * @phba: Pointer to hba context object.
443 * @evtp: Event data structure.
444 *
445 * This function is called from worker thread, when the interrupt
446 * context need to post an event. This function posts the event
447 * to fc transport netlink interface.
448 **/
449static void
450lpfc_send_fastpath_evt(struct lpfc_hba *phba,
451		struct lpfc_work_evt *evtp)
452{
453	unsigned long evt_category, evt_sub_category;
454	struct lpfc_fast_path_event *fast_evt_data;
455	char *evt_data;
456	uint32_t evt_data_size;
457	struct Scsi_Host *shost;
458
459	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
460		work_evt);
461
462	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
463	evt_sub_category = (unsigned long) fast_evt_data->un.
464			fabric_evt.subcategory;
465	shost = lpfc_shost_from_vport(fast_evt_data->vport);
466	if (evt_category == FC_REG_FABRIC_EVENT) {
467		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
468			evt_data = (char *) &fast_evt_data->un.read_check_error;
469			evt_data_size = sizeof(fast_evt_data->un.
470				read_check_error);
471		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
472			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
473			evt_data = (char *) &fast_evt_data->un.fabric_evt;
474			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
475		} else {
476			lpfc_free_fast_evt(phba, fast_evt_data);
477			return;
478		}
479	} else if (evt_category == FC_REG_SCSI_EVENT) {
480		switch (evt_sub_category) {
481		case LPFC_EVENT_QFULL:
482		case LPFC_EVENT_DEVBSY:
483			evt_data = (char *) &fast_evt_data->un.scsi_evt;
484			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
485			break;
486		case LPFC_EVENT_CHECK_COND:
487			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
488			evt_data_size =  sizeof(fast_evt_data->un.
489				check_cond_evt);
490			break;
491		case LPFC_EVENT_VARQUEDEPTH:
492			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
493			evt_data_size = sizeof(fast_evt_data->un.
494				queue_depth_evt);
495			break;
496		default:
497			lpfc_free_fast_evt(phba, fast_evt_data);
498			return;
499		}
500	} else {
501		lpfc_free_fast_evt(phba, fast_evt_data);
502		return;
503	}
504
505	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
506		fc_host_post_vendor_event(shost,
507			fc_get_event_number(),
508			evt_data_size,
509			evt_data,
510			LPFC_NL_VENDOR_ID);
511
512	lpfc_free_fast_evt(phba, fast_evt_data);
513	return;
514}
515
516static void
517lpfc_work_list_done(struct lpfc_hba *phba)
518{
519	struct lpfc_work_evt  *evtp = NULL;
520	struct lpfc_nodelist  *ndlp;
521	int free_evt;
522	int fcf_inuse;
523	uint32_t nlp_did;
524
525	spin_lock_irq(&phba->hbalock);
526	while (!list_empty(&phba->work_list)) {
527		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
528				 evt_listp);
529		spin_unlock_irq(&phba->hbalock);
530		free_evt = 1;
531		switch (evtp->evt) {
532		case LPFC_EVT_ELS_RETRY:
533			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
534			lpfc_els_retry_delay_handler(ndlp);
535			free_evt = 0; /* evt is part of ndlp */
536			/* decrement the node reference count held
537			 * for this queued work
538			 */
539			lpfc_nlp_put(ndlp);
540			break;
541		case LPFC_EVT_DEV_LOSS:
542			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
543			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
544			free_evt = 0;
545			/* decrement the node reference count held for
546			 * this queued work
547			 */
548			nlp_did = ndlp->nlp_DID;
549			lpfc_nlp_put(ndlp);
550			if (phba->sli_rev == LPFC_SLI_REV4)
551				lpfc_sli4_post_dev_loss_tmo_handler(phba,
552								    fcf_inuse,
553								    nlp_did);
554			break;
555		case LPFC_EVT_RECOVER_PORT:
556			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
557			lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
558			free_evt = 0;
559			/* decrement the node reference count held for
560			 * this queued work
561			 */
562			lpfc_nlp_put(ndlp);
563			break;
564		case LPFC_EVT_ONLINE:
565			if (phba->link_state < LPFC_LINK_DOWN)
566				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
567			else
568				*(int *) (evtp->evt_arg1) = 0;
569			complete((struct completion *)(evtp->evt_arg2));
570			break;
571		case LPFC_EVT_OFFLINE_PREP:
572			if (phba->link_state >= LPFC_LINK_DOWN)
573				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
574			*(int *)(evtp->evt_arg1) = 0;
575			complete((struct completion *)(evtp->evt_arg2));
576			break;
577		case LPFC_EVT_OFFLINE:
578			lpfc_offline(phba);
579			lpfc_sli_brdrestart(phba);
580			*(int *)(evtp->evt_arg1) =
581				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
582			lpfc_unblock_mgmt_io(phba);
583			complete((struct completion *)(evtp->evt_arg2));
584			break;
585		case LPFC_EVT_WARM_START:
586			lpfc_offline(phba);
587			lpfc_reset_barrier(phba);
588			lpfc_sli_brdreset(phba);
589			lpfc_hba_down_post(phba);
590			*(int *)(evtp->evt_arg1) =
591				lpfc_sli_brdready(phba, HS_MBRDY);
592			lpfc_unblock_mgmt_io(phba);
593			complete((struct completion *)(evtp->evt_arg2));
594			break;
595		case LPFC_EVT_KILL:
596			lpfc_offline(phba);
597			*(int *)(evtp->evt_arg1)
598				= (phba->pport->stopped)
599				        ? 0 : lpfc_sli_brdkill(phba);
600			lpfc_unblock_mgmt_io(phba);
601			complete((struct completion *)(evtp->evt_arg2));
602			break;
603		case LPFC_EVT_FASTPATH_MGMT_EVT:
604			lpfc_send_fastpath_evt(phba, evtp);
605			free_evt = 0;
606			break;
607		case LPFC_EVT_RESET_HBA:
608			if (!(phba->pport->load_flag & FC_UNLOADING))
609				lpfc_reset_hba(phba);
610			break;
611		}
612		if (free_evt)
613			kfree(evtp);
614		spin_lock_irq(&phba->hbalock);
615	}
616	spin_unlock_irq(&phba->hbalock);
617
618}
619
620static void
621lpfc_work_done(struct lpfc_hba *phba)
622{
623	struct lpfc_sli_ring *pring;
624	uint32_t ha_copy, status, control, work_port_events;
625	struct lpfc_vport **vports;
626	struct lpfc_vport *vport;
627	int i;
628
629	spin_lock_irq(&phba->hbalock);
630	ha_copy = phba->work_ha;
631	phba->work_ha = 0;
632	spin_unlock_irq(&phba->hbalock);
633
634	/* First, try to post the next mailbox command to SLI4 device */
635	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
636		lpfc_sli4_post_async_mbox(phba);
637
638	if (ha_copy & HA_ERATT) {
639		/* Handle the error attention event */
640		lpfc_handle_eratt(phba);
641
642		if (phba->fw_dump_cmpl) {
643			complete(phba->fw_dump_cmpl);
644			phba->fw_dump_cmpl = NULL;
645		}
646	}
647
648	if (ha_copy & HA_MBATT)
649		lpfc_sli_handle_mb_event(phba);
650
651	if (ha_copy & HA_LATT)
652		lpfc_handle_latt(phba);
653
654	/* Process SLI4 events */
655	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
656		if (phba->hba_flag & HBA_RRQ_ACTIVE)
657			lpfc_handle_rrq_active(phba);
658		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
659			lpfc_sli4_els_xri_abort_event_proc(phba);
660		if (phba->hba_flag & ASYNC_EVENT)
661			lpfc_sli4_async_event_proc(phba);
662		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
663			spin_lock_irq(&phba->hbalock);
664			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
665			spin_unlock_irq(&phba->hbalock);
666			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
667		}
668		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
669			lpfc_sli4_fcf_redisc_event_proc(phba);
670	}
671
672	vports = lpfc_create_vport_work_array(phba);
673	if (vports != NULL)
674		for (i = 0; i <= phba->max_vports; i++) {
675			/*
676			 * We could have no vports in array if unloading, so if
677			 * this happens then just use the pport
678			 */
679			if (vports[i] == NULL && i == 0)
680				vport = phba->pport;
681			else
682				vport = vports[i];
683			if (vport == NULL)
684				break;
685			spin_lock_irq(&vport->work_port_lock);
686			work_port_events = vport->work_port_events;
687			vport->work_port_events &= ~work_port_events;
688			spin_unlock_irq(&vport->work_port_lock);
689			if (work_port_events & WORKER_DISC_TMO)
690				lpfc_disc_timeout_handler(vport);
691			if (work_port_events & WORKER_ELS_TMO)
692				lpfc_els_timeout_handler(vport);
693			if (work_port_events & WORKER_HB_TMO)
694				lpfc_hb_timeout_handler(phba);
695			if (work_port_events & WORKER_MBOX_TMO)
696				lpfc_mbox_timeout_handler(phba);
697			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
698				lpfc_unblock_fabric_iocbs(phba);
699			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
700				lpfc_ramp_down_queue_handler(phba);
701			if (work_port_events & WORKER_DELAYED_DISC_TMO)
702				lpfc_delayed_disc_timeout_handler(vport);
703		}
704	lpfc_destroy_vport_work_array(phba, vports);
705
706	pring = lpfc_phba_elsring(phba);
707	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
708	status >>= (4*LPFC_ELS_RING);
709	if (pring && (status & HA_RXMASK ||
710		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
711		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
712		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
713			pring->flag |= LPFC_DEFERRED_RING_EVENT;
714			/* Preserve legacy behavior. */
715			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
716				set_bit(LPFC_DATA_READY, &phba->data_flags);
717		} else {
718			/* Driver could have abort request completed in queue
719			 * when link goes down.  Allow for this transition.
720			 */
721			if (phba->link_state >= LPFC_LINK_DOWN ||
722			    phba->link_flag & LS_MDS_LOOPBACK) {
723				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
724				lpfc_sli_handle_slow_ring_event(phba, pring,
725								(status &
726								HA_RXMASK));
727			}
728		}
729		if (phba->sli_rev == LPFC_SLI_REV4)
730			lpfc_drain_txq(phba);
731		/*
732		 * Turn on Ring interrupts
733		 */
734		if (phba->sli_rev <= LPFC_SLI_REV3) {
735			spin_lock_irq(&phba->hbalock);
736			control = readl(phba->HCregaddr);
737			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
738				lpfc_debugfs_slow_ring_trc(phba,
739					"WRK Enable ring: cntl:x%x hacopy:x%x",
740					control, ha_copy, 0);
741
742				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
743				writel(control, phba->HCregaddr);
744				readl(phba->HCregaddr); /* flush */
745			} else {
746				lpfc_debugfs_slow_ring_trc(phba,
747					"WRK Ring ok:     cntl:x%x hacopy:x%x",
748					control, ha_copy, 0);
749			}
750			spin_unlock_irq(&phba->hbalock);
751		}
752	}
753	lpfc_work_list_done(phba);
754}
755
756int
757lpfc_do_work(void *p)
758{
759	struct lpfc_hba *phba = p;
760	int rc;
761
762	set_user_nice(current, MIN_NICE);
763	current->flags |= PF_NOFREEZE;
764	phba->data_flags = 0;
765
766	while (!kthread_should_stop()) {
767		/* wait and check worker queue activities */
768		rc = wait_event_interruptible(phba->work_waitq,
769					(test_and_clear_bit(LPFC_DATA_READY,
770							    &phba->data_flags)
771					 || kthread_should_stop()));
772		/* Signal wakeup shall terminate the worker thread */
773		if (rc) {
774			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
775					"0433 Wakeup on signal: rc=x%x\n", rc);
776			break;
777		}
778
779		/* Attend pending lpfc data processing */
780		lpfc_work_done(phba);
781	}
782	phba->worker_thread = NULL;
783	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
784			"0432 Worker thread stopped.\n");
785	return 0;
786}
787
788/*
789 * This is only called to handle FC worker events. Since this a rare
790 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
791 * embedding it in the IOCB.
792 */
793int
794lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
795		      uint32_t evt)
796{
797	struct lpfc_work_evt  *evtp;
798	unsigned long flags;
799
800	/*
801	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
802	 * be queued to worker thread for processing
803	 */
804	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
805	if (!evtp)
806		return 0;
807
808	evtp->evt_arg1  = arg1;
809	evtp->evt_arg2  = arg2;
810	evtp->evt       = evt;
811
812	spin_lock_irqsave(&phba->hbalock, flags);
813	list_add_tail(&evtp->evt_listp, &phba->work_list);
814	spin_unlock_irqrestore(&phba->hbalock, flags);
815
816	lpfc_worker_wake_up(phba);
817
818	return 1;
819}
820
821void
822lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
823{
824	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
825	struct lpfc_hba  *phba = vport->phba;
826	struct lpfc_nodelist *ndlp, *next_ndlp;
827
828	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
829		if (!NLP_CHK_NODE_ACT(ndlp))
830			continue;
831		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
832			continue;
833		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
834			((vport->port_type == LPFC_NPIV_PORT) &&
835			(ndlp->nlp_DID == NameServer_DID)))
836			lpfc_unreg_rpi(vport, ndlp);
837
838		/* Leave Fabric nodes alone on link down */
839		if ((phba->sli_rev < LPFC_SLI_REV4) &&
840		    (!remove && ndlp->nlp_type & NLP_FABRIC))
841			continue;
842
843		/* Notify transport of connectivity loss to trigger cleanup. */
844		if (phba->nvmet_support &&
845		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
846			lpfc_nvmet_invalidate_host(phba, ndlp);
847
848		lpfc_disc_state_machine(vport, ndlp, NULL,
849					remove
850					? NLP_EVT_DEVICE_RM
851					: NLP_EVT_DEVICE_RECOVERY);
852	}
853	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
854		if (phba->sli_rev == LPFC_SLI_REV4)
855			lpfc_sli4_unreg_all_rpis(vport);
856		lpfc_mbx_unreg_vpi(vport);
857		spin_lock_irq(shost->host_lock);
858		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
859		spin_unlock_irq(shost->host_lock);
860	}
861}
862
863void
864lpfc_port_link_failure(struct lpfc_vport *vport)
865{
866	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
867
868	/* Cleanup any outstanding received buffers */
869	lpfc_cleanup_rcv_buffers(vport);
870
871	/* Cleanup any outstanding RSCN activity */
872	lpfc_els_flush_rscn(vport);
873
874	/* Cleanup any outstanding ELS commands */
875	lpfc_els_flush_cmd(vport);
876
877	lpfc_cleanup_rpis(vport, 0);
878
879	/* Turn off discovery timer if its running */
880	lpfc_can_disctmo(vport);
881}
882
883void
884lpfc_linkdown_port(struct lpfc_vport *vport)
885{
886	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
887
888	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
889		fc_host_post_event(shost, fc_get_event_number(),
890				   FCH_EVT_LINKDOWN, 0);
891
892	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
893		"Link Down:       state:x%x rtry:x%x flg:x%x",
894		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
895
896	lpfc_port_link_failure(vport);
897
898	/* Stop delayed Nport discovery */
899	spin_lock_irq(shost->host_lock);
900	vport->fc_flag &= ~FC_DISC_DELAYED;
901	spin_unlock_irq(shost->host_lock);
902	del_timer_sync(&vport->delayed_disc_tmo);
903}
904
905int
906lpfc_linkdown(struct lpfc_hba *phba)
907{
908	struct lpfc_vport *vport = phba->pport;
909	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
910	struct lpfc_vport **vports;
911	LPFC_MBOXQ_t          *mb;
912	int i;
913
914	if (phba->link_state == LPFC_LINK_DOWN)
915		return 0;
916
917	/* Block all SCSI stack I/Os */
918	lpfc_scsi_dev_block(phba);
919
920	phba->defer_flogi_acc_flag = false;
921
922	spin_lock_irq(&phba->hbalock);
923	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
924	spin_unlock_irq(&phba->hbalock);
925	if (phba->link_state > LPFC_LINK_DOWN) {
926		phba->link_state = LPFC_LINK_DOWN;
927		if (phba->sli4_hba.conf_trunk) {
928			phba->trunk_link.link0.state = 0;
929			phba->trunk_link.link1.state = 0;
930			phba->trunk_link.link2.state = 0;
931			phba->trunk_link.link3.state = 0;
932			phba->sli4_hba.link_state.logical_speed =
933						LPFC_LINK_SPEED_UNKNOWN;
934		}
935		spin_lock_irq(shost->host_lock);
936		phba->pport->fc_flag &= ~FC_LBIT;
937		spin_unlock_irq(shost->host_lock);
938	}
939	vports = lpfc_create_vport_work_array(phba);
940	if (vports != NULL) {
941		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
942			/* Issue a LINK DOWN event to all nodes */
943			lpfc_linkdown_port(vports[i]);
944
945			vports[i]->fc_myDID = 0;
946
947			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
948			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
949				if (phba->nvmet_support)
950					lpfc_nvmet_update_targetport(phba);
951				else
952					lpfc_nvme_update_localport(vports[i]);
953			}
954		}
955	}
956	lpfc_destroy_vport_work_array(phba, vports);
957
958	/* Clean up any SLI3 firmware default rpi's */
959	if (phba->sli_rev > LPFC_SLI_REV3)
960		goto skip_unreg_did;
961
962	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
963	if (mb) {
964		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
965		mb->vport = vport;
966		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
967		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
968		    == MBX_NOT_FINISHED) {
969			mempool_free(mb, phba->mbox_mem_pool);
970		}
971	}
972
973 skip_unreg_did:
974	/* Setup myDID for link up if we are in pt2pt mode */
975	if (phba->pport->fc_flag & FC_PT2PT) {
976		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
977		if (mb) {
978			lpfc_config_link(phba, mb);
979			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
980			mb->vport = vport;
981			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
982			    == MBX_NOT_FINISHED) {
983				mempool_free(mb, phba->mbox_mem_pool);
984			}
985		}
986		spin_lock_irq(shost->host_lock);
987		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
988		phba->pport->rcv_flogi_cnt = 0;
989		spin_unlock_irq(shost->host_lock);
990	}
991	return 0;
992}
993
994static void
995lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
996{
997	struct lpfc_nodelist *ndlp;
998
999	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1000		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
1001		if (!NLP_CHK_NODE_ACT(ndlp))
1002			continue;
1003		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1004			continue;
1005		if (ndlp->nlp_type & NLP_FABRIC) {
1006			/* On Linkup its safe to clean up the ndlp
1007			 * from Fabric connections.
1008			 */
1009			if (ndlp->nlp_DID != Fabric_DID)
1010				lpfc_unreg_rpi(vport, ndlp);
1011			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1012		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
1013			/* Fail outstanding IO now since device is
1014			 * marked for PLOGI.
1015			 */
1016			lpfc_unreg_rpi(vport, ndlp);
1017		}
1018	}
1019}
1020
1021static void
1022lpfc_linkup_port(struct lpfc_vport *vport)
1023{
1024	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1025	struct lpfc_hba  *phba = vport->phba;
1026
1027	if ((vport->load_flag & FC_UNLOADING) != 0)
1028		return;
1029
1030	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1031		"Link Up:         top:x%x speed:x%x flg:x%x",
1032		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
1033
1034	/* If NPIV is not enabled, only bring the physical port up */
1035	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1036		(vport != phba->pport))
1037		return;
1038
1039	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
1040		fc_host_post_event(shost, fc_get_event_number(),
1041				   FCH_EVT_LINKUP, 0);
1042
1043	spin_lock_irq(shost->host_lock);
1044	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
1045			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
1046	vport->fc_flag |= FC_NDISC_ACTIVE;
1047	vport->fc_ns_retry = 0;
1048	spin_unlock_irq(shost->host_lock);
1049
1050	if (vport->fc_flag & FC_LBIT)
1051		lpfc_linkup_cleanup_nodes(vport);
1052
1053}
1054
1055static int
1056lpfc_linkup(struct lpfc_hba *phba)
1057{
1058	struct lpfc_vport **vports;
1059	int i;
1060	struct Scsi_Host  *shost = lpfc_shost_from_vport(phba->pport);
1061
1062	phba->link_state = LPFC_LINK_UP;
1063
1064	/* Unblock fabric iocbs if they are blocked */
1065	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
1066	del_timer_sync(&phba->fabric_block_timer);
1067
1068	vports = lpfc_create_vport_work_array(phba);
1069	if (vports != NULL)
1070		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1071			lpfc_linkup_port(vports[i]);
1072	lpfc_destroy_vport_work_array(phba, vports);
1073
1074	/* Clear the pport flogi counter in case the link down was
1075	 * absorbed without an ACQE. No lock here - in worker thread
1076	 * and discovery is synchronized.
1077	 */
1078	spin_lock_irq(shost->host_lock);
1079	phba->pport->rcv_flogi_cnt = 0;
1080	spin_unlock_irq(shost->host_lock);
1081
1082	/* reinitialize initial FLOGI flag */
1083	phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
1084	phba->defer_flogi_acc_flag = false;
1085
1086	return 0;
1087}
1088
1089/*
1090 * This routine handles processing a CLEAR_LA mailbox
1091 * command upon completion. It is setup in the LPFC_MBOXQ
1092 * as the completion routine when the command is
1093 * handed off to the SLI layer. SLI3 only.
1094 */
1095static void
1096lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1097{
1098	struct lpfc_vport *vport = pmb->vport;
1099	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1100	struct lpfc_sli   *psli = &phba->sli;
1101	MAILBOX_t *mb = &pmb->u.mb;
1102	uint32_t control;
1103
1104	/* Since we don't do discovery right now, turn these off here */
1105	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1106	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1107
1108	/* Check for error */
1109	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
1110		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
1111		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1112				 "0320 CLEAR_LA mbxStatus error x%x hba "
1113				 "state x%x\n",
1114				 mb->mbxStatus, vport->port_state);
1115		phba->link_state = LPFC_HBA_ERROR;
1116		goto out;
1117	}
1118
1119	if (vport->port_type == LPFC_PHYSICAL_PORT)
1120		phba->link_state = LPFC_HBA_READY;
1121
1122	spin_lock_irq(&phba->hbalock);
1123	psli->sli_flag |= LPFC_PROCESS_LA;
1124	control = readl(phba->HCregaddr);
1125	control |= HC_LAINT_ENA;
1126	writel(control, phba->HCregaddr);
1127	readl(phba->HCregaddr); /* flush */
1128	spin_unlock_irq(&phba->hbalock);
1129	mempool_free(pmb, phba->mbox_mem_pool);
1130	return;
1131
1132out:
1133	/* Device Discovery completes */
1134	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1135			 "0225 Device Discovery completes\n");
1136	mempool_free(pmb, phba->mbox_mem_pool);
1137
1138	spin_lock_irq(shost->host_lock);
1139	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
1140	spin_unlock_irq(shost->host_lock);
1141
1142	lpfc_can_disctmo(vport);
1143
1144	/* turn on Link Attention interrupts */
1145
1146	spin_lock_irq(&phba->hbalock);
1147	psli->sli_flag |= LPFC_PROCESS_LA;
1148	control = readl(phba->HCregaddr);
1149	control |= HC_LAINT_ENA;
1150	writel(control, phba->HCregaddr);
1151	readl(phba->HCregaddr); /* flush */
1152	spin_unlock_irq(&phba->hbalock);
1153
1154	return;
1155}
1156
1157void
1158lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1159{
1160	struct lpfc_vport *vport = pmb->vport;
1161	LPFC_MBOXQ_t *sparam_mb;
1162	struct lpfc_dmabuf *sparam_mp;
1163	u16 status = pmb->u.mb.mbxStatus;
1164	int rc;
1165
1166	mempool_free(pmb, phba->mbox_mem_pool);
1167
1168	if (status)
1169		goto out;
1170
1171	/* don't perform discovery for SLI4 loopback diagnostic test */
1172	if ((phba->sli_rev == LPFC_SLI_REV4) &&
1173	    !(phba->hba_flag & HBA_FCOE_MODE) &&
1174	    (phba->link_flag & LS_LOOPBACK_MODE))
1175		return;
1176
1177	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
1178	    vport->fc_flag & FC_PUBLIC_LOOP &&
1179	    !(vport->fc_flag & FC_LBIT)) {
1180			/* Need to wait for FAN - use discovery timer
1181			 * for timeout.  port_state is identically
1182			 * LPFC_LOCAL_CFG_LINK while waiting for FAN
1183			 */
1184			lpfc_set_disctmo(vport);
1185			return;
1186	}
1187
1188	/* Start discovery by sending a FLOGI. port_state is identically
1189	 * LPFC_FLOGI while waiting for FLOGI cmpl.
1190	 */
1191	if (vport->port_state != LPFC_FLOGI) {
1192		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
1193		 * bb-credit recovery is in place.
1194		 */
1195		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
1196		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
1197			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
1198						  GFP_KERNEL);
1199			if (!sparam_mb)
1200				goto sparam_out;
1201
1202			rc = lpfc_read_sparam(phba, sparam_mb, 0);
1203			if (rc) {
1204				mempool_free(sparam_mb, phba->mbox_mem_pool);
1205				goto sparam_out;
1206			}
1207			sparam_mb->vport = vport;
1208			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1209			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
1210			if (rc == MBX_NOT_FINISHED) {
1211				sparam_mp = (struct lpfc_dmabuf *)
1212						sparam_mb->ctx_buf;
1213				lpfc_mbuf_free(phba, sparam_mp->virt,
1214					       sparam_mp->phys);
1215				kfree(sparam_mp);
1216				sparam_mb->ctx_buf = NULL;
1217				mempool_free(sparam_mb, phba->mbox_mem_pool);
1218				goto sparam_out;
1219			}
1220
1221			phba->hba_flag |= HBA_DEFER_FLOGI;
1222		}  else {
1223			lpfc_initial_flogi(vport);
1224		}
1225	} else {
1226		if (vport->fc_flag & FC_PT2PT)
1227			lpfc_disc_start(vport);
1228	}
1229	return;
1230
1231out:
1232	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1233			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
1234			 status, vport->port_state);
1235
1236sparam_out:
1237	lpfc_linkdown(phba);
1238
1239	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1240			 "0200 CONFIG_LINK bad hba state x%x\n",
1241			 vport->port_state);
1242
1243	lpfc_issue_clear_la(phba, vport);
1244	return;
1245}
1246
1247/**
1248 * lpfc_sli4_clear_fcf_rr_bmask
1249 * @phba: pointer to the struct lpfc_hba for this port.
1250 * This fucnction resets the round robin bit mask and clears the
1251 * fcf priority list. The list deletions are done while holding the
1252 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
1253 * from the lpfc_fcf_pri record.
1254 **/
1255void
1256lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
1257{
1258	struct lpfc_fcf_pri *fcf_pri;
1259	struct lpfc_fcf_pri *next_fcf_pri;
1260	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
1261	spin_lock_irq(&phba->hbalock);
1262	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
1263				&phba->fcf.fcf_pri_list, list) {
1264		list_del_init(&fcf_pri->list);
1265		fcf_pri->fcf_rec.flag = 0;
1266	}
1267	spin_unlock_irq(&phba->hbalock);
1268}
1269static void
1270lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1271{
1272	struct lpfc_vport *vport = mboxq->vport;
1273
1274	if (mboxq->u.mb.mbxStatus) {
1275		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1276				 "2017 REG_FCFI mbxStatus error x%x "
1277				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
1278				 vport->port_state);
1279		goto fail_out;
1280	}
1281
1282	/* Start FCoE discovery by sending a FLOGI. */
1283	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1284	/* Set the FCFI registered flag */
1285	spin_lock_irq(&phba->hbalock);
1286	phba->fcf.fcf_flag |= FCF_REGISTERED;
1287	spin_unlock_irq(&phba->hbalock);
1288
1289	/* If there is a pending FCoE event, restart FCF table scan. */
1290	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
1291		lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
1292		goto fail_out;
1293
1294	/* Mark successful completion of FCF table scan */
1295	spin_lock_irq(&phba->hbalock);
1296	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1297	phba->hba_flag &= ~FCF_TS_INPROG;
1298	if (vport->port_state != LPFC_FLOGI) {
1299		phba->hba_flag |= FCF_RR_INPROG;
1300		spin_unlock_irq(&phba->hbalock);
1301		lpfc_issue_init_vfi(vport);
1302		goto out;
1303	}
1304	spin_unlock_irq(&phba->hbalock);
1305	goto out;
1306
1307fail_out:
1308	spin_lock_irq(&phba->hbalock);
1309	phba->hba_flag &= ~FCF_RR_INPROG;
1310	spin_unlock_irq(&phba->hbalock);
1311out:
1312	mempool_free(mboxq, phba->mbox_mem_pool);
1313}
1314
1315/**
1316 * lpfc_fab_name_match - Check if the fcf fabric name match.
1317 * @fab_name: pointer to fabric name.
1318 * @new_fcf_record: pointer to fcf record.
1319 *
1320 * This routine compare the fcf record's fabric name with provided
1321 * fabric name. If the fabric name are identical this function
1322 * returns 1 else return 0.
1323 **/
1324static uint32_t
1325lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1326{
1327	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
1328		return 0;
1329	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
1330		return 0;
1331	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
1332		return 0;
1333	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
1334		return 0;
1335	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
1336		return 0;
1337	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
1338		return 0;
1339	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
1340		return 0;
1341	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
1342		return 0;
1343	return 1;
1344}
1345
1346/**
1347 * lpfc_sw_name_match - Check if the fcf switch name match.
1348 * @sw_name: pointer to switch name.
1349 * @new_fcf_record: pointer to fcf record.
1350 *
1351 * This routine compare the fcf record's switch name with provided
1352 * switch name. If the switch name are identical this function
1353 * returns 1 else return 0.
1354 **/
1355static uint32_t
1356lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1357{
1358	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
1359		return 0;
1360	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
1361		return 0;
1362	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
1363		return 0;
1364	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
1365		return 0;
1366	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
1367		return 0;
1368	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
1369		return 0;
1370	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
1371		return 0;
1372	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
1373		return 0;
1374	return 1;
1375}
1376
1377/**
1378 * lpfc_mac_addr_match - Check if the fcf mac address match.
1379 * @mac_addr: pointer to mac address.
1380 * @new_fcf_record: pointer to fcf record.
1381 *
1382 * This routine compare the fcf record's mac address with HBA's
1383 * FCF mac address. If the mac addresses are identical this function
1384 * returns 1 else return 0.
1385 **/
1386static uint32_t
1387lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
1388{
1389	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
1390		return 0;
1391	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
1392		return 0;
1393	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
1394		return 0;
1395	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
1396		return 0;
1397	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
1398		return 0;
1399	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
1400		return 0;
1401	return 1;
1402}
1403
1404static bool
1405lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1406{
1407	return (curr_vlan_id == new_vlan_id);
1408}
1409
1410/**
1411 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
1412 * @phba: pointer to lpfc hba data structure.
1413 * @fcf_index: Index for the lpfc_fcf_record.
1414 * @new_fcf_record: pointer to hba fcf record.
1415 *
1416 * This routine updates the driver FCF priority record from the new HBA FCF
1417 * record. The hbalock is asserted held in the code path calling this
1418 * routine.
1419 **/
1420static void
1421__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
1422				 struct fcf_record *new_fcf_record
1423				 )
1424{
1425	struct lpfc_fcf_pri *fcf_pri;
1426
1427	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1428	fcf_pri->fcf_rec.fcf_index = fcf_index;
1429	/* FCF record priority */
1430	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
1431
1432}
1433
1434/**
1435 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
1436 * @fcf_rec: pointer to driver fcf record.
1437 * @new_fcf_record: pointer to fcf record.
1438 *
1439 * This routine copies the FCF information from the FCF
1440 * record to lpfc_hba data structure.
1441 **/
1442static void
1443lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
1444		     struct fcf_record *new_fcf_record)
1445{
1446	/* Fabric name */
1447	fcf_rec->fabric_name[0] =
1448		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1449	fcf_rec->fabric_name[1] =
1450		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1451	fcf_rec->fabric_name[2] =
1452		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1453	fcf_rec->fabric_name[3] =
1454		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1455	fcf_rec->fabric_name[4] =
1456		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1457	fcf_rec->fabric_name[5] =
1458		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1459	fcf_rec->fabric_name[6] =
1460		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1461	fcf_rec->fabric_name[7] =
1462		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1463	/* Mac address */
1464	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1465	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1466	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1467	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1468	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1469	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1470	/* FCF record index */
1471	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1472	/* FCF record priority */
1473	fcf_rec->priority = new_fcf_record->fip_priority;
1474	/* Switch name */
1475	fcf_rec->switch_name[0] =
1476		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1477	fcf_rec->switch_name[1] =
1478		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1479	fcf_rec->switch_name[2] =
1480		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1481	fcf_rec->switch_name[3] =
1482		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1483	fcf_rec->switch_name[4] =
1484		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1485	fcf_rec->switch_name[5] =
1486		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1487	fcf_rec->switch_name[6] =
1488		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1489	fcf_rec->switch_name[7] =
1490		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1491}
1492
1493/**
1494 * lpfc_update_fcf_record - Update driver fcf record
1495 * @phba: pointer to lpfc hba data structure.
1496 * @fcf_rec: pointer to driver fcf record.
1497 * @new_fcf_record: pointer to hba fcf record.
1498 * @addr_mode: address mode to be set to the driver fcf record.
1499 * @vlan_id: vlan tag to be set to the driver fcf record.
1500 * @flag: flag bits to be set to the driver fcf record.
1501 *
1502 * This routine updates the driver FCF record from the new HBA FCF record
1503 * together with the address mode, vlan_id, and other informations. This
1504 * routine is called with the hbalock held.
1505 **/
1506static void
1507__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1508		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
1509		       uint16_t vlan_id, uint32_t flag)
1510{
1511	lockdep_assert_held(&phba->hbalock);
1512
1513	/* Copy the fields from the HBA's FCF record */
1514	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
1515	/* Update other fields of driver FCF record */
1516	fcf_rec->addr_mode = addr_mode;
1517	fcf_rec->vlan_id = vlan_id;
1518	fcf_rec->flag |= (flag | RECORD_VALID);
1519	__lpfc_update_fcf_record_pri(phba,
1520		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
1521				 new_fcf_record);
1522}
1523
1524/**
1525 * lpfc_register_fcf - Register the FCF with hba.
1526 * @phba: pointer to lpfc hba data structure.
1527 *
1528 * This routine issues a register fcfi mailbox command to register
1529 * the fcf with HBA.
1530 **/
1531static void
1532lpfc_register_fcf(struct lpfc_hba *phba)
1533{
1534	LPFC_MBOXQ_t *fcf_mbxq;
1535	int rc;
1536
1537	spin_lock_irq(&phba->hbalock);
1538	/* If the FCF is not available do nothing. */
1539	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1540		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1541		spin_unlock_irq(&phba->hbalock);
1542		return;
1543	}
1544
1545	/* The FCF is already registered, start discovery */
1546	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1547		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1548		phba->hba_flag &= ~FCF_TS_INPROG;
1549		if (phba->pport->port_state != LPFC_FLOGI &&
1550		    phba->pport->fc_flag & FC_FABRIC) {
1551			phba->hba_flag |= FCF_RR_INPROG;
1552			spin_unlock_irq(&phba->hbalock);
1553			lpfc_initial_flogi(phba->pport);
1554			return;
1555		}
1556		spin_unlock_irq(&phba->hbalock);
1557		return;
1558	}
1559	spin_unlock_irq(&phba->hbalock);
1560
1561	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1562	if (!fcf_mbxq) {
1563		spin_lock_irq(&phba->hbalock);
1564		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1565		spin_unlock_irq(&phba->hbalock);
1566		return;
1567	}
1568
1569	lpfc_reg_fcfi(phba, fcf_mbxq);
1570	fcf_mbxq->vport = phba->pport;
1571	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1572	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1573	if (rc == MBX_NOT_FINISHED) {
1574		spin_lock_irq(&phba->hbalock);
1575		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1576		spin_unlock_irq(&phba->hbalock);
1577		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1578	}
1579
1580	return;
1581}
1582
1583/**
1584 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
1585 * @phba: pointer to lpfc hba data structure.
1586 * @new_fcf_record: pointer to fcf record.
1587 * @boot_flag: Indicates if this record used by boot bios.
1588 * @addr_mode: The address mode to be used by this FCF
1589 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
1590 *
1591 * This routine compare the fcf record with connect list obtained from the
1592 * config region to decide if this FCF can be used for SAN discovery. It returns
1593 * 1 if this record can be used for SAN discovery else return zero. If this FCF
1594 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
1595 * is used by boot bios and addr_mode will indicate the addressing mode to be
1596 * used for this FCF when the function returns.
1597 * If the FCF record need to be used with a particular vlan id, the vlan is
1598 * set in the vlan_id on return of the function. If not VLAN tagging need to
1599 * be used with the FCF vlan_id will be set to LPFC_FCOE_NULL_VID;
1600 **/
1601static int
1602lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1603			struct fcf_record *new_fcf_record,
1604			uint32_t *boot_flag, uint32_t *addr_mode,
1605			uint16_t *vlan_id)
1606{
1607	struct lpfc_fcf_conn_entry *conn_entry;
1608	int i, j, fcf_vlan_id = 0;
1609
1610	/* Find the lowest VLAN id in the FCF record */
1611	for (i = 0; i < 512; i++) {
1612		if (new_fcf_record->vlan_bitmap[i]) {
1613			fcf_vlan_id = i * 8;
1614			j = 0;
1615			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
1616				j++;
1617				fcf_vlan_id++;
1618			}
1619			break;
1620		}
1621	}
1622
1623	/* FCF not valid/available or solicitation in progress */
1624	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1625	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
1626	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
1627		return 0;
1628
1629	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1630		*boot_flag = 0;
1631		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1632				new_fcf_record);
1633		if (phba->valid_vlan)
1634			*vlan_id = phba->vlan_id;
1635		else
1636			*vlan_id = LPFC_FCOE_NULL_VID;
1637		return 1;
1638	}
1639
1640	/*
1641	 * If there are no FCF connection table entry, driver connect to all
1642	 * FCFs.
1643	 */
1644	if (list_empty(&phba->fcf_conn_rec_list)) {
1645		*boot_flag = 0;
1646		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1647			new_fcf_record);
1648
1649		/*
1650		 * When there are no FCF connect entries, use driver's default
1651		 * addressing mode - FPMA.
1652		 */
1653		if (*addr_mode & LPFC_FCF_FPMA)
1654			*addr_mode = LPFC_FCF_FPMA;
1655
1656		/* If FCF record report a vlan id use that vlan id */
1657		if (fcf_vlan_id)
1658			*vlan_id = fcf_vlan_id;
1659		else
1660			*vlan_id = LPFC_FCOE_NULL_VID;
1661		return 1;
1662	}
1663
1664	list_for_each_entry(conn_entry,
1665			    &phba->fcf_conn_rec_list, list) {
1666		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1667			continue;
1668
1669		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1670			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1671					     new_fcf_record))
1672			continue;
1673		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
1674			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
1675					    new_fcf_record))
1676			continue;
1677		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1678			/*
1679			 * If the vlan bit map does not have the bit set for the
1680			 * vlan id to be used, then it is not a match.
1681			 */
1682			if (!(new_fcf_record->vlan_bitmap
1683				[conn_entry->conn_rec.vlan_tag / 8] &
1684				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
1685				continue;
1686		}
1687
1688		/*
1689		 * If connection record does not support any addressing mode,
1690		 * skip the FCF record.
1691		 */
1692		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1693			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1694			continue;
1695
1696		/*
1697		 * Check if the connection record specifies a required
1698		 * addressing mode.
1699		 */
1700		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1701			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1702
1703			/*
1704			 * If SPMA required but FCF not support this continue.
1705			 */
1706			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1707				!(bf_get(lpfc_fcf_record_mac_addr_prov,
1708					new_fcf_record) & LPFC_FCF_SPMA))
1709				continue;
1710
1711			/*
1712			 * If FPMA required but FCF not support this continue.
1713			 */
1714			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1715				!(bf_get(lpfc_fcf_record_mac_addr_prov,
1716				new_fcf_record) & LPFC_FCF_FPMA))
1717				continue;
1718		}
1719
1720		/*
1721		 * This fcf record matches filtering criteria.
1722		 */
1723		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1724			*boot_flag = 1;
1725		else
1726			*boot_flag = 0;
1727
1728		/*
1729		 * If user did not specify any addressing mode, or if the
1730		 * preferred addressing mode specified by user is not supported
1731		 * by FCF, allow fabric to pick the addressing mode.
1732		 */
1733		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1734				new_fcf_record);
1735		/*
1736		 * If the user specified a required address mode, assign that
1737		 * address mode
1738		 */
1739		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1740			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1741			*addr_mode = (conn_entry->conn_rec.flags &
1742				FCFCNCT_AM_SPMA) ?
1743				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1744		/*
1745		 * If the user specified a preferred address mode, use the
1746		 * addr mode only if FCF support the addr_mode.
1747		 */
1748		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1749			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1750			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1751			(*addr_mode & LPFC_FCF_SPMA))
1752				*addr_mode = LPFC_FCF_SPMA;
1753		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1754			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
1755			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1756			(*addr_mode & LPFC_FCF_FPMA))
1757				*addr_mode = LPFC_FCF_FPMA;
1758
1759		/* If matching connect list has a vlan id, use it */
1760		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1761			*vlan_id = conn_entry->conn_rec.vlan_tag;
1762		/*
1763		 * If no vlan id is specified in connect list, use the vlan id
1764		 * in the FCF record
1765		 */
1766		else if (fcf_vlan_id)
1767			*vlan_id = fcf_vlan_id;
1768		else
1769			*vlan_id = LPFC_FCOE_NULL_VID;
1770
1771		return 1;
1772	}
1773
1774	return 0;
1775}
1776
1777/**
1778 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
1779 * @phba: pointer to lpfc hba data structure.
1780 * @unreg_fcf: Unregister FCF if FCF table need to be re-scaned.
1781 *
1782 * This function check if there is any fcoe event pending while driver
1783 * scan FCF entries. If there is any pending event, it will restart the
1784 * FCF saning and return 1 else return 0.
1785 */
1786int
1787lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1788{
1789	/*
1790	 * If the Link is up and no FCoE events while in the
1791	 * FCF discovery, no need to restart FCF discovery.
1792	 */
1793	if ((phba->link_state  >= LPFC_LINK_UP) &&
1794	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1795		return 0;
1796
1797	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1798			"2768 Pending link or FCF event during current "
1799			"handling of the previous event: link_state:x%x, "
1800			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
1801			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
1802			phba->fcoe_eventtag);
1803
1804	spin_lock_irq(&phba->hbalock);
1805	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1806	spin_unlock_irq(&phba->hbalock);
1807
1808	if (phba->link_state >= LPFC_LINK_UP) {
1809		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1810				"2780 Restart FCF table scan due to "
1811				"pending FCF event:evt_tag_at_scan:x%x, "
1812				"evt_tag_current:x%x\n",
1813				phba->fcoe_eventtag_at_fcf_scan,
1814				phba->fcoe_eventtag);
1815		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
1816	} else {
1817		/*
1818		 * Do not continue FCF discovery and clear FCF_TS_INPROG
1819		 * flag
1820		 */
1821		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1822				"2833 Stop FCF discovery process due to link "
1823				"state change (x%x)\n", phba->link_state);
1824		spin_lock_irq(&phba->hbalock);
1825		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1826		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
1827		spin_unlock_irq(&phba->hbalock);
1828	}
1829
1830	/* Unregister the currently registered FCF if required */
1831	if (unreg_fcf) {
1832		spin_lock_irq(&phba->hbalock);
1833		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1834		spin_unlock_irq(&phba->hbalock);
1835		lpfc_sli4_unregister_fcf(phba);
1836	}
1837	return 1;
1838}
1839
1840/**
1841 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
1842 * @phba: pointer to lpfc hba data structure.
1843 * @fcf_cnt: number of eligible fcf record seen so far.
1844 *
1845 * This function makes an running random selection decision on FCF record to
1846 * use through a sequence of @fcf_cnt eligible FCF records with equal
1847 * probability. To perform integer manunipulation of random numbers with
1848 * size unit32_t, the lower 16 bits of the 32-bit random number returned
1849 * from prandom_u32() are taken as the random random number generated.
1850 *
1851 * Returns true when outcome is for the newly read FCF record should be
1852 * chosen; otherwise, return false when outcome is for keeping the previously
1853 * chosen FCF record.
1854 **/
1855static bool
1856lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
1857{
1858	uint32_t rand_num;
1859
1860	/* Get 16-bit uniform random number */
1861	rand_num = 0xFFFF & prandom_u32();
1862
1863	/* Decision with probability 1/fcf_cnt */
1864	if ((fcf_cnt * rand_num) < 0xFFFF)
1865		return true;
1866	else
1867		return false;
1868}
1869
1870/**
1871 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
1872 * @phba: pointer to lpfc hba data structure.
1873 * @mboxq: pointer to mailbox object.
1874 * @next_fcf_index: pointer to holder of next fcf index.
1875 *
1876 * This routine parses the non-embedded fcf mailbox command by performing the
1877 * necessarily error checking, non-embedded read FCF record mailbox command
1878 * SGE parsing, and endianness swapping.
1879 *
1880 * Returns the pointer to the new FCF record in the non-embedded mailbox
1881 * command DMA memory if successfully, other NULL.
1882 */
1883static struct fcf_record *
1884lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1885			     uint16_t *next_fcf_index)
1886{
1887	void *virt_addr;
1888	struct lpfc_mbx_sge sge;
1889	struct lpfc_mbx_read_fcf_tbl *read_fcf;
1890	uint32_t shdr_status, shdr_add_status, if_type;
1891	union lpfc_sli4_cfg_shdr *shdr;
1892	struct fcf_record *new_fcf_record;
1893
1894	/* Get the first SGE entry from the non-embedded DMA memory. This
1895	 * routine only uses a single SGE.
1896	 */
1897	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1898	if (unlikely(!mboxq->sge_array)) {
1899		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1900				"2524 Failed to get the non-embedded SGE "
1901				"virtual address\n");
1902		return NULL;
1903	}
1904	virt_addr = mboxq->sge_array->addr[0];
1905
1906	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1907	lpfc_sli_pcimem_bcopy(shdr, shdr,
1908			      sizeof(union lpfc_sli4_cfg_shdr));
1909	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1910	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1911	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1912	if (shdr_status || shdr_add_status) {
1913		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
1914					if_type == LPFC_SLI_INTF_IF_TYPE_2)
1915			lpfc_printf_log(phba, KERN_ERR,
1916					LOG_TRACE_EVENT,
1917					"2726 READ_FCF_RECORD Indicates empty "
1918					"FCF table.\n");
1919		else
1920			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1921					"2521 READ_FCF_RECORD mailbox failed "
1922					"with status x%x add_status x%x, "
1923					"mbx\n", shdr_status, shdr_add_status);
1924		return NULL;
1925	}
1926
1927	/* Interpreting the returned information of the FCF record */
1928	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1929	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1930			      sizeof(struct lpfc_mbx_read_fcf_tbl));
1931	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1932	new_fcf_record = (struct fcf_record *)(virt_addr +
1933			  sizeof(struct lpfc_mbx_read_fcf_tbl));
1934	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1935				offsetof(struct fcf_record, vlan_bitmap));
1936	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
1937	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
1938
1939	return new_fcf_record;
1940}
1941
1942/**
1943 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
1944 * @phba: pointer to lpfc hba data structure.
1945 * @fcf_record: pointer to the fcf record.
1946 * @vlan_id: the lowest vlan identifier associated to this fcf record.
1947 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
1948 *
1949 * This routine logs the detailed FCF record if the LOG_FIP loggin is
1950 * enabled.
1951 **/
1952static void
1953lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1954			      struct fcf_record *fcf_record,
1955			      uint16_t vlan_id,
1956			      uint16_t next_fcf_index)
1957{
1958	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1959			"2764 READ_FCF_RECORD:\n"
1960			"\tFCF_Index     : x%x\n"
1961			"\tFCF_Avail     : x%x\n"
1962			"\tFCF_Valid     : x%x\n"
1963			"\tFCF_SOL       : x%x\n"
1964			"\tFIP_Priority  : x%x\n"
1965			"\tMAC_Provider  : x%x\n"
1966			"\tLowest VLANID : x%x\n"
1967			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
1968			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1969			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1970			"\tNext_FCF_Index: x%x\n",
1971			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1972			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1973			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1974			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
1975			fcf_record->fip_priority,
1976			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1977			vlan_id,
1978			bf_get(lpfc_fcf_record_mac_0, fcf_record),
1979			bf_get(lpfc_fcf_record_mac_1, fcf_record),
1980			bf_get(lpfc_fcf_record_mac_2, fcf_record),
1981			bf_get(lpfc_fcf_record_mac_3, fcf_record),
1982			bf_get(lpfc_fcf_record_mac_4, fcf_record),
1983			bf_get(lpfc_fcf_record_mac_5, fcf_record),
1984			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
1985			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
1986			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
1987			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
1988			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
1989			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
1990			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
1991			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
1992			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
1993			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
1994			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
1995			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
1996			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
1997			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
1998			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
1999			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
2000			next_fcf_index);
2001}
2002
2003/**
2004 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
2005 * @phba: pointer to lpfc hba data structure.
2006 * @fcf_rec: pointer to an existing FCF record.
2007 * @new_fcf_record: pointer to a new FCF record.
2008 * @new_vlan_id: vlan id from the new FCF record.
2009 *
2010 * This function performs matching test of a new FCF record against an existing
2011 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
2012 * will not be used as part of the FCF record matching criteria.
2013 *
2014 * Returns true if all the fields matching, otherwise returns false.
2015 */
2016static bool
2017lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
2018			   struct lpfc_fcf_rec *fcf_rec,
2019			   struct fcf_record *new_fcf_record,
2020			   uint16_t new_vlan_id)
2021{
2022	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
2023		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
2024			return false;
2025	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
2026		return false;
2027	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
2028		return false;
2029	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
2030		return false;
2031	if (fcf_rec->priority != new_fcf_record->fip_priority)
2032		return false;
2033	return true;
2034}
2035
2036/**
2037 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
2038 * @vport: Pointer to vport object.
2039 * @fcf_index: index to next fcf.
2040 *
2041 * This function processing the roundrobin fcf failover to next fcf index.
2042 * When this function is invoked, there will be a current fcf registered
2043 * for flogi.
2044 * Return: 0 for continue retrying flogi on currently registered fcf;
2045 *         1 for stop flogi on currently registered fcf;
2046 */
2047int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
2048{
2049	struct lpfc_hba *phba = vport->phba;
2050	int rc;
2051
2052	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
2053		spin_lock_irq(&phba->hbalock);
2054		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2055			spin_unlock_irq(&phba->hbalock);
2056			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2057					"2872 Devloss tmo with no eligible "
2058					"FCF, unregister in-use FCF (x%x) "
2059					"and rescan FCF table\n",
2060					phba->fcf.current_rec.fcf_indx);
2061			lpfc_unregister_fcf_rescan(phba);
2062			goto stop_flogi_current_fcf;
2063		}
2064		/* Mark the end to FLOGI roundrobin failover */
2065		phba->hba_flag &= ~FCF_RR_INPROG;
2066		/* Allow action to new fcf asynchronous event */
2067		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2068		spin_unlock_irq(&phba->hbalock);
2069		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2070				"2865 No FCF available, stop roundrobin FCF "
2071				"failover and change port state:x%x/x%x\n",
2072				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
2073		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2074
2075		if (!phba->fcf.fcf_redisc_attempted) {
2076			lpfc_unregister_fcf(phba);
2077
2078			rc = lpfc_sli4_redisc_fcf_table(phba);
2079			if (!rc) {
2080				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2081						"3195 Rediscover FCF table\n");
2082				phba->fcf.fcf_redisc_attempted = 1;
2083				lpfc_sli4_clear_fcf_rr_bmask(phba);
2084			} else {
2085				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2086						"3196 Rediscover FCF table "
2087						"failed. Status:x%x\n", rc);
2088			}
2089		} else {
2090			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2091					"3197 Already rediscover FCF table "
2092					"attempted. No more retry\n");
2093		}
2094		goto stop_flogi_current_fcf;
2095	} else {
2096		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
2097				"2794 Try FLOGI roundrobin FCF failover to "
2098				"(x%x)\n", fcf_index);
2099		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
2100		if (rc)
2101			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
2102					"2761 FLOGI roundrobin FCF failover "
2103					"failed (rc:x%x) to read FCF (x%x)\n",
2104					rc, phba->fcf.current_rec.fcf_indx);
2105		else
2106			goto stop_flogi_current_fcf;
2107	}
2108	return 0;
2109
2110stop_flogi_current_fcf:
2111	lpfc_can_disctmo(vport);
2112	return 1;
2113}
2114
2115/**
2116 * lpfc_sli4_fcf_pri_list_del
2117 * @phba: pointer to lpfc hba data structure.
2118 * @fcf_index: the index of the fcf record to delete
2119 * This routine checks the on list flag of the fcf_index to be deleted.
2120 * If it is one the list then it is removed from the list, and the flag
2121 * is cleared. This routine grab the hbalock before removing the fcf
2122 * record from the list.
2123 **/
2124static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
2125			uint16_t fcf_index)
2126{
2127	struct lpfc_fcf_pri *new_fcf_pri;
2128
2129	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2130	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2131		"3058 deleting idx x%x pri x%x flg x%x\n",
2132		fcf_index, new_fcf_pri->fcf_rec.priority,
2133		 new_fcf_pri->fcf_rec.flag);
2134	spin_lock_irq(&phba->hbalock);
2135	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
2136		if (phba->fcf.current_rec.priority ==
2137				new_fcf_pri->fcf_rec.priority)
2138			phba->fcf.eligible_fcf_cnt--;
2139		list_del_init(&new_fcf_pri->list);
2140		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
2141	}
2142	spin_unlock_irq(&phba->hbalock);
2143}
2144
2145/**
2146 * lpfc_sli4_set_fcf_flogi_fail
2147 * @phba: pointer to lpfc hba data structure.
2148 * @fcf_index: the index of the fcf record to update
2149 * This routine acquires the hbalock and then set the LPFC_FCF_FLOGI_FAILED
2150 * flag so the the round robin slection for the particular priority level
2151 * will try a different fcf record that does not have this bit set.
2152 * If the fcf record is re-read for any reason this flag is cleared brfore
2153 * adding it to the priority list.
2154 **/
2155void
2156lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
2157{
2158	struct lpfc_fcf_pri *new_fcf_pri;
2159	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2160	spin_lock_irq(&phba->hbalock);
2161	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
2162	spin_unlock_irq(&phba->hbalock);
2163}
2164
2165/**
2166 * lpfc_sli4_fcf_pri_list_add
2167 * @phba: pointer to lpfc hba data structure.
2168 * @fcf_index: the index of the fcf record to add
2169 * @new_fcf_record: pointer to a new FCF record.
2170 * This routine checks the priority of the fcf_index to be added.
2171 * If it is a lower priority than the current head of the fcf_pri list
2172 * then it is added to the list in the right order.
2173 * If it is the same priority as the current head of the list then it
2174 * is added to the head of the list and its bit in the rr_bmask is set.
2175 * If the fcf_index to be added is of a higher priority than the current
2176 * head of the list then the rr_bmask is cleared, its bit is set in the
2177 * rr_bmask and it is added to the head of the list.
2178 * returns:
2179 * 0=success 1=failure
2180 **/
2181static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
2182	uint16_t fcf_index,
2183	struct fcf_record *new_fcf_record)
2184{
2185	uint16_t current_fcf_pri;
2186	uint16_t last_index;
2187	struct lpfc_fcf_pri *fcf_pri;
2188	struct lpfc_fcf_pri *next_fcf_pri;
2189	struct lpfc_fcf_pri *new_fcf_pri;
2190	int ret;
2191
2192	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2193	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2194		"3059 adding idx x%x pri x%x flg x%x\n",
2195		fcf_index, new_fcf_record->fip_priority,
2196		 new_fcf_pri->fcf_rec.flag);
2197	spin_lock_irq(&phba->hbalock);
2198	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2199		list_del_init(&new_fcf_pri->list);
2200	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2201	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2202	if (list_empty(&phba->fcf.fcf_pri_list)) {
2203		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2204		ret = lpfc_sli4_fcf_rr_index_set(phba,
2205				new_fcf_pri->fcf_rec.fcf_index);
2206		goto out;
2207	}
2208
2209	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2210				LPFC_SLI4_FCF_TBL_INDX_MAX);
2211	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2212		ret = 0; /* Empty rr list */
2213		goto out;
2214	}
2215	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2216	if (new_fcf_pri->fcf_rec.priority <=  current_fcf_pri) {
2217		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2218		if (new_fcf_pri->fcf_rec.priority <  current_fcf_pri) {
2219			memset(phba->fcf.fcf_rr_bmask, 0,
2220				sizeof(*phba->fcf.fcf_rr_bmask));
2221			/* fcfs_at_this_priority_level = 1; */
2222			phba->fcf.eligible_fcf_cnt = 1;
2223		} else
2224			/* fcfs_at_this_priority_level++; */
2225			phba->fcf.eligible_fcf_cnt++;
2226		ret = lpfc_sli4_fcf_rr_index_set(phba,
2227				new_fcf_pri->fcf_rec.fcf_index);
2228		goto out;
2229	}
2230
2231	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2232				&phba->fcf.fcf_pri_list, list) {
2233		if (new_fcf_pri->fcf_rec.priority <=
2234				fcf_pri->fcf_rec.priority) {
2235			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2236				list_add(&new_fcf_pri->list,
2237						&phba->fcf.fcf_pri_list);
2238			else
2239				list_add(&new_fcf_pri->list,
2240					 &((struct lpfc_fcf_pri *)
2241					fcf_pri->list.prev)->list);
2242			ret = 0;
2243			goto out;
2244		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2245			|| new_fcf_pri->fcf_rec.priority <
2246				next_fcf_pri->fcf_rec.priority) {
2247			list_add(&new_fcf_pri->list, &fcf_pri->list);
2248			ret = 0;
2249			goto out;
2250		}
2251		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2252			continue;
2253
2254	}
2255	ret = 1;
2256out:
2257	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
2258	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2259	spin_unlock_irq(&phba->hbalock);
2260	return ret;
2261}
2262
2263/**
2264 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
2265 * @phba: pointer to lpfc hba data structure.
2266 * @mboxq: pointer to mailbox object.
2267 *
2268 * This function iterates through all the fcf records available in
2269 * HBA and chooses the optimal FCF record for discovery. After finding
2270 * the FCF for discovery it registers the FCF record and kicks start
2271 * discovery.
2272 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
2273 * use an FCF record which matches fabric name and mac address of the
2274 * currently used FCF record.
2275 * If the driver supports only one FCF, it will try to use the FCF record
2276 * used by BOOT_BIOS.
2277 */
2278void
2279lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2280{
2281	struct fcf_record *new_fcf_record;
2282	uint32_t boot_flag, addr_mode;
2283	uint16_t fcf_index, next_fcf_index;
2284	struct lpfc_fcf_rec *fcf_rec = NULL;
2285	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2286	bool select_new_fcf;
2287	int rc;
2288
2289	/* If there is pending FCoE event restart FCF table scan */
2290	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2291		lpfc_sli4_mbox_cmd_free(phba, mboxq);
2292		return;
2293	}
2294
2295	/* Parse the FCF record from the non-embedded mailbox command */
2296	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2297						      &next_fcf_index);
2298	if (!new_fcf_record) {
2299		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2300				"2765 Mailbox command READ_FCF_RECORD "
2301				"failed to retrieve a FCF record.\n");
2302		/* Let next new FCF event trigger fast failover */
2303		spin_lock_irq(&phba->hbalock);
2304		phba->hba_flag &= ~FCF_TS_INPROG;
2305		spin_unlock_irq(&phba->hbalock);
2306		lpfc_sli4_mbox_cmd_free(phba, mboxq);
2307		return;
2308	}
2309
2310	/* Check the FCF record against the connection list */
2311	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2312				      &addr_mode, &vlan_id);
2313
2314	/* Log the FCF record information if turned on */
2315	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2316				      next_fcf_index);
2317
2318	/*
2319	 * If the fcf record does not match with connect list entries
2320	 * read the next entry; otherwise, this is an eligible FCF
2321	 * record for roundrobin FCF failover.
2322	 */
2323	if (!rc) {
2324		lpfc_sli4_fcf_pri_list_del(phba,
2325					bf_get(lpfc_fcf_record_fcf_index,
2326					       new_fcf_record));
2327		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2328				"2781 FCF (x%x) failed connection "
2329				"list check: (x%x/x%x/%x)\n",
2330				bf_get(lpfc_fcf_record_fcf_index,
2331				       new_fcf_record),
2332				bf_get(lpfc_fcf_record_fcf_avail,
2333				       new_fcf_record),
2334				bf_get(lpfc_fcf_record_fcf_valid,
2335				       new_fcf_record),
2336				bf_get(lpfc_fcf_record_fcf_sol,
2337				       new_fcf_record));
2338		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2339		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2340		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2341			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2342			    phba->fcf.current_rec.fcf_indx) {
2343				lpfc_printf_log(phba, KERN_ERR,
2344						LOG_TRACE_EVENT,
2345					"2862 FCF (x%x) matches property "
2346					"of in-use FCF (x%x)\n",
2347					bf_get(lpfc_fcf_record_fcf_index,
2348					       new_fcf_record),
2349					phba->fcf.current_rec.fcf_indx);
2350				goto read_next_fcf;
2351			}
2352			/*
2353			 * In case the current in-use FCF record becomes
2354			 * invalid/unavailable during FCF discovery that
2355			 * was not triggered by fast FCF failover process,
2356			 * treat it as fast FCF failover.
2357			 */
2358			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2359			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2360				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2361						"2835 Invalid in-use FCF "
2362						"(x%x), enter FCF failover "
2363						"table scan.\n",
2364						phba->fcf.current_rec.fcf_indx);
2365				spin_lock_irq(&phba->hbalock);
2366				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2367				spin_unlock_irq(&phba->hbalock);
2368				lpfc_sli4_mbox_cmd_free(phba, mboxq);
2369				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2370						LPFC_FCOE_FCF_GET_FIRST);
2371				return;
2372			}
2373		}
2374		goto read_next_fcf;
2375	} else {
2376		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2377		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2378							new_fcf_record);
2379		if (rc)
2380			goto read_next_fcf;
2381	}
2382
2383	/*
2384	 * If this is not the first FCF discovery of the HBA, use last
2385	 * FCF record for the discovery. The condition that a rescan
2386	 * matches the in-use FCF record: fabric name, switch name, mac
2387	 * address, and vlan_id.
2388	 */
2389	spin_lock_irq(&phba->hbalock);
2390	if (phba->fcf.fcf_flag & FCF_IN_USE) {
2391		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2392			lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2393		    new_fcf_record, vlan_id)) {
2394			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2395			    phba->fcf.current_rec.fcf_indx) {
2396				phba->fcf.fcf_flag |= FCF_AVAILABLE;
2397				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2398					/* Stop FCF redisc wait timer */
2399					__lpfc_sli4_stop_fcf_redisc_wait_timer(
2400									phba);
2401				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2402					/* Fast failover, mark completed */
2403					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2404				spin_unlock_irq(&phba->hbalock);
2405				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2406						"2836 New FCF matches in-use "
2407						"FCF (x%x), port_state:x%x, "
2408						"fc_flag:x%x\n",
2409						phba->fcf.current_rec.fcf_indx,
2410						phba->pport->port_state,
2411						phba->pport->fc_flag);
2412				goto out;
2413			} else
2414				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2415					"2863 New FCF (x%x) matches "
2416					"property of in-use FCF (x%x)\n",
2417					bf_get(lpfc_fcf_record_fcf_index,
2418					       new_fcf_record),
2419					phba->fcf.current_rec.fcf_indx);
2420		}
2421		/*
2422		 * Read next FCF record from HBA searching for the matching
2423		 * with in-use record only if not during the fast failover
2424		 * period. In case of fast failover period, it shall try to
2425		 * determine whether the FCF record just read should be the
2426		 * next candidate.
2427		 */
2428		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2429			spin_unlock_irq(&phba->hbalock);
2430			goto read_next_fcf;
2431		}
2432	}
2433	/*
2434	 * Update on failover FCF record only if it's in FCF fast-failover
2435	 * period; otherwise, update on current FCF record.
2436	 */
2437	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2438		fcf_rec = &phba->fcf.failover_rec;
2439	else
2440		fcf_rec = &phba->fcf.current_rec;
2441
2442	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2443		/*
2444		 * If the driver FCF record does not have boot flag
2445		 * set and new hba fcf record has boot flag set, use
2446		 * the new hba fcf record.
2447		 */
2448		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2449			/* Choose this FCF record */
2450			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2451					"2837 Update current FCF record "
2452					"(x%x) with new FCF record (x%x)\n",
2453					fcf_rec->fcf_indx,
2454					bf_get(lpfc_fcf_record_fcf_index,
2455					new_fcf_record));
2456			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2457					addr_mode, vlan_id, BOOT_ENABLE);
2458			spin_unlock_irq(&phba->hbalock);
2459			goto read_next_fcf;
2460		}
2461		/*
2462		 * If the driver FCF record has boot flag set and the
2463		 * new hba FCF record does not have boot flag, read
2464		 * the next FCF record.
2465		 */
2466		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2467			spin_unlock_irq(&phba->hbalock);
2468			goto read_next_fcf;
2469		}
2470		/*
2471		 * If the new hba FCF record has lower priority value
2472		 * than the driver FCF record, use the new record.
2473		 */
2474		if (new_fcf_record->fip_priority < fcf_rec->priority) {
2475			/* Choose the new FCF record with lower priority */
2476			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2477					"2838 Update current FCF record "
2478					"(x%x) with new FCF record (x%x)\n",
2479					fcf_rec->fcf_indx,
2480					bf_get(lpfc_fcf_record_fcf_index,
2481					       new_fcf_record));
2482			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2483					addr_mode, vlan_id, 0);
2484			/* Reset running random FCF selection count */
2485			phba->fcf.eligible_fcf_cnt = 1;
2486		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2487			/* Update running random FCF selection count */
2488			phba->fcf.eligible_fcf_cnt++;
2489			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2490						phba->fcf.eligible_fcf_cnt);
2491			if (select_new_fcf) {
2492				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2493					"2839 Update current FCF record "
2494					"(x%x) with new FCF record (x%x)\n",
2495					fcf_rec->fcf_indx,
2496					bf_get(lpfc_fcf_record_fcf_index,
2497					       new_fcf_record));
2498				/* Choose the new FCF by random selection */
2499				__lpfc_update_fcf_record(phba, fcf_rec,
2500							 new_fcf_record,
2501							 addr_mode, vlan_id, 0);
2502			}
2503		}
2504		spin_unlock_irq(&phba->hbalock);
2505		goto read_next_fcf;
2506	}
2507	/*
2508	 * This is the first suitable FCF record, choose this record for
2509	 * initial best-fit FCF.
2510	 */
2511	if (fcf_rec) {
2512		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2513				"2840 Update initial FCF candidate "
2514				"with FCF (x%x)\n",
2515				bf_get(lpfc_fcf_record_fcf_index,
2516				       new_fcf_record));
2517		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2518					 addr_mode, vlan_id, (boot_flag ?
2519					 BOOT_ENABLE : 0));
2520		phba->fcf.fcf_flag |= FCF_AVAILABLE;
2521		/* Setup initial running random FCF selection count */
2522		phba->fcf.eligible_fcf_cnt = 1;
2523	}
2524	spin_unlock_irq(&phba->hbalock);
2525	goto read_next_fcf;
2526
2527read_next_fcf:
2528	lpfc_sli4_mbox_cmd_free(phba, mboxq);
2529	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2530		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2531			/*
2532			 * Case of FCF fast failover scan
2533			 */
2534
2535			/*
2536			 * It has not found any suitable FCF record, cancel
2537			 * FCF scan inprogress, and do nothing
2538			 */
2539			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2540				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2541					       "2782 No suitable FCF found: "
2542					       "(x%x/x%x)\n",
2543					       phba->fcoe_eventtag_at_fcf_scan,
2544					       bf_get(lpfc_fcf_record_fcf_index,
2545						      new_fcf_record));
2546				spin_lock_irq(&phba->hbalock);
2547				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2548					phba->hba_flag &= ~FCF_TS_INPROG;
2549					spin_unlock_irq(&phba->hbalock);
2550					/* Unregister in-use FCF and rescan */
2551					lpfc_printf_log(phba, KERN_INFO,
2552							LOG_FIP,
2553							"2864 On devloss tmo "
2554							"unreg in-use FCF and "
2555							"rescan FCF table\n");
2556					lpfc_unregister_fcf_rescan(phba);
2557					return;
2558				}
2559				/*
2560				 * Let next new FCF event trigger fast failover
2561				 */
2562				phba->hba_flag &= ~FCF_TS_INPROG;
2563				spin_unlock_irq(&phba->hbalock);
2564				return;
2565			}
2566			/*
2567			 * It has found a suitable FCF record that is not
2568			 * the same as in-use FCF record, unregister the
2569			 * in-use FCF record, replace the in-use FCF record
2570			 * with the new FCF record, mark FCF fast failover
2571			 * completed, and then start register the new FCF
2572			 * record.
2573			 */
2574
2575			/* Unregister the current in-use FCF record */
2576			lpfc_unregister_fcf(phba);
2577
2578			/* Replace in-use record with the new record */
2579			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2580					"2842 Replace in-use FCF (x%x) "
2581					"with failover FCF (x%x)\n",
2582					phba->fcf.current_rec.fcf_indx,
2583					phba->fcf.failover_rec.fcf_indx);
2584			memcpy(&phba->fcf.current_rec,
2585			       &phba->fcf.failover_rec,
2586			       sizeof(struct lpfc_fcf_rec));
2587			/*
2588			 * Mark the fast FCF failover rediscovery completed
2589			 * and the start of the first round of the roundrobin
2590			 * FCF failover.
2591			 */
2592			spin_lock_irq(&phba->hbalock);
2593			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2594			spin_unlock_irq(&phba->hbalock);
2595			/* Register to the new FCF record */
2596			lpfc_register_fcf(phba);
2597		} else {
2598			/*
2599			 * In case of transaction period to fast FCF failover,
2600			 * do nothing when search to the end of the FCF table.
2601			 */
2602			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2603			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2604				return;
2605
2606			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2607				phba->fcf.fcf_flag & FCF_IN_USE) {
2608				/*
2609				 * In case the current in-use FCF record no
2610				 * longer existed during FCF discovery that
2611				 * was not triggered by fast FCF failover
2612				 * process, treat it as fast FCF failover.
2613				 */
2614				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2615						"2841 In-use FCF record (x%x) "
2616						"not reported, entering fast "
2617						"FCF failover mode scanning.\n",
2618						phba->fcf.current_rec.fcf_indx);
2619				spin_lock_irq(&phba->hbalock);
2620				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2621				spin_unlock_irq(&phba->hbalock);
2622				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2623						LPFC_FCOE_FCF_GET_FIRST);
2624				return;
2625			}
2626			/* Register to the new FCF record */
2627			lpfc_register_fcf(phba);
2628		}
2629	} else
2630		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2631	return;
2632
2633out:
2634	lpfc_sli4_mbox_cmd_free(phba, mboxq);
2635	lpfc_register_fcf(phba);
2636
2637	return;
2638}
2639
2640/**
2641 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
2642 * @phba: pointer to lpfc hba data structure.
2643 * @mboxq: pointer to mailbox object.
2644 *
2645 * This is the callback function for FLOGI failure roundrobin FCF failover
2646 * read FCF record mailbox command from the eligible FCF record bmask for
2647 * performing the failover. If the FCF read back is not valid/available, it
2648 * fails through to retrying FLOGI to the currently registered FCF again.
2649 * Otherwise, if the FCF read back is valid and available, it will set the
2650 * newly read FCF record to the failover FCF record, unregister currently
2651 * registered FCF record, copy the failover FCF record to the current
2652 * FCF record, and then register the current FCF record before proceeding
2653 * to trying FLOGI on the new failover FCF.
2654 */
2655void
2656lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2657{
2658	struct fcf_record *new_fcf_record;
2659	uint32_t boot_flag, addr_mode;
2660	uint16_t next_fcf_index, fcf_index;
2661	uint16_t current_fcf_index;
2662	uint16_t vlan_id;
2663	int rc;
2664
2665	/* If link state is not up, stop the roundrobin failover process */
2666	if (phba->link_state < LPFC_LINK_UP) {
2667		spin_lock_irq(&phba->hbalock);
2668		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2669		phba->hba_flag &= ~FCF_RR_INPROG;
2670		spin_unlock_irq(&phba->hbalock);
2671		goto out;
2672	}
2673
2674	/* Parse the FCF record from the non-embedded mailbox command */
2675	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2676						      &next_fcf_index);
2677	if (!new_fcf_record) {
2678		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2679				"2766 Mailbox command READ_FCF_RECORD "
2680				"failed to retrieve a FCF record. "
2681				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2682				phba->fcf.fcf_flag);
2683		lpfc_unregister_fcf_rescan(phba);
2684		goto out;
2685	}
2686
2687	/* Get the needed parameters from FCF record */
2688	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2689				      &addr_mode, &vlan_id);
2690
2691	/* Log the FCF record information if turned on */
2692	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2693				      next_fcf_index);
2694
2695	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2696	if (!rc) {
2697		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2698				"2848 Remove ineligible FCF (x%x) from "
2699				"from roundrobin bmask\n", fcf_index);
2700		/* Clear roundrobin bmask bit for ineligible FCF */
2701		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2702		/* Perform next round of roundrobin FCF failover */
2703		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2704		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2705		if (rc)
2706			goto out;
2707		goto error_out;
2708	}
2709
2710	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2711		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2712				"2760 Perform FLOGI roundrobin FCF failover: "
2713				"FCF (x%x) back to FCF (x%x)\n",
2714				phba->fcf.current_rec.fcf_indx, fcf_index);
2715		/* Wait 500 ms before retrying FLOGI to current FCF */
2716		msleep(500);
2717		lpfc_issue_init_vfi(phba->pport);
2718		goto out;
2719	}
2720
2721	/* Upload new FCF record to the failover FCF record */
2722	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2723			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
2724			phba->fcf.failover_rec.fcf_indx, fcf_index);
2725	spin_lock_irq(&phba->hbalock);
2726	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2727				 new_fcf_record, addr_mode, vlan_id,
2728				 (boot_flag ? BOOT_ENABLE : 0));
2729	spin_unlock_irq(&phba->hbalock);
2730
2731	current_fcf_index = phba->fcf.current_rec.fcf_indx;
2732
2733	/* Unregister the current in-use FCF record */
2734	lpfc_unregister_fcf(phba);
2735
2736	/* Replace in-use record with the new record */
2737	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2738	       sizeof(struct lpfc_fcf_rec));
2739
2740	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2741			"2783 Perform FLOGI roundrobin FCF failover: FCF "
2742			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2743
2744error_out:
2745	lpfc_register_fcf(phba);
2746out:
2747	lpfc_sli4_mbox_cmd_free(phba, mboxq);
2748}
2749
2750/**
2751 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2752 * @phba: pointer to lpfc hba data structure.
2753 * @mboxq: pointer to mailbox object.
2754 *
2755 * This is the callback function of read FCF record mailbox command for
2756 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
2757 * failover when a new FCF event happened. If the FCF read back is
2758 * valid/available and it passes the connection list check, it updates
2759 * the bmask for the eligible FCF record for roundrobin failover.
2760 */
2761void
2762lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2763{
2764	struct fcf_record *new_fcf_record;
2765	uint32_t boot_flag, addr_mode;
2766	uint16_t fcf_index, next_fcf_index;
2767	uint16_t vlan_id;
2768	int rc;
2769
2770	/* If link state is not up, no need to proceed */
2771	if (phba->link_state < LPFC_LINK_UP)
2772		goto out;
2773
2774	/* If FCF discovery period is over, no need to proceed */
2775	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2776		goto out;
2777
2778	/* Parse the FCF record from the non-embedded mailbox command */
2779	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2780						      &next_fcf_index);
2781	if (!new_fcf_record) {
2782		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2783				"2767 Mailbox command READ_FCF_RECORD "
2784				"failed to retrieve a FCF record.\n");
2785		goto out;
2786	}
2787
2788	/* Check the connection list for eligibility */
2789	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2790				      &addr_mode, &vlan_id);
2791
2792	/* Log the FCF record information if turned on */
2793	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2794				      next_fcf_index);
2795
2796	if (!rc)
2797		goto out;
2798
2799	/* Update the eligible FCF record index bmask */
2800	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2801
2802	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2803
2804out:
2805	lpfc_sli4_mbox_cmd_free(phba, mboxq);
2806}
2807
2808/**
2809 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
2810 * @phba: pointer to lpfc hba data structure.
2811 * @mboxq: pointer to mailbox data structure.
2812 *
2813 * This function handles completion of init vfi mailbox command.
2814 */
2815static void
2816lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2817{
2818	struct lpfc_vport *vport = mboxq->vport;
2819
2820	/*
2821	 * VFI not supported on interface type 0, just do the flogi
2822	 * Also continue if the VFI is in use - just use the same one.
2823	 */
2824	if (mboxq->u.mb.mbxStatus &&
2825	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2826			LPFC_SLI_INTF_IF_TYPE_0) &&
2827	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2828		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2829				 "2891 Init VFI mailbox failed 0x%x\n",
2830				 mboxq->u.mb.mbxStatus);
2831		mempool_free(mboxq, phba->mbox_mem_pool);
2832		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2833		return;
2834	}
2835
2836	lpfc_initial_flogi(vport);
2837	mempool_free(mboxq, phba->mbox_mem_pool);
2838	return;
2839}
2840
2841/**
2842 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
2843 * @vport: pointer to lpfc_vport data structure.
2844 *
2845 * This function issue a init_vfi mailbox command to initialize the VFI and
2846 * VPI for the physical port.
2847 */
2848void
2849lpfc_issue_init_vfi(struct lpfc_vport *vport)
2850{
2851	LPFC_MBOXQ_t *mboxq;
2852	int rc;
2853	struct lpfc_hba *phba = vport->phba;
2854
2855	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2856	if (!mboxq) {
2857		lpfc_printf_vlog(vport, KERN_ERR,
2858			LOG_TRACE_EVENT, "2892 Failed to allocate "
2859			"init_vfi mailbox\n");
2860		return;
2861	}
2862	lpfc_init_vfi(mboxq, vport);
2863	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2864	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2865	if (rc == MBX_NOT_FINISHED) {
2866		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2867				 "2893 Failed to issue init_vfi mailbox\n");
2868		mempool_free(mboxq, vport->phba->mbox_mem_pool);
2869	}
2870}
2871
2872/**
2873 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
2874 * @phba: pointer to lpfc hba data structure.
2875 * @mboxq: pointer to mailbox data structure.
2876 *
2877 * This function handles completion of init vpi mailbox command.
2878 */
2879void
2880lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2881{
2882	struct lpfc_vport *vport = mboxq->vport;
2883	struct lpfc_nodelist *ndlp;
2884	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2885
2886	if (mboxq->u.mb.mbxStatus) {
2887		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2888				 "2609 Init VPI mailbox failed 0x%x\n",
2889				 mboxq->u.mb.mbxStatus);
2890		mempool_free(mboxq, phba->mbox_mem_pool);
2891		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2892		return;
2893	}
2894	spin_lock_irq(shost->host_lock);
2895	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2896	spin_unlock_irq(shost->host_lock);
2897
2898	/* If this port is physical port or FDISC is done, do reg_vpi */
2899	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2900			ndlp = lpfc_findnode_did(vport, Fabric_DID);
2901			if (!ndlp)
2902				lpfc_printf_vlog(vport, KERN_ERR,
2903					LOG_TRACE_EVENT,
2904					"2731 Cannot find fabric "
2905					"controller node\n");
2906			else
2907				lpfc_register_new_vport(phba, vport, ndlp);
2908			mempool_free(mboxq, phba->mbox_mem_pool);
2909			return;
2910	}
2911
2912	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2913		lpfc_initial_fdisc(vport);
2914	else {
2915		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
2916		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2917				 "2606 No NPIV Fabric support\n");
2918	}
2919	mempool_free(mboxq, phba->mbox_mem_pool);
2920	return;
2921}
2922
2923/**
2924 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
2925 * @vport: pointer to lpfc_vport data structure.
2926 *
2927 * This function issue a init_vpi mailbox command to initialize
2928 * VPI for the vport.
2929 */
2930void
2931lpfc_issue_init_vpi(struct lpfc_vport *vport)
2932{
2933	LPFC_MBOXQ_t *mboxq;
2934	int rc, vpi;
2935
2936	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2937		vpi = lpfc_alloc_vpi(vport->phba);
2938		if (!vpi) {
2939			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2940					 "3303 Failed to obtain vport vpi\n");
2941			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2942			return;
2943		}
2944		vport->vpi = vpi;
2945	}
2946
2947	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2948	if (!mboxq) {
2949		lpfc_printf_vlog(vport, KERN_ERR,
2950			LOG_TRACE_EVENT, "2607 Failed to allocate "
2951			"init_vpi mailbox\n");
2952		return;
2953	}
2954	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2955	mboxq->vport = vport;
2956	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2957	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2958	if (rc == MBX_NOT_FINISHED) {
2959		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2960				 "2608 Failed to issue init_vpi mailbox\n");
2961		mempool_free(mboxq, vport->phba->mbox_mem_pool);
2962	}
2963}
2964
2965/**
2966 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
2967 * @phba: pointer to lpfc hba data structure.
2968 *
2969 * This function loops through the list of vports on the @phba and issues an
2970 * FDISC if possible.
2971 */
2972void
2973lpfc_start_fdiscs(struct lpfc_hba *phba)
2974{
2975	struct lpfc_vport **vports;
2976	int i;
2977
2978	vports = lpfc_create_vport_work_array(phba);
2979	if (vports != NULL) {
2980		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2981			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2982				continue;
2983			/* There are no vpi for this vport */
2984			if (vports[i]->vpi > phba->max_vpi) {
2985				lpfc_vport_set_state(vports[i],
2986						     FC_VPORT_FAILED);
2987				continue;
2988			}
2989			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2990				lpfc_vport_set_state(vports[i],
2991						     FC_VPORT_LINKDOWN);
2992				continue;
2993			}
2994			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
2995				lpfc_issue_init_vpi(vports[i]);
2996				continue;
2997			}
2998			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2999				lpfc_initial_fdisc(vports[i]);
3000			else {
3001				lpfc_vport_set_state(vports[i],
3002						     FC_VPORT_NO_FABRIC_SUPP);
3003				lpfc_printf_vlog(vports[i], KERN_ERR,
3004						 LOG_TRACE_EVENT,
3005						 "0259 No NPIV "
3006						 "Fabric support\n");
3007			}
3008		}
3009	}
3010	lpfc_destroy_vport_work_array(phba, vports);
3011}
3012
3013void
3014lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3015{
3016	struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
3017	struct lpfc_vport *vport = mboxq->vport;
3018	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3019
3020	/*
3021	 * VFI not supported for interface type 0, so ignore any mailbox
3022	 * error (except VFI in use) and continue with the discovery.
3023	 */
3024	if (mboxq->u.mb.mbxStatus &&
3025	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3026			LPFC_SLI_INTF_IF_TYPE_0) &&
3027	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
3028		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3029				 "2018 REG_VFI mbxStatus error x%x "
3030				 "HBA state x%x\n",
3031				 mboxq->u.mb.mbxStatus, vport->port_state);
3032		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3033			/* FLOGI failed, use loop map to make discovery list */
3034			lpfc_disc_list_loopmap(vport);
3035			/* Start discovery */
3036			lpfc_disc_start(vport);
3037			goto out_free_mem;
3038		}
3039		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3040		goto out_free_mem;
3041	}
3042
3043	/* If the VFI is already registered, there is nothing else to do
3044	 * Unless this was a VFI update and we are in PT2PT mode, then
3045	 * we should drop through to set the port state to ready.
3046	 */
3047	if (vport->fc_flag & FC_VFI_REGISTERED)
3048		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
3049		      vport->fc_flag & FC_PT2PT))
3050			goto out_free_mem;
3051
3052	/* The VPI is implicitly registered when the VFI is registered */
3053	spin_lock_irq(shost->host_lock);
3054	vport->vpi_state |= LPFC_VPI_REGISTERED;
3055	vport->fc_flag |= FC_VFI_REGISTERED;
3056	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3057	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
3058	spin_unlock_irq(shost->host_lock);
3059
3060	/* In case SLI4 FC loopback test, we are ready */
3061	if ((phba->sli_rev == LPFC_SLI_REV4) &&
3062	    (phba->link_flag & LS_LOOPBACK_MODE)) {
3063		phba->link_state = LPFC_HBA_READY;
3064		goto out_free_mem;
3065	}
3066
3067	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3068			 "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
3069			 "alpacnt:%d LinkState:%x topology:%x\n",
3070			 vport->port_state, vport->fc_flag, vport->fc_myDID,
3071			 vport->phba->alpa_map[0],
3072			 phba->link_state, phba->fc_topology);
3073
3074	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3075		/*
3076		 * For private loop or for NPort pt2pt,
3077		 * just start discovery and we are done.
3078		 */
3079		if ((vport->fc_flag & FC_PT2PT) ||
3080		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
3081		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {
3082
3083			/* Use loop map to make discovery list */
3084			lpfc_disc_list_loopmap(vport);
3085			/* Start discovery */
3086			if (vport->fc_flag & FC_PT2PT)
3087				vport->port_state = LPFC_VPORT_READY;
3088			else
3089				lpfc_disc_start(vport);
3090		} else {
3091			lpfc_start_fdiscs(phba);
3092			lpfc_do_scr_ns_plogi(phba, vport);
3093		}
3094	}
3095
3096out_free_mem:
3097	mempool_free(mboxq, phba->mbox_mem_pool);
3098	if (dmabuf) {
3099		lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
3100		kfree(dmabuf);
3101	}
3102	return;
3103}
3104
3105static void
3106lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3107{
3108	MAILBOX_t *mb = &pmb->u.mb;
3109	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3110	struct lpfc_vport  *vport = pmb->vport;
3111	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3112	struct serv_parm *sp = &vport->fc_sparam;
3113	uint32_t ed_tov;
3114
3115	/* Check for error */
3116	if (mb->mbxStatus) {
3117		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
3118		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3119				 "0319 READ_SPARAM mbxStatus error x%x "
3120				 "hba state x%x>\n",
3121				 mb->mbxStatus, vport->port_state);
3122		lpfc_linkdown(phba);
3123		goto out;
3124	}
3125
3126	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
3127	       sizeof (struct serv_parm));
3128
3129	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
3130	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
3131		ed_tov = (ed_tov + 999999) / 1000000;
3132
3133	phba->fc_edtov = ed_tov;
3134	phba->fc_ratov = (2 * ed_tov) / 1000;
3135	if (phba->fc_ratov < FF_DEF_RATOV) {
3136		/* RA_TOV should be atleast 10sec for initial flogi */
3137		phba->fc_ratov = FF_DEF_RATOV;
3138	}
3139
3140	lpfc_update_vport_wwn(vport);
3141	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3142	if (vport->port_type == LPFC_PHYSICAL_PORT) {
3143		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
3144		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
3145	}
3146
3147	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3148	kfree(mp);
3149	mempool_free(pmb, phba->mbox_mem_pool);
3150
3151	/* Check if sending the FLOGI is being deferred to after we get
3152	 * up to date CSPs from MBX_READ_SPARAM.
3153	 */
3154	if (phba->hba_flag & HBA_DEFER_FLOGI) {
3155		lpfc_initial_flogi(vport);
3156		phba->hba_flag &= ~HBA_DEFER_FLOGI;
3157	}
3158	return;
3159
3160out:
3161	pmb->ctx_buf = NULL;
3162	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3163	kfree(mp);
3164	lpfc_issue_clear_la(phba, vport);
3165	mempool_free(pmb, phba->mbox_mem_pool);
3166	return;
3167}
3168
3169static void
3170lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3171{
3172	struct lpfc_vport *vport = phba->pport;
3173	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
3174	struct Scsi_Host *shost;
3175	int i;
3176	struct lpfc_dmabuf *mp;
3177	int rc;
3178	struct fcf_record *fcf_record;
3179	uint32_t fc_flags = 0;
3180	unsigned long iflags;
3181
3182	spin_lock_irqsave(&phba->hbalock, iflags);
3183	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3184
3185	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3186		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
3187		case LPFC_LINK_SPEED_1GHZ:
3188		case LPFC_LINK_SPEED_2GHZ:
3189		case LPFC_LINK_SPEED_4GHZ:
3190		case LPFC_LINK_SPEED_8GHZ:
3191		case LPFC_LINK_SPEED_10GHZ:
3192		case LPFC_LINK_SPEED_16GHZ:
3193		case LPFC_LINK_SPEED_32GHZ:
3194		case LPFC_LINK_SPEED_64GHZ:
3195		case LPFC_LINK_SPEED_128GHZ:
3196			break;
3197		default:
3198			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
3199			break;
3200		}
3201	}
3202
3203	if (phba->fc_topology &&
3204	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3205		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3206				"3314 Toplogy changed was 0x%x is 0x%x\n",
3207				phba->fc_topology,
3208				bf_get(lpfc_mbx_read_top_topology, la));
3209		phba->fc_topology_changed = 1;
3210	}
3211
3212	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3213	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
3214
3215	shost = lpfc_shost_from_vport(vport);
3216	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3217		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3218
3219		/* if npiv is enabled and this adapter supports npiv log
3220		 * a message that npiv is not supported in this topology
3221		 */
3222		if (phba->cfg_enable_npiv && phba->max_vpi)
3223			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3224				"1309 Link Up Event npiv not supported in loop "
3225				"topology\n");
3226				/* Get Loop Map information */
3227		if (bf_get(lpfc_mbx_read_top_il, la))
3228			fc_flags |= FC_LBIT;
3229
3230		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3231		i = la->lilpBde64.tus.f.bdeSize;
3232
3233		if (i == 0) {
3234			phba->alpa_map[0] = 0;
3235		} else {
3236			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3237				int numalpa, j, k;
3238				union {
3239					uint8_t pamap[16];
3240					struct {
3241						uint32_t wd1;
3242						uint32_t wd2;
3243						uint32_t wd3;
3244						uint32_t wd4;
3245					} pa;
3246				} un;
3247				numalpa = phba->alpa_map[0];
3248				j = 0;
3249				while (j < numalpa) {
3250					memset(un.pamap, 0, 16);
3251					for (k = 1; j < numalpa; k++) {
3252						un.pamap[k - 1] =
3253							phba->alpa_map[j + 1];
3254						j++;
3255						if (k == 16)
3256							break;
3257					}
3258					/* Link Up Event ALPA map */
3259					lpfc_printf_log(phba,
3260							KERN_WARNING,
3261							LOG_LINK_EVENT,
3262							"1304 Link Up Event "
3263							"ALPA map Data: x%x "
3264							"x%x x%x x%x\n",
3265							un.pa.wd1, un.pa.wd2,
3266							un.pa.wd3, un.pa.wd4);
3267				}
3268			}
3269		}
3270	} else {
3271		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3272			if (phba->max_vpi && phba->cfg_enable_npiv &&
3273			   (phba->sli_rev >= LPFC_SLI_REV3))
3274				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3275		}
3276		vport->fc_myDID = phba->fc_pref_DID;
3277		fc_flags |= FC_LBIT;
3278	}
3279	spin_unlock_irqrestore(&phba->hbalock, iflags);
3280
3281	if (fc_flags) {
3282		spin_lock_irqsave(shost->host_lock, iflags);
3283		vport->fc_flag |= fc_flags;
3284		spin_unlock_irqrestore(shost->host_lock, iflags);
3285	}
3286
3287	lpfc_linkup(phba);
3288	sparam_mbox = NULL;
3289
3290	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3291	if (!sparam_mbox)
3292		goto out;
3293
3294	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3295	if (rc) {
3296		mempool_free(sparam_mbox, phba->mbox_mem_pool);
3297		goto out;
3298	}
3299	sparam_mbox->vport = vport;
3300	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3301	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3302	if (rc == MBX_NOT_FINISHED) {
3303		mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
3304		lpfc_mbuf_free(phba, mp->virt, mp->phys);
3305		kfree(mp);
3306		mempool_free(sparam_mbox, phba->mbox_mem_pool);
3307		goto out;
3308	}
3309
3310	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3311		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3312		if (!cfglink_mbox)
3313			goto out;
3314		vport->port_state = LPFC_LOCAL_CFG_LINK;
3315		lpfc_config_link(phba, cfglink_mbox);
3316		cfglink_mbox->vport = vport;
3317		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3318		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3319		if (rc == MBX_NOT_FINISHED) {
3320			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3321			goto out;
3322		}
3323	} else {
3324		vport->port_state = LPFC_VPORT_UNKNOWN;
3325		/*
3326		 * Add the driver's default FCF record at FCF index 0 now. This
3327		 * is phase 1 implementation that support FCF index 0 and driver
3328		 * defaults.
3329		 */
3330		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3331			fcf_record = kzalloc(sizeof(struct fcf_record),
3332					GFP_KERNEL);
3333			if (unlikely(!fcf_record)) {
3334				lpfc_printf_log(phba, KERN_ERR,
3335					LOG_TRACE_EVENT,
3336					"2554 Could not allocate memory for "
3337					"fcf record\n");
3338				rc = -ENODEV;
3339				goto out;
3340			}
3341
3342			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3343						LPFC_FCOE_FCF_DEF_INDEX);
3344			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3345			if (unlikely(rc)) {
3346				lpfc_printf_log(phba, KERN_ERR,
3347					LOG_TRACE_EVENT,
3348					"2013 Could not manually add FCF "
3349					"record 0, status %d\n", rc);
3350				rc = -ENODEV;
3351				kfree(fcf_record);
3352				goto out;
3353			}
3354			kfree(fcf_record);
3355		}
3356		/*
3357		 * The driver is expected to do FIP/FCF. Call the port
3358		 * and get the FCF Table.
3359		 */
3360		spin_lock_irqsave(&phba->hbalock, iflags);
3361		if (phba->hba_flag & FCF_TS_INPROG) {
3362			spin_unlock_irqrestore(&phba->hbalock, iflags);
3363			return;
3364		}
3365		/* This is the initial FCF discovery scan */
3366		phba->fcf.fcf_flag |= FCF_INIT_DISC;
3367		spin_unlock_irqrestore(&phba->hbalock, iflags);
3368		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3369				"2778 Start FCF table scan at linkup\n");
3370		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3371						     LPFC_FCOE_FCF_GET_FIRST);
3372		if (rc) {
3373			spin_lock_irqsave(&phba->hbalock, iflags);
3374			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3375			spin_unlock_irqrestore(&phba->hbalock, iflags);
3376			goto out;
3377		}
3378		/* Reset FCF roundrobin bmask for new discovery */
3379		lpfc_sli4_clear_fcf_rr_bmask(phba);
3380	}
3381
3382	/* Prepare for LINK up registrations */
3383	memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
3384	scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
3385		  init_utsname()->nodename);
3386	return;
3387out:
3388	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3389	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3390			 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
3391			 vport->port_state, sparam_mbox, cfglink_mbox);
3392	lpfc_issue_clear_la(phba, vport);
3393	return;
3394}
3395
3396static void
3397lpfc_enable_la(struct lpfc_hba *phba)
3398{
3399	uint32_t control;
3400	struct lpfc_sli *psli = &phba->sli;
3401	spin_lock_irq(&phba->hbalock);
3402	psli->sli_flag |= LPFC_PROCESS_LA;
3403	if (phba->sli_rev <= LPFC_SLI_REV3) {
3404		control = readl(phba->HCregaddr);
3405		control |= HC_LAINT_ENA;
3406		writel(control, phba->HCregaddr);
3407		readl(phba->HCregaddr); /* flush */
3408	}
3409	spin_unlock_irq(&phba->hbalock);
3410}
3411
3412static void
3413lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3414{
3415	lpfc_linkdown(phba);
3416	lpfc_enable_la(phba);
3417	lpfc_unregister_unused_fcf(phba);
3418	/* turn on Link Attention interrupts - no CLEAR_LA needed */
3419}
3420
3421
3422/*
3423 * This routine handles processing a READ_TOPOLOGY mailbox
3424 * command upon completion. It is setup in the LPFC_MBOXQ
3425 * as the completion routine when the command is
3426 * handed off to the SLI layer. SLI4 only.
3427 */
3428void
3429lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3430{
3431	struct lpfc_vport *vport = pmb->vport;
3432	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3433	struct lpfc_mbx_read_top *la;
3434	struct lpfc_sli_ring *pring;
3435	MAILBOX_t *mb = &pmb->u.mb;
3436	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3437	uint8_t attn_type;
3438	unsigned long iflags;
3439
3440	/* Unblock ELS traffic */
3441	pring = lpfc_phba_elsring(phba);
3442	if (pring)
3443		pring->flag &= ~LPFC_STOP_IOCB_EVENT;
3444
3445	/* Check for error */
3446	if (mb->mbxStatus) {
3447		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3448				"1307 READ_LA mbox error x%x state x%x\n",
3449				mb->mbxStatus, vport->port_state);
3450		lpfc_mbx_issue_link_down(phba);
3451		phba->link_state = LPFC_HBA_ERROR;
3452		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3453	}
3454
3455	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3456	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
3457
3458	memcpy(&phba->alpa_map[0], mp->virt, 128);
3459
3460	spin_lock_irqsave(shost->host_lock, iflags);
3461	if (bf_get(lpfc_mbx_read_top_pb, la))
3462		vport->fc_flag |= FC_BYPASSED_MODE;
3463	else
3464		vport->fc_flag &= ~FC_BYPASSED_MODE;
3465	spin_unlock_irqrestore(shost->host_lock, iflags);
3466
3467	if (phba->fc_eventTag <= la->eventTag) {
3468		phba->fc_stat.LinkMultiEvent++;
3469		if (attn_type == LPFC_ATT_LINK_UP)
3470			if (phba->fc_eventTag != 0)
3471				lpfc_linkdown(phba);
3472	}
3473
3474	phba->fc_eventTag = la->eventTag;
3475	if (phba->sli_rev < LPFC_SLI_REV4) {
3476		spin_lock_irqsave(&phba->hbalock, iflags);
3477		if (bf_get(lpfc_mbx_read_top_mm, la))
3478			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3479		else
3480			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3481		spin_unlock_irqrestore(&phba->hbalock, iflags);
3482	}
3483
3484	phba->link_events++;
3485	if ((attn_type == LPFC_ATT_LINK_UP) &&
3486	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
3487		phba->fc_stat.LinkUp++;
3488		if (phba->link_flag & LS_LOOPBACK_MODE) {
3489			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3490					"1306 Link Up Event in loop back mode "
3491					"x%x received Data: x%x x%x x%x x%x\n",
3492					la->eventTag, phba->fc_eventTag,
3493					bf_get(lpfc_mbx_read_top_alpa_granted,
3494					       la),
3495					bf_get(lpfc_mbx_read_top_link_spd, la),
3496					phba->alpa_map[0]);
3497		} else {
3498			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3499					"1303 Link Up Event x%x received "
3500					"Data: x%x x%x x%x x%x x%x x%x %d\n",
3501					la->eventTag, phba->fc_eventTag,
3502					bf_get(lpfc_mbx_read_top_alpa_granted,
3503					       la),
3504					bf_get(lpfc_mbx_read_top_link_spd, la),
3505					phba->alpa_map[0],
3506					bf_get(lpfc_mbx_read_top_mm, la),
3507					bf_get(lpfc_mbx_read_top_fa, la),
3508					phba->wait_4_mlo_maint_flg);
3509		}
3510		lpfc_mbx_process_link_up(phba, la);
3511	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
3512		   attn_type == LPFC_ATT_UNEXP_WWPN) {
3513		phba->fc_stat.LinkDown++;
3514		if (phba->link_flag & LS_LOOPBACK_MODE)
3515			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3516				"1308 Link Down Event in loop back mode "
3517				"x%x received "
3518				"Data: x%x x%x x%x\n",
3519				la->eventTag, phba->fc_eventTag,
3520				phba->pport->port_state, vport->fc_flag);
3521		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
3522			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3523				"1313 Link Down Unexpected FA WWPN Event x%x "
3524				"received Data: x%x x%x x%x x%x x%x\n",
3525				la->eventTag, phba->fc_eventTag,
3526				phba->pport->port_state, vport->fc_flag,
3527				bf_get(lpfc_mbx_read_top_mm, la),
3528				bf_get(lpfc_mbx_read_top_fa, la));
3529		else
3530			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3531				"1305 Link Down Event x%x received "
3532				"Data: x%x x%x x%x x%x x%x\n",
3533				la->eventTag, phba->fc_eventTag,
3534				phba->pport->port_state, vport->fc_flag,
3535				bf_get(lpfc_mbx_read_top_mm, la),
3536				bf_get(lpfc_mbx_read_top_fa, la));
3537		lpfc_mbx_issue_link_down(phba);
3538	}
3539	if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
3540	    attn_type == LPFC_ATT_LINK_UP) {
3541		if (phba->link_state != LPFC_LINK_DOWN) {
3542			phba->fc_stat.LinkDown++;
3543			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3544				"1312 Link Down Event x%x received "
3545				"Data: x%x x%x x%x\n",
3546				la->eventTag, phba->fc_eventTag,
3547				phba->pport->port_state, vport->fc_flag);
3548			lpfc_mbx_issue_link_down(phba);
3549		} else
3550			lpfc_enable_la(phba);
3551
3552		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3553				"1310 Menlo Maint Mode Link up Event x%x rcvd "
3554				"Data: x%x x%x x%x\n",
3555				la->eventTag, phba->fc_eventTag,
3556				phba->pport->port_state, vport->fc_flag);
3557		/*
3558		 * The cmnd that triggered this will be waiting for this
3559		 * signal.
3560		 */
3561		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
3562		if (phba->wait_4_mlo_maint_flg) {
3563			phba->wait_4_mlo_maint_flg = 0;
3564			wake_up_interruptible(&phba->wait_4_mlo_m_q);
3565		}
3566	}
3567
3568	if ((phba->sli_rev < LPFC_SLI_REV4) &&
3569	    bf_get(lpfc_mbx_read_top_fa, la)) {
3570		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
3571			lpfc_issue_clear_la(phba, vport);
3572		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3573				"1311 fa %d\n",
3574				bf_get(lpfc_mbx_read_top_fa, la));
3575	}
3576
3577lpfc_mbx_cmpl_read_topology_free_mbuf:
3578	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3579	kfree(mp);
3580	mempool_free(pmb, phba->mbox_mem_pool);
3581	return;
3582}
3583
3584/*
3585 * This routine handles processing a REG_LOGIN mailbox
3586 * command upon completion. It is setup in the LPFC_MBOXQ
3587 * as the completion routine when the command is
3588 * handed off to the SLI layer.
3589 */
3590void
3591lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3592{
3593	struct lpfc_vport  *vport = pmb->vport;
3594	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3595	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3596	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3597
3598	pmb->ctx_buf = NULL;
3599	pmb->ctx_ndlp = NULL;
3600
3601	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3602			 "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
3603			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3604			 kref_read(&ndlp->kref),
3605			 ndlp->nlp_usg_map, ndlp);
3606	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3607		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3608
3609	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3610	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3611		/* We rcvd a rscn after issuing this
3612		 * mbox reg login, we may have cycled
3613		 * back through the state and be
3614		 * back at reg login state so this
3615		 * mbox needs to be ignored becase
3616		 * there is another reg login in
3617		 * process.
3618		 */
3619		spin_lock_irq(shost->host_lock);
3620		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3621		spin_unlock_irq(shost->host_lock);
3622
3623		/*
3624		 * We cannot leave the RPI registered because
3625		 * if we go thru discovery again for this ndlp
3626		 * a subsequent REG_RPI will fail.
3627		 */
3628		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3629		lpfc_unreg_rpi(vport, ndlp);
3630	}
3631
3632	/* Call state machine */
3633	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3634
3635	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3636	kfree(mp);
3637	mempool_free(pmb, phba->mbox_mem_pool);
3638	/* decrement the node reference count held for this callback
3639	 * function.
3640	 */
3641	lpfc_nlp_put(ndlp);
3642
3643	return;
3644}
3645
3646static void
3647lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3648{
3649	MAILBOX_t *mb = &pmb->u.mb;
3650	struct lpfc_vport *vport = pmb->vport;
3651	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3652
3653	switch (mb->mbxStatus) {
3654	case 0x0011:
3655	case 0x0020:
3656		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3657				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3658				 mb->mbxStatus);
3659		break;
3660	/* If VPI is busy, reset the HBA */
3661	case 0x9700:
3662		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3663			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3664			vport->vpi, mb->mbxStatus);
3665		if (!(phba->pport->load_flag & FC_UNLOADING))
3666			lpfc_workq_post_event(phba, NULL, NULL,
3667				LPFC_EVT_RESET_HBA);
3668	}
3669	spin_lock_irq(shost->host_lock);
3670	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3671	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3672	spin_unlock_irq(shost->host_lock);
3673	vport->unreg_vpi_cmpl = VPORT_OK;
3674	mempool_free(pmb, phba->mbox_mem_pool);
3675	lpfc_cleanup_vports_rrqs(vport, NULL);
3676	/*
3677	 * This shost reference might have been taken at the beginning of
3678	 * lpfc_vport_delete()
3679	 */
3680	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3681		scsi_host_put(shost);
3682}
3683
3684int
3685lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3686{
3687	struct lpfc_hba  *phba = vport->phba;
3688	LPFC_MBOXQ_t *mbox;
3689	int rc;
3690
3691	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3692	if (!mbox)
3693		return 1;
3694
3695	lpfc_unreg_vpi(phba, vport->vpi, mbox);
3696	mbox->vport = vport;
3697	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3698	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3699	if (rc == MBX_NOT_FINISHED) {
3700		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3701				 "1800 Could not issue unreg_vpi\n");
3702		mempool_free(mbox, phba->mbox_mem_pool);
3703		vport->unreg_vpi_cmpl = VPORT_ERROR;
3704		return rc;
3705	}
3706	return 0;
3707}
3708
3709static void
3710lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3711{
3712	struct lpfc_vport *vport = pmb->vport;
3713	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3714	MAILBOX_t *mb = &pmb->u.mb;
3715
3716	switch (mb->mbxStatus) {
3717	case 0x0011:
3718	case 0x9601:
3719	case 0x9602:
3720		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3721				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3722				 mb->mbxStatus);
3723		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3724		spin_lock_irq(shost->host_lock);
3725		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3726		spin_unlock_irq(shost->host_lock);
3727		vport->fc_myDID = 0;
3728
3729		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3730		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3731			if (phba->nvmet_support)
3732				lpfc_nvmet_update_targetport(phba);
3733			else
3734				lpfc_nvme_update_localport(vport);
3735		}
3736		goto out;
3737	}
3738
3739	spin_lock_irq(shost->host_lock);
3740	vport->vpi_state |= LPFC_VPI_REGISTERED;
3741	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3742	spin_unlock_irq(shost->host_lock);
3743	vport->num_disc_nodes = 0;
3744	/* go thru NPR list and issue ELS PLOGIs */
3745	if (vport->fc_npr_cnt)
3746		lpfc_els_disc_plogi(vport);
3747
3748	if (!vport->num_disc_nodes) {
3749		spin_lock_irq(shost->host_lock);
3750		vport->fc_flag &= ~FC_NDISC_ACTIVE;
3751		spin_unlock_irq(shost->host_lock);
3752		lpfc_can_disctmo(vport);
3753	}
3754	vport->port_state = LPFC_VPORT_READY;
3755
3756out:
3757	mempool_free(pmb, phba->mbox_mem_pool);
3758	return;
3759}
3760
3761/**
3762 * lpfc_create_static_vport - Read HBA config region to create static vports.
3763 * @phba: pointer to lpfc hba data structure.
3764 *
3765 * This routine issue a DUMP mailbox command for config region 22 to get
3766 * the list of static vports to be created. The function create vports
3767 * based on the information returned from the HBA.
3768 **/
3769void
3770lpfc_create_static_vport(struct lpfc_hba *phba)
3771{
3772	LPFC_MBOXQ_t *pmb = NULL;
3773	MAILBOX_t *mb;
3774	struct static_vport_info *vport_info;
3775	int mbx_wait_rc = 0, i;
3776	struct fc_vport_identifiers vport_id;
3777	struct fc_vport *new_fc_vport;
3778	struct Scsi_Host *shost;
3779	struct lpfc_vport *vport;
3780	uint16_t offset = 0;
3781	uint8_t *vport_buff;
3782	struct lpfc_dmabuf *mp;
3783	uint32_t byte_count = 0;
3784
3785	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3786	if (!pmb) {
3787		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3788				"0542 lpfc_create_static_vport failed to"
3789				" allocate mailbox memory\n");
3790		return;
3791	}
3792	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
3793	mb = &pmb->u.mb;
3794
3795	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
3796	if (!vport_info) {
3797		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3798				"0543 lpfc_create_static_vport failed to"
3799				" allocate vport_info\n");
3800		mempool_free(pmb, phba->mbox_mem_pool);
3801		return;
3802	}
3803
3804	vport_buff = (uint8_t *) vport_info;
3805	do {
3806		/* free dma buffer from previous round */
3807		if (pmb->ctx_buf) {
3808			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3809			lpfc_mbuf_free(phba, mp->virt, mp->phys);
3810			kfree(mp);
3811		}
3812		if (lpfc_dump_static_vport(phba, pmb, offset))
3813			goto out;
3814
3815		pmb->vport = phba->pport;
3816		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
3817							LPFC_MBOX_TMO);
3818
3819		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
3820			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3821				"0544 lpfc_create_static_vport failed to"
3822				" issue dump mailbox command ret 0x%x "
3823				"status 0x%x\n",
3824				mbx_wait_rc, mb->mbxStatus);
3825			goto out;
3826		}
3827
3828		if (phba->sli_rev == LPFC_SLI_REV4) {
3829			byte_count = pmb->u.mqe.un.mb_words[5];
3830			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3831			if (byte_count > sizeof(struct static_vport_info) -
3832					offset)
3833				byte_count = sizeof(struct static_vport_info)
3834					- offset;
3835			memcpy(vport_buff + offset, mp->virt, byte_count);
3836			offset += byte_count;
3837		} else {
3838			if (mb->un.varDmp.word_cnt >
3839				sizeof(struct static_vport_info) - offset)
3840				mb->un.varDmp.word_cnt =
3841					sizeof(struct static_vport_info)
3842						- offset;
3843			byte_count = mb->un.varDmp.word_cnt;
3844			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
3845				vport_buff + offset,
3846				byte_count);
3847
3848			offset += byte_count;
3849		}
3850
3851	} while (byte_count &&
3852		offset < sizeof(struct static_vport_info));
3853
3854
3855	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
3856		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
3857			!= VPORT_INFO_REV)) {
3858		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3859				"0545 lpfc_create_static_vport bad"
3860				" information header 0x%x 0x%x\n",
3861				le32_to_cpu(vport_info->signature),
3862				le32_to_cpu(vport_info->rev) &
3863				VPORT_INFO_REV_MASK);
3864
3865		goto out;
3866	}
3867
3868	shost = lpfc_shost_from_vport(phba->pport);
3869
3870	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
3871		memset(&vport_id, 0, sizeof(vport_id));
3872		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
3873		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
3874		if (!vport_id.port_name || !vport_id.node_name)
3875			continue;
3876
3877		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
3878		vport_id.vport_type = FC_PORTTYPE_NPIV;
3879		vport_id.disable = false;
3880		new_fc_vport = fc_vport_create(shost, 0, &vport_id);
3881
3882		if (!new_fc_vport) {
3883			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3884				"0546 lpfc_create_static_vport failed to"
3885				" create vport\n");
3886			continue;
3887		}
3888
3889		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
3890		vport->vport_flag |= STATIC_VPORT;
3891	}
3892
3893out:
3894	kfree(vport_info);
3895	if (mbx_wait_rc != MBX_TIMEOUT) {
3896		if (pmb->ctx_buf) {
3897			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3898			lpfc_mbuf_free(phba, mp->virt, mp->phys);
3899			kfree(mp);
3900		}
3901		mempool_free(pmb, phba->mbox_mem_pool);
3902	}
3903
3904	return;
3905}
3906
3907/*
3908 * This routine handles processing a Fabric REG_LOGIN mailbox
3909 * command upon completion. It is setup in the LPFC_MBOXQ
3910 * as the completion routine when the command is
3911 * handed off to the SLI layer.
3912 */
3913void
3914lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3915{
3916	struct lpfc_vport *vport = pmb->vport;
3917	MAILBOX_t *mb = &pmb->u.mb;
3918	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3919	struct lpfc_nodelist *ndlp;
3920	struct Scsi_Host *shost;
3921
3922	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3923	pmb->ctx_ndlp = NULL;
3924	pmb->ctx_buf = NULL;
3925
3926	if (mb->mbxStatus) {
3927		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3928				 "0258 Register Fabric login error: 0x%x\n",
3929				 mb->mbxStatus);
3930		lpfc_mbuf_free(phba, mp->virt, mp->phys);
3931		kfree(mp);
3932		mempool_free(pmb, phba->mbox_mem_pool);
3933
3934		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3935			/* FLOGI failed, use loop map to make discovery list */
3936			lpfc_disc_list_loopmap(vport);
3937
3938			/* Start discovery */
3939			lpfc_disc_start(vport);
3940			/* Decrement the reference count to ndlp after the
3941			 * reference to the ndlp are done.
3942			 */
3943			lpfc_nlp_put(ndlp);
3944			return;
3945		}
3946
3947		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3948		/* Decrement the reference count to ndlp after the reference
3949		 * to the ndlp are done.
3950		 */
3951		lpfc_nlp_put(ndlp);
3952		return;
3953	}
3954
3955	if (phba->sli_rev < LPFC_SLI_REV4)
3956		ndlp->nlp_rpi = mb->un.varWords[0];
3957	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3958	ndlp->nlp_type |= NLP_FABRIC;
3959	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3960
3961	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3962		/* when physical port receive logo donot start
3963		 * vport discovery */
3964		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3965			lpfc_start_fdiscs(phba);
3966		else {
3967			shost = lpfc_shost_from_vport(vport);
3968			spin_lock_irq(shost->host_lock);
3969			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
3970			spin_unlock_irq(shost->host_lock);
3971		}
3972		lpfc_do_scr_ns_plogi(phba, vport);
3973	}
3974
3975	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3976	kfree(mp);
3977	mempool_free(pmb, phba->mbox_mem_pool);
3978
3979	/* Drop the reference count from the mbox at the end after
3980	 * all the current reference to the ndlp have been done.
3981	 */
3982	lpfc_nlp_put(ndlp);
3983	return;
3984}
3985
3986 /*
3987  * This routine will issue a GID_FT for each FC4 Type supported
3988  * by the driver. ALL GID_FTs must complete before discovery is started.
3989  */
3990int
3991lpfc_issue_gidft(struct lpfc_vport *vport)
3992{
3993	/* Good status, issue CT Request to NameServer */
3994	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3995	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
3996		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
3997			/* Cannot issue NameServer FCP Query, so finish up
3998			 * discovery
3999			 */
4000			lpfc_printf_vlog(vport, KERN_ERR,
4001					 LOG_TRACE_EVENT,
4002					 "0604 %s FC TYPE %x %s\n",
4003					 "Failed to issue GID_FT to ",
4004					 FC_TYPE_FCP,
4005					 "Finishing discovery.");
4006			return 0;
4007		}
4008		vport->gidft_inp++;
4009	}
4010
4011	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4012	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
4013		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
4014			/* Cannot issue NameServer NVME Query, so finish up
4015			 * discovery
4016			 */
4017			lpfc_printf_vlog(vport, KERN_ERR,
4018					 LOG_TRACE_EVENT,
4019					 "0605 %s FC_TYPE %x %s %d\n",
4020					 "Failed to issue GID_FT to ",
4021					 FC_TYPE_NVME,
4022					 "Finishing discovery: gidftinp ",
4023					 vport->gidft_inp);
4024			if (vport->gidft_inp == 0)
4025				return 0;
4026		} else
4027			vport->gidft_inp++;
4028	}
4029	return vport->gidft_inp;
4030}
4031
4032/**
4033 * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
4034 * @vport: The virtual port for which this call is being executed.
4035 *
4036 * This routine will issue a GID_PT to get a list of all N_Ports
4037 *
4038 * Return value :
4039 *   0 - Failure to issue a GID_PT
4040 *   1 - GID_PT issued
4041 **/
4042int
4043lpfc_issue_gidpt(struct lpfc_vport *vport)
4044{
4045	/* Good status, issue CT Request to NameServer */
4046	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
4047		/* Cannot issue NameServer FCP Query, so finish up
4048		 * discovery
4049		 */
4050		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4051				 "0606 %s Port TYPE %x %s\n",
4052				 "Failed to issue GID_PT to ",
4053				 GID_PT_N_PORT,
4054				 "Finishing discovery.");
4055		return 0;
4056	}
4057	vport->gidft_inp++;
4058	return 1;
4059}
4060
4061/*
4062 * This routine handles processing a NameServer REG_LOGIN mailbox
4063 * command upon completion. It is setup in the LPFC_MBOXQ
4064 * as the completion routine when the command is
4065 * handed off to the SLI layer.
4066 */
4067void
4068lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4069{
4070	MAILBOX_t *mb = &pmb->u.mb;
4071	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
4072	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4073	struct lpfc_vport *vport = pmb->vport;
4074
4075	pmb->ctx_buf = NULL;
4076	pmb->ctx_ndlp = NULL;
4077	vport->gidft_inp = 0;
4078
4079	if (mb->mbxStatus) {
4080		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4081				 "0260 Register NameServer error: 0x%x\n",
4082				 mb->mbxStatus);
4083
4084out:
4085		/* decrement the node reference count held for this
4086		 * callback function.
4087		 */
4088		lpfc_nlp_put(ndlp);
4089		lpfc_mbuf_free(phba, mp->virt, mp->phys);
4090		kfree(mp);
4091		mempool_free(pmb, phba->mbox_mem_pool);
4092
4093		/* If no other thread is using the ndlp, free it */
4094		lpfc_nlp_not_used(ndlp);
4095
4096		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4097			/*
4098			 * RegLogin failed, use loop map to make discovery
4099			 * list
4100			 */
4101			lpfc_disc_list_loopmap(vport);
4102
4103			/* Start discovery */
4104			lpfc_disc_start(vport);
4105			return;
4106		}
4107		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4108		return;
4109	}
4110
4111	if (phba->sli_rev < LPFC_SLI_REV4)
4112		ndlp->nlp_rpi = mb->un.varWords[0];
4113	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4114	ndlp->nlp_type |= NLP_FABRIC;
4115	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4116	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
4117			 "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
4118			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4119			 kref_read(&ndlp->kref),
4120			 ndlp->nlp_usg_map, ndlp);
4121
4122	if (vport->port_state < LPFC_VPORT_READY) {
4123		/* Link up discovery requires Fabric registration. */
4124		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
4125		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
4126		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
4127		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
4128
4129		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4130		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
4131			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
4132
4133		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4134		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
4135			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
4136				    FC_TYPE_NVME);
4137
4138		/* Issue SCR just before NameServer GID_FT Query */
4139		lpfc_issue_els_scr(vport, 0);
4140
4141		lpfc_issue_els_rdf(vport, 0);
4142	}
4143
4144	vport->fc_ns_retry = 0;
4145	if (lpfc_issue_gidft(vport) == 0)
4146		goto out;
4147
4148	/*
4149	 * At this point in time we may need to wait for multiple
4150	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
4151	 *
4152	 * decrement the node reference count held for this
4153	 * callback function.
4154	 */
4155	lpfc_nlp_put(ndlp);
4156	lpfc_mbuf_free(phba, mp->virt, mp->phys);
4157	kfree(mp);
4158	mempool_free(pmb, phba->mbox_mem_pool);
4159
4160	return;
4161}
4162
4163static void
4164lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4165{
4166	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4167	struct fc_rport  *rport;
4168	struct lpfc_rport_data *rdata;
4169	struct fc_rport_identifiers rport_ids;
4170	struct lpfc_hba  *phba = vport->phba;
4171
4172	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4173		return;
4174
4175	/* Remote port has reappeared. Re-register w/ FC transport */
4176	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
4177	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4178	rport_ids.port_id = ndlp->nlp_DID;
4179	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4180
4181	/*
4182	 * We leave our node pointer in rport->dd_data when we unregister a
4183	 * FCP target port.  But fc_remote_port_add zeros the space to which
4184	 * rport->dd_data points.  So, if we're reusing a previously
4185	 * registered port, drop the reference that we took the last time we
4186	 * registered the port.
4187	 */
4188	rport = ndlp->rport;
4189	if (rport) {
4190		rdata = rport->dd_data;
4191		/* break the link before dropping the ref */
4192		ndlp->rport = NULL;
4193		if (rdata) {
4194			if (rdata->pnode == ndlp)
4195				lpfc_nlp_put(ndlp);
4196			rdata->pnode = NULL;
4197		}
4198		/* drop reference for earlier registeration */
4199		put_device(&rport->dev);
4200	}
4201
4202	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4203		"rport add:       did:x%x flg:x%x type x%x",
4204		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4205
4206	/* Don't add the remote port if unloading. */
4207	if (vport->load_flag & FC_UNLOADING)
4208		return;
4209
4210	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4211	if (!rport || !get_device(&rport->dev)) {
4212		dev_printk(KERN_WARNING, &phba->pcidev->dev,
4213			   "Warning: fc_remote_port_add failed\n");
4214		return;
4215	}
4216
4217	/* initialize static port data */
4218	rport->maxframe_size = ndlp->nlp_maxframe;
4219	rport->supported_classes = ndlp->nlp_class_sup;
4220	rdata = rport->dd_data;
4221	rdata->pnode = lpfc_nlp_get(ndlp);
4222
4223	if (ndlp->nlp_type & NLP_FCP_TARGET)
4224		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
4225	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
4226		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
4227	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
4228		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
4229	if (ndlp->nlp_type & NLP_NVME_TARGET)
4230		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
4231	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
4232		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
4233
4234	if (rport_ids.roles !=  FC_RPORT_ROLE_UNKNOWN)
4235		fc_remote_port_rolechg(rport, rport_ids.roles);
4236
4237	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4238			 "3183 rport register x%06x, rport x%px role x%x\n",
4239			 ndlp->nlp_DID, rport, rport_ids.roles);
4240
4241	if ((rport->scsi_target_id != -1) &&
4242	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
4243		ndlp->nlp_sid = rport->scsi_target_id;
4244	}
4245	return;
4246}
4247
4248static void
4249lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4250{
4251	struct fc_rport *rport = ndlp->rport;
4252	struct lpfc_vport *vport = ndlp->vport;
4253
4254	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4255		return;
4256
4257	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4258		"rport delete:    did:x%x flg:x%x type x%x",
4259		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4260
4261	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4262			 "3184 rport unregister x%06x, rport x%px\n",
4263			 ndlp->nlp_DID, rport);
4264
4265	fc_remote_port_delete(rport);
4266
4267	return;
4268}
4269
4270static void
4271lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
4272{
4273	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4274	unsigned long iflags;
4275
4276	spin_lock_irqsave(shost->host_lock, iflags);
4277	switch (state) {
4278	case NLP_STE_UNUSED_NODE:
4279		vport->fc_unused_cnt += count;
4280		break;
4281	case NLP_STE_PLOGI_ISSUE:
4282		vport->fc_plogi_cnt += count;
4283		break;
4284	case NLP_STE_ADISC_ISSUE:
4285		vport->fc_adisc_cnt += count;
4286		break;
4287	case NLP_STE_REG_LOGIN_ISSUE:
4288		vport->fc_reglogin_cnt += count;
4289		break;
4290	case NLP_STE_PRLI_ISSUE:
4291		vport->fc_prli_cnt += count;
4292		break;
4293	case NLP_STE_UNMAPPED_NODE:
4294		vport->fc_unmap_cnt += count;
4295		break;
4296	case NLP_STE_MAPPED_NODE:
4297		vport->fc_map_cnt += count;
4298		break;
4299	case NLP_STE_NPR_NODE:
4300		if (vport->fc_npr_cnt == 0 && count == -1)
4301			vport->fc_npr_cnt = 0;
4302		else
4303			vport->fc_npr_cnt += count;
4304		break;
4305	}
4306	spin_unlock_irqrestore(shost->host_lock, iflags);
4307}
4308
4309static void
4310lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4311		       int old_state, int new_state)
4312{
4313	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4314
4315	if (new_state == NLP_STE_UNMAPPED_NODE) {
4316		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4317		ndlp->nlp_type |= NLP_FC_NODE;
4318	}
4319	if (new_state == NLP_STE_MAPPED_NODE)
4320		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4321	if (new_state == NLP_STE_NPR_NODE)
4322		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4323
4324	/* FCP and NVME Transport interface */
4325	if ((old_state == NLP_STE_MAPPED_NODE ||
4326	     old_state == NLP_STE_UNMAPPED_NODE)) {
4327		if (ndlp->rport) {
4328			vport->phba->nport_event_cnt++;
4329			lpfc_unregister_remote_port(ndlp);
4330		}
4331
4332		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4333			vport->phba->nport_event_cnt++;
4334			if (vport->phba->nvmet_support == 0) {
4335				/* Start devloss if target. */
4336				if (ndlp->nlp_type & NLP_NVME_TARGET)
4337					lpfc_nvme_unregister_port(vport, ndlp);
4338			} else {
4339				/* NVMET has no upcall. */
4340				lpfc_nlp_put(ndlp);
4341			}
4342		}
4343	}
4344
4345	/* FCP and NVME Transport interfaces */
4346
4347	if (new_state ==  NLP_STE_MAPPED_NODE ||
4348	    new_state == NLP_STE_UNMAPPED_NODE) {
4349		if (ndlp->nlp_fc4_type ||
4350		    ndlp->nlp_DID == Fabric_DID ||
4351		    ndlp->nlp_DID == NameServer_DID ||
4352		    ndlp->nlp_DID == FDMI_DID) {
4353			vport->phba->nport_event_cnt++;
4354			/*
4355			 * Tell the fc transport about the port, if we haven't
4356			 * already. If we have, and it's a scsi entity, be
4357			 */
4358			lpfc_register_remote_port(vport, ndlp);
4359		}
4360		/* Notify the NVME transport of this new rport. */
4361		if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
4362		    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4363			if (vport->phba->nvmet_support == 0) {
4364				/* Register this rport with the transport.
4365				 * Only NVME Target Rports are registered with
4366				 * the transport.
4367				 */
4368				if (ndlp->nlp_type & NLP_NVME_TARGET) {
4369					vport->phba->nport_event_cnt++;
4370					lpfc_nvme_register_port(vport, ndlp);
4371				}
4372			} else {
4373				/* Just take an NDLP ref count since the
4374				 * target does not register rports.
4375				 */
4376				lpfc_nlp_get(ndlp);
4377			}
4378		}
4379	}
4380
4381	if ((new_state ==  NLP_STE_MAPPED_NODE) &&
4382		(vport->stat_data_enabled)) {
4383		/*
4384		 * A new target is discovered, if there is no buffer for
4385		 * statistical data collection allocate buffer.
4386		 */
4387		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4388					 sizeof(struct lpfc_scsicmd_bkt),
4389					 GFP_KERNEL);
4390
4391		if (!ndlp->lat_data)
4392			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4393				"0286 lpfc_nlp_state_cleanup failed to "
4394				"allocate statistical data buffer DID "
4395				"0x%x\n", ndlp->nlp_DID);
4396	}
4397	/*
4398	 * If the node just added to Mapped list was an FCP target,
4399	 * but the remote port registration failed or assigned a target
4400	 * id outside the presentable range - move the node to the
4401	 * Unmapped List.
4402	 */
4403	if ((new_state == NLP_STE_MAPPED_NODE) &&
4404	    (ndlp->nlp_type & NLP_FCP_TARGET) &&
4405	    (!ndlp->rport ||
4406	     ndlp->rport->scsi_target_id == -1 ||
4407	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4408		spin_lock_irq(shost->host_lock);
4409		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4410		spin_unlock_irq(shost->host_lock);
4411		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4412	}
4413}
4414
4415static char *
4416lpfc_nlp_state_name(char *buffer, size_t size, int state)
4417{
4418	static char *states[] = {
4419		[NLP_STE_UNUSED_NODE] = "UNUSED",
4420		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
4421		[NLP_STE_ADISC_ISSUE] = "ADISC",
4422		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4423		[NLP_STE_PRLI_ISSUE] = "PRLI",
4424		[NLP_STE_LOGO_ISSUE] = "LOGO",
4425		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4426		[NLP_STE_MAPPED_NODE] = "MAPPED",
4427		[NLP_STE_NPR_NODE] = "NPR",
4428	};
4429
4430	if (state < NLP_STE_MAX_STATE && states[state])
4431		strlcpy(buffer, states[state], size);
4432	else
4433		snprintf(buffer, size, "unknown (%d)", state);
4434	return buffer;
4435}
4436
4437void
4438lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4439		   int state)
4440{
4441	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4442	int  old_state = ndlp->nlp_state;
4443	char name1[16], name2[16];
4444
4445	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4446			 "0904 NPort state transition x%06x, %s -> %s\n",
4447			 ndlp->nlp_DID,
4448			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4449			 lpfc_nlp_state_name(name2, sizeof(name2), state));
4450
4451	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4452		"node statechg    did:x%x old:%d ste:%d",
4453		ndlp->nlp_DID, old_state, state);
4454
4455	if (old_state == NLP_STE_NPR_NODE &&
4456	    state != NLP_STE_NPR_NODE)
4457		lpfc_cancel_retry_delay_tmo(vport, ndlp);
4458	if (old_state == NLP_STE_UNMAPPED_NODE) {
4459		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4460		ndlp->nlp_type &= ~NLP_FC_NODE;
4461	}
4462
4463	if (list_empty(&ndlp->nlp_listp)) {
4464		spin_lock_irq(shost->host_lock);
4465		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4466		spin_unlock_irq(shost->host_lock);
4467	} else if (old_state)
4468		lpfc_nlp_counters(vport, old_state, -1);
4469
4470	ndlp->nlp_state = state;
4471	lpfc_nlp_counters(vport, state, 1);
4472	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4473}
4474
4475void
4476lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4477{
4478	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4479
4480	if (list_empty(&ndlp->nlp_listp)) {
4481		spin_lock_irq(shost->host_lock);
4482		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4483		spin_unlock_irq(shost->host_lock);
4484	}
4485}
4486
4487void
4488lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4489{
4490	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4491
4492	lpfc_cancel_retry_delay_tmo(vport, ndlp);
4493	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4494		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4495	spin_lock_irq(shost->host_lock);
4496	list_del_init(&ndlp->nlp_listp);
4497	spin_unlock_irq(shost->host_lock);
4498	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4499				NLP_STE_UNUSED_NODE);
4500}
4501
4502static void
4503lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4504{
4505	lpfc_cancel_retry_delay_tmo(vport, ndlp);
4506	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4507		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4508	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4509				NLP_STE_UNUSED_NODE);
4510}
4511/**
4512 * lpfc_initialize_node - Initialize all fields of node object
4513 * @vport: Pointer to Virtual Port object.
4514 * @ndlp: Pointer to FC node object.
4515 * @did: FC_ID of the node.
4516 *
4517 * This function is always called when node object need to be initialized.
4518 * It initializes all the fields of the node object. Although the reference
4519 * to phba from @ndlp can be obtained indirectly through it's reference to
4520 * @vport, a direct reference to phba is taken here by @ndlp. This is due
4521 * to the life-span of the @ndlp might go beyond the existence of @vport as
4522 * the final release of ndlp is determined by its reference count. And, the
4523 * operation on @ndlp needs the reference to phba.
4524 **/
4525static inline void
4526lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4527	uint32_t did)
4528{
4529	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4530	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4531	timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
4532	INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
4533
4534	ndlp->nlp_DID = did;
4535	ndlp->vport = vport;
4536	ndlp->phba = vport->phba;
4537	ndlp->nlp_sid = NLP_NO_SID;
4538	ndlp->nlp_fc4_type = NLP_FC4_NONE;
4539	kref_init(&ndlp->kref);
4540	NLP_INT_NODE_ACT(ndlp);
4541	atomic_set(&ndlp->cmd_pending, 0);
4542	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4543	ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4544}
4545
4546struct lpfc_nodelist *
4547lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4548		 int state)
4549{
4550	struct lpfc_hba *phba = vport->phba;
4551	uint32_t did, flag;
4552	unsigned long flags;
4553	unsigned long *active_rrqs_xri_bitmap = NULL;
4554	int rpi = LPFC_RPI_ALLOC_ERROR;
4555	uint32_t defer_did = 0;
4556
4557	if (!ndlp)
4558		return NULL;
4559
4560	if (phba->sli_rev == LPFC_SLI_REV4) {
4561		if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
4562			rpi = lpfc_sli4_alloc_rpi(vport->phba);
4563		else
4564			rpi = ndlp->nlp_rpi;
4565
4566		if (rpi == LPFC_RPI_ALLOC_ERROR) {
4567			lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4568					 "0359 %s: ndlp:x%px "
4569					 "usgmap:x%x refcnt:%d FAILED RPI "
4570					 " ALLOC\n",
4571					 __func__,
4572					 (void *)ndlp, ndlp->nlp_usg_map,
4573					 kref_read(&ndlp->kref));
4574			return NULL;
4575		}
4576	}
4577
4578	spin_lock_irqsave(&phba->ndlp_lock, flags);
4579	/* The ndlp should not be in memory free mode */
4580	if (NLP_CHK_FREE_REQ(ndlp)) {
4581		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4582		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4583				"0277 %s: ndlp:x%px "
4584				"usgmap:x%x refcnt:%d\n",
4585				__func__, (void *)ndlp, ndlp->nlp_usg_map,
4586				kref_read(&ndlp->kref));
4587		goto free_rpi;
4588	}
4589	/* The ndlp should not already be in active mode */
4590	if (NLP_CHK_NODE_ACT(ndlp)) {
4591		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4592		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4593				"0278 %s: ndlp:x%px "
4594				"usgmap:x%x refcnt:%d\n",
4595				__func__, (void *)ndlp, ndlp->nlp_usg_map,
4596				kref_read(&ndlp->kref));
4597		goto free_rpi;
4598	}
4599
4600	/* First preserve the orginal DID, xri_bitmap and some flags */
4601	did = ndlp->nlp_DID;
4602	flag = (ndlp->nlp_flag & NLP_UNREG_INP);
4603	if (flag & NLP_UNREG_INP)
4604		defer_did = ndlp->nlp_defer_did;
4605	if (phba->sli_rev == LPFC_SLI_REV4)
4606		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
4607
4608	/* Zero ndlp except of ndlp linked list pointer */
4609	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
4610		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
4611
4612	/* Next reinitialize and restore saved objects */
4613	lpfc_initialize_node(vport, ndlp, did);
4614	ndlp->nlp_flag |= flag;
4615	if (flag & NLP_UNREG_INP)
4616		ndlp->nlp_defer_did = defer_did;
4617	if (phba->sli_rev == LPFC_SLI_REV4)
4618		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
4619
4620	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4621	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4622		ndlp->nlp_rpi = rpi;
4623		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4624				 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
4625				 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
4626				 ndlp->nlp_flag,
4627				 kref_read(&ndlp->kref),
4628				 ndlp->nlp_usg_map, ndlp);
4629	}
4630
4631
4632	if (state != NLP_STE_UNUSED_NODE)
4633		lpfc_nlp_set_state(vport, ndlp, state);
4634	else
4635		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4636				 "0013 rpi:%x DID:%x flg:%x refcnt:%d "
4637				 "map:%x x%px STATE=UNUSED\n",
4638				 ndlp->nlp_rpi, ndlp->nlp_DID,
4639				 ndlp->nlp_flag,
4640				 kref_read(&ndlp->kref),
4641				 ndlp->nlp_usg_map, ndlp);
4642
4643	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4644		"node enable:       did:x%x",
4645		ndlp->nlp_DID, 0, 0);
4646	return ndlp;
4647
4648free_rpi:
4649	if (phba->sli_rev == LPFC_SLI_REV4) {
4650		lpfc_sli4_free_rpi(vport->phba, rpi);
4651		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4652	}
4653	return NULL;
4654}
4655
4656void
4657lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4658{
4659	/*
4660	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
4661	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
4662	 * the ndlp from the vport. The ndlp marked as UNUSED on the list
4663	 * until ALL other outstanding threads have completed. We check
4664	 * that the ndlp not already in the UNUSED state before we proceed.
4665	 */
4666	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4667		return;
4668	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4669	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4670		lpfc_cleanup_vports_rrqs(vport, ndlp);
4671		lpfc_unreg_rpi(vport, ndlp);
4672	}
4673
4674	lpfc_nlp_put(ndlp);
4675	return;
4676}
4677
4678/*
4679 * Start / ReStart rescue timer for Discovery / RSCN handling
4680 */
4681void
4682lpfc_set_disctmo(struct lpfc_vport *vport)
4683{
4684	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4685	struct lpfc_hba  *phba = vport->phba;
4686	uint32_t tmo;
4687
4688	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
4689		/* For FAN, timeout should be greater than edtov */
4690		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
4691	} else {
4692		/* Normal discovery timeout should be > than ELS/CT timeout
4693		 * FC spec states we need 3 * ratov for CT requests
4694		 */
4695		tmo = ((phba->fc_ratov * 3) + 3);
4696	}
4697
4698
4699	if (!timer_pending(&vport->fc_disctmo)) {
4700		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4701			"set disc timer:  tmo:x%x state:x%x flg:x%x",
4702			tmo, vport->port_state, vport->fc_flag);
4703	}
4704
4705	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
4706	spin_lock_irq(shost->host_lock);
4707	vport->fc_flag |= FC_DISC_TMO;
4708	spin_unlock_irq(shost->host_lock);
4709
4710	/* Start Discovery Timer state <hba_state> */
4711	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4712			 "0247 Start Discovery Timer state x%x "
4713			 "Data: x%x x%lx x%x x%x\n",
4714			 vport->port_state, tmo,
4715			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
4716			 vport->fc_adisc_cnt);
4717
4718	return;
4719}
4720
4721/*
4722 * Cancel rescue timer for Discovery / RSCN handling
4723 */
4724int
4725lpfc_can_disctmo(struct lpfc_vport *vport)
4726{
4727	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4728	unsigned long iflags;
4729
4730	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4731		"can disc timer:  state:x%x rtry:x%x flg:x%x",
4732		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
4733
4734	/* Turn off discovery timer if its running */
4735	if (vport->fc_flag & FC_DISC_TMO) {
4736		spin_lock_irqsave(shost->host_lock, iflags);
4737		vport->fc_flag &= ~FC_DISC_TMO;
4738		spin_unlock_irqrestore(shost->host_lock, iflags);
4739		del_timer_sync(&vport->fc_disctmo);
4740		spin_lock_irqsave(&vport->work_port_lock, iflags);
4741		vport->work_port_events &= ~WORKER_DISC_TMO;
4742		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
4743	}
4744
4745	/* Cancel Discovery Timer state <hba_state> */
4746	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4747			 "0248 Cancel Discovery Timer state x%x "
4748			 "Data: x%x x%x x%x\n",
4749			 vport->port_state, vport->fc_flag,
4750			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
4751	return 0;
4752}
4753
4754/*
4755 * Check specified ring for outstanding IOCB on the SLI queue
4756 * Return true if iocb matches the specified nport
4757 */
4758int
4759lpfc_check_sli_ndlp(struct lpfc_hba *phba,
4760		    struct lpfc_sli_ring *pring,
4761		    struct lpfc_iocbq *iocb,
4762		    struct lpfc_nodelist *ndlp)
4763{
4764	IOCB_t *icmd = &iocb->iocb;
4765	struct lpfc_vport    *vport = ndlp->vport;
4766
4767	if (iocb->vport != vport)
4768		return 0;
4769
4770	if (pring->ringno == LPFC_ELS_RING) {
4771		switch (icmd->ulpCommand) {
4772		case CMD_GEN_REQUEST64_CR:
4773			if (iocb->context_un.ndlp == ndlp)
4774				return 1;
4775			fallthrough;
4776		case CMD_ELS_REQUEST64_CR:
4777			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
4778				return 1;
4779			fallthrough;
4780		case CMD_XMIT_ELS_RSP64_CX:
4781			if (iocb->context1 == (uint8_t *) ndlp)
4782				return 1;
4783		}
4784	} else if (pring->ringno == LPFC_FCP_RING) {
4785		/* Skip match check if waiting to relogin to FCP target */
4786		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4787		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
4788			return 0;
4789		}
4790		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
4791			return 1;
4792		}
4793	}
4794	return 0;
4795}
4796
4797static void
4798__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
4799		struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
4800		struct list_head *dequeue_list)
4801{
4802	struct lpfc_iocbq *iocb, *next_iocb;
4803
4804	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
4805		/* Check to see if iocb matches the nport */
4806		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
4807			/* match, dequeue */
4808			list_move_tail(&iocb->list, dequeue_list);
4809	}
4810}
4811
4812static void
4813lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
4814		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4815{
4816	struct lpfc_sli *psli = &phba->sli;
4817	uint32_t i;
4818
4819	spin_lock_irq(&phba->hbalock);
4820	for (i = 0; i < psli->num_rings; i++)
4821		__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
4822						dequeue_list);
4823	spin_unlock_irq(&phba->hbalock);
4824}
4825
4826static void
4827lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
4828		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4829{
4830	struct lpfc_sli_ring *pring;
4831	struct lpfc_queue *qp = NULL;
4832
4833	spin_lock_irq(&phba->hbalock);
4834	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
4835		pring = qp->pring;
4836		if (!pring)
4837			continue;
4838		spin_lock(&pring->ring_lock);
4839		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
4840		spin_unlock(&pring->ring_lock);
4841	}
4842	spin_unlock_irq(&phba->hbalock);
4843}
4844
4845/*
4846 * Free resources / clean up outstanding I/Os
4847 * associated with nlp_rpi in the LPFC_NODELIST entry.
4848 */
4849static int
4850lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4851{
4852	LIST_HEAD(completions);
4853
4854	lpfc_fabric_abort_nport(ndlp);
4855
4856	/*
4857	 * Everything that matches on txcmplq will be returned
4858	 * by firmware with a no rpi error.
4859	 */
4860	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4861		if (phba->sli_rev != LPFC_SLI_REV4)
4862			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
4863		else
4864			lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
4865	}
4866
4867	/* Cancel all the IOCBs from the completions list */
4868	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4869			      IOERR_SLI_ABORTED);
4870
4871	return 0;
4872}
4873
4874/**
4875 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
4876 * @phba: Pointer to HBA context object.
4877 * @pmb: Pointer to mailbox object.
4878 *
4879 * This function will issue an ELS LOGO command after completing
4880 * the UNREG_RPI.
4881 **/
4882static void
4883lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4884{
4885	struct lpfc_vport  *vport = pmb->vport;
4886	struct lpfc_nodelist *ndlp;
4887
4888	ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
4889	if (!ndlp)
4890		return;
4891	lpfc_issue_els_logo(vport, ndlp, 0);
4892	mempool_free(pmb, phba->mbox_mem_pool);
4893
4894	/* Check to see if there are any deferred events to process */
4895	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
4896	    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
4897		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4898				 "1434 UNREG cmpl deferred logo x%x "
4899				 "on NPort x%x Data: x%x x%px\n",
4900				 ndlp->nlp_rpi, ndlp->nlp_DID,
4901				 ndlp->nlp_defer_did, ndlp);
4902
4903		ndlp->nlp_flag &= ~NLP_UNREG_INP;
4904		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4905		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4906	} else {
4907		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
4908			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
4909			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
4910			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4911		}
4912		ndlp->nlp_flag &= ~NLP_UNREG_INP;
4913	}
4914}
4915
4916/*
4917 * Sets the mailbox completion handler to be used for the
4918 * unreg_rpi command. The handler varies based on the state of
4919 * the port and what will be happening to the rpi next.
4920 */
4921static void
4922lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
4923	struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
4924{
4925	unsigned long iflags;
4926
4927	if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
4928		mbox->ctx_ndlp = ndlp;
4929		mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
4930
4931	} else if (phba->sli_rev == LPFC_SLI_REV4 &&
4932		   (!(vport->load_flag & FC_UNLOADING)) &&
4933		    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
4934				      LPFC_SLI_INTF_IF_TYPE_2) &&
4935		    (kref_read(&ndlp->kref) > 0)) {
4936		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
4937		mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
4938	} else {
4939		if (vport->load_flag & FC_UNLOADING) {
4940			if (phba->sli_rev == LPFC_SLI_REV4) {
4941				spin_lock_irqsave(&vport->phba->ndlp_lock,
4942						  iflags);
4943				ndlp->nlp_flag |= NLP_RELEASE_RPI;
4944				spin_unlock_irqrestore(&vport->phba->ndlp_lock,
4945						       iflags);
4946			}
4947			lpfc_nlp_get(ndlp);
4948		}
4949		mbox->ctx_ndlp = ndlp;
4950		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4951	}
4952}
4953
4954/*
4955 * Free rpi associated with LPFC_NODELIST entry.
4956 * This routine is called from lpfc_freenode(), when we are removing
4957 * a LPFC_NODELIST entry. It is also called if the driver initiates a
4958 * LOGO that completes successfully, and we are waiting to PLOGI back
4959 * to the remote NPort. In addition, it is called after we receive
4960 * and unsolicated ELS cmd, send back a rsp, the rsp completes and
4961 * we are waiting to PLOGI back to the remote NPort.
4962 */
4963int
4964lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4965{
4966	struct lpfc_hba *phba = vport->phba;
4967	LPFC_MBOXQ_t    *mbox;
4968	int rc, acc_plogi = 1;
4969	uint16_t rpi;
4970
4971	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4972	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
4973		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
4974			lpfc_printf_vlog(vport, KERN_INFO,
4975					 LOG_NODE | LOG_DISCOVERY,
4976					 "3366 RPI x%x needs to be "
4977					 "unregistered nlp_flag x%x "
4978					 "did x%x\n",
4979					 ndlp->nlp_rpi, ndlp->nlp_flag,
4980					 ndlp->nlp_DID);
4981
4982		/* If there is already an UNREG in progress for this ndlp,
4983		 * no need to queue up another one.
4984		 */
4985		if (ndlp->nlp_flag & NLP_UNREG_INP) {
4986			lpfc_printf_vlog(vport, KERN_INFO,
4987					 LOG_NODE | LOG_DISCOVERY,
4988					 "1436 unreg_rpi SKIP UNREG x%x on "
4989					 "NPort x%x deferred x%x  flg x%x "
4990					 "Data: x%px\n",
4991					 ndlp->nlp_rpi, ndlp->nlp_DID,
4992					 ndlp->nlp_defer_did,
4993					 ndlp->nlp_flag, ndlp);
4994			goto out;
4995		}
4996
4997		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4998		if (mbox) {
4999			/* SLI4 ports require the physical rpi value. */
5000			rpi = ndlp->nlp_rpi;
5001			if (phba->sli_rev == LPFC_SLI_REV4)
5002				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
5003
5004			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
5005			mbox->vport = vport;
5006			lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
5007			if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
5008				/*
5009				 * accept PLOGIs after unreg_rpi_cmpl
5010				 */
5011				acc_plogi = 0;
5012			if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
5013			    Fabric_DID_MASK) &&
5014			    (!(vport->fc_flag & FC_OFFLINE_MODE)))
5015				ndlp->nlp_flag |= NLP_UNREG_INP;
5016
5017			lpfc_printf_vlog(vport, KERN_INFO,
5018					 LOG_NODE | LOG_DISCOVERY,
5019					 "1433 unreg_rpi UNREG x%x on "
5020					 "NPort x%x deferred flg x%x "
5021					 "Data:x%px\n",
5022					 ndlp->nlp_rpi, ndlp->nlp_DID,
5023					 ndlp->nlp_flag, ndlp);
5024
5025			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5026			if (rc == MBX_NOT_FINISHED) {
5027				mempool_free(mbox, phba->mbox_mem_pool);
5028				acc_plogi = 1;
5029			}
5030		} else {
5031			lpfc_printf_vlog(vport, KERN_INFO,
5032					 LOG_NODE | LOG_DISCOVERY,
5033					 "1444 Failed to allocate mempool "
5034					 "unreg_rpi UNREG x%x, "
5035					 "DID x%x, flag x%x, "
5036					 "ndlp x%px\n",
5037					 ndlp->nlp_rpi, ndlp->nlp_DID,
5038					 ndlp->nlp_flag, ndlp);
5039
5040			/* Because mempool_alloc failed, we
5041			 * will issue a LOGO here and keep the rpi alive if
5042			 * not unloading.
5043			 */
5044			if (!(vport->load_flag & FC_UNLOADING)) {
5045				ndlp->nlp_flag &= ~NLP_UNREG_INP;
5046				lpfc_issue_els_logo(vport, ndlp, 0);
5047				ndlp->nlp_prev_state = ndlp->nlp_state;
5048				lpfc_nlp_set_state(vport, ndlp,
5049						   NLP_STE_NPR_NODE);
5050			}
5051
5052			return 1;
5053		}
5054		lpfc_no_rpi(phba, ndlp);
5055out:
5056		if (phba->sli_rev != LPFC_SLI_REV4)
5057			ndlp->nlp_rpi = 0;
5058		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
5059		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5060		if (acc_plogi)
5061			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5062		return 1;
5063	}
5064	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5065	return 0;
5066}
5067
5068/**
5069 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
5070 * @phba: pointer to lpfc hba data structure.
5071 *
5072 * This routine is invoked to unregister all the currently registered RPIs
5073 * to the HBA.
5074 **/
5075void
5076lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
5077{
5078	struct lpfc_vport **vports;
5079	struct lpfc_nodelist *ndlp;
5080	struct Scsi_Host *shost;
5081	int i;
5082
5083	vports = lpfc_create_vport_work_array(phba);
5084	if (!vports) {
5085		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5086				"2884 Vport array allocation failed \n");
5087		return;
5088	}
5089	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5090		shost = lpfc_shost_from_vport(vports[i]);
5091		spin_lock_irq(shost->host_lock);
5092		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5093			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5094				/* The mempool_alloc might sleep */
5095				spin_unlock_irq(shost->host_lock);
5096				lpfc_unreg_rpi(vports[i], ndlp);
5097				spin_lock_irq(shost->host_lock);
5098			}
5099		}
5100		spin_unlock_irq(shost->host_lock);
5101	}
5102	lpfc_destroy_vport_work_array(phba, vports);
5103}
5104
5105void
5106lpfc_unreg_all_rpis(struct lpfc_vport *vport)
5107{
5108	struct lpfc_hba  *phba  = vport->phba;
5109	LPFC_MBOXQ_t     *mbox;
5110	int rc;
5111
5112	if (phba->sli_rev == LPFC_SLI_REV4) {
5113		lpfc_sli4_unreg_all_rpis(vport);
5114		return;
5115	}
5116
5117	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5118	if (mbox) {
5119		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
5120				 mbox);
5121		mbox->vport = vport;
5122		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5123		mbox->ctx_ndlp = NULL;
5124		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5125		if (rc != MBX_TIMEOUT)
5126			mempool_free(mbox, phba->mbox_mem_pool);
5127
5128		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5129			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5130					 "1836 Could not issue "
5131					 "unreg_login(all_rpis) status %d\n",
5132					 rc);
5133	}
5134}
5135
5136void
5137lpfc_unreg_default_rpis(struct lpfc_vport *vport)
5138{
5139	struct lpfc_hba  *phba  = vport->phba;
5140	LPFC_MBOXQ_t     *mbox;
5141	int rc;
5142
5143	/* Unreg DID is an SLI3 operation. */
5144	if (phba->sli_rev > LPFC_SLI_REV3)
5145		return;
5146
5147	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5148	if (mbox) {
5149		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
5150			       mbox);
5151		mbox->vport = vport;
5152		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5153		mbox->ctx_ndlp = NULL;
5154		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5155		if (rc != MBX_TIMEOUT)
5156			mempool_free(mbox, phba->mbox_mem_pool);
5157
5158		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5159			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5160					 "1815 Could not issue "
5161					 "unreg_did (default rpis) status %d\n",
5162					 rc);
5163	}
5164}
5165
5166/*
5167 * Free resources associated with LPFC_NODELIST entry
5168 * so it can be freed.
5169 */
5170static int
5171lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5172{
5173	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5174	struct lpfc_hba  *phba = vport->phba;
5175	LPFC_MBOXQ_t *mb, *nextmb;
5176	struct lpfc_dmabuf *mp;
5177	unsigned long iflags;
5178
5179	/* Cleanup node for NPort <nlp_DID> */
5180	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5181			 "0900 Cleanup node for NPort x%x "
5182			 "Data: x%x x%x x%x\n",
5183			 ndlp->nlp_DID, ndlp->nlp_flag,
5184			 ndlp->nlp_state, ndlp->nlp_rpi);
5185	if (NLP_CHK_FREE_REQ(ndlp)) {
5186		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5187				"0280 %s: ndlp:x%px "
5188				"usgmap:x%x refcnt:%d\n",
5189				__func__, (void *)ndlp, ndlp->nlp_usg_map,
5190				kref_read(&ndlp->kref));
5191		lpfc_dequeue_node(vport, ndlp);
5192	} else {
5193		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5194				"0281 %s: ndlp:x%px "
5195				"usgmap:x%x refcnt:%d\n",
5196				__func__, (void *)ndlp, ndlp->nlp_usg_map,
5197				kref_read(&ndlp->kref));
5198		lpfc_disable_node(vport, ndlp);
5199	}
5200
5201
5202	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
5203
5204	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
5205	if ((mb = phba->sli.mbox_active)) {
5206		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5207		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5208		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5209			mb->ctx_ndlp = NULL;
5210			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5211		}
5212	}
5213
5214	spin_lock_irq(&phba->hbalock);
5215	/* Cleanup REG_LOGIN completions which are not yet processed */
5216	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
5217		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
5218			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
5219			(ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
5220			continue;
5221
5222		mb->ctx_ndlp = NULL;
5223		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5224	}
5225
5226	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
5227		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5228		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5229		    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5230			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
5231			if (mp) {
5232				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
5233				kfree(mp);
5234			}
5235			list_del(&mb->list);
5236			mempool_free(mb, phba->mbox_mem_pool);
5237			/* We shall not invoke the lpfc_nlp_put to decrement
5238			 * the ndlp reference count as we are in the process
5239			 * of lpfc_nlp_release.
5240			 */
5241		}
5242	}
5243	spin_unlock_irq(&phba->hbalock);
5244
5245	lpfc_els_abort(phba, ndlp);
5246
5247	spin_lock_irq(shost->host_lock);
5248	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
5249	spin_unlock_irq(shost->host_lock);
5250
5251	ndlp->nlp_last_elscmd = 0;
5252	del_timer_sync(&ndlp->nlp_delayfunc);
5253
5254	list_del_init(&ndlp->els_retry_evt.evt_listp);
5255	list_del_init(&ndlp->dev_loss_evt.evt_listp);
5256	list_del_init(&ndlp->recovery_evt.evt_listp);
5257	lpfc_cleanup_vports_rrqs(vport, ndlp);
5258	if (phba->sli_rev == LPFC_SLI_REV4)
5259		ndlp->nlp_flag |= NLP_RELEASE_RPI;
5260	if (!lpfc_unreg_rpi(vport, ndlp)) {
5261		/* Clean up unregistered and non freed rpis */
5262		if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
5263		    !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
5264			lpfc_sli4_free_rpi(vport->phba,
5265					   ndlp->nlp_rpi);
5266			spin_lock_irqsave(&vport->phba->ndlp_lock,
5267					  iflags);
5268			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5269			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5270			spin_unlock_irqrestore(&vport->phba->ndlp_lock,
5271					       iflags);
5272		}
5273	}
5274	return 0;
5275}
5276
5277/*
5278 * Check to see if we can free the nlp back to the freelist.
5279 * If we are in the middle of using the nlp in the discovery state
5280 * machine, defer the free till we reach the end of the state machine.
5281 */
5282static void
5283lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5284{
5285	struct lpfc_hba  *phba = vport->phba;
5286	struct lpfc_rport_data *rdata;
5287	struct fc_rport *rport;
5288	LPFC_MBOXQ_t *mbox;
5289	int rc;
5290
5291	lpfc_cancel_retry_delay_tmo(vport, ndlp);
5292	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
5293	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
5294	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
5295	    phba->sli_rev != LPFC_SLI_REV4) {
5296		/* For this case we need to cleanup the default rpi
5297		 * allocated by the firmware.
5298		 */
5299		lpfc_printf_vlog(vport, KERN_INFO,
5300				 LOG_NODE | LOG_DISCOVERY,
5301				 "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
5302				 "ref %d map:x%x ndlp x%px\n",
5303				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5304				 kref_read(&ndlp->kref),
5305				 ndlp->nlp_usg_map, ndlp);
5306		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
5307			!= NULL) {
5308			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
5309			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
5310			if (rc) {
5311				mempool_free(mbox, phba->mbox_mem_pool);
5312			}
5313			else {
5314				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
5315				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
5316				mbox->vport = vport;
5317				mbox->ctx_ndlp = ndlp;
5318				rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5319				if (rc == MBX_NOT_FINISHED) {
5320					mempool_free(mbox, phba->mbox_mem_pool);
5321				}
5322			}
5323		}
5324	}
5325	lpfc_cleanup_node(vport, ndlp);
5326
5327	/*
5328	 * ndlp->rport must be set to NULL before it reaches here
5329	 * i.e. break rport/node link before doing lpfc_nlp_put for
5330	 * registered rport and then drop the reference of rport.
5331	 */
5332	if (ndlp->rport) {
5333		/*
5334		 * extra lpfc_nlp_put dropped the reference of ndlp
5335		 * for registered rport so need to cleanup rport
5336		 */
5337		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5338				"0940 removed node x%px DID x%x "
5339				"rpi %d rport not null x%px\n",
5340				 ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
5341				 ndlp->rport);
5342		rport = ndlp->rport;
5343		rdata = rport->dd_data;
5344		rdata->pnode = NULL;
5345		ndlp->rport = NULL;
5346		put_device(&rport->dev);
5347	}
5348}
5349
5350static int
5351lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5352	      uint32_t did)
5353{
5354	D_ID mydid, ndlpdid, matchdid;
5355
5356	if (did == Bcast_DID)
5357		return 0;
5358
5359	/* First check for Direct match */
5360	if (ndlp->nlp_DID == did)
5361		return 1;
5362
5363	/* Next check for area/domain identically equals 0 match */
5364	mydid.un.word = vport->fc_myDID;
5365	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
5366		return 0;
5367	}
5368
5369	matchdid.un.word = did;
5370	ndlpdid.un.word = ndlp->nlp_DID;
5371	if (matchdid.un.b.id == ndlpdid.un.b.id) {
5372		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
5373		    (mydid.un.b.area == matchdid.un.b.area)) {
5374			/* This code is supposed to match the ID
5375			 * for a private loop device that is
5376			 * connect to fl_port. But we need to
5377			 * check that the port did not just go
5378			 * from pt2pt to fabric or we could end
5379			 * up matching ndlp->nlp_DID 000001 to
5380			 * fabric DID 0x20101
5381			 */
5382			if ((ndlpdid.un.b.domain == 0) &&
5383			    (ndlpdid.un.b.area == 0)) {
5384				if (ndlpdid.un.b.id &&
5385				    vport->phba->fc_topology ==
5386				    LPFC_TOPOLOGY_LOOP)
5387					return 1;
5388			}
5389			return 0;
5390		}
5391
5392		matchdid.un.word = ndlp->nlp_DID;
5393		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
5394		    (mydid.un.b.area == ndlpdid.un.b.area)) {
5395			if ((matchdid.un.b.domain == 0) &&
5396			    (matchdid.un.b.area == 0)) {
5397				if (matchdid.un.b.id)
5398					return 1;
5399			}
5400		}
5401	}
5402	return 0;
5403}
5404
5405/* Search for a nodelist entry */
5406static struct lpfc_nodelist *
5407__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5408{
5409	struct lpfc_nodelist *ndlp;
5410	uint32_t data1;
5411
5412	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5413		if (lpfc_matchdid(vport, ndlp, did)) {
5414			data1 = (((uint32_t)ndlp->nlp_state << 24) |
5415				 ((uint32_t)ndlp->nlp_xri << 16) |
5416				 ((uint32_t)ndlp->nlp_type << 8) |
5417				 ((uint32_t)ndlp->nlp_usg_map & 0xff));
5418			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5419					 "0929 FIND node DID "
5420					 "Data: x%px x%x x%x x%x x%x x%px\n",
5421					 ndlp, ndlp->nlp_DID,
5422					 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
5423					 ndlp->active_rrqs_xri_bitmap);
5424			return ndlp;
5425		}
5426	}
5427
5428	/* FIND node did <did> NOT FOUND */
5429	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5430			 "0932 FIND node did x%x NOT FOUND.\n", did);
5431	return NULL;
5432}
5433
5434struct lpfc_nodelist *
5435lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5436{
5437	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5438	struct lpfc_nodelist *ndlp;
5439	unsigned long iflags;
5440
5441	spin_lock_irqsave(shost->host_lock, iflags);
5442	ndlp = __lpfc_findnode_did(vport, did);
5443	spin_unlock_irqrestore(shost->host_lock, iflags);
5444	return ndlp;
5445}
5446
5447struct lpfc_nodelist *
5448lpfc_findnode_mapped(struct lpfc_vport *vport)
5449{
5450	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5451	struct lpfc_nodelist *ndlp;
5452	uint32_t data1;
5453	unsigned long iflags;
5454
5455	spin_lock_irqsave(shost->host_lock, iflags);
5456
5457	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5458		if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
5459		    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
5460			data1 = (((uint32_t)ndlp->nlp_state << 24) |
5461				 ((uint32_t)ndlp->nlp_xri << 16) |
5462				 ((uint32_t)ndlp->nlp_type << 8) |
5463				 ((uint32_t)ndlp->nlp_rpi & 0xff));
5464			spin_unlock_irqrestore(shost->host_lock, iflags);
5465			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5466					 "2025 FIND node DID "
5467					 "Data: x%px x%x x%x x%x x%px\n",
5468					 ndlp, ndlp->nlp_DID,
5469					 ndlp->nlp_flag, data1,
5470					 ndlp->active_rrqs_xri_bitmap);
5471			return ndlp;
5472		}
5473	}
5474	spin_unlock_irqrestore(shost->host_lock, iflags);
5475
5476	/* FIND node did <did> NOT FOUND */
5477	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5478			 "2026 FIND mapped did NOT FOUND.\n");
5479	return NULL;
5480}
5481
5482struct lpfc_nodelist *
5483lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
5484{
5485	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5486	struct lpfc_nodelist *ndlp;
5487
5488	ndlp = lpfc_findnode_did(vport, did);
5489	if (!ndlp) {
5490		if (vport->phba->nvmet_support)
5491			return NULL;
5492		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
5493		    lpfc_rscn_payload_check(vport, did) == 0)
5494			return NULL;
5495		ndlp = lpfc_nlp_init(vport, did);
5496		if (!ndlp)
5497			return NULL;
5498		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5499
5500		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5501				 "6453 Setup New Node 2B_DISC x%x "
5502				 "Data:x%x x%x x%x\n",
5503				 ndlp->nlp_DID, ndlp->nlp_flag,
5504				 ndlp->nlp_state, vport->fc_flag);
5505
5506		spin_lock_irq(shost->host_lock);
5507		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5508		spin_unlock_irq(shost->host_lock);
5509		return ndlp;
5510	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
5511		if (vport->phba->nvmet_support)
5512			return NULL;
5513		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
5514		if (!ndlp) {
5515			lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
5516					 "0014 Could not enable ndlp\n");
5517			return NULL;
5518		}
5519		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5520				 "6454 Setup Enabled Node 2B_DISC x%x "
5521				 "Data:x%x x%x x%x\n",
5522				 ndlp->nlp_DID, ndlp->nlp_flag,
5523				 ndlp->nlp_state, vport->fc_flag);
5524
5525		spin_lock_irq(shost->host_lock);
5526		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5527		spin_unlock_irq(shost->host_lock);
5528		return ndlp;
5529	}
5530
5531	/* The NVME Target does not want to actively manage an rport.
5532	 * The goal is to allow the target to reset its state and clear
5533	 * pending IO in preparation for the initiator to recover.
5534	 */
5535	if ((vport->fc_flag & FC_RSCN_MODE) &&
5536	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
5537		if (lpfc_rscn_payload_check(vport, did)) {
5538
5539			/* Since this node is marked for discovery,
5540			 * delay timeout is not needed.
5541			 */
5542			lpfc_cancel_retry_delay_tmo(vport, ndlp);
5543
5544			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5545					 "6455 Setup RSCN Node 2B_DISC x%x "
5546					 "Data:x%x x%x x%x\n",
5547					 ndlp->nlp_DID, ndlp->nlp_flag,
5548					 ndlp->nlp_state, vport->fc_flag);
5549
5550			/* NVME Target mode waits until rport is known to be
5551			 * impacted by the RSCN before it transitions.  No
5552			 * active management - just go to NPR provided the
5553			 * node had a valid login.
5554			 */
5555			if (vport->phba->nvmet_support)
5556				return ndlp;
5557
5558			/* If we've already received a PLOGI from this NPort
5559			 * we don't need to try to discover it again.
5560			 */
5561			if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
5562			    !(ndlp->nlp_type &
5563			     (NLP_FCP_TARGET | NLP_NVME_TARGET)))
5564				return NULL;
5565
5566			ndlp->nlp_prev_state = ndlp->nlp_state;
5567			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5568
5569			spin_lock_irq(shost->host_lock);
5570			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5571			spin_unlock_irq(shost->host_lock);
5572		} else {
5573			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5574					 "6456 Skip Setup RSCN Node x%x "
5575					 "Data:x%x x%x x%x\n",
5576					 ndlp->nlp_DID, ndlp->nlp_flag,
5577					 ndlp->nlp_state, vport->fc_flag);
5578			ndlp = NULL;
5579		}
5580	} else {
5581		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5582				 "6457 Setup Active Node 2B_DISC x%x "
5583				 "Data:x%x x%x x%x\n",
5584				 ndlp->nlp_DID, ndlp->nlp_flag,
5585				 ndlp->nlp_state, vport->fc_flag);
5586
5587		/* If the initiator received a PLOGI from this NPort or if the
5588		 * initiator is already in the process of discovery on it,
5589		 * there's no need to try to discover it again.
5590		 */
5591		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5592		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5593		    (!vport->phba->nvmet_support &&
5594		     ndlp->nlp_flag & NLP_RCV_PLOGI))
5595			return NULL;
5596
5597		if (vport->phba->nvmet_support)
5598			return ndlp;
5599
5600		/* Moving to NPR state clears unsolicited flags and
5601		 * allows for rediscovery
5602		 */
5603		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5604
5605		spin_lock_irq(shost->host_lock);
5606		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5607		spin_unlock_irq(shost->host_lock);
5608	}
5609	return ndlp;
5610}
5611
5612/* Build a list of nodes to discover based on the loopmap */
5613void
5614lpfc_disc_list_loopmap(struct lpfc_vport *vport)
5615{
5616	struct lpfc_hba  *phba = vport->phba;
5617	int j;
5618	uint32_t alpa, index;
5619
5620	if (!lpfc_is_link_up(phba))
5621		return;
5622
5623	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5624		return;
5625
5626	/* Check for loop map present or not */
5627	if (phba->alpa_map[0]) {
5628		for (j = 1; j <= phba->alpa_map[0]; j++) {
5629			alpa = phba->alpa_map[j];
5630			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
5631				continue;
5632			lpfc_setup_disc_node(vport, alpa);
5633		}
5634	} else {
5635		/* No alpamap, so try all alpa's */
5636		for (j = 0; j < FC_MAXLOOP; j++) {
5637			/* If cfg_scan_down is set, start from highest
5638			 * ALPA (0xef) to lowest (0x1).
5639			 */
5640			if (vport->cfg_scan_down)
5641				index = j;
5642			else
5643				index = FC_MAXLOOP - j - 1;
5644			alpa = lpfcAlpaArray[index];
5645			if ((vport->fc_myDID & 0xff) == alpa)
5646				continue;
5647			lpfc_setup_disc_node(vport, alpa);
5648		}
5649	}
5650	return;
5651}
5652
5653/* SLI3 only */
5654void
5655lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
5656{
5657	LPFC_MBOXQ_t *mbox;
5658	struct lpfc_sli *psli = &phba->sli;
5659	struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5660	struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
5661	int  rc;
5662
5663	/*
5664	 * if it's not a physical port or if we already send
5665	 * clear_la then don't send it.
5666	 */
5667	if ((phba->link_state >= LPFC_CLEAR_LA) ||
5668	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
5669		(phba->sli_rev == LPFC_SLI_REV4))
5670		return;
5671
5672			/* Link up discovery */
5673	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
5674		phba->link_state = LPFC_CLEAR_LA;
5675		lpfc_clear_la(phba, mbox);
5676		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
5677		mbox->vport = vport;
5678		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5679		if (rc == MBX_NOT_FINISHED) {
5680			mempool_free(mbox, phba->mbox_mem_pool);
5681			lpfc_disc_flush_list(vport);
5682			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5683			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5684			phba->link_state = LPFC_HBA_ERROR;
5685		}
5686	}
5687}
5688
5689/* Reg_vpi to tell firmware to resume normal operations */
5690void
5691lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5692{
5693	LPFC_MBOXQ_t *regvpimbox;
5694
5695	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5696	if (regvpimbox) {
5697		lpfc_reg_vpi(vport, regvpimbox);
5698		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
5699		regvpimbox->vport = vport;
5700		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
5701					== MBX_NOT_FINISHED) {
5702			mempool_free(regvpimbox, phba->mbox_mem_pool);
5703		}
5704	}
5705}
5706
5707/* Start Link up / RSCN discovery on NPR nodes */
5708void
5709lpfc_disc_start(struct lpfc_vport *vport)
5710{
5711	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5712	struct lpfc_hba  *phba = vport->phba;
5713	uint32_t num_sent;
5714	uint32_t clear_la_pending;
5715
5716	if (!lpfc_is_link_up(phba)) {
5717		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5718				 "3315 Link is not up %x\n",
5719				 phba->link_state);
5720		return;
5721	}
5722
5723	if (phba->link_state == LPFC_CLEAR_LA)
5724		clear_la_pending = 1;
5725	else
5726		clear_la_pending = 0;
5727
5728	if (vport->port_state < LPFC_VPORT_READY)
5729		vport->port_state = LPFC_DISC_AUTH;
5730
5731	lpfc_set_disctmo(vport);
5732
5733	vport->fc_prevDID = vport->fc_myDID;
5734	vport->num_disc_nodes = 0;
5735
5736	/* Start Discovery state <hba_state> */
5737	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5738			 "0202 Start Discovery port state x%x "
5739			 "flg x%x Data: x%x x%x x%x\n",
5740			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
5741			 vport->fc_adisc_cnt, vport->fc_npr_cnt);
5742
5743	/* First do ADISCs - if any */
5744	num_sent = lpfc_els_disc_adisc(vport);
5745
5746	if (num_sent)
5747		return;
5748
5749	/* Register the VPI for SLI3, NPIV only. */
5750	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5751	    !(vport->fc_flag & FC_PT2PT) &&
5752	    !(vport->fc_flag & FC_RSCN_MODE) &&
5753	    (phba->sli_rev < LPFC_SLI_REV4)) {
5754		lpfc_issue_clear_la(phba, vport);
5755		lpfc_issue_reg_vpi(phba, vport);
5756		return;
5757	}
5758
5759	/*
5760	 * For SLI2, we need to set port_state to READY and continue
5761	 * discovery.
5762	 */
5763	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
5764		/* If we get here, there is nothing to ADISC */
5765		lpfc_issue_clear_la(phba, vport);
5766
5767		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
5768			vport->num_disc_nodes = 0;
5769			/* go thru NPR nodes and issue ELS PLOGIs */
5770			if (vport->fc_npr_cnt)
5771				lpfc_els_disc_plogi(vport);
5772
5773			if (!vport->num_disc_nodes) {
5774				spin_lock_irq(shost->host_lock);
5775				vport->fc_flag &= ~FC_NDISC_ACTIVE;
5776				spin_unlock_irq(shost->host_lock);
5777				lpfc_can_disctmo(vport);
5778			}
5779		}
5780		vport->port_state = LPFC_VPORT_READY;
5781	} else {
5782		/* Next do PLOGIs - if any */
5783		num_sent = lpfc_els_disc_plogi(vport);
5784
5785		if (num_sent)
5786			return;
5787
5788		if (vport->fc_flag & FC_RSCN_MODE) {
5789			/* Check to see if more RSCNs came in while we
5790			 * were processing this one.
5791			 */
5792			if ((vport->fc_rscn_id_cnt == 0) &&
5793			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5794				spin_lock_irq(shost->host_lock);
5795				vport->fc_flag &= ~FC_RSCN_MODE;
5796				spin_unlock_irq(shost->host_lock);
5797				lpfc_can_disctmo(vport);
5798			} else
5799				lpfc_els_handle_rscn(vport);
5800		}
5801	}
5802	return;
5803}
5804
5805/*
5806 *  Ignore completion for all IOCBs on tx and txcmpl queue for ELS
5807 *  ring the match the sppecified nodelist.
5808 */
5809static void
5810lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5811{
5812	LIST_HEAD(completions);
5813	IOCB_t     *icmd;
5814	struct lpfc_iocbq    *iocb, *next_iocb;
5815	struct lpfc_sli_ring *pring;
5816
5817	pring = lpfc_phba_elsring(phba);
5818	if (unlikely(!pring))
5819		return;
5820
5821	/* Error matching iocb on txq or txcmplq
5822	 * First check the txq.
5823	 */
5824	spin_lock_irq(&phba->hbalock);
5825	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5826		if (iocb->context1 != ndlp) {
5827			continue;
5828		}
5829		icmd = &iocb->iocb;
5830		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
5831		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5832
5833			list_move_tail(&iocb->list, &completions);
5834		}
5835	}
5836
5837	/* Next check the txcmplq */
5838	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
5839		if (iocb->context1 != ndlp) {
5840			continue;
5841		}
5842		icmd = &iocb->iocb;
5843		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
5844		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
5845			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
5846		}
5847	}
5848	spin_unlock_irq(&phba->hbalock);
5849
5850	/* Cancel all the IOCBs from the completions list */
5851	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5852			      IOERR_SLI_ABORTED);
5853}
5854
5855static void
5856lpfc_disc_flush_list(struct lpfc_vport *vport)
5857{
5858	struct lpfc_nodelist *ndlp, *next_ndlp;
5859	struct lpfc_hba *phba = vport->phba;
5860
5861	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
5862		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5863					 nlp_listp) {
5864			if (!NLP_CHK_NODE_ACT(ndlp))
5865				continue;
5866			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5867			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
5868				lpfc_free_tx(phba, ndlp);
5869			}
5870		}
5871	}
5872}
5873
5874void
5875lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
5876{
5877	lpfc_els_flush_rscn(vport);
5878	lpfc_els_flush_cmd(vport);
5879	lpfc_disc_flush_list(vport);
5880}
5881
5882/*****************************************************************************/
5883/*
5884 * NAME:     lpfc_disc_timeout
5885 *
5886 * FUNCTION: Fibre Channel driver discovery timeout routine.
5887 *
5888 * EXECUTION ENVIRONMENT: interrupt only
5889 *
5890 * CALLED FROM:
5891 *      Timer function
5892 *
5893 * RETURNS:
5894 *      none
5895 */
5896/*****************************************************************************/
5897void
5898lpfc_disc_timeout(struct timer_list *t)
5899{
5900	struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
5901	struct lpfc_hba   *phba = vport->phba;
5902	uint32_t tmo_posted;
5903	unsigned long flags = 0;
5904
5905	if (unlikely(!phba))
5906		return;
5907
5908	spin_lock_irqsave(&vport->work_port_lock, flags);
5909	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
5910	if (!tmo_posted)
5911		vport->work_port_events |= WORKER_DISC_TMO;
5912	spin_unlock_irqrestore(&vport->work_port_lock, flags);
5913
5914	if (!tmo_posted)
5915		lpfc_worker_wake_up(phba);
5916	return;
5917}
5918
5919static void
5920lpfc_disc_timeout_handler(struct lpfc_vport *vport)
5921{
5922	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5923	struct lpfc_hba  *phba = vport->phba;
5924	struct lpfc_sli  *psli = &phba->sli;
5925	struct lpfc_nodelist *ndlp, *next_ndlp;
5926	LPFC_MBOXQ_t *initlinkmbox;
5927	int rc, clrlaerr = 0;
5928
5929	if (!(vport->fc_flag & FC_DISC_TMO))
5930		return;
5931
5932	spin_lock_irq(shost->host_lock);
5933	vport->fc_flag &= ~FC_DISC_TMO;
5934	spin_unlock_irq(shost->host_lock);
5935
5936	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5937		"disc timeout:    state:x%x rtry:x%x flg:x%x",
5938		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5939
5940	switch (vport->port_state) {
5941
5942	case LPFC_LOCAL_CFG_LINK:
5943		/*
5944		 * port_state is identically  LPFC_LOCAL_CFG_LINK while
5945		 * waiting for FAN timeout
5946		 */
5947		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
5948				 "0221 FAN timeout\n");
5949
5950		/* Start discovery by sending FLOGI, clean up old rpis */
5951		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5952					 nlp_listp) {
5953			if (!NLP_CHK_NODE_ACT(ndlp))
5954				continue;
5955			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5956				continue;
5957			if (ndlp->nlp_type & NLP_FABRIC) {
5958				/* Clean up the ndlp on Fabric connections */
5959				lpfc_drop_node(vport, ndlp);
5960
5961			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
5962				/* Fail outstanding IO now since device
5963				 * is marked for PLOGI.
5964				 */
5965				lpfc_unreg_rpi(vport, ndlp);
5966			}
5967		}
5968		if (vport->port_state != LPFC_FLOGI) {
5969			if (phba->sli_rev <= LPFC_SLI_REV3)
5970				lpfc_initial_flogi(vport);
5971			else
5972				lpfc_issue_init_vfi(vport);
5973			return;
5974		}
5975		break;
5976
5977	case LPFC_FDISC:
5978	case LPFC_FLOGI:
5979	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
5980		/* Initial FLOGI timeout */
5981		lpfc_printf_vlog(vport, KERN_ERR,
5982				 LOG_TRACE_EVENT,
5983				 "0222 Initial %s timeout\n",
5984				 vport->vpi ? "FDISC" : "FLOGI");
5985
5986		/* Assume no Fabric and go on with discovery.
5987		 * Check for outstanding ELS FLOGI to abort.
5988		 */
5989
5990		/* FLOGI failed, so just use loop map to make discovery list */
5991		lpfc_disc_list_loopmap(vport);
5992
5993		/* Start discovery */
5994		lpfc_disc_start(vport);
5995		break;
5996
5997	case LPFC_FABRIC_CFG_LINK:
5998	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
5999	   NameServer login */
6000		lpfc_printf_vlog(vport, KERN_ERR,
6001				 LOG_TRACE_EVENT,
6002				 "0223 Timeout while waiting for "
6003				 "NameServer login\n");
6004		/* Next look for NameServer ndlp */
6005		ndlp = lpfc_findnode_did(vport, NameServer_DID);
6006		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
6007			lpfc_els_abort(phba, ndlp);
6008
6009		/* ReStart discovery */
6010		goto restart_disc;
6011
6012	case LPFC_NS_QRY:
6013	/* Check for wait for NameServer Rsp timeout */
6014		lpfc_printf_vlog(vport, KERN_ERR,
6015				 LOG_TRACE_EVENT,
6016				 "0224 NameServer Query timeout "
6017				 "Data: x%x x%x\n",
6018				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
6019
6020		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
6021			/* Try it one more time */
6022			vport->fc_ns_retry++;
6023			vport->gidft_inp = 0;
6024			rc = lpfc_issue_gidft(vport);
6025			if (rc == 0)
6026				break;
6027		}
6028		vport->fc_ns_retry = 0;
6029
6030restart_disc:
6031		/*
6032		 * Discovery is over.
6033		 * set port_state to PORT_READY if SLI2.
6034		 * cmpl_reg_vpi will set port_state to READY for SLI3.
6035		 */
6036		if (phba->sli_rev < LPFC_SLI_REV4) {
6037			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
6038				lpfc_issue_reg_vpi(phba, vport);
6039			else  {
6040				lpfc_issue_clear_la(phba, vport);
6041				vport->port_state = LPFC_VPORT_READY;
6042			}
6043		}
6044
6045		/* Setup and issue mailbox INITIALIZE LINK command */
6046		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6047		if (!initlinkmbox) {
6048			lpfc_printf_vlog(vport, KERN_ERR,
6049					 LOG_TRACE_EVENT,
6050					 "0206 Device Discovery "
6051					 "completion error\n");
6052			phba->link_state = LPFC_HBA_ERROR;
6053			break;
6054		}
6055
6056		lpfc_linkdown(phba);
6057		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
6058			       phba->cfg_link_speed);
6059		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
6060		initlinkmbox->vport = vport;
6061		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6062		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
6063		lpfc_set_loopback_flag(phba);
6064		if (rc == MBX_NOT_FINISHED)
6065			mempool_free(initlinkmbox, phba->mbox_mem_pool);
6066
6067		break;
6068
6069	case LPFC_DISC_AUTH:
6070	/* Node Authentication timeout */
6071		lpfc_printf_vlog(vport, KERN_ERR,
6072				 LOG_TRACE_EVENT,
6073				 "0227 Node Authentication timeout\n");
6074		lpfc_disc_flush_list(vport);
6075
6076		/*
6077		 * set port_state to PORT_READY if SLI2.
6078		 * cmpl_reg_vpi will set port_state to READY for SLI3.
6079		 */
6080		if (phba->sli_rev < LPFC_SLI_REV4) {
6081			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
6082				lpfc_issue_reg_vpi(phba, vport);
6083			else  {	/* NPIV Not enabled */
6084				lpfc_issue_clear_la(phba, vport);
6085				vport->port_state = LPFC_VPORT_READY;
6086			}
6087		}
6088		break;
6089
6090	case LPFC_VPORT_READY:
6091		if (vport->fc_flag & FC_RSCN_MODE) {
6092			lpfc_printf_vlog(vport, KERN_ERR,
6093					 LOG_TRACE_EVENT,
6094					 "0231 RSCN timeout Data: x%x "
6095					 "x%x\n",
6096					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
6097
6098			/* Cleanup any outstanding ELS commands */
6099			lpfc_els_flush_cmd(vport);
6100
6101			lpfc_els_flush_rscn(vport);
6102			lpfc_disc_flush_list(vport);
6103		}
6104		break;
6105
6106	default:
6107		lpfc_printf_vlog(vport, KERN_ERR,
6108				 LOG_TRACE_EVENT,
6109				 "0273 Unexpected discovery timeout, "
6110				 "vport State x%x\n", vport->port_state);
6111		break;
6112	}
6113
6114	switch (phba->link_state) {
6115	case LPFC_CLEAR_LA:
6116				/* CLEAR LA timeout */
6117		lpfc_printf_vlog(vport, KERN_ERR,
6118				 LOG_TRACE_EVENT,
6119				 "0228 CLEAR LA timeout\n");
6120		clrlaerr = 1;
6121		break;
6122
6123	case LPFC_LINK_UP:
6124		lpfc_issue_clear_la(phba, vport);
6125		fallthrough;
6126	case LPFC_LINK_UNKNOWN:
6127	case LPFC_WARM_START:
6128	case LPFC_INIT_START:
6129	case LPFC_INIT_MBX_CMDS:
6130	case LPFC_LINK_DOWN:
6131	case LPFC_HBA_ERROR:
6132		lpfc_printf_vlog(vport, KERN_ERR,
6133				 LOG_TRACE_EVENT,
6134				 "0230 Unexpected timeout, hba link "
6135				 "state x%x\n", phba->link_state);
6136		clrlaerr = 1;
6137		break;
6138
6139	case LPFC_HBA_READY:
6140		break;
6141	}
6142
6143	if (clrlaerr) {
6144		lpfc_disc_flush_list(vport);
6145		if (phba->sli_rev != LPFC_SLI_REV4) {
6146			psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
6147				~LPFC_STOP_IOCB_EVENT;
6148			psli->sli3_ring[LPFC_FCP_RING].flag &=
6149				~LPFC_STOP_IOCB_EVENT;
6150		}
6151		vport->port_state = LPFC_VPORT_READY;
6152	}
6153	return;
6154}
6155
6156/*
6157 * This routine handles processing a NameServer REG_LOGIN mailbox
6158 * command upon completion. It is setup in the LPFC_MBOXQ
6159 * as the completion routine when the command is
6160 * handed off to the SLI layer.
6161 */
6162void
6163lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6164{
6165	MAILBOX_t *mb = &pmb->u.mb;
6166	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
6167	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
6168	struct lpfc_vport    *vport = pmb->vport;
6169
6170	pmb->ctx_buf = NULL;
6171	pmb->ctx_ndlp = NULL;
6172
6173	if (phba->sli_rev < LPFC_SLI_REV4)
6174		ndlp->nlp_rpi = mb->un.varWords[0];
6175	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
6176	ndlp->nlp_type |= NLP_FABRIC;
6177	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
6178	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
6179			 "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
6180			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
6181			 kref_read(&ndlp->kref),
6182			 ndlp->nlp_usg_map, ndlp);
6183	/*
6184	 * Start issuing Fabric-Device Management Interface (FDMI) command to
6185	 * 0xfffffa (FDMI well known port).
6186	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
6187	 * DPRT -> RPRT (vports)
6188	 */
6189	if (vport->port_type == LPFC_PHYSICAL_PORT)
6190		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
6191	else
6192		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
6193
6194
6195	/* decrement the node reference count held for this callback
6196	 * function.
6197	 */
6198	lpfc_nlp_put(ndlp);
6199	lpfc_mbuf_free(phba, mp->virt, mp->phys);
6200	kfree(mp);
6201	mempool_free(pmb, phba->mbox_mem_pool);
6202
6203	return;
6204}
6205
6206static int
6207lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
6208{
6209	uint16_t *rpi = param;
6210
6211	/* check for active node */
6212	if (!NLP_CHK_NODE_ACT(ndlp))
6213		return 0;
6214
6215	return ndlp->nlp_rpi == *rpi;
6216}
6217
6218static int
6219lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
6220{
6221	return memcmp(&ndlp->nlp_portname, param,
6222		      sizeof(ndlp->nlp_portname)) == 0;
6223}
6224
6225static struct lpfc_nodelist *
6226__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
6227{
6228	struct lpfc_nodelist *ndlp;
6229
6230	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6231		if (filter(ndlp, param)) {
6232			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6233					 "3185 FIND node filter %ps DID "
6234					 "ndlp x%px did x%x flg x%x st x%x "
6235					 "xri x%x type x%x rpi x%x\n",
6236					 filter, ndlp, ndlp->nlp_DID,
6237					 ndlp->nlp_flag, ndlp->nlp_state,
6238					 ndlp->nlp_xri, ndlp->nlp_type,
6239					 ndlp->nlp_rpi);
6240			return ndlp;
6241		}
6242	}
6243	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6244			 "3186 FIND node filter %ps NOT FOUND.\n", filter);
6245	return NULL;
6246}
6247
6248/*
6249 * This routine looks up the ndlp lists for the given RPI. If rpi found it
6250 * returns the node list element pointer else return NULL.
6251 */
6252struct lpfc_nodelist *
6253__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6254{
6255	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
6256}
6257
6258/*
6259 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
6260 * returns the node element list pointer else return NULL.
6261 */
6262struct lpfc_nodelist *
6263lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
6264{
6265	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6266	struct lpfc_nodelist *ndlp;
6267
6268	spin_lock_irq(shost->host_lock);
6269	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
6270	spin_unlock_irq(shost->host_lock);
6271	return ndlp;
6272}
6273
6274/*
6275 * This routine looks up the ndlp lists for the given RPI. If the rpi
6276 * is found, the routine returns the node element list pointer else
6277 * return NULL.
6278 */
6279struct lpfc_nodelist *
6280lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6281{
6282	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6283	struct lpfc_nodelist *ndlp;
6284	unsigned long flags;
6285
6286	spin_lock_irqsave(shost->host_lock, flags);
6287	ndlp = __lpfc_findnode_rpi(vport, rpi);
6288	spin_unlock_irqrestore(shost->host_lock, flags);
6289	return ndlp;
6290}
6291
6292/**
6293 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
6294 * @phba: pointer to lpfc hba data structure.
6295 * @vpi: the physical host virtual N_Port identifier.
6296 *
6297 * This routine finds a vport on a HBA (referred by @phba) through a
6298 * @vpi. The function walks the HBA's vport list and returns the address
6299 * of the vport with the matching @vpi.
6300 *
6301 * Return code
6302 *    NULL - No vport with the matching @vpi found
6303 *    Otherwise - Address to the vport with the matching @vpi.
6304 **/
6305struct lpfc_vport *
6306lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6307{
6308	struct lpfc_vport *vport;
6309	unsigned long flags;
6310	int i = 0;
6311
6312	/* The physical ports are always vpi 0 - translate is unnecessary. */
6313	if (vpi > 0) {
6314		/*
6315		 * Translate the physical vpi to the logical vpi.  The
6316		 * vport stores the logical vpi.
6317		 */
6318		for (i = 0; i < phba->max_vpi; i++) {
6319			if (vpi == phba->vpi_ids[i])
6320				break;
6321		}
6322
6323		if (i >= phba->max_vpi) {
6324			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6325					"2936 Could not find Vport mapped "
6326					"to vpi %d\n", vpi);
6327			return NULL;
6328		}
6329	}
6330
6331	spin_lock_irqsave(&phba->port_list_lock, flags);
6332	list_for_each_entry(vport, &phba->port_list, listentry) {
6333		if (vport->vpi == i) {
6334			spin_unlock_irqrestore(&phba->port_list_lock, flags);
6335			return vport;
6336		}
6337	}
6338	spin_unlock_irqrestore(&phba->port_list_lock, flags);
6339	return NULL;
6340}
6341
6342struct lpfc_nodelist *
6343lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
6344{
6345	struct lpfc_nodelist *ndlp;
6346	int rpi = LPFC_RPI_ALLOC_ERROR;
6347
6348	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6349		rpi = lpfc_sli4_alloc_rpi(vport->phba);
6350		if (rpi == LPFC_RPI_ALLOC_ERROR)
6351			return NULL;
6352	}
6353
6354	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6355	if (!ndlp) {
6356		if (vport->phba->sli_rev == LPFC_SLI_REV4)
6357			lpfc_sli4_free_rpi(vport->phba, rpi);
6358		return NULL;
6359	}
6360
6361	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
6362
6363	lpfc_initialize_node(vport, ndlp, did);
6364	INIT_LIST_HEAD(&ndlp->nlp_listp);
6365	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6366		ndlp->nlp_rpi = rpi;
6367		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
6368				 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
6369				 "flg:x%x refcnt:%d map:x%x\n",
6370				 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
6371				 ndlp->nlp_flag, kref_read(&ndlp->kref),
6372				 ndlp->nlp_usg_map);
6373
6374		ndlp->active_rrqs_xri_bitmap =
6375				mempool_alloc(vport->phba->active_rrq_pool,
6376					      GFP_KERNEL);
6377		if (ndlp->active_rrqs_xri_bitmap)
6378			memset(ndlp->active_rrqs_xri_bitmap, 0,
6379			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
6380	}
6381
6382
6383
6384	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
6385		"node init:       did:x%x",
6386		ndlp->nlp_DID, 0, 0);
6387
6388	return ndlp;
6389}
6390
6391/* This routine releases all resources associated with a specifc NPort's ndlp
6392 * and mempool_free's the nodelist.
6393 */
6394static void
6395lpfc_nlp_release(struct kref *kref)
6396{
6397	struct lpfc_hba *phba;
6398	unsigned long flags;
6399	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6400						  kref);
6401
6402	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6403		"node release:    did:x%x flg:x%x type:x%x",
6404		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6405
6406	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
6407			"0279 %s: ndlp:x%px did %x "
6408			"usgmap:x%x refcnt:%d rpi:%x\n",
6409			__func__,
6410			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
6411			kref_read(&ndlp->kref), ndlp->nlp_rpi);
6412
6413	/* remove ndlp from action. */
6414	lpfc_nlp_remove(ndlp->vport, ndlp);
6415
6416	/* clear the ndlp active flag for all release cases */
6417	phba = ndlp->phba;
6418	spin_lock_irqsave(&phba->ndlp_lock, flags);
6419	NLP_CLR_NODE_ACT(ndlp);
6420	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6421
6422	/* free ndlp memory for final ndlp release */
6423	if (NLP_CHK_FREE_REQ(ndlp)) {
6424		kfree(ndlp->lat_data);
6425		if (phba->sli_rev == LPFC_SLI_REV4)
6426			mempool_free(ndlp->active_rrqs_xri_bitmap,
6427				     ndlp->phba->active_rrq_pool);
6428		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6429	}
6430}
6431
6432/* This routine bumps the reference count for a ndlp structure to ensure
6433 * that one discovery thread won't free a ndlp while another discovery thread
6434 * is using it.
6435 */
6436struct lpfc_nodelist *
6437lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6438{
6439	struct lpfc_hba *phba;
6440	unsigned long flags;
6441
6442	if (ndlp) {
6443		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6444			"node get:        did:x%x flg:x%x refcnt:x%x",
6445			ndlp->nlp_DID, ndlp->nlp_flag,
6446			kref_read(&ndlp->kref));
6447		/* The check of ndlp usage to prevent incrementing the
6448		 * ndlp reference count that is in the process of being
6449		 * released.
6450		 */
6451		phba = ndlp->phba;
6452		spin_lock_irqsave(&phba->ndlp_lock, flags);
6453		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
6454			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6455			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6456				"0276 %s: ndlp:x%px "
6457				"usgmap:x%x refcnt:%d\n",
6458				__func__, (void *)ndlp, ndlp->nlp_usg_map,
6459				kref_read(&ndlp->kref));
6460			return NULL;
6461		} else
6462			kref_get(&ndlp->kref);
6463		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6464	}
6465	return ndlp;
6466}
6467
6468/* This routine decrements the reference count for a ndlp structure. If the
6469 * count goes to 0, this indicates the the associated nodelist should be
6470 * freed. Returning 1 indicates the ndlp resource has been released; on the
6471 * other hand, returning 0 indicates the ndlp resource has not been released
6472 * yet.
6473 */
6474int
6475lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6476{
6477	struct lpfc_hba *phba;
6478	unsigned long flags;
6479
6480	if (!ndlp)
6481		return 1;
6482
6483	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6484			"node put:        did:x%x flg:x%x refcnt:x%x",
6485			ndlp->nlp_DID, ndlp->nlp_flag,
6486			kref_read(&ndlp->kref));
6487	phba = ndlp->phba;
6488	spin_lock_irqsave(&phba->ndlp_lock, flags);
6489	/* Check the ndlp memory free acknowledge flag to avoid the
6490	 * possible race condition that kref_put got invoked again
6491	 * after previous one has done ndlp memory free.
6492	 */
6493	if (NLP_CHK_FREE_ACK(ndlp)) {
6494		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6495		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6496				"0274 %s: ndlp:x%px "
6497				"usgmap:x%x refcnt:%d\n",
6498				__func__, (void *)ndlp, ndlp->nlp_usg_map,
6499				kref_read(&ndlp->kref));
6500		return 1;
6501	}
6502	/* Check the ndlp inactivate log flag to avoid the possible
6503	 * race condition that kref_put got invoked again after ndlp
6504	 * is already in inactivating state.
6505	 */
6506	if (NLP_CHK_IACT_REQ(ndlp)) {
6507		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6508		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6509				"0275 %s: ndlp:x%px "
6510				"usgmap:x%x refcnt:%d\n",
6511				__func__, (void *)ndlp, ndlp->nlp_usg_map,
6512				kref_read(&ndlp->kref));
6513		return 1;
6514	}
6515	/* For last put, mark the ndlp usage flags to make sure no
6516	 * other kref_get and kref_put on the same ndlp shall get
6517	 * in between the process when the final kref_put has been
6518	 * invoked on this ndlp.
6519	 */
6520	if (kref_read(&ndlp->kref) == 1) {
6521		/* Indicate ndlp is put to inactive state. */
6522		NLP_SET_IACT_REQ(ndlp);
6523		/* Acknowledge ndlp memory free has been seen. */
6524		if (NLP_CHK_FREE_REQ(ndlp))
6525			NLP_SET_FREE_ACK(ndlp);
6526	}
6527	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6528	/* Note, the kref_put returns 1 when decrementing a reference
6529	 * count that was 1, it invokes the release callback function,
6530	 * but it still left the reference count as 1 (not actually
6531	 * performs the last decrementation). Otherwise, it actually
6532	 * decrements the reference count and returns 0.
6533	 */
6534	return kref_put(&ndlp->kref, lpfc_nlp_release);
6535}
6536
6537/* This routine free's the specified nodelist if it is not in use
6538 * by any other discovery thread. This routine returns 1 if the
6539 * ndlp has been freed. A return value of 0 indicates the ndlp is
6540 * not yet been released.
6541 */
6542int
6543lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
6544{
6545	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6546		"node not used:   did:x%x flg:x%x refcnt:x%x",
6547		ndlp->nlp_DID, ndlp->nlp_flag,
6548		kref_read(&ndlp->kref));
6549	if (kref_read(&ndlp->kref) == 1)
6550		if (lpfc_nlp_put(ndlp))
6551			return 1;
6552	return 0;
6553}
6554
6555/**
6556 * lpfc_fcf_inuse - Check if FCF can be unregistered.
6557 * @phba: Pointer to hba context object.
6558 *
6559 * This function iterate through all FC nodes associated
6560 * will all vports to check if there is any node with
6561 * fc_rports associated with it. If there is an fc_rport
6562 * associated with the node, then the node is either in
6563 * discovered state or its devloss_timer is pending.
6564 */
6565static int
6566lpfc_fcf_inuse(struct lpfc_hba *phba)
6567{
6568	struct lpfc_vport **vports;
6569	int i, ret = 0;
6570	struct lpfc_nodelist *ndlp;
6571	struct Scsi_Host  *shost;
6572
6573	vports = lpfc_create_vport_work_array(phba);
6574
6575	/* If driver cannot allocate memory, indicate fcf is in use */
6576	if (!vports)
6577		return 1;
6578
6579	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6580		shost = lpfc_shost_from_vport(vports[i]);
6581		spin_lock_irq(shost->host_lock);
6582		/*
6583		 * IF the CVL_RCVD bit is not set then we have sent the
6584		 * flogi.
6585		 * If dev_loss fires while we are waiting we do not want to
6586		 * unreg the fcf.
6587		 */
6588		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
6589			spin_unlock_irq(shost->host_lock);
6590			ret =  1;
6591			goto out;
6592		}
6593		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6594			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
6595			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6596				ret = 1;
6597				spin_unlock_irq(shost->host_lock);
6598				goto out;
6599			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
6600				ret = 1;
6601				lpfc_printf_log(phba, KERN_INFO,
6602						LOG_NODE | LOG_DISCOVERY,
6603						"2624 RPI %x DID %x flag %x "
6604						"still logged in\n",
6605						ndlp->nlp_rpi, ndlp->nlp_DID,
6606						ndlp->nlp_flag);
6607			}
6608		}
6609		spin_unlock_irq(shost->host_lock);
6610	}
6611out:
6612	lpfc_destroy_vport_work_array(phba, vports);
6613	return ret;
6614}
6615
6616/**
6617 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
6618 * @phba: Pointer to hba context object.
6619 * @mboxq: Pointer to mailbox object.
6620 *
6621 * This function frees memory associated with the mailbox command.
6622 */
6623void
6624lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6625{
6626	struct lpfc_vport *vport = mboxq->vport;
6627	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6628
6629	if (mboxq->u.mb.mbxStatus) {
6630		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6631				"2555 UNREG_VFI mbxStatus error x%x "
6632				"HBA state x%x\n",
6633				mboxq->u.mb.mbxStatus, vport->port_state);
6634	}
6635	spin_lock_irq(shost->host_lock);
6636	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
6637	spin_unlock_irq(shost->host_lock);
6638	mempool_free(mboxq, phba->mbox_mem_pool);
6639	return;
6640}
6641
6642/**
6643 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
6644 * @phba: Pointer to hba context object.
6645 * @mboxq: Pointer to mailbox object.
6646 *
6647 * This function frees memory associated with the mailbox command.
6648 */
6649static void
6650lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6651{
6652	struct lpfc_vport *vport = mboxq->vport;
6653
6654	if (mboxq->u.mb.mbxStatus) {
6655		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6656				"2550 UNREG_FCFI mbxStatus error x%x "
6657				"HBA state x%x\n",
6658				mboxq->u.mb.mbxStatus, vport->port_state);
6659	}
6660	mempool_free(mboxq, phba->mbox_mem_pool);
6661	return;
6662}
6663
6664/**
6665 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
6666 * @phba: Pointer to hba context object.
6667 *
6668 * This function prepare the HBA for unregistering the currently registered
6669 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
6670 * VFIs.
6671 */
6672int
6673lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
6674{
6675	struct lpfc_vport **vports;
6676	struct lpfc_nodelist *ndlp;
6677	struct Scsi_Host *shost;
6678	int i = 0, rc;
6679
6680	/* Unregister RPIs */
6681	if (lpfc_fcf_inuse(phba))
6682		lpfc_unreg_hba_rpis(phba);
6683
6684	/* At this point, all discovery is aborted */
6685	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
6686
6687	/* Unregister VPIs */
6688	vports = lpfc_create_vport_work_array(phba);
6689	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
6690		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6691			/* Stop FLOGI/FDISC retries */
6692			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6693			if (ndlp)
6694				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6695			lpfc_cleanup_pending_mbox(vports[i]);
6696			if (phba->sli_rev == LPFC_SLI_REV4)
6697				lpfc_sli4_unreg_all_rpis(vports[i]);
6698			lpfc_mbx_unreg_vpi(vports[i]);
6699			shost = lpfc_shost_from_vport(vports[i]);
6700			spin_lock_irq(shost->host_lock);
6701			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6702			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
6703			spin_unlock_irq(shost->host_lock);
6704		}
6705	lpfc_destroy_vport_work_array(phba, vports);
6706	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
6707		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6708		if (ndlp)
6709			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
6710		lpfc_cleanup_pending_mbox(phba->pport);
6711		if (phba->sli_rev == LPFC_SLI_REV4)
6712			lpfc_sli4_unreg_all_rpis(phba->pport);
6713		lpfc_mbx_unreg_vpi(phba->pport);
6714		shost = lpfc_shost_from_vport(phba->pport);
6715		spin_lock_irq(shost->host_lock);
6716		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6717		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
6718		spin_unlock_irq(shost->host_lock);
6719	}
6720
6721	/* Cleanup any outstanding ELS commands */
6722	lpfc_els_flush_all_cmd(phba);
6723
6724	/* Unregister the physical port VFI */
6725	rc = lpfc_issue_unreg_vfi(phba->pport);
6726	return rc;
6727}
6728
6729/**
6730 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
6731 * @phba: Pointer to hba context object.
6732 *
6733 * This function issues synchronous unregister FCF mailbox command to HBA to
6734 * unregister the currently registered FCF record. The driver does not reset
6735 * the driver FCF usage state flags.
6736 *
6737 * Return 0 if successfully issued, none-zero otherwise.
6738 */
6739int
6740lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
6741{
6742	LPFC_MBOXQ_t *mbox;
6743	int rc;
6744
6745	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6746	if (!mbox) {
6747		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6748				"2551 UNREG_FCFI mbox allocation failed"
6749				"HBA state x%x\n", phba->pport->port_state);
6750		return -ENOMEM;
6751	}
6752	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
6753	mbox->vport = phba->pport;
6754	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
6755	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6756
6757	if (rc == MBX_NOT_FINISHED) {
6758		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6759				"2552 Unregister FCFI command failed rc x%x "
6760				"HBA state x%x\n",
6761				rc, phba->pport->port_state);
6762		return -EINVAL;
6763	}
6764	return 0;
6765}
6766
6767/**
6768 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
6769 * @phba: Pointer to hba context object.
6770 *
6771 * This function unregisters the currently reigstered FCF. This function
6772 * also tries to find another FCF for discovery by rescan the HBA FCF table.
6773 */
6774void
6775lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
6776{
6777	int rc;
6778
6779	/* Preparation for unregistering fcf */
6780	rc = lpfc_unregister_fcf_prep(phba);
6781	if (rc) {
6782		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6783				"2748 Failed to prepare for unregistering "
6784				"HBA's FCF record: rc=%d\n", rc);
6785		return;
6786	}
6787
6788	/* Now, unregister FCF record and reset HBA FCF state */
6789	rc = lpfc_sli4_unregister_fcf(phba);
6790	if (rc)
6791		return;
6792	/* Reset HBA FCF states after successful unregister FCF */
6793	phba->fcf.fcf_flag = 0;
6794	phba->fcf.current_rec.flag = 0;
6795
6796	/*
6797	 * If driver is not unloading, check if there is any other
6798	 * FCF record that can be used for discovery.
6799	 */
6800	if ((phba->pport->load_flag & FC_UNLOADING) ||
6801	    (phba->link_state < LPFC_LINK_UP))
6802		return;
6803
6804	/* This is considered as the initial FCF discovery scan */
6805	spin_lock_irq(&phba->hbalock);
6806	phba->fcf.fcf_flag |= FCF_INIT_DISC;
6807	spin_unlock_irq(&phba->hbalock);
6808
6809	/* Reset FCF roundrobin bmask for new discovery */
6810	lpfc_sli4_clear_fcf_rr_bmask(phba);
6811
6812	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
6813
6814	if (rc) {
6815		spin_lock_irq(&phba->hbalock);
6816		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
6817		spin_unlock_irq(&phba->hbalock);
6818		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6819				"2553 lpfc_unregister_unused_fcf failed "
6820				"to read FCF record HBA state x%x\n",
6821				phba->pport->port_state);
6822	}
6823}
6824
6825/**
6826 * lpfc_unregister_fcf - Unregister the currently registered fcf record
6827 * @phba: Pointer to hba context object.
6828 *
6829 * This function just unregisters the currently reigstered FCF. It does not
6830 * try to find another FCF for discovery.
6831 */
6832void
6833lpfc_unregister_fcf(struct lpfc_hba *phba)
6834{
6835	int rc;
6836
6837	/* Preparation for unregistering fcf */
6838	rc = lpfc_unregister_fcf_prep(phba);
6839	if (rc) {
6840		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6841				"2749 Failed to prepare for unregistering "
6842				"HBA's FCF record: rc=%d\n", rc);
6843		return;
6844	}
6845
6846	/* Now, unregister FCF record and reset HBA FCF state */
6847	rc = lpfc_sli4_unregister_fcf(phba);
6848	if (rc)
6849		return;
6850	/* Set proper HBA FCF states after successful unregister FCF */
6851	spin_lock_irq(&phba->hbalock);
6852	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
6853	spin_unlock_irq(&phba->hbalock);
6854}
6855
6856/**
6857 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
6858 * @phba: Pointer to hba context object.
6859 *
6860 * This function check if there are any connected remote port for the FCF and
6861 * if all the devices are disconnected, this function unregister FCFI.
6862 * This function also tries to use another FCF for discovery.
6863 */
6864void
6865lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
6866{
6867	/*
6868	 * If HBA is not running in FIP mode, if HBA does not support
6869	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
6870	 * registered, do nothing.
6871	 */
6872	spin_lock_irq(&phba->hbalock);
6873	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
6874	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
6875	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
6876	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
6877	    (phba->pport->port_state == LPFC_FLOGI)) {
6878		spin_unlock_irq(&phba->hbalock);
6879		return;
6880	}
6881	spin_unlock_irq(&phba->hbalock);
6882
6883	if (lpfc_fcf_inuse(phba))
6884		return;
6885
6886	lpfc_unregister_fcf_rescan(phba);
6887}
6888
6889/**
6890 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
6891 * @phba: Pointer to hba context object.
6892 * @buff: Buffer containing the FCF connection table as in the config
6893 *         region.
6894 * This function create driver data structure for the FCF connection
6895 * record table read from config region 23.
6896 */
6897static void
6898lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
6899	uint8_t *buff)
6900{
6901	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6902	struct lpfc_fcf_conn_hdr *conn_hdr;
6903	struct lpfc_fcf_conn_rec *conn_rec;
6904	uint32_t record_count;
6905	int i;
6906
6907	/* Free the current connect table */
6908	list_for_each_entry_safe(conn_entry, next_conn_entry,
6909		&phba->fcf_conn_rec_list, list) {
6910		list_del_init(&conn_entry->list);
6911		kfree(conn_entry);
6912	}
6913
6914	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
6915	record_count = conn_hdr->length * sizeof(uint32_t)/
6916		sizeof(struct lpfc_fcf_conn_rec);
6917
6918	conn_rec = (struct lpfc_fcf_conn_rec *)
6919		(buff + sizeof(struct lpfc_fcf_conn_hdr));
6920
6921	for (i = 0; i < record_count; i++) {
6922		if (!(conn_rec[i].flags & FCFCNCT_VALID))
6923			continue;
6924		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
6925			GFP_KERNEL);
6926		if (!conn_entry) {
6927			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6928					"2566 Failed to allocate connection"
6929					" table entry\n");
6930			return;
6931		}
6932
6933		memcpy(&conn_entry->conn_rec, &conn_rec[i],
6934			sizeof(struct lpfc_fcf_conn_rec));
6935		list_add_tail(&conn_entry->list,
6936			&phba->fcf_conn_rec_list);
6937	}
6938
6939	if (!list_empty(&phba->fcf_conn_rec_list)) {
6940		i = 0;
6941		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
6942				    list) {
6943			conn_rec = &conn_entry->conn_rec;
6944			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6945					"3345 FCF connection list rec[%02d]: "
6946					"flags:x%04x, vtag:x%04x, "
6947					"fabric_name:x%02x:%02x:%02x:%02x:"
6948					"%02x:%02x:%02x:%02x, "
6949					"switch_name:x%02x:%02x:%02x:%02x:"
6950					"%02x:%02x:%02x:%02x\n", i++,
6951					conn_rec->flags, conn_rec->vlan_tag,
6952					conn_rec->fabric_name[0],
6953					conn_rec->fabric_name[1],
6954					conn_rec->fabric_name[2],
6955					conn_rec->fabric_name[3],
6956					conn_rec->fabric_name[4],
6957					conn_rec->fabric_name[5],
6958					conn_rec->fabric_name[6],
6959					conn_rec->fabric_name[7],
6960					conn_rec->switch_name[0],
6961					conn_rec->switch_name[1],
6962					conn_rec->switch_name[2],
6963					conn_rec->switch_name[3],
6964					conn_rec->switch_name[4],
6965					conn_rec->switch_name[5],
6966					conn_rec->switch_name[6],
6967					conn_rec->switch_name[7]);
6968		}
6969	}
6970}
6971
6972/**
6973 * lpfc_read_fcoe_param - Read FCoe parameters from conf region..
6974 * @phba: Pointer to hba context object.
6975 * @buff: Buffer containing the FCoE parameter data structure.
6976 *
6977 *  This function update driver data structure with config
6978 *  parameters read from config region 23.
6979 */
6980static void
6981lpfc_read_fcoe_param(struct lpfc_hba *phba,
6982			uint8_t *buff)
6983{
6984	struct lpfc_fip_param_hdr *fcoe_param_hdr;
6985	struct lpfc_fcoe_params *fcoe_param;
6986
6987	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
6988		buff;
6989	fcoe_param = (struct lpfc_fcoe_params *)
6990		(buff + sizeof(struct lpfc_fip_param_hdr));
6991
6992	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
6993		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
6994		return;
6995
6996	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
6997		phba->valid_vlan = 1;
6998		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
6999			0xFFF;
7000	}
7001
7002	phba->fc_map[0] = fcoe_param->fc_map[0];
7003	phba->fc_map[1] = fcoe_param->fc_map[1];
7004	phba->fc_map[2] = fcoe_param->fc_map[2];
7005	return;
7006}
7007
7008/**
7009 * lpfc_get_rec_conf23 - Get a record type in config region data.
7010 * @buff: Buffer containing config region 23 data.
7011 * @size: Size of the data buffer.
7012 * @rec_type: Record type to be searched.
7013 *
7014 * This function searches config region data to find the beginning
7015 * of the record specified by record_type. If record found, this
7016 * function return pointer to the record else return NULL.
7017 */
7018static uint8_t *
7019lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
7020{
7021	uint32_t offset = 0, rec_length;
7022
7023	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
7024		(size < sizeof(uint32_t)))
7025		return NULL;
7026
7027	rec_length = buff[offset + 1];
7028
7029	/*
7030	 * One TLV record has one word header and number of data words
7031	 * specified in the rec_length field of the record header.
7032	 */
7033	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
7034		<= size) {
7035		if (buff[offset] == rec_type)
7036			return &buff[offset];
7037
7038		if (buff[offset] == LPFC_REGION23_LAST_REC)
7039			return NULL;
7040
7041		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
7042		rec_length = buff[offset + 1];
7043	}
7044	return NULL;
7045}
7046
7047/**
7048 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
7049 * @phba: Pointer to lpfc_hba data structure.
7050 * @buff: Buffer containing config region 23 data.
7051 * @size: Size of the data buffer.
7052 *
7053 * This function parses the FCoE config parameters in config region 23 and
7054 * populate driver data structure with the parameters.
7055 */
7056void
7057lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
7058		uint8_t *buff,
7059		uint32_t size)
7060{
7061	uint32_t offset = 0;
7062	uint8_t *rec_ptr;
7063
7064	/*
7065	 * If data size is less than 2 words signature and version cannot be
7066	 * verified.
7067	 */
7068	if (size < 2*sizeof(uint32_t))
7069		return;
7070
7071	/* Check the region signature first */
7072	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
7073		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7074			"2567 Config region 23 has bad signature\n");
7075		return;
7076	}
7077
7078	offset += 4;
7079
7080	/* Check the data structure version */
7081	if (buff[offset] != LPFC_REGION23_VERSION) {
7082		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7083				"2568 Config region 23 has bad version\n");
7084		return;
7085	}
7086	offset += 4;
7087
7088	/* Read FCoE param record */
7089	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
7090			size - offset, FCOE_PARAM_TYPE);
7091	if (rec_ptr)
7092		lpfc_read_fcoe_param(phba, rec_ptr);
7093
7094	/* Read FCF connection table */
7095	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
7096		size - offset, FCOE_CONN_TBL_TYPE);
7097	if (rec_ptr)
7098		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
7099
7100}
7101