Lines Matching defs:ndlp
80 struct lpfc_nodelist * ndlp;
84 ndlp = rdata->pnode;
86 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
94 phba = ndlp->phba;
96 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
98 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
100 if (ndlp->nlp_sid != NLP_NO_SID) {
101 lpfc_sli_abort_iocb(ndlp->vport,
103 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
114 struct lpfc_nodelist * ndlp;
124 ndlp = rdata->pnode;
125 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
128 vport = ndlp->vport;
133 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
135 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
137 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
141 * appropriately we just need to clean up the ndlp rport info here.
145 put_rport = ndlp->rport != NULL;
147 ndlp->rport = NULL;
149 lpfc_nlp_put(ndlp);
155 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
158 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
162 wwn_to_u64(ndlp->nlp_portname.u.wwn));
164 evtp = &ndlp->dev_loss_evt;
175 ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
181 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
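The fragments above show the dev-loss hand-off: the transport callback pins the node with lpfc_nlp_get() and queues a per-node event that the worker thread consumes later (see the lpfc_work_list_done fragments below, source lines 520-562). A minimal sketch of that contract, assembled from the matched lines; the locking and list handling around it are elided in this listing and assumed:

	/* Timer/transport context: pin the node before queueing the event. */
	evtp = &ndlp->dev_loss_evt;
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);	/* +1 ref held by the event */

	/* Worker thread, later: consume the event and drop that reference. */
	ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
	fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
	lpfc_nlp_put(ndlp);			/* release the queue-time ref */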
196 * @ndlp: Pointer to remote node object.
200 * remote node, including this @ndlp, is still using the FCF; otherwise, this
202 * when the devloss timeout happened to this @ndlp.
205 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
218 rport = ndlp->rport;
219 vport = ndlp->vport;
223 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
229 name = (uint8_t *) &ndlp->nlp_portname;
237 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
239 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
241 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
246 * pointer in ndlp before lpfc_nlp_put.
252 * appropriately we just need to clean up the ndlp rport info here.
255 if (ndlp->nlp_sid != NLP_NO_SID) {
259 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
263 ndlp->rport = NULL;
265 lpfc_nlp_put(ndlp);
271 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
278 ndlp->nlp_DID);
284 ndlp->rport = NULL;
286 lpfc_nlp_put(ndlp);
289 if (ndlp->nlp_type & NLP_FABRIC)
292 if (ndlp->nlp_sid != NLP_NO_SID) {
295 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
305 ndlp->nlp_DID, ndlp->nlp_flag,
306 ndlp->nlp_state, ndlp->nlp_rpi);
314 ndlp->nlp_DID, ndlp->nlp_flag,
315 ndlp->nlp_state, ndlp->nlp_rpi);
318 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
319 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
320 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
321 (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
322 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
323 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
335 * timeout handler and releasing the reference held on the ndlp with
520 struct lpfc_nodelist *ndlp;
533 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
534 lpfc_els_retry_delay_handler(ndlp);
535 free_evt = 0; /* evt is part of ndlp */
539 lpfc_nlp_put(ndlp);
542 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
543 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
548 nlp_did = ndlp->nlp_DID;
549 lpfc_nlp_put(ndlp);
556 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
557 lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
562 lpfc_nlp_put(ndlp);
826 struct lpfc_nodelist *ndlp, *next_ndlp;
828 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
829 if (!NLP_CHK_NODE_ACT(ndlp))
831 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
835 (ndlp->nlp_DID == NameServer_DID)))
836 lpfc_unreg_rpi(vport, ndlp);
840 (!remove && ndlp->nlp_type & NLP_FABRIC))
845 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
846 lpfc_nvmet_invalidate_host(phba, ndlp);
848 lpfc_disc_state_machine(vport, ndlp, NULL,
997 struct lpfc_nodelist *ndlp;
999 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1000 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
1001 if (!NLP_CHK_NODE_ACT(ndlp))
1003 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1005 if (ndlp->nlp_type & NLP_FABRIC) {
1006 /* On Linkup it's safe to clean up the ndlp
1009 if (ndlp->nlp_DID != Fabric_DID)
1010 lpfc_unreg_rpi(vport, ndlp);
1011 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1012 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
1016 lpfc_unreg_rpi(vport, ndlp);
2883 struct lpfc_nodelist *ndlp;
2900 ndlp = lpfc_findnode_did(vport, Fabric_DID);
2901 if (!ndlp)
2907 lpfc_register_new_vport(phba, vport, ndlp);
3595 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3603 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3604 kref_read(&ndlp->kref),
3605 ndlp->nlp_usg_map, ndlp);
3606 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3607 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3609 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3610 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3620 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3625 * if we go through discovery again for this ndlp
3628 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3629 lpfc_unreg_rpi(vport, ndlp);
3633 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3641 lpfc_nlp_put(ndlp);
3919 struct lpfc_nodelist *ndlp;
3922 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3940 /* Decrement the reference count to ndlp after the
3941 * references to the ndlp are done.
3943 lpfc_nlp_put(ndlp);
3948 /* Decrement the reference count to ndlp after the references
3949 * to the ndlp are done.
3951 lpfc_nlp_put(ndlp);
3956 ndlp->nlp_rpi = mb->un.varWords[0];
3957 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3958 ndlp->nlp_type |= NLP_FABRIC;
3959 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3980 * all the current references to the ndlp have been dropped.
3982 lpfc_nlp_put(ndlp);
4072 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4088 lpfc_nlp_put(ndlp);
4093 /* If no other thread is using the ndlp, free it */
4094 lpfc_nlp_not_used(ndlp);
4112 ndlp->nlp_rpi = mb->un.varWords[0];
4113 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4114 ndlp->nlp_type |= NLP_FABRIC;
4115 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4118 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4119 kref_read(&ndlp->kref),
4120 ndlp->nlp_usg_map, ndlp);
4155 lpfc_nlp_put(ndlp);
4164 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4176 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
4177 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4178 rport_ids.port_id = ndlp->nlp_DID;
4188 rport = ndlp->rport;
4192 ndlp->rport = NULL;
4194 if (rdata->pnode == ndlp)
4195 lpfc_nlp_put(ndlp);
4204 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4210 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4218 rport->maxframe_size = ndlp->nlp_maxframe;
4219 rport->supported_classes = ndlp->nlp_class_sup;
4221 rdata->pnode = lpfc_nlp_get(ndlp);
4223 if (ndlp->nlp_type & NLP_FCP_TARGET)
4225 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
4227 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
4229 if (ndlp->nlp_type & NLP_NVME_TARGET)
4231 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
4237 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4239 ndlp->nlp_DID, rport, rport_ids.roles);
4243 ndlp->nlp_sid = rport->scsi_target_id;
4249 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4251 struct fc_rport *rport = ndlp->rport;
4252 struct lpfc_vport *vport = ndlp->vport;
4259 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4263 ndlp->nlp_DID, rport);
4310 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4316 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4317 ndlp->nlp_type |= NLP_FC_NODE;
4320 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4322 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4327 if (ndlp->rport) {
4329 lpfc_unregister_remote_port(ndlp);
4332 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4336 if (ndlp->nlp_type & NLP_NVME_TARGET)
4337 lpfc_nvme_unregister_port(vport, ndlp);
4340 lpfc_nlp_put(ndlp);
4349 if (ndlp->nlp_fc4_type ||
4350 ndlp->nlp_DID == Fabric_DID ||
4351 ndlp->nlp_DID == NameServer_DID ||
4352 ndlp->nlp_DID == FDMI_DID) {
4358 lpfc_register_remote_port(vport, ndlp);
4362 ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4368 if (ndlp->nlp_type & NLP_NVME_TARGET) {
4370 lpfc_nvme_register_port(vport, ndlp);
4376 lpfc_nlp_get(ndlp);
4387 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4391 if (!ndlp->lat_data)
4395 "0x%x\n", ndlp->nlp_DID);
4404 (ndlp->nlp_type & NLP_FCP_TARGET) &&
4405 (!ndlp->rport ||
4406 ndlp->rport->scsi_target_id == -1 ||
4407 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4409 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4411 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4438 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4442 int old_state = ndlp->nlp_state;
4447 ndlp->nlp_DID,
4453 ndlp->nlp_DID, old_state, state);
4457 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4459 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4460 ndlp->nlp_type &= ~NLP_FC_NODE;
4463 if (list_empty(&ndlp->nlp_listp)) {
4465 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4470 ndlp->nlp_state = state;
4472 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4476 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4480 if (list_empty(&ndlp->nlp_listp)) {
4482 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4488 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4492 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4493 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4494 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4496 list_del_init(&ndlp->nlp_listp);
4498 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4503 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4505 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4506 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4507 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4508 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4514 * @ndlp: Pointer to FC node object.
4519 * to phba from @ndlp can be obtained indirectly through its reference to
4520 * @vport, a direct reference to phba is taken here by @ndlp. This is
4521 * because the life-span of the @ndlp might go beyond the existence of @vport,
4522 * as the final release of the ndlp is determined by its reference count. And, the
4523 * operation on @ndlp needs the reference to phba.
4526 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4529 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4530 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4531 timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
4532 INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
4534 ndlp->nlp_DID = did;
4535 ndlp->vport = vport;
4536 ndlp->phba = vport->phba;
4537 ndlp->nlp_sid = NLP_NO_SID;
4538 ndlp->nlp_fc4_type = NLP_FC4_NONE;
4539 kref_init(&ndlp->kref);
4540 NLP_INT_NODE_ACT(ndlp);
4541 atomic_set(&ndlp->cmd_pending, 0);
4542 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4543 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
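The kernel-doc above explains why lpfc_initialize_node() stores a direct phba pointer in the node: the ndlp can outlive its @vport, and the final kref release still needs the HBA. A short sketch of that dependency, based on the lpfc_nlp_release() fragments near the end of this listing (surrounding glue is assumed):

	/* Final release path: the vport may already be gone, so the node
	 * reaches the HBA only through its own cached pointer.
	 */
	phba = ndlp->phba;
	NLP_CLR_NODE_ACT(ndlp);		/* mark the node inactive */
	mempool_free(ndlp, ndlp->phba->nlp_mem_pool);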
4547 lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4557 if (!ndlp)
4561 if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
4564 rpi = ndlp->nlp_rpi;
4568 "0359 %s: ndlp:x%px "
4572 (void *)ndlp, ndlp->nlp_usg_map,
4573 kref_read(&ndlp->kref));
4579 /* The ndlp should not be in memory free mode */
4580 if (NLP_CHK_FREE_REQ(ndlp)) {
4583 "0277 %s: ndlp:x%px "
4585 __func__, (void *)ndlp, ndlp->nlp_usg_map,
4586 kref_read(&ndlp->kref));
4589 /* The ndlp should not already be in active mode */
4590 if (NLP_CHK_NODE_ACT(ndlp)) {
4593 "0278 %s: ndlp:x%px "
4595 __func__, (void *)ndlp, ndlp->nlp_usg_map,
4596 kref_read(&ndlp->kref));
4601 did = ndlp->nlp_DID;
4602 flag = (ndlp->nlp_flag & NLP_UNREG_INP);
4604 defer_did = ndlp->nlp_defer_did;
4606 active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
4608 /* Zero the ndlp except for the ndlp linked list pointer */
4609 memset((((char *)ndlp) + sizeof (struct list_head)), 0,
4613 lpfc_initialize_node(vport, ndlp, did);
4614 ndlp->nlp_flag |= flag;
4616 ndlp->nlp_defer_did = defer_did;
4618 ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
4622 ndlp->nlp_rpi = rpi;
4625 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
4626 ndlp->nlp_flag,
4627 kref_read(&ndlp->kref),
4628 ndlp->nlp_usg_map, ndlp);
4633 lpfc_nlp_set_state(vport, ndlp, state);
4638 ndlp->nlp_rpi, ndlp->nlp_DID,
4639 ndlp->nlp_flag,
4640 kref_read(&ndlp->kref),
4641 ndlp->nlp_usg_map, ndlp);
4645 ndlp->nlp_DID, 0, 0);
4646 return ndlp;
4651 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4657 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4662 * the ndlp from the vport. The ndlp marked as UNUSED on the list
4664 * that the ndlp is not already in the UNUSED state before we proceed.
4666 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4668 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4670 lpfc_cleanup_vports_rrqs(vport, ndlp);
4671 lpfc_unreg_rpi(vport, ndlp);
4674 lpfc_nlp_put(ndlp);
4762 struct lpfc_nodelist *ndlp)
4765 struct lpfc_vport *vport = ndlp->vport;
4773 if (iocb->context_un.ndlp == ndlp)
4777 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
4781 if (iocb->context1 == (uint8_t *) ndlp)
4786 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4787 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
4790 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
4799 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
4806 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
4814 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4821 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
4828 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4839 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
4850 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4854 lpfc_fabric_abort_nport(ndlp);
4860 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4862 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
4864 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
4886 struct lpfc_nodelist *ndlp;
4888 ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
4889 if (!ndlp)
4891 lpfc_issue_els_logo(vport, ndlp, 0);
4895 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
4896 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
4900 ndlp->nlp_rpi, ndlp->nlp_DID,
4901 ndlp->nlp_defer_did, ndlp);
4903 ndlp->nlp_flag &= ~NLP_UNREG_INP;
4904 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4905 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4907 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
4908 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
4909 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
4910 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4912 ndlp->nlp_flag &= ~NLP_UNREG_INP;
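The completion above re-drives discovery when a PLOGI had to wait behind an in-flight UNREG_LOGIN. A condensed sketch of that defer/resume pattern, stitched together from the matched lines; the setter side of nlp_defer_did lives outside this listing and is shown here only for illustration:

	/* Requester side (assumed): an UNREG is in progress, park the DID. */
	if (ndlp->nlp_flag & NLP_UNREG_INP)
		ndlp->nlp_defer_did = did;

	/* UNREG_LOGIN mailbox completion: resume the deferred PLOGI. */
	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
	    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
		ndlp->nlp_flag &= ~NLP_UNREG_INP;
		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	}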
4923 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
4927 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
4928 mbox->ctx_ndlp = ndlp;
4935 (kref_read(&ndlp->kref) > 0)) {
4936 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
4943 ndlp->nlp_flag |= NLP_RELEASE_RPI;
4947 lpfc_nlp_get(ndlp);
4949 mbox->ctx_ndlp = ndlp;
4964 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4971 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4972 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
4973 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
4979 ndlp->nlp_rpi, ndlp->nlp_flag,
4980 ndlp->nlp_DID);
4982 /* If there is already an UNREG in progress for this ndlp,
4985 if (ndlp->nlp_flag & NLP_UNREG_INP) {
4991 ndlp->nlp_rpi, ndlp->nlp_DID,
4992 ndlp->nlp_defer_did,
4993 ndlp->nlp_flag, ndlp);
5000 rpi = ndlp->nlp_rpi;
5002 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
5006 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
5012 if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
5015 ndlp->nlp_flag |= NLP_UNREG_INP;
5022 ndlp->nlp_rpi, ndlp->nlp_DID,
5023 ndlp->nlp_flag, ndlp);
5036 "ndlp x%px\n",
5037 ndlp->nlp_rpi, ndlp->nlp_DID,
5038 ndlp->nlp_flag, ndlp);
5045 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5046 lpfc_issue_els_logo(vport, ndlp, 0);
5047 ndlp->nlp_prev_state = ndlp->nlp_state;
5048 lpfc_nlp_set_state(vport, ndlp,
5054 lpfc_no_rpi(phba, ndlp);
5057 ndlp->nlp_rpi = 0;
5058 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
5059 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5061 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5064 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5079 struct lpfc_nodelist *ndlp;
5092 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5093 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5096 lpfc_unreg_rpi(vports[i], ndlp);
5171 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5183 ndlp->nlp_DID, ndlp->nlp_flag,
5184 ndlp->nlp_state, ndlp->nlp_rpi);
5185 if (NLP_CHK_FREE_REQ(ndlp)) {
5187 "0280 %s: ndlp:x%px "
5189 __func__, (void *)ndlp, ndlp->nlp_usg_map,
5190 kref_read(&ndlp->kref));
5191 lpfc_dequeue_node(vport, ndlp);
5194 "0281 %s: ndlp:x%px "
5196 __func__, (void *)ndlp, ndlp->nlp_usg_map,
5197 kref_read(&ndlp->kref));
5198 lpfc_disable_node(vport, ndlp);
5204 /* clean up any ndlp on the mbox q waiting for reglogin cmpl */
5208 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5219 (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
5229 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5238 * the ndlp reference count as we are in the process
5245 lpfc_els_abort(phba, ndlp);
5248 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
5251 ndlp->nlp_last_elscmd = 0;
5252 del_timer_sync(&ndlp->nlp_delayfunc);
5254 list_del_init(&ndlp->els_retry_evt.evt_listp);
5255 list_del_init(&ndlp->dev_loss_evt.evt_listp);
5256 list_del_init(&ndlp->recovery_evt.evt_listp);
5257 lpfc_cleanup_vports_rrqs(vport, ndlp);
5259 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5260 if (!lpfc_unreg_rpi(vport, ndlp)) {
5262 if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
5263 !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
5265 ndlp->nlp_rpi);
5268 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5269 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5283 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5291 lpfc_cancel_retry_delay_tmo(vport, ndlp);
5292 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
5293 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
5294 !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
5302 "ref %d map:x%x ndlp x%px\n",
5303 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5304 kref_read(&ndlp->kref),
5305 ndlp->nlp_usg_map, ndlp);
5308 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
5309 (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
5317 mbox->ctx_ndlp = ndlp;
5325 lpfc_cleanup_node(vport, ndlp);
5328 * ndlp->rport must be set to NULL before it reaches here
5332 if (ndlp->rport) {
5334 * extra lpfc_nlp_put dropped the reference to the ndlp
5340 ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
5341 ndlp->rport);
5342 rport = ndlp->rport;
5345 ndlp->rport = NULL;
5351 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5360 if (ndlp->nlp_DID == did)
5370 ndlpdid.un.word = ndlp->nlp_DID;
5379 * up matching ndlp->nlp_DID 000001 to
5392 matchdid.un.word = ndlp->nlp_DID;
5409 struct lpfc_nodelist *ndlp;
5412 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5413 if (lpfc_matchdid(vport, ndlp, did)) {
5414 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5415 ((uint32_t)ndlp->nlp_xri << 16) |
5416 ((uint32_t)ndlp->nlp_type << 8) |
5417 ((uint32_t)ndlp->nlp_usg_map & 0xff));
5421 ndlp, ndlp->nlp_DID,
5422 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
5423 ndlp->active_rrqs_xri_bitmap);
5424 return ndlp;
5438 struct lpfc_nodelist *ndlp;
5442 ndlp = __lpfc_findnode_did(vport, did);
5444 return ndlp;
5451 struct lpfc_nodelist *ndlp;
5457 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5458 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
5459 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
5460 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5461 ((uint32_t)ndlp->nlp_xri << 16) |
5462 ((uint32_t)ndlp->nlp_type << 8) |
5463 ((uint32_t)ndlp->nlp_rpi & 0xff));
5468 ndlp, ndlp->nlp_DID,
5469 ndlp->nlp_flag, data1,
5470 ndlp->active_rrqs_xri_bitmap);
5471 return ndlp;
5486 struct lpfc_nodelist *ndlp;
5488 ndlp = lpfc_findnode_did(vport, did);
5489 if (!ndlp) {
5495 ndlp = lpfc_nlp_init(vport, did);
5496 if (!ndlp)
5498 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5503 ndlp->nlp_DID, ndlp->nlp_flag,
5504 ndlp->nlp_state, vport->fc_flag);
5507 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5509 return ndlp;
5510 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5513 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
5514 if (!ndlp) {
5516 "0014 Could not enable ndlp\n");
5522 ndlp->nlp_DID, ndlp->nlp_flag,
5523 ndlp->nlp_state, vport->fc_flag);
5526 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5528 return ndlp;
5542 lpfc_cancel_retry_delay_tmo(vport, ndlp);
5547 ndlp->nlp_DID, ndlp->nlp_flag,
5548 ndlp->nlp_state, vport->fc_flag);
5556 return ndlp;
5561 if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
5562 !(ndlp->nlp_type &
5566 ndlp->nlp_prev_state = ndlp->nlp_state;
5567 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5570 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5576 ndlp->nlp_DID, ndlp->nlp_flag,
5577 ndlp->nlp_state, vport->fc_flag);
5578 ndlp = NULL;
5584 ndlp->nlp_DID, ndlp->nlp_flag,
5585 ndlp->nlp_state, vport->fc_flag);
5591 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5592 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5594 ndlp->nlp_flag & NLP_RCV_PLOGI))
5598 return ndlp;
5603 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5606 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5609 return ndlp;
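lpfc_setup_disc_node() above reduces to three outcomes: allocate a fresh node, re-enable an inactive one, or re-mark an active one for discovery. A condensed sketch of the first two branches under those assumptions (vport flag checks and logging elided):

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, did);	/* brand-new node */
		if (!ndlp)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;	/* needs discovery */
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;			/* could not re-enable */
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	}
	return ndlp;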
5810 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5826 if (iocb->context1 != ndlp) {
5839 if (iocb->context1 != ndlp) {
5858 struct lpfc_nodelist *ndlp, *next_ndlp;
5862 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5864 if (!NLP_CHK_NODE_ACT(ndlp))
5866 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5867 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
5868 lpfc_free_tx(phba, ndlp);
5925 struct lpfc_nodelist *ndlp, *next_ndlp;
5951 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5953 if (!NLP_CHK_NODE_ACT(ndlp))
5955 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5957 if (ndlp->nlp_type & NLP_FABRIC) {
5958 /* Clean up the ndlp on Fabric connections */
5959 lpfc_drop_node(vport, ndlp);
5961 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
5965 lpfc_unreg_rpi(vport, ndlp);
6004 /* Next look for NameServer ndlp */
6005 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6006 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
6007 lpfc_els_abort(phba, ndlp);
6167 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
6174 ndlp->nlp_rpi = mb->un.varWords[0];
6175 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
6176 ndlp->nlp_type |= NLP_FABRIC;
6177 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
6180 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
6181 kref_read(&ndlp->kref),
6182 ndlp->nlp_usg_map, ndlp);
6190 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
6192 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
6198 lpfc_nlp_put(ndlp);
6207 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
6212 if (!NLP_CHK_NODE_ACT(ndlp))
6215 return ndlp->nlp_rpi == *rpi;
6219 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
6221 return memcmp(&ndlp->nlp_portname, param,
6222 sizeof(ndlp->nlp_portname)) == 0;
6228 struct lpfc_nodelist *ndlp;
6230 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6231 if (filter(ndlp, param)) {
6234 "ndlp x%px did x%x flg x%x st x%x "
6236 filter, ndlp, ndlp->nlp_DID,
6237 ndlp->nlp_flag, ndlp->nlp_state,
6238 ndlp->nlp_xri, ndlp->nlp_type,
6239 ndlp->nlp_rpi);
6240 return ndlp;
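__lpfc_find_node() above walks vport->fc_nodes and returns the first node accepted by a caller-supplied filter; lpfc_filter_by_rpi() and lpfc_filter_by_wwpn() are the two filters defined here. A sketch of adding another filter in the same shape (lpfc_filter_by_did is hypothetical and only for illustration):

	/* Hypothetical filter: match a node by its DID. */
	static int
	lpfc_filter_by_did(struct lpfc_nodelist *ndlp, void *param)
	{
		uint32_t *did = param;

		return ndlp->nlp_DID == *did;
	}

	/* Call under the same lock the existing wrappers take. */
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_did, &did);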
6249 * This routine looks up the ndlp lists for the given RPI. If the RPI is found, it
6259 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is found, it
6266 struct lpfc_nodelist *ndlp;
6269 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
6271 return ndlp;
6275 * This routine looks up the ndlp lists for the given RPI. If the RPI
6283 struct lpfc_nodelist *ndlp;
6287 ndlp = __lpfc_findnode_rpi(vport, rpi);
6289 return ndlp;
6345 struct lpfc_nodelist *ndlp;
6354 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6355 if (!ndlp) {
6361 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
6363 lpfc_initialize_node(vport, ndlp, did);
6364 INIT_LIST_HEAD(&ndlp->nlp_listp);
6366 ndlp->nlp_rpi = rpi;
6368 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
6370 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
6371 ndlp->nlp_flag, kref_read(&ndlp->kref),
6372 ndlp->nlp_usg_map);
6374 ndlp->active_rrqs_xri_bitmap =
6377 if (ndlp->active_rrqs_xri_bitmap)
6378 memset(ndlp->active_rrqs_xri_bitmap, 0,
6379 ndlp->phba->cfg_rrq_xri_bitmap_sz);
6386 ndlp->nlp_DID, 0, 0);
6388 return ndlp;
6391 /* This routine releases all resources associated with a specific NPort's ndlp
6399 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6402 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6404 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6406 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
6407 "0279 %s: ndlp:x%px did %x "
6410 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
6411 kref_read(&ndlp->kref), ndlp->nlp_rpi);
6413 /* remove ndlp from action. */
6414 lpfc_nlp_remove(ndlp->vport, ndlp);
6416 /* clear the ndlp active flag for all release cases */
6417 phba = ndlp->phba;
6419 NLP_CLR_NODE_ACT(ndlp);
6422 /* free ndlp memory for final ndlp release */
6423 if (NLP_CHK_FREE_REQ(ndlp)) {
6424 kfree(ndlp->lat_data);
6426 mempool_free(ndlp->active_rrqs_xri_bitmap,
6427 ndlp->phba->active_rrq_pool);
6428 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6432 /* This routine bumps the reference count for an ndlp structure to ensure
6433 * that one discovery thread won't free an ndlp while another discovery thread
6437 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6442 if (ndlp) {
6443 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6445 ndlp->nlp_DID, ndlp->nlp_flag,
6446 kref_read(&ndlp->kref));
6447 /* The check of ndlp usage prevents incrementing an
6448 * ndlp reference count that is in the process of being
6451 phba = ndlp->phba;
6453 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
6455 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6456 "0276 %s: ndlp:x%px "
6458 __func__, (void *)ndlp, ndlp->nlp_usg_map,
6459 kref_read(&ndlp->kref));
6462 kref_get(&ndlp->kref);
6465 return ndlp;
6468 /* This routine decrements the reference count for an ndlp structure. If the
6470 * freed. Returning 1 indicates the ndlp resource has been released; on the
6471 * other hand, returning 0 indicates the ndlp resource has not been released
6475 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6480 if (!ndlp)
6483 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6485 ndlp->nlp_DID, ndlp->nlp_flag,
6486 kref_read(&ndlp->kref));
6487 phba = ndlp->phba;
6489 /* Check the ndlp memory free acknowledge flag to avoid the
6491 * after a previous one has done the ndlp memory free.
6493 if (NLP_CHK_FREE_ACK(ndlp)) {
6495 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6496 "0274 %s: ndlp:x%px "
6498 __func__, (void *)ndlp, ndlp->nlp_usg_map,
6499 kref_read(&ndlp->kref));
6502 /* Check the ndlp inactivate log flag to avoid the possible
6503 * race condition where kref_put is invoked again after the ndlp
6506 if (NLP_CHK_IACT_REQ(ndlp)) {
6508 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6509 "0275 %s: ndlp:x%px "
6511 __func__, (void *)ndlp, ndlp->nlp_usg_map,
6512 kref_read(&ndlp->kref));
6515 /* For last put, mark the ndlp usage flags to make sure no
6516 * other kref_get and kref_put on the same ndlp shall get
6518 * invoked on this ndlp.
6520 if (kref_read(&ndlp->kref) == 1) {
6521 /* Indicate ndlp is put to inactive state. */
6522 NLP_SET_IACT_REQ(ndlp);
6523 /* Acknowledge ndlp memory free has been seen. */
6524 if (NLP_CHK_FREE_REQ(ndlp))
6525 NLP_SET_FREE_ACK(ndlp);
6534 return kref_put(&ndlp->kref, lpfc_nlp_release);
6539 * ndlp has been freed. A return value of 0 indicates the ndlp is
6543 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
6545 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6547 ndlp->nlp_DID, ndlp->nlp_flag,
6548 kref_read(&ndlp->kref));
6549 if (kref_read(&ndlp->kref) == 1)
6550 if (lpfc_nlp_put(ndlp))
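Taken together, the three routines above define the node lifetime contract: lpfc_nlp_get() returns the pinned node, or NULL when the node is already being released, and lpfc_nlp_put() returns 1 only on the final release. A minimal caller-side sketch assuming that contract:

	ndlp = lpfc_nlp_get(ndlp);
	if (!ndlp)
		return;			/* node was already on its way out */

	/* ... safe to dereference ndlp here ... */

	if (lpfc_nlp_put(ndlp))
		ndlp = NULL;		/* last reference dropped; node freed */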
6570 struct lpfc_nodelist *ndlp;
6593 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6594 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
6595 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6599 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
6605 ndlp->nlp_rpi, ndlp->nlp_DID,
6606 ndlp->nlp_flag);
6676 struct lpfc_nodelist *ndlp;
6692 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6693 if (ndlp)
6694 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6707 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6708 if (ndlp)
6709 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);