Lines Matching defs:ndlp

79 lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
81 if (ndlp->nlp_fc4_type ||
82 ndlp->nlp_type & NLP_FABRIC)
96 struct lpfc_nodelist *ndlp;
110 ndlp = rdata->pnode;
112 pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
117 if (!ndlp->vport) {
118 pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
119 "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
130 struct lpfc_nodelist *ndlp;
137 ndlp = rdata->pnode;
138 vport = ndlp->vport;
141 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
143 if (ndlp->nlp_sid != NLP_NO_SID)
144 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
153 struct lpfc_nodelist *ndlp;
159 ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
160 if (!ndlp)
163 vport = ndlp->vport;
168 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
170 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
173 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
174 vport->load_flag, kref_read(&ndlp->kref),
175 ndlp->nlp_state, ndlp->fc4_xpt_flags);
179 spin_lock_irqsave(&ndlp->lock, iflags);
180 ndlp->rport = NULL;
186 if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
187 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
192 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
193 ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
194 spin_unlock_irqrestore(&ndlp->lock, iflags);
195 lpfc_nlp_put(ndlp);
196 spin_lock_irqsave(&ndlp->lock, iflags);
202 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
203 !(ndlp->nlp_flag & NLP_DROPPED)) {
204 ndlp->nlp_flag |= NLP_DROPPED;
205 spin_unlock_irqrestore(&ndlp->lock, iflags);
206 lpfc_nlp_put(ndlp);
210 spin_unlock_irqrestore(&ndlp->lock, iflags);
214 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
217 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
221 wwn_to_u64(ndlp->nlp_portname.u.wwn));
223 evtp = &ndlp->dev_loss_evt;
232 spin_lock_irqsave(&ndlp->lock, iflags);
233 ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
238 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
239 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
243 * rport. Remove the association between rport and ndlp.
245 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
247 ndlp->rport = NULL;
248 spin_unlock_irqrestore(&ndlp->lock, iflags);
254 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
264 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
267 "%d\n", __func__, ndlp->nlp_DID,
268 ndlp->rport, ndlp->nlp_flag,
269 vport->load_flag, kref_read(&ndlp->kref));
270 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
271 spin_lock_irqsave(&ndlp->lock, iflags);
273 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
274 spin_unlock_irqrestore(&ndlp->lock, iflags);
275 lpfc_disc_state_machine(vport, ndlp, NULL,
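
The callbk fragments above (lines 179-210) repeat one locking rule: node flags change only under ndlp->lock, and every lpfc_nlp_put() runs with the lock released, since the put may be the final reference and free the node. A minimal sketch of that shape, using a hypothetical cut-down node struct and illustrative flag values rather than the driver's real lpfc_nodelist:

#include <linux/kref.h>
#include <linux/spinlock.h>

#define SCSI_XPT_REGD	0x1	/* illustrative flag values, not the driver's */
#define NLP_XPT_REGD	0x2
#define NVME_XPT_REGD	0x4
#define NLP_DROPPED	0x8

struct node {			/* hypothetical stand-in for lpfc_nodelist */
	spinlock_t lock;
	unsigned int xpt_flags;
	unsigned int nlp_flag;
	struct kref kref;
};

static void node_release(struct kref *kref) { /* final free elided */ }

static void node_devloss_callbk(struct node *n)
{
	unsigned long iflags;

	spin_lock_irqsave(&n->lock, iflags);
	if (n->xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
		n->xpt_flags &= ~SCSI_XPT_REGD;
		if (!(n->xpt_flags & NVME_XPT_REGD))
			n->xpt_flags &= ~NLP_XPT_REGD;
		/* Drop the SCSI transport's reference outside the lock;
		 * it cannot be the last one while the DROPPED-guarded
		 * initial reference is still held.
		 */
		spin_unlock_irqrestore(&n->lock, iflags);
		kref_put(&n->kref, node_release);
		spin_lock_irqsave(&n->lock, iflags);
	}
	/* Drop the initial node reference exactly once. */
	if (!(n->xpt_flags & NVME_XPT_REGD) &&
	    !(n->nlp_flag & NLP_DROPPED)) {
		n->nlp_flag |= NLP_DROPPED;
		spin_unlock_irqrestore(&n->lock, iflags);
		kref_put(&n->kref, node_release);
		return;
	}
	spin_unlock_irqrestore(&n->lock, iflags);
}
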
389 * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
391 * @ndlp: Pointer to remote node object.
399 struct lpfc_nodelist *ndlp)
403 spin_lock_irqsave(&ndlp->lock, iflags);
404 if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
405 ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
406 spin_unlock_irqrestore(&ndlp->lock, iflags);
407 lpfc_nlp_get(ndlp);
410 "refcnt %d ndlp %p flag x%x "
412 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
413 ndlp->nlp_flag, vport->port_state);
414 spin_lock_irqsave(&ndlp->lock, iflags);
416 spin_unlock_irqrestore(&ndlp->lock, iflags);
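
Lines 403-416 are a locked test-and-clear: if devloss marked the node for post-recovery, clear the marker and take back the reference the devloss path gave up. A reduced sketch under the same assumptions (hypothetical struct and flag value):

#include <linux/kref.h>
#include <linux/spinlock.h>

#define IN_RECOV_POST_DEV_LOSS	0x1	/* illustrative flag value */

struct node {				/* hypothetical reduced node */
	spinlock_t lock;
	unsigned int save_flags;
	struct kref kref;
};

/* Caller already holds a valid reference (e.g. via a mailbox context),
 * so a plain kref_get() restores the one that devloss dropped.
 */
static void node_check_post_devloss(struct node *n)
{
	unsigned long iflags;

	spin_lock_irqsave(&n->lock, iflags);
	if (!(n->save_flags & IN_RECOV_POST_DEV_LOSS)) {
		spin_unlock_irqrestore(&n->lock, iflags);
		return;
	}
	n->save_flags &= ~IN_RECOV_POST_DEV_LOSS;
	spin_unlock_irqrestore(&n->lock, iflags);
	kref_get(&n->kref);
}
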
421 * @ndlp: Pointer to remote node object.
425 * remote node, including this @ndlp, is still using the FCF; otherwise, this
427 * when the devloss timeout happened to this @ndlp.
430 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
441 vport = ndlp->vport;
442 name = (uint8_t *)&ndlp->nlp_portname;
450 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
452 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
454 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
455 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
458 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
465 ndlp->nlp_DID);
467 spin_lock_irqsave(&ndlp->lock, iflags);
468 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
469 spin_unlock_irqrestore(&ndlp->lock, iflags);
474 if (ndlp->nlp_type & NLP_FABRIC) {
475 spin_lock_irqsave(&ndlp->lock, iflags);
481 switch (ndlp->nlp_DID) {
496 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
502 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
503 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
511 if (ndlp->nlp_DID & Fabric_DID_MASK) {
512 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
513 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
518 spin_unlock_irqrestore(&ndlp->lock, iflags);
528 "DID x%x refcnt %d ndlp %p "
530 ndlp->nlp_DID, kref_read(&ndlp->kref),
531 ndlp, ndlp->nlp_flag,
533 spin_lock_irqsave(&ndlp->lock, iflags);
534 ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
535 spin_unlock_irqrestore(&ndlp->lock, iflags);
536 } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
544 "DID x%x refcnt %d ndlp %p "
546 ndlp->nlp_DID, kref_read(&ndlp->kref),
547 ndlp, ndlp->nlp_flag,
552 spin_lock_irqsave(&ndlp->lock, iflags);
553 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
554 spin_unlock_irqrestore(&ndlp->lock, iflags);
555 lpfc_nlp_put(ndlp);
559 if (ndlp->nlp_sid != NLP_NO_SID) {
561 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
571 ndlp->nlp_DID, ndlp->nlp_flag,
572 ndlp->nlp_state, ndlp->nlp_rpi,
573 kref_read(&ndlp->kref));
581 ndlp->nlp_DID, ndlp->nlp_flag,
582 ndlp->nlp_state, ndlp->nlp_rpi);
584 spin_lock_irqsave(&ndlp->lock, iflags);
585 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
586 spin_unlock_irqrestore(&ndlp->lock, iflags);
589 * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
591 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
592 ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
596 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
597 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
635 * timeout handler and releasing the reference count for the ndlp with
820 struct lpfc_nodelist *ndlp;
835 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
837 lpfc_els_retry_delay_handler(ndlp);
838 free_evt = 0; /* evt is part of ndlp */
843 lpfc_nlp_put(ndlp);
846 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
847 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
852 nlp_did = ndlp->nlp_DID;
853 lpfc_nlp_put(ndlp);
860 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
862 lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
868 lpfc_nlp_put(ndlp);
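
Lines 820-868 show the worker side of the event contract: the queuing side stores a counted node pointer in evt_arg1 (line 254 above takes the reference), and the worker drops it once the handler is done. A sketch of that dispatch, with hypothetical event types and handlers:

#include <linux/kref.h>
#include <linux/list.h>

enum evt_type { EVT_DEV_LOSS, EVT_RECOVERY };	/* illustrative */

struct node {					/* hypothetical reduced node */
	struct kref kref;
};

struct work_evt {
	struct list_head evt_listp;
	enum evt_type evt;
	void *evt_arg1;			/* counted node pointer */
};

static void node_release(struct kref *k) { /* final free elided */ }
static void handle_dev_loss(struct node *n) { }
static void handle_recovery(struct node *n) { }

/* The queuing side did the get; the worker owns that reference for
 * the duration of the handler and drops it afterwards.
 */
static void work_list_done_one(struct work_evt *evtp)
{
	struct node *n = evtp->evt_arg1;

	switch (evtp->evt) {
	case EVT_DEV_LOSS:
		handle_dev_loss(n);
		break;
	case EVT_RECOVERY:
		handle_recovery(n);
		break;
	}
	kref_put(&n->kref, node_release);
}
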
1154 struct lpfc_nodelist *ndlp, *next_ndlp;
1156 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1159 ((ndlp->nlp_DID == NameServer_DID) ||
1160 (ndlp->nlp_DID == FDMI_DID) ||
1161 (ndlp->nlp_DID == Fabric_Cntl_DID))))
1162 lpfc_unreg_rpi(vport, ndlp);
1166 (!remove && ndlp->nlp_type & NLP_FABRIC))
1171 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
1172 lpfc_nvmet_invalidate_host(phba, ndlp);
1174 lpfc_disc_state_machine(vport, ndlp, NULL,
1338 struct lpfc_nodelist *ndlp;
1340 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1341 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
1343 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1345 if (ndlp->nlp_type & NLP_FABRIC) {
1346 /* On Linkup it's safe to clean up the ndlp
1349 if (ndlp->nlp_DID != Fabric_DID)
1350 lpfc_unreg_rpi(vport, ndlp);
1351 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1352 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
1356 lpfc_unreg_rpi(vport, ndlp);
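
The loops at lines 1156 and 1340 both walk vport->fc_nodes; the first uses list_for_each_entry_safe because the body may unlink nodes mid-walk, the second plain list_for_each_entry. A reduced version of the link-up walk, with illustrative FC4 type bits:

#include <linux/list.h>

#define FC4_FCP		0x1	/* illustrative FC4 type bits */
#define FC4_NVME	0x2
#define STE_UNUSED	0

struct node {			/* hypothetical reduced node */
	struct list_head nlp_listp;
	unsigned int fc4_type;
	int state;
};

/* Reset FC4 state on every node at link-up. Switch to
 * list_for_each_entry_safe if the body can unlink the current node.
 */
static void linkup_clean_nodes(struct list_head *fc_nodes)
{
	struct node *n;

	list_for_each_entry(n, fc_nodes, nlp_listp) {
		n->fc4_type &= ~(FC4_FCP | FC4_NVME);
		if (n->state == STE_UNUSED)
			continue;
		/* per-node re-registration decisions go here */
	}
}
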
3220 struct lpfc_nodelist *ndlp;
3237 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3238 if (!ndlp)
3244 lpfc_register_new_vport(phba, vport, ndlp);
3876 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3887 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3888 kref_read(&ndlp->kref),
3889 ndlp);
3890 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3891 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3893 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3894 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3903 spin_lock_irq(&ndlp->lock);
3904 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3905 spin_unlock_irq(&ndlp->lock);
3909 * if we go through discovery again for this ndlp
3912 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3913 lpfc_unreg_rpi(vport, ndlp);
3917 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3924 lpfc_nlp_put(ndlp);
4195 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4211 /* Decrement the reference count to ndlp after all
4212 * uses of the ndlp are done.
4214 lpfc_nlp_put(ndlp);
4219 /* Decrement the reference count to ndlp after all uses
4220 * of the ndlp are done.
4222 lpfc_nlp_put(ndlp);
4227 ndlp->nlp_rpi = mb->un.varWords[0];
4228 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4229 ndlp->nlp_type |= NLP_FABRIC;
4230 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4249 * all the current uses of the ndlp are done.
4251 lpfc_nlp_put(ndlp);
4340 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4356 lpfc_nlp_put(ndlp);
4364 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
4365 spin_lock_irq(&ndlp->lock);
4366 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
4367 spin_unlock_irq(&ndlp->lock);
4368 lpfc_nlp_put(ndlp);
4387 ndlp->nlp_rpi = mb->un.varWords[0];
4388 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4389 ndlp->nlp_type |= NLP_FABRIC;
4390 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4393 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4394 kref_read(&ndlp->kref),
4395 ndlp);
4449 lpfc_nlp_put(ndlp);
4464 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4474 lpfc_check_nlp_post_devloss(vport, ndlp);
4477 ndlp->nlp_rpi = mb->un.varWords[0];
4481 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
4482 ndlp->nlp_state);
4484 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4485 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
4486 ndlp->nlp_type |= NLP_FABRIC;
4487 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4493 * all the current uses of the ndlp are done.
4495 lpfc_nlp_put(ndlp);
4499 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4512 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
4513 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4514 rport_ids.port_id = ndlp->nlp_DID;
4520 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4526 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4534 rport->maxframe_size = ndlp->nlp_maxframe;
4535 rport->supported_classes = ndlp->nlp_class_sup;
4537 rdata->pnode = lpfc_nlp_get(ndlp);
4542 ndlp->rport = NULL;
4546 spin_lock_irqsave(&ndlp->lock, flags);
4547 ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
4548 spin_unlock_irqrestore(&ndlp->lock, flags);
4550 if (ndlp->nlp_type & NLP_FCP_TARGET)
4552 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
4554 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
4556 if (ndlp->nlp_type & NLP_NVME_TARGET)
4558 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
4564 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4567 kref_read(&ndlp->kref));
4571 ndlp->nlp_sid = rport->scsi_target_id;
4578 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4580 struct fc_rport *rport = ndlp->rport;
4581 struct lpfc_vport *vport = ndlp->vport;
4588 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4593 ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
4594 kref_read(&ndlp->kref));
4597 lpfc_nlp_put(ndlp);
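
Lines 4499-4597 pair fc_remote_port_add() with a node reference stowed in rport->dd_data, and fc_remote_port_delete() with the matching put. A sketch against the real FC transport API, with a hypothetical reduced node type and without the driver's exact sequencing:

#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/types.h>
#include <scsi/scsi_transport_fc.h>

struct node {				/* hypothetical reduced node */
	struct kref kref;
	u64 wwnn, wwpn;
	u32 did;
	struct fc_rport *rport;
};

/* Lives in rport->dd_data; sized via the template's dd_fcrport_size. */
struct rport_data { struct node *pnode; };

static void node_release(struct kref *k) { /* final free elided */ }

static int node_register_rport(struct Scsi_Host *shost, struct node *n)
{
	struct fc_rport_identifiers ids = {
		.node_name = n->wwnn,
		.port_name = n->wwpn,
		.port_id   = n->did,
		.roles     = FC_RPORT_ROLE_UNKNOWN,
	};
	struct rport_data *rdata;

	n->rport = fc_remote_port_add(shost, 0, &ids);
	if (!n->rport)
		return -ENOMEM;
	rdata = n->rport->dd_data;
	kref_get(&n->kref);		/* rport now holds a node reference */
	rdata->pnode = n;
	return 0;
}

static void node_unregister_rport(struct node *n)
{
	fc_remote_port_delete(n->rport);	/* starts dev_loss handling */
	kref_put(&n->kref, node_release);	/* drop the rport's reference */
}
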
4641 lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4645 lpfc_check_nlp_post_devloss(vport, ndlp);
4647 spin_lock_irqsave(&ndlp->lock, iflags);
4648 if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
4650 spin_unlock_irqrestore(&ndlp->lock, iflags);
4652 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
4653 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
4654 lpfc_nvme_rescan_port(vport, ndlp);
4659 ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
4660 spin_unlock_irqrestore(&ndlp->lock, iflags);
4662 if (lpfc_valid_xpt_node(ndlp)) {
4668 lpfc_register_remote_port(vport, ndlp);
4672 if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
4677 ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4683 if (ndlp->nlp_type & NLP_NVME_TARGET) {
4685 lpfc_nvme_register_port(vport, ndlp);
4691 lpfc_nlp_get(ndlp);
4698 lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4702 spin_lock_irqsave(&ndlp->lock, iflags);
4703 if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
4704 spin_unlock_irqrestore(&ndlp->lock, iflags);
4707 "0999 %s Not regd: ndlp x%px rport x%px DID "
4709 __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
4710 ndlp->nlp_flag, ndlp->fc4_xpt_flags);
4714 ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
4715 spin_unlock_irqrestore(&ndlp->lock, iflags);
4717 if (ndlp->rport &&
4718 ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
4720 lpfc_unregister_remote_port(ndlp);
4721 } else if (!ndlp->rport) {
4726 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
4727 ndlp->fc4_xpt_flags,
4728 kref_read(&ndlp->kref));
4731 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
4735 if (ndlp->nlp_type & NLP_NVME_TARGET)
4736 lpfc_nvme_unregister_port(vport, ndlp);
4739 lpfc_nlp_put(ndlp);
4749 lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4766 ndlp->nlp_type |= NLP_FC_NODE;
4769 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4770 lpfc_nlp_reg_node(vport, ndlp);
4780 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4783 lpfc_nlp_unreg_node(vport, ndlp);
4790 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4796 lpfc_handle_adisc_state(vport, ndlp, new_state);
4801 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4802 ndlp->nlp_type |= NLP_FC_NODE;
4805 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4807 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4815 if (!(ndlp->nlp_flag & NLP_NPR_ADISC) ||
4817 lpfc_nlp_unreg_node(vport, ndlp);
4822 lpfc_nlp_reg_node(vport, ndlp);
4831 (ndlp->nlp_type & NLP_FCP_TARGET) &&
4832 (!ndlp->rport ||
4833 ndlp->rport->scsi_target_id == -1 ||
4834 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4835 spin_lock_irq(&ndlp->lock);
4836 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4837 spin_unlock_irq(&ndlp->lock);
4838 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4865 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4869 int old_state = ndlp->nlp_state;
4870 int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
4875 ndlp->nlp_DID,
4881 ndlp->nlp_DID, old_state, state);
4885 ndlp->nlp_flag &= ~NLP_DROPPED;
4886 lpfc_nlp_get(ndlp);
4891 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4893 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4894 ndlp->nlp_type &= ~NLP_FC_NODE;
4897 if (list_empty(&ndlp->nlp_listp)) {
4899 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4904 ndlp->nlp_state = state;
4906 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
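
lpfc_nlp_set_state (lines 4865-4906) moves a node between discovery states while fixing up per-state counters and list membership. A reduced sketch, assuming illustrative state names and a lock-protected counter array:

#include <linux/list.h>
#include <linux/spinlock.h>

enum nstate { STE_UNUSED, STE_NPR, STE_PLOGI, STE_UNMAPPED, STE_MAPPED,
	      STE_MAX };			/* illustrative states */

struct node {					/* hypothetical reduced node */
	struct list_head nlp_listp;
	enum nstate state;
};

struct vport {
	spinlock_t lock;
	struct list_head fc_nodes;
	int cnt[STE_MAX];			/* per-state node counts */
};

static void set_state(struct vport *vp, struct node *n, enum nstate new)
{
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&n->nlp_listp))
		vp->cnt[n->state]--;		/* leaving the old state */
	else
		list_add_tail(&n->nlp_listp, &vp->fc_nodes);
	vp->cnt[new]++;
	n->state = new;
	spin_unlock_irqrestore(&vp->lock, flags);
}
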
4910 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4914 if (list_empty(&ndlp->nlp_listp)) {
4916 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4922 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4926 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4927 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4928 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4930 list_del_init(&ndlp->nlp_listp);
4932 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4939 * @ndlp: Pointer to FC node object.
4944 * to phba from @ndlp can be obtained indirectly through its reference to
4945 * @vport, a direct reference to phba is taken here by @ndlp. This is
4946 * because the life-span of the @ndlp might go beyond the existence of @vport,
4947 * as the final release of ndlp is determined by its reference count. Also,
4948 * the operation on @ndlp needs the reference to phba.
4951 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4954 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4955 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4956 timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
4957 INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
4959 ndlp->nlp_DID = did;
4960 ndlp->vport = vport;
4961 ndlp->phba = vport->phba;
4962 ndlp->nlp_sid = NLP_NO_SID;
4963 ndlp->nlp_fc4_type = NLP_FC4_NONE;
4964 kref_init(&ndlp->kref);
4965 atomic_set(&ndlp->cmd_pending, 0);
4966 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4967 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
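
lpfc_initialize_node (lines 4951-4967) is the checklist for a fresh node: list heads, the delayed-retry timer, its lock, and a kref starting at one. A compilable reduction with hypothetical field names:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/types.h>

struct node {				/* hypothetical reduced node */
	struct list_head nlp_listp;
	struct list_head retry_evt_listp;
	struct timer_list delayfunc;
	spinlock_t lock;
	struct kref kref;
	u32 did;
};

static void retry_delay(struct timer_list *t)
{
	struct node *n = from_timer(n, t, delayfunc);

	/* requeue the delayed ELS retry for n ... */
	(void)n;
}

static void node_initialize(struct node *n, u32 did)
{
	INIT_LIST_HEAD(&n->nlp_listp);
	INIT_LIST_HEAD(&n->retry_evt_listp);
	timer_setup(&n->delayfunc, retry_delay, 0);
	spin_lock_init(&n->lock);
	n->did = did;
	kref_init(&n->kref);		/* caller starts with one reference */
}
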
4971 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4976 * release the ndlp from the vport when conditions are correct.
4978 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4980 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4982 lpfc_cleanup_vports_rrqs(vport, ndlp);
4983 lpfc_unreg_rpi(vport, ndlp);
4990 spin_lock_irq(&ndlp->lock);
4991 if (!(ndlp->nlp_flag & NLP_DROPPED)) {
4992 ndlp->nlp_flag |= NLP_DROPPED;
4993 spin_unlock_irq(&ndlp->lock);
4994 lpfc_nlp_put(ndlp);
4997 spin_unlock_irq(&ndlp->lock);
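
lpfc_drop_node (lines 4971-4997) guards the final put with a DROPPED flag, so the initial reference is released at most once no matter how many paths call drop. The test-and-set pattern in miniature (hypothetical types and flag value):

#include <linux/kref.h>
#include <linux/spinlock.h>

#define DROPPED	0x1			/* illustrative flag bit */

struct node {				/* hypothetical reduced node */
	spinlock_t lock;
	unsigned int flag;
	struct kref kref;
};

static void node_release(struct kref *k) { /* final free elided */ }

/* Test-and-set DROPPED under the lock; only the winner performs the
 * put, so repeated drop calls cannot double-free the node.
 */
static void node_drop(struct node *n)
{
	spin_lock_irq(&n->lock);
	if (n->flag & DROPPED) {
		spin_unlock_irq(&n->lock);
		return;
	}
	n->flag |= DROPPED;
	spin_unlock_irq(&n->lock);
	kref_put(&n->kref, node_release);
}
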
5085 struct lpfc_nodelist *ndlp)
5087 struct lpfc_vport *vport = ndlp->vport;
5102 if (iocb->ndlp == ndlp)
5106 if (remote_id == ndlp->nlp_DID)
5110 if (iocb->ndlp == ndlp)
5115 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
5116 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
5119 if (ulp_context == ndlp->nlp_rpi)
5127 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
5134 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
5142 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
5149 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
5156 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
5167 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
5178 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5182 lpfc_fabric_abort_nport(ndlp);
5188 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5190 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
5192 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
5214 struct lpfc_nodelist *ndlp;
5216 ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
5217 if (!ndlp)
5219 lpfc_issue_els_logo(vport, ndlp, 0);
5222 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
5223 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
5227 ndlp->nlp_rpi, ndlp->nlp_DID,
5228 ndlp->nlp_defer_did, ndlp);
5230 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5231 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
5232 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5235 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
5236 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
5237 spin_lock_irq(&ndlp->lock);
5238 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5239 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5240 spin_unlock_irq(&ndlp->lock);
5242 spin_lock_irq(&ndlp->lock);
5243 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5244 spin_unlock_irq(&ndlp->lock);
5251 lpfc_nlp_put(ndlp);
5262 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
5269 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
5273 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
5280 (kref_read(&ndlp->kref) > 0)) {
5285 spin_lock_irqsave(&ndlp->lock, iflags);
5286 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5287 spin_unlock_irqrestore(&ndlp->lock, iflags);
5304 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5311 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
5312 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
5313 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
5319 ndlp->nlp_rpi, ndlp->nlp_flag,
5320 ndlp->nlp_DID);
5322 /* If there is already an UNREG in progress for this ndlp,
5325 if (ndlp->nlp_flag & NLP_UNREG_INP) {
5331 ndlp->nlp_rpi, ndlp->nlp_DID,
5332 ndlp->nlp_defer_did,
5333 ndlp->nlp_flag, ndlp);
5340 rpi = ndlp->nlp_rpi;
5342 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
5346 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
5357 if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
5360 ndlp->nlp_flag |= NLP_UNREG_INP;
5367 ndlp->nlp_rpi, ndlp->nlp_DID,
5368 ndlp->nlp_flag, ndlp);
5372 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5375 lpfc_nlp_put(ndlp);
5383 "ndlp x%px\n",
5384 ndlp->nlp_rpi, ndlp->nlp_DID,
5385 ndlp->nlp_flag, ndlp);
5392 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5393 lpfc_issue_els_logo(vport, ndlp, 0);
5394 ndlp->nlp_prev_state = ndlp->nlp_state;
5395 lpfc_nlp_set_state(vport, ndlp,
5401 lpfc_no_rpi(phba, ndlp);
5404 ndlp->nlp_rpi = 0;
5405 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
5406 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5408 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5411 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5426 struct lpfc_nodelist *ndlp;
5439 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5440 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5443 lpfc_unreg_rpi(vports[i], ndlp);
5518 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5527 ndlp->nlp_DID, ndlp->nlp_flag,
5528 ndlp->nlp_state, ndlp->nlp_rpi);
5529 lpfc_dequeue_node(vport, ndlp);
5533 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
5537 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5548 (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
5558 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5569 lpfc_els_abort(phba, ndlp);
5571 spin_lock_irq(&ndlp->lock);
5572 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
5573 spin_unlock_irq(&ndlp->lock);
5575 ndlp->nlp_last_elscmd = 0;
5576 del_timer_sync(&ndlp->nlp_delayfunc);
5578 list_del_init(&ndlp->els_retry_evt.evt_listp);
5579 list_del_init(&ndlp->dev_loss_evt.evt_listp);
5580 list_del_init(&ndlp->recovery_evt.evt_listp);
5581 lpfc_cleanup_vports_rrqs(vport, ndlp);
5584 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5590 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5599 if (ndlp->nlp_DID == did)
5609 ndlpdid.un.word = ndlp->nlp_DID;
5618 * up matching ndlp->nlp_DID 000001 to
5631 matchdid.un.word = ndlp->nlp_DID;
5648 struct lpfc_nodelist *ndlp;
5651 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5652 if (lpfc_matchdid(vport, ndlp, did)) {
5653 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5654 ((uint32_t)ndlp->nlp_xri << 16) |
5655 ((uint32_t)ndlp->nlp_type << 8)
5660 ndlp, ndlp->nlp_DID,
5661 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
5662 ndlp->active_rrqs_xri_bitmap);
5663 return ndlp;
5677 struct lpfc_nodelist *ndlp;
5681 ndlp = __lpfc_findnode_did(vport, did);
5683 return ndlp;
5690 struct lpfc_nodelist *ndlp;
5696 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5697 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
5698 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
5699 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5700 ((uint32_t)ndlp->nlp_xri << 16) |
5701 ((uint32_t)ndlp->nlp_type << 8) |
5702 ((uint32_t)ndlp->nlp_rpi & 0xff));
5707 ndlp, ndlp->nlp_DID,
5708 ndlp->nlp_flag, data1,
5709 ndlp->active_rrqs_xri_bitmap);
5710 return ndlp;
5724 struct lpfc_nodelist *ndlp;
5726 ndlp = lpfc_findnode_did(vport, did);
5727 if (!ndlp) {
5733 ndlp = lpfc_nlp_init(vport, did);
5734 if (!ndlp)
5736 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5741 ndlp->nlp_DID, ndlp->nlp_flag,
5742 ndlp->nlp_state, vport->fc_flag);
5744 spin_lock_irq(&ndlp->lock);
5745 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5746 spin_unlock_irq(&ndlp->lock);
5747 return ndlp;
5761 lpfc_cancel_retry_delay_tmo(vport, ndlp);
5766 ndlp->nlp_DID, ndlp->nlp_flag,
5767 ndlp->nlp_state, vport->fc_flag);
5775 return ndlp;
5780 if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
5781 !(ndlp->nlp_type &
5785 if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
5786 ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
5787 lpfc_disc_state_machine(vport, ndlp, NULL,
5791 spin_lock_irq(&ndlp->lock);
5792 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5793 spin_unlock_irq(&ndlp->lock);
5798 ndlp->nlp_DID, ndlp->nlp_flag,
5799 ndlp->nlp_state, vport->fc_flag);
5800 ndlp = NULL;
5806 ndlp->nlp_DID, ndlp->nlp_flag,
5807 ndlp->nlp_state, vport->fc_flag);
5813 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5814 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5816 ndlp->nlp_flag & NLP_RCV_PLOGI))
5820 return ndlp;
5825 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5827 spin_lock_irq(&ndlp->lock);
5828 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5829 spin_unlock_irq(&ndlp->lock);
5831 return ndlp;
6032 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
6048 if (iocb->ndlp != ndlp)
6062 if (iocb->ndlp != ndlp)
6085 struct lpfc_nodelist *ndlp, *next_ndlp;
6089 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6091 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
6092 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
6093 lpfc_free_tx(phba, ndlp);
6104 * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
6112 struct lpfc_nodelist *ndlp, *next_ndlp;
6114 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6116 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6172 struct lpfc_nodelist *ndlp, *next_ndlp;
6198 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6200 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
6202 if (ndlp->nlp_type & NLP_FABRIC) {
6203 /* Clean up the ndlp on Fabric connections */
6204 lpfc_drop_node(vport, ndlp);
6206 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
6210 lpfc_unreg_rpi(vport, ndlp);
6249 /* Next look for NameServer ndlp */
6250 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6251 if (ndlp)
6252 lpfc_els_abort(phba, ndlp);
6412 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
6418 ndlp->nlp_rpi = mb->un.varWords[0];
6419 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
6420 ndlp->nlp_type |= NLP_FABRIC;
6421 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
6424 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
6425 kref_read(&ndlp->kref),
6426 ndlp);
6435 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
6437 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
6444 lpfc_nlp_put(ndlp);
6450 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
6454 return ndlp->nlp_rpi == *rpi;
6458 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
6460 return memcmp(&ndlp->nlp_portname, param,
6461 sizeof(ndlp->nlp_portname)) == 0;
6467 struct lpfc_nodelist *ndlp;
6469 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6470 if (filter(ndlp, param)) {
6473 "ndlp x%px did x%x flg x%x st x%x "
6475 filter, ndlp, ndlp->nlp_DID,
6476 ndlp->nlp_flag, ndlp->nlp_state,
6477 ndlp->nlp_xri, ndlp->nlp_type,
6478 ndlp->nlp_rpi);
6479 return ndlp;
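
Lines 6450-6479 reduce every lookup to a filter callback plus one generic walker over fc_nodes. A self-contained sketch of the same shape (hypothetical node fields):

#include <linux/list.h>
#include <linux/string.h>
#include <linux/types.h>

struct node {				/* hypothetical reduced node */
	struct list_head nlp_listp;
	u16 rpi;
	u8  portname[8];
};

typedef int (*node_filter)(struct node *, void *);

static int filter_by_rpi(struct node *n, void *param)
{
	return n->rpi == *(u16 *)param;
}

static int filter_by_wwpn(struct node *n, void *param)
{
	return memcmp(n->portname, param, sizeof(n->portname)) == 0;
}

/* One walker serves every lookup; callers pick the predicate. */
static struct node *find_node(struct list_head *fc_nodes,
			      node_filter filter, void *param)
{
	struct node *n;

	list_for_each_entry(n, fc_nodes, nlp_listp)
		if (filter(n, param))
			return n;
	return NULL;
}
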
6488 * This routine looks up the ndlp lists for the given RPI. If the rpi is found it
6498 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is found it
6505 struct lpfc_nodelist *ndlp;
6508 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
6510 return ndlp;
6514 * This routine looks up the ndlp lists for the given RPI. If the rpi
6522 struct lpfc_nodelist *ndlp;
6526 ndlp = __lpfc_findnode_rpi(vport, rpi);
6528 return ndlp;
6584 struct lpfc_nodelist *ndlp;
6593 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6594 if (!ndlp) {
6600 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
6602 spin_lock_init(&ndlp->lock);
6604 lpfc_initialize_node(vport, ndlp, did);
6605 INIT_LIST_HEAD(&ndlp->nlp_listp);
6607 ndlp->nlp_rpi = rpi;
6609 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
6611 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
6612 ndlp->nlp_flag, kref_read(&ndlp->kref));
6614 ndlp->active_rrqs_xri_bitmap =
6617 if (ndlp->active_rrqs_xri_bitmap)
6618 memset(ndlp->active_rrqs_xri_bitmap, 0,
6619 ndlp->phba->cfg_rrq_xri_bitmap_sz);
6626 ndlp->nlp_DID, 0, 0);
6628 return ndlp;
6631 /* This routine releases all resources associated with a specific NPort's ndlp
6637 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6639 struct lpfc_vport *vport = ndlp->vport;
6641 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6643 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6646 "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
6647 __func__, ndlp, ndlp->nlp_DID,
6648 kref_read(&ndlp->kref), ndlp->nlp_rpi);
6650 /* remove ndlp from action. */
6651 lpfc_cancel_retry_delay_tmo(vport, ndlp);
6652 lpfc_cleanup_node(vport, ndlp);
6661 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
6662 if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
6663 !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
6664 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
6665 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
6672 ndlp->vport = NULL;
6673 ndlp->nlp_state = NLP_STE_FREED_NODE;
6674 ndlp->nlp_flag = 0;
6675 ndlp->fc4_xpt_flags = 0;
6677 /* free ndlp memory for final ndlp release */
6678 if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
6679 mempool_free(ndlp->active_rrqs_xri_bitmap,
6680 ndlp->phba->active_rrq_pool);
6681 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6684 /* This routine bumps the reference count for an ndlp structure to ensure
6685 * that one discovery thread won't free an ndlp while another discovery thread
6689 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6693 if (ndlp) {
6694 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6696 ndlp->nlp_DID, ndlp->nlp_flag,
6697 kref_read(&ndlp->kref));
6699 /* The ndlp usage check prevents incrementing an
6700 * ndlp reference count that is in the process of being
6703 spin_lock_irqsave(&ndlp->lock, flags);
6704 if (!kref_get_unless_zero(&ndlp->kref)) {
6705 spin_unlock_irqrestore(&ndlp->lock, flags);
6706 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6707 "0276 %s: ndlp:x%px refcnt:%d\n",
6708 __func__, (void *)ndlp, kref_read(&ndlp->kref));
6711 spin_unlock_irqrestore(&ndlp->lock, flags);
6713 WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
6716 return ndlp;
6719 /* This routine decrements the reference count for an ndlp structure. If the
6723 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6725 if (ndlp) {
6726 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6728 ndlp->nlp_DID, ndlp->nlp_flag,
6729 kref_read(&ndlp->kref));
6731 WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
6734 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
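
Lines 6631-6734 are the node lifetime in three functions: a release callback invoked from kref_put(), a get that refuses nodes already on their way to zero via kref_get_unless_zero(), and a put. A minimal sketch of the trio, without the driver's lock around the get:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct node {				/* hypothetical reduced node */
	struct kref kref;
};

/* Called by kref_put() when the last reference goes away. */
static void node_release(struct kref *kref)
{
	struct node *n = container_of(kref, struct node, kref);

	/* detach from lists, free sub-allocations ... */
	kfree(n);
}

/* Returns n with an extra reference, or NULL if the node is already
 * being released; kref_get_unless_zero() is what makes the race with
 * node_release() safe.
 */
static struct node *node_get(struct node *n)
{
	if (!n || !kref_get_unless_zero(&n->kref))
		return NULL;
	return n;
}

static int node_put(struct node *n)
{
	return n ? kref_put(&n->kref, node_release) : 0;
}
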
6752 struct lpfc_nodelist *ndlp;
6775 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6776 if (ndlp->rport &&
6777 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6781 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
6787 ndlp->nlp_rpi, ndlp->nlp_DID,
6788 ndlp->nlp_flag);
6858 struct lpfc_nodelist *ndlp;
6874 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6875 if (ndlp)
6876 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6889 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6890 if (ndlp)
6891 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);