Lines Matching refs:ndlp

986 	struct lpfc_nodelist *ndlp = NULL;
990 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
992 if (!ndlp)
995 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
1091 * @ndlp: Pointer to the lpfc_nodelist structure.
1092 If ndlp is NULL, remove all active RRQs for this vport from the
1094 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
1097 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1108 if (!ndlp) {
1117 if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
1132 * @ndlp: Target's nodelist pointer for this exchange.
1140 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1143 if (!ndlp)
1145 if (!ndlp->active_rrqs_xri_bitmap)
1147 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1156 * @ndlp: nodelist pointer for this target.
1166 * < 0 No memory or invalid ndlp.
1169 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1176 if (!ndlp)
1188 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
1191 if (!ndlp->active_rrqs_xri_bitmap)
1194 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1203 xritag, rxid, ndlp->nlp_DID, send_rrq);
1213 rrq->nlp_DID = ndlp->nlp_DID;
1214 rrq->vport = ndlp->vport;
1229 xritag, rxid, ndlp->nlp_DID, send_rrq);
1251 struct lpfc_nodelist *ndlp;
1259 ndlp = lpfc_cmd->rdata->pnode;
1262 ndlp = piocbq->ndlp;
1265 ndlp = NULL;
1267 ndlp = piocbq->ndlp;
1269 ndlp = piocbq->ndlp;
1278 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1280 ndlp->active_rrqs_xri_bitmap)) {
1295 sglq->ndlp = ndlp;
1390 sglq->ndlp = NULL;
1404 /* Check if we can get a reference on ndlp */
1405 if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
1406 sglq->ndlp = NULL;
1416 sglq->ndlp = NULL;
2841 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2845 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2846 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2847 spin_lock_irqsave(&ndlp->lock, iflags);
2848 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2849 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2850 spin_unlock_irqrestore(&ndlp->lock, iflags);
2852 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2856 lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2858 __lpfc_sli_rpi_release(vport, ndlp);
2876 struct lpfc_nodelist *ndlp;
2917 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2918 lpfc_nlp_put(ndlp);
2922 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2925 if (ndlp) {
2931 ndlp->nlp_rpi, ndlp->nlp_DID,
2932 ndlp->nlp_flag, ndlp->nlp_defer_did,
2933 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2935 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2936 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2937 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2938 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2939 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2941 __lpfc_sli_rpi_release(vport, ndlp);
2948 lpfc_nlp_put(ndlp);
2955 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2956 lpfc_nlp_put(ndlp);
2978 * command. An additional reference is put on the ndlp to prevent
2988 struct lpfc_nodelist *ndlp;
2990 ndlp = pmb->ctx_ndlp;
2996 if (ndlp) {
3002 vport->vpi, ndlp->nlp_rpi,
3003 ndlp->nlp_DID, ndlp->nlp_defer_did,
3004 ndlp->nlp_flag,
3005 ndlp);
3006 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3011 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
3012 (ndlp->nlp_defer_did !=
3019 ndlp->nlp_rpi, ndlp->nlp_DID,
3020 ndlp->nlp_defer_did, ndlp);
3021 ndlp->nlp_flag &= ~NLP_UNREG_INP;
3022 ndlp->nlp_defer_did =
3025 vport, ndlp->nlp_DID, 0);
3027 __lpfc_sli_rpi_release(vport, ndlp);
3029 lpfc_nlp_put(ndlp);
3212 struct lpfc_nodelist *ndlp;
3263 ndlp = lpfc_findnode_did(phba->pport, sid);
3264 if (!ndlp ||
3265 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3266 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3268 "6216 NVME Unsol rcv: No ndlp: "
3275 axchg->ndlp = ndlp;
3289 spin_lock_irq(&ndlp->lock);
3290 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3291 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3292 spin_unlock_irq(&ndlp->lock);
3298 if (!lpfc_nlp_get(ndlp))
3302 "6206 NVMET unsol ls_req ndlp x%px "
3304 ndlp, ndlp->nlp_DID,
3305 ndlp->fc4_xpt_flags,
3306 kref_read(&ndlp->kref));
3308 spin_unlock_irq(&ndlp->lock);
11367 struct lpfc_nodelist *ndlp)
11370 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
11379 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
11407 struct lpfc_nodelist *ndlp = NULL;
11423 ndlp = lpfc_findnode_rpi(vport, rpi);
11424 if (!ndlp)
11428 lpfc_sli_abts_recover_port(vport, ndlp);
11441 * @ndlp: nodelist pointer for the impacted rport.
11451 struct lpfc_nodelist *ndlp,
11456 if (!ndlp) {
11466 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11479 lpfc_sli_post_recovery_event(phba, ndlp);
12332 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12363 cmdiocb->ndlp);
12365 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12373 lpfc_nlp_put(ndlp);
12399 struct lpfc_nodelist *ndlp = NULL;
12452 ndlp = cmdiocb->ndlp;
12453 ulp_context = ndlp->nlp_rpi;
12836 struct lpfc_nodelist *ndlp = NULL;
12911 ndlp = iocbq->ndlp;
12912 ulp_context = ndlp->nlp_rpi;
12918 ndlp = lpfc_cmd->rdata->pnode;
12921 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
13632 struct lpfc_nodelist *ndlp;
13831 ndlp = (struct lpfc_nodelist *)
13846 pmb->ctx_ndlp = ndlp;
14288 struct lpfc_nodelist *ndlp;
14340 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14347 spin_lock_irqsave(&ndlp->lock, iflags);
14348 ndlp->nlp_flag |= NLP_UNREG_INP;
14349 spin_unlock_irqrestore(&ndlp->lock, iflags);
14360 pmb->ctx_ndlp = ndlp;
18862 lpfc_nlp_put(cmd_iocbq->ndlp);
18910 struct lpfc_nodelist *ndlp;
18923 ndlp = lpfc_findnode_did(vport, sid);
18924 if (!ndlp) {
18925 ndlp = lpfc_nlp_init(vport, sid);
18926 if (!ndlp) {
18928 "1268 Failed to allocate ndlp for "
18932 /* Put ndlp onto pport node list */
18933 lpfc_enqueue_node(vport, ndlp);
18946 ctiocb->ndlp = lpfc_nlp_get(ndlp);
18947 if (!ctiocb->ndlp) {
18967 lpfc_set_rrq_active(phba, ndlp, lxri,
19018 ndlp->nlp_DID);
19020 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
19035 lpfc_nlp_put(ndlp);
19036 ctiocb->ndlp = NULL;
19810 * @ndlp: pointer to lpfc nodelist data structure.
19818 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19822 struct lpfc_hba *phba = ndlp->phba;
19836 if (!lpfc_nlp_get(ndlp)) {
19845 lpfc_resume_rpi(mboxq, ndlp);
19851 mboxq->ctx_ndlp = ndlp;
19852 mboxq->vport = ndlp->vport;
19859 lpfc_nlp_put(ndlp);
21000 struct lpfc_nodelist *ndlp;
21053 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
21058 spin_lock(&ndlp->lock);
21059 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21060 spin_unlock(&ndlp->lock);
21073 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
21075 if (ndlp) {
21076 spin_lock(&ndlp->lock);
21077 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21078 spin_unlock(&ndlp->lock);
21079 lpfc_nlp_put(ndlp);
21085 /* Release the ndlp with the cleaned-up active mailbox command */
21928 * @ndlp: pointer to lpfc nodelist data structure.
21940 struct lpfc_nodelist *ndlp)
21950 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
22000 * @ndlp: pointer to lpfc nodelist data structure.
22013 * Note: ndlp is only used on SCSI side for RRQ testing.
22014 * The caller should pass NULL for ndlp on NVME side.
22022 struct lpfc_nodelist *ndlp,
22059 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
22075 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
22083 if (lpfc_test_rrq_active(phba, ndlp,
22102 * @ndlp: pointer to lpfc nodelist data structure.
22110 * Note: ndlp is only used on SCSI side for RRQ testing.
22111 * The caller should pass NULL for ndlp on NVME side.
22118 struct lpfc_nodelist *ndlp,
22136 phba, ndlp, hwqid, expedite);
22141 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22153 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22578 struct lpfc_nodelist *ndlp = NULL;
22596 ndlp = job->ndlp;
22627 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22632 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22641 ndlp = job->ndlp;
22676 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);