Lines matching refs: ctxp
219 struct lpfc_async_xchg_ctx *ctxp;
224 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
225 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
233 return ctxp;
241 struct lpfc_async_xchg_ctx *ctxp;
246 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
247 if (ctxp->oxid != oxid || ctxp->sid != sid)
255 return ctxp;
263 struct lpfc_async_xchg_ctx *ctxp)
265 lockdep_assert_held(&ctxp->ctxlock);
269 ctxp->oxid, ctxp->flag);
271 if (ctxp->flag & LPFC_NVME_CTX_RLS)
274 ctxp->flag |= LPFC_NVME_CTX_RLS;
276 list_del(&ctxp->list);
279 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
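The matches at 263-279 outline lpfc_nvmet_defer_release(): with ctxp->ctxlock held, the context is flagged for deferred release and moved from the driver's active-context list to the aborted-context list, so the buffer is only reposted once the aborted exchange is fully resolved. A minimal sketch of that pattern, reconstructed from the matched lines; the two list locks and the log text do not reference ctxp, so they do not appear above and are assumptions here:

/* Sketch only: lock names and log text outside the matches are assumed. */
static void lpfc_nvmet_defer_release(struct lpfc_hba *phba,
				     struct lpfc_async_xchg_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"NVMET defer ctx release: oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	/* Deferral is idempotent: only the first caller moves the context. */
	if (ctxp->flag & LPFC_NVME_CTX_RLS)
		return;
	ctxp->flag |= LPFC_NVME_CTX_RLS;

	spin_lock(&phba->sli4_hba.t_active_list_lock);		/* assumed lock */
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);

	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);	/* assumed lock */
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}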
374 * @ctxp: context to clean up
388 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
397 if (ctxp->state == LPFC_NVME_STE_FREE) {
400 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
403 if (ctxp->rqb_buffer) {
404 spin_lock_irqsave(&ctxp->ctxlock, iflag);
405 nvmebuf = ctxp->rqb_buffer;
408 ctxp->rqb_buffer = NULL;
409 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
410 ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
411 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
415 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
420 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
423 ctxp->state = LPFC_NVME_STE_FREE;
440 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
441 ctxp->wqeq = NULL;
442 ctxp->offset = 0;
443 ctxp->phba = phba;
444 ctxp->size = size;
445 ctxp->oxid = oxid;
446 ctxp->sid = sid;
447 ctxp->state = LPFC_NVME_STE_RCV;
448 ctxp->entry_cnt = 1;
449 ctxp->flag = 0;
450 ctxp->ctxbuf = ctx_buf;
451 ctxp->rqb_buffer = (void *)nvmebuf;
452 spin_lock_init(&ctxp->ctxlock);
456 if (ctxp->ts_isr_cmd) {
457 ctxp->ts_cmd_nvme = 0;
458 ctxp->ts_nvme_data = 0;
459 ctxp->ts_data_wqput = 0;
460 ctxp->ts_isr_data = 0;
461 ctxp->ts_data_nvme = 0;
462 ctxp->ts_nvme_status = 0;
463 ctxp->ts_status_wqput = 0;
464 ctxp->ts_isr_status = 0;
465 ctxp->ts_status_nvme = 0;
471 spin_lock_irqsave(&ctxp->ctxlock, iflag);
472 ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
473 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
481 ctxp->oxid,
486 spin_lock_irqsave(&ctxp->ctxlock, iflag);
487 lpfc_nvmet_defer_release(phba, ctxp);
488 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
489 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
497 * (ctxp->idx), to save context structure.
500 list_del_init(&ctxp->list);
503 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
514 struct lpfc_async_xchg_ctx *ctxp)
520 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
521 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
522 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
523 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
524 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
527 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
529 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
531 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
533 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
535 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
537 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
539 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
541 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
543 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
545 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
572 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
575 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
581 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
587 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
593 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
601 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
602 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
608 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
614 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
620 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
626 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
628 seg10 = (ctxp->ts_isr_status -
629 ctxp->ts_isr_cmd);
631 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
637 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
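The block at 514-637 is the lpfc_nvmet_ktime() latency accounting: the sample is rejected unless every stage timestamp is present and the stages run forward in time, and each segment is then measured as a delta from the command-ISR timestamp. A compact restatement of the ordering check implied by lines 520-545; the histogram/bucket bookkeeping that follows is not visible in the matches and is omitted:

/* Sketch: returns true when the ten stage timestamps form a valid sample. */
static bool lpfc_nvmet_ts_valid(const struct lpfc_async_xchg_ctx *ctxp)
{
	/* Every stage must have been stamped ... */
	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return false;

	/* ... and the pipeline must be monotonically non-decreasing:
	 * ISR(cmd) -> cmd to NVME -> data from NVME -> data WQ put ->
	 * ISR(data) -> data cmpl to NVME -> status from NVME ->
	 * status WQ put -> ISR(status) -> status cmpl to NVME.
	 */
	return ctxp->ts_isr_cmd      <= ctxp->ts_cmd_nvme &&
	       ctxp->ts_cmd_nvme     <= ctxp->ts_nvme_data &&
	       ctxp->ts_nvme_data    <= ctxp->ts_data_wqput &&
	       ctxp->ts_data_wqput   <= ctxp->ts_isr_data &&
	       ctxp->ts_isr_data     <= ctxp->ts_data_nvme &&
	       ctxp->ts_data_nvme    <= ctxp->ts_nvme_status &&
	       ctxp->ts_nvme_status  <= ctxp->ts_status_wqput &&
	       ctxp->ts_status_wqput <= ctxp->ts_isr_status &&
	       ctxp->ts_isr_status   <= ctxp->ts_status_nvme;
}

/* Each segment at 572-637 is a cumulative delta from the command ISR, e.g.
 *   seg1 = ctxp->ts_cmd_nvme    - ctxp->ts_isr_cmd;
 *   seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
 * seg10 is taken from ts_isr_status or ts_isr_data relative to ts_isr_cmd,
 * depending on which completion path ran (lines 626-637). Any normalization
 * between segments is not visible in the matched lines.
 */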
723 struct lpfc_async_xchg_ctx *ctxp;
729 ctxp = cmdwqe->context2;
730 ctxp->flag &= ~LPFC_NVME_IO_INP;
732 rsp = &ctxp->hdlrctx.fcp_req;
744 ctxp->oxid, op, status);
759 ctxp->flag |= LPFC_NVME_XBUSY;
765 ctxp->flag &= ~LPFC_NVME_XBUSY;
771 ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
772 status, result, ctxp->flag);
787 ctxp->state = LPFC_NVME_STE_DONE;
788 ctxp->entry_cnt++;
791 if (ctxp->ts_cmd_nvme) {
793 ctxp->ts_isr_data =
795 ctxp->ts_data_nvme =
797 ctxp->ts_nvme_status =
798 ctxp->ts_data_nvme;
799 ctxp->ts_status_wqput =
800 ctxp->ts_data_nvme;
801 ctxp->ts_isr_status =
802 ctxp->ts_data_nvme;
803 ctxp->ts_status_nvme =
804 ctxp->ts_data_nvme;
806 ctxp->ts_isr_status =
808 ctxp->ts_status_nvme =
815 if (ctxp->ts_cmd_nvme)
816 lpfc_nvmet_ktime(phba, ctxp);
820 ctxp->entry_cnt++;
825 if (ctxp->ts_cmd_nvme) {
826 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
827 ctxp->ts_data_nvme = ktime_get_ns();
836 if (ctxp->cpu != id)
840 id, ctxp->cpu);
932 * before freeing ctxp and iocbq.
1014 struct lpfc_async_xchg_ctx *ctxp =
1016 struct lpfc_hba *phba = ctxp->phba;
1032 if (ctxp->ts_cmd_nvme) {
1034 ctxp->ts_nvme_status = ktime_get_ns();
1036 ctxp->ts_nvme_data = ktime_get_ns();
1040 if (!ctxp->hdwq)
1041 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
1051 ctxp->cpu = id; /* Setup cpu for cmpl check */
1056 if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
1057 (ctxp->state == LPFC_NVME_STE_ABORT)) {
1061 ctxp->oxid);
1066 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
1071 ctxp->oxid);
1078 nvmewqeq->context2 = ctxp;
1080 ctxp->wqeq->hba_wqidx = rsp->hwqid;
1083 ctxp->oxid, rsp->op, rsp->rsplen);
1085 ctxp->flag |= LPFC_NVME_IO_INP;
1086 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1089 if (!ctxp->ts_cmd_nvme)
1092 ctxp->ts_status_wqput = ktime_get_ns();
1094 ctxp->ts_data_wqput = ktime_get_ns();
1104 ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
1105 wq = ctxp->hdwq->io_wq;
1119 ctxp->oxid, rc);
1121 ctxp->wqeq->hba_wqidx = 0;
1144 struct lpfc_async_xchg_ctx *ctxp =
1146 struct lpfc_hba *phba = ctxp->phba;
1153 if (!ctxp->hdwq)
1154 ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1158 ctxp->oxid, ctxp->flag, ctxp->state);
1161 ctxp->oxid, ctxp->flag, ctxp->state);
1165 spin_lock_irqsave(&ctxp->ctxlock, flags);
1170 if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
1171 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1174 ctxp->flag |= LPFC_NVME_ABORT_OP;
1176 if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
1177 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1178 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1179 ctxp->oxid);
1180 wq = ctxp->hdwq->io_wq;
1181 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1184 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1190 if (ctxp->state == LPFC_NVME_STE_RCV)
1191 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1192 ctxp->oxid);
1194 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1195 ctxp->oxid);
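The matches at 1144-1195 are the FCP abort entry point: under ctxp->ctxlock the abort is skipped if one is already outstanding, otherwise LPFC_NVME_ABORT_OP is set and the abort flavor is chosen by where the command currently lives. A sketch of that dispatch, written as a helper taking ctxp directly so the real callback signature is not assumed:

/* Sketch of the abort dispatch implied by lines 1165-1195. */
static void lpfc_nvmet_abort_exchange_sketch(struct lpfc_hba *phba,
					     struct lpfc_async_xchg_ctx *ctxp)
{
	struct lpfc_queue *wq;
	unsigned long flags;

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* An abort is already in flight, or the exchange is still busy on
	 * the wire from a prior abort: nothing more to do. */
	if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVME_ABORT_OP;

	/* Command never made it onto the WQ: abort it and drain the
	 * WQ-full deferral list. */
	if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->io_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Otherwise pick the abort flavor by exchange state. */
	if (ctxp->state == LPFC_NVME_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}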
1203 struct lpfc_async_xchg_ctx *ctxp =
1205 struct lpfc_hba *phba = ctxp->phba;
1209 spin_lock_irqsave(&ctxp->ctxlock, flags);
1210 if (ctxp->flag & LPFC_NVME_XBUSY)
1214 ctxp->flag, ctxp->oxid);
1215 else if (ctxp->state != LPFC_NVME_STE_DONE &&
1216 ctxp->state != LPFC_NVME_STE_ABORT)
1219 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1221 if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
1222 (ctxp->flag & LPFC_NVME_XBUSY)) {
1225 lpfc_nvmet_defer_release(phba, ctxp);
1227 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1229 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1230 ctxp->state, aborting);
1233 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
1238 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1246 struct lpfc_async_xchg_ctx *ctxp =
1248 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1249 struct lpfc_hba *phba = ctxp->phba;
1254 ctxp->oxid, ctxp->size, raw_smp_processor_id());
1260 ctxp->oxid, ctxp->flag, ctxp->state);
1270 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1271 ctxp->rqb_buffer = NULL;
1272 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1767 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1788 list_for_each_entry_safe(ctxp, next_ctxp,
1791 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1794 spin_lock(&ctxp->ctxlock);
1798 if (ctxp->flag & LPFC_NVME_CTX_RLS &&
1799 !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
1800 list_del_init(&ctxp->list);
1803 ctxp->flag &= ~LPFC_NVME_XBUSY;
1804 spin_unlock(&ctxp->ctxlock);
1809 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1814 ctxp->ctxbuf->sglq->sli4_lxritag,
1821 ctxp->oxid, ctxp->flag, released);
1823 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1832 ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1833 if (ctxp) {
1839 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1841 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1844 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1845 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1846 ctxp->state = LPFC_NVME_STE_ABORT;
1847 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1853 req = &ctxp->hdlrctx.fcp_req;
1866 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1877 list_for_each_entry_safe(ctxp, next_ctxp,
1880 if (ctxp->oxid != oxid || ctxp->sid != sid)
1883 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1888 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1889 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1890 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1899 rsp = &ctxp->hdlrctx.fcp_req;
1932 "is waiting for a ctxp\n",
1953 ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1954 if (ctxp) {
1955 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1957 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1958 ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1959 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1968 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1970 if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1973 &ctxp->hdlrctx.fcp_req);
1975 cancel_work_sync(&ctxp->ctxbuf->defer_work);
1976 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1977 lpfc_nvmet_defer_release(phba, ctxp);
1978 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1980 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1981 ctxp->oxid);
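Lines 1953-1981 handle an ABTS that arrives for a command the driver has already queued (context found by oxid/sid): the context is flagged ABTS_RCV|ABORT_OP under ctxlock, then either the nvmet-fc transport is told about the abort (if it already owns the request, TNOTIFY set) or the deferred receive work is cancelled and the context release is deferred, and a solicited abort is issued for the exchange. A sketch under those assumptions; the exact branch boundaries around cancel_work_sync() are inferred rather than shown in the matches:

/* Sketch of the queued-command ABTS handling at lines 1957-1981. */
static void lpfc_nvmet_abts_for_queued_cmd_sketch(struct lpfc_hba *phba,
						  struct lpfc_async_xchg_ctx *ctxp)
{
	unsigned long iflag;

	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

	if (ctxp->flag & LPFC_NVME_TNOTIFY) {
		/* The transport already owns this request: let it abort it. */
		nvmet_fc_rcv_fcp_abort(phba->targetport,
				       &ctxp->hdlrctx.fcp_req);
	} else {
		/* Command still deferred in the driver: stop the deferred
		 * receive work and hold the context until the XRI is freed. */
		cancel_work_sync(&ctxp->ctxbuf->defer_work);
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
	}
	lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
}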
2001 struct lpfc_async_xchg_ctx *ctxp)
2021 if (ctxp) {
2023 if (nvmewqeq->context2 == ctxp) {
2040 if (!ctxp)
2052 struct lpfc_async_xchg_ctx *ctxp;
2066 ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
2067 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2077 if (ctxp->ts_cmd_nvme) {
2078 if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2079 ctxp->ts_status_wqput = ktime_get_ns();
2081 ctxp->ts_data_wqput = ktime_get_ns();
2179 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2180 struct lpfc_hba *phba = ctxp->phba;
2181 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2191 ctxp->oxid, ctxp->flag, ctxp->state);
2192 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2193 lpfc_nvmet_defer_release(phba, ctxp);
2194 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2195 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2196 ctxp->oxid);
2200 if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2203 ctxp->oxid);
2209 ctxp->flag |= LPFC_NVME_TNOTIFY;
2211 if (ctxp->ts_isr_cmd)
2212 ctxp->ts_cmd_nvme = ktime_get_ns();
2217 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2223 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2224 payload, ctxp->size);
2228 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2229 if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2230 (nvmebuf != ctxp->rqb_buffer)) {
2231 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2234 ctxp->rqb_buffer = NULL;
2235 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2244 ctxp->oxid, ctxp->size, ctxp->sid);
2247 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2248 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2249 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2252 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2263 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2267 ctxp->oxid, rc,
2272 ctxp->oxid, ctxp->size, ctxp->sid);
2273 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2274 lpfc_nvmet_defer_release(phba, ctxp);
2275 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2276 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2370 struct lpfc_async_xchg_ctx *ctxp;
2452 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2454 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2456 if (ctxp->state != LPFC_NVME_STE_FREE) {
2459 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2461 ctxp->wqeq = NULL;
2462 ctxp->offset = 0;
2463 ctxp->phba = phba;
2464 ctxp->size = size;
2465 ctxp->oxid = oxid;
2466 ctxp->sid = sid;
2467 ctxp->idx = idx;
2468 ctxp->state = LPFC_NVME_STE_RCV;
2469 ctxp->entry_cnt = 1;
2470 ctxp->flag = 0;
2471 ctxp->ctxbuf = ctx_buf;
2472 ctxp->rqb_buffer = (void *)nvmebuf;
2473 ctxp->hdwq = NULL;
2474 spin_lock_init(&ctxp->ctxlock);
2478 ctxp->ts_isr_cmd = isr_timestamp;
2479 ctxp->ts_cmd_nvme = 0;
2480 ctxp->ts_nvme_data = 0;
2481 ctxp->ts_data_wqput = 0;
2482 ctxp->ts_isr_data = 0;
2483 ctxp->ts_data_nvme = 0;
2484 ctxp->ts_nvme_status = 0;
2485 ctxp->ts_status_wqput = 0;
2486 ctxp->ts_isr_status = 0;
2487 ctxp->ts_status_nvme = 0;
2502 ctxp->oxid,
2507 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2508 lpfc_nvmet_defer_release(phba, ctxp);
2509 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2510 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2550 * @ctxp: Context info for NVME LS Request
2574 struct lpfc_async_xchg_ctx *ctxp,
2585 ctxp->sid, ctxp->oxid, ctxp->state);
2595 ctxp->sid, ctxp->oxid, ctxp->state);
2599 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2606 ctxp->sid, ctxp->oxid, ctxp->state);
2609 ctxp->wqeq = nvmewqe;
2615 nvmewqe->context2 = ctxp;
2655 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2682 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2696 struct lpfc_async_xchg_ctx *ctxp)
2698 struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
2715 ctxp->sid, ctxp->oxid, ctxp->state);
2719 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2726 ctxp->sid, ctxp->oxid, ctxp->state);
2734 ctxp->sid, ctxp->oxid, ctxp->state,
2741 nvmewqe = ctxp->wqeq;
2744 nvmewqe = ctxp->ctxbuf->iocbq;
2749 ctxp->sid, ctxp->oxid, ctxp->state);
2752 ctxp->wqeq = nvmewqe;
2759 if (((ctxp->state == LPFC_NVME_STE_RCV) &&
2760 (ctxp->entry_cnt == 1)) ||
2761 (ctxp->state == LPFC_NVME_STE_DATA)) {
2766 ctxp->state, ctxp->entry_cnt);
2770 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2792 wqe->fcp_tsend.relative_offset = ctxp->offset;
2810 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2875 wqe->fcp_treceive.relative_offset = ctxp->offset;
2890 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2956 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
3002 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
3022 ctxp->offset += cnt;
3024 ctxp->state = LPFC_NVME_STE_DATA;
3025 ctxp->entry_cnt++;
3043 struct lpfc_async_xchg_ctx *ctxp;
3049 ctxp = cmdwqe->context2;
3053 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3056 spin_lock_irqsave(&ctxp->ctxlock, flags);
3057 ctxp->state = LPFC_NVME_STE_DONE;
3062 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3063 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3065 list_del_init(&ctxp->list);
3069 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3070 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3076 ctxp->oxid, ctxp->flag, released,
3087 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3112 struct lpfc_async_xchg_ctx *ctxp;
3118 ctxp = cmdwqe->context2;
3121 if (!ctxp) {
3131 spin_lock_irqsave(&ctxp->ctxlock, flags);
3132 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3136 if (ctxp->state != LPFC_NVME_STE_ABORT) {
3139 ctxp->state, ctxp->oxid);
3145 ctxp->state = LPFC_NVME_STE_DONE;
3146 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3147 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3149 list_del_init(&ctxp->list);
3153 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3154 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3160 ctxp->oxid, ctxp->flag, released,
3171 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3193 struct lpfc_async_xchg_ctx *ctxp;
3197 ctxp = cmdwqe->context2;
3207 ctxp, wcqe->word0, wcqe->total_data_placed,
3210 if (!ctxp) {
3221 if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3225 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3231 kfree(ctxp);
3236 struct lpfc_async_xchg_ctx *ctxp,
3246 sid, xri, ctxp->wqeq->sli4_xritag);
3265 abts_wqeq = ctxp->wqeq;
3317 abts_wqeq->context2 = ctxp;
3332 struct lpfc_async_xchg_ctx *ctxp,
3343 if (!ctxp->wqeq) {
3344 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3345 ctxp->wqeq->hba_wqidx = 0;
3358 spin_lock_irqsave(&ctxp->ctxlock, flags);
3359 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3360 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3365 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3366 spin_lock_irqsave(&ctxp->ctxlock, flags);
3367 if (!ctxp->abort_wqeq) {
3371 "xri: x%x\n", ctxp->oxid);
3373 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3374 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3377 abts_wqeq = ctxp->abort_wqeq;
3378 ctxp->state = LPFC_NVME_STE_ABORT;
3379 opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
3380 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3386 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3399 phba->hba_flag, ctxp->oxid);
3401 spin_lock_irqsave(&ctxp->ctxlock, flags);
3402 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3403 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3414 ctxp->oxid);
3416 spin_lock_irqsave(&ctxp->ctxlock, flags);
3417 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3418 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3425 lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
3428 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3432 abts_wqeq->context2 = ctxp;
3434 if (!ctxp->hdwq)
3435 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3437 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3445 spin_lock_irqsave(&ctxp->ctxlock, flags);
3446 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3447 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3452 rc, ctxp->oxid);
3458 struct lpfc_async_xchg_ctx *ctxp,
3468 if (!ctxp->wqeq) {
3469 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3470 ctxp->wqeq->hba_wqidx = 0;
3473 if (ctxp->state == LPFC_NVME_STE_FREE) {
3476 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3480 ctxp->state = LPFC_NVME_STE_ABORT;
3481 ctxp->entry_cnt++;
3482 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3487 abts_wqeq = ctxp->wqeq;
3491 if (!ctxp->hdwq)
3492 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3494 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3501 spin_lock_irqsave(&ctxp->ctxlock, flags);
3502 if (ctxp->flag & LPFC_NVME_CTX_RLS) {
3504 list_del_init(&ctxp->list);
3508 ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
3509 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3515 ctxp->oxid, rc, released);
3517 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3525 * @ctxp: pointer to the asynchronously received sequence
3531 struct lpfc_async_xchg_ctx *ctxp,
3539 if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3540 (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3541 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3542 ctxp->entry_cnt++;
3547 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3548 ctxp->state = LPFC_NVME_STE_LS_ABORT;
3554 if (!ctxp->wqeq) {
3556 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3557 if (!ctxp->wqeq) {
3562 kfree(ctxp);
3566 abts_wqeq = ctxp->wqeq;
3568 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3577 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
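The final block (3531-3577) aborts an unsolicited LS exchange: the transition to LPFC_NVME_STE_LS_ABORT is legal only from LS_RCV (entry_cnt 1) or LS_RSP (entry_cnt 2); anything else is logged and the state is forced to LS_ABORT anyway, an iocbq is allocated if the context has no WQE yet (the context is freed if that allocation fails), and the abort WQE is issued on ctxp->hdwq. A short sketch of the state gate; the log text is an assumption:

/* Sketch of the LS-abort state gate at lines 3539-3548. */
static void lpfc_nvmet_ls_abort_state_gate(struct lpfc_hba *phba,
					   struct lpfc_async_xchg_ctx *ctxp)
{
	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		/* Expected transitions: abort after receive, or after a
		 * response was attempted. */
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		/* Unexpected state: note it, then force the abort state so
		 * the exchange can still be torn down. */
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"unexpected LS abort: oxid x%x ste %d cnt %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
	}
}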