Lines matching tfcp_req in drivers/nvme/target/fcloop.c (query: defs:tfcp_req; the listing includes both definitions and uses, and the leading numbers are file line numbers)
286 struct fcloop_fcpreq *tfcp_req;
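
Line 286 is the member declaration inside the initiator-side per-I/O struct that points at the target-side request. For orientation, here is a minimal sketch of the two structures and the inistate values the matches below manipulate; the field list is reconstructed from this listing plus mainline fcloop, and anything not visible above (inilock, the fcpreq back-pointer in fcloop_ini_fcpreq) should be treated as an assumption:

enum {
        INI_IO_START            = 0,
        INI_IO_ACTIVE           = 1,
        INI_IO_ABORTED          = 2,
        INI_IO_COMPLETED        = 3,
};

struct fcloop_fcpreq {
        struct fcloop_tport             *tport;
        struct nvmefc_fcp_req           *fcpreq;        /* host-side request */
        spinlock_t                      reqlock;        /* guards inistate/active/aborted/fcpreq */
        u16                             status;         /* set on abort, line 912 */
        u32                             inistate;
        bool                            active;
        bool                            aborted;
        struct kref                     ref;            /* lifecycle, lines 529-544 */
        struct work_struct              fcp_rcv_work;
        struct work_struct              abort_rcv_work;
        struct work_struct              tio_done_work;
        struct nvmefc_tgt_fcp_req       tgt_fcp_req;    /* handed to nvmet-fc */
};

struct fcloop_ini_fcpreq {
        struct nvmefc_fcp_req           *fcpreq;
        struct fcloop_fcpreq            *tfcp_req;      /* line 286 */
        spinlock_t                      inilock;        /* assumed: guards tfcp_req */
};
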
529 struct fcloop_fcpreq *tfcp_req =
532 kfree(tfcp_req);
536 fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
538 kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
542 fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
544 return kref_get_unless_zero(&tfcp_req->ref);
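
Lines 529-544 are the kref lifecycle helpers. A reconstruction consistent with the matched fragments (the container_of() argument list on line 529 is truncated in the listing and inferred here):

static void
fcloop_tfcp_req_free(struct kref *ref)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(ref, struct fcloop_fcpreq, ref);

        kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
        kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
        return kref_get_unless_zero(&tfcp_req->ref);
}

kref_get_unless_zero() is what lets the abort path (lines 950-957) safely take a reference against a request whose final put may be racing in.
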
549 struct fcloop_fcpreq *tfcp_req, int status)
556 inireq->tfcp_req = NULL;
564 fcloop_tfcp_req_put(tfcp_req);
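
Lines 549-564 complete the host-side request and drop the original I/O reference. Sketch, with inireq->inilock assumed (it is not among the matches) to serialize against fcloop_fcp_abort() reading inireq->tfcp_req:

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
                struct fcloop_fcpreq *tfcp_req, int status)
{
        struct fcloop_ini_fcpreq *inireq = NULL;

        if (fcpreq) {
                inireq = fcpreq->private;
                spin_lock(&inireq->inilock);
                inireq->tfcp_req = NULL;        /* line 556: unlink before done() */
                spin_unlock(&inireq->inilock);

                fcpreq->status = status;
                fcpreq->done(fcpreq);
        }

        /* release the original I/O reference on the target struct (line 564) */
        fcloop_tfcp_req_put(tfcp_req);
}
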
581 static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
583 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
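
check_for_drop() (lines 581-583) is fcloop's command-drop fault injection: line 583 walks from the host request to the command IU so the SQE opcode can be matched against the armed drop rule. The matching logic below is deliberately simplified, and the drop_* state names are assumptions modeled on fcloop's module parameters:

/* drop_* state assumed to be armed via module parameters; matching simplified */
static bool drop_armed;
static u8 drop_opcode;
static unsigned int drop_amount, drop_current_cnt;

static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
        struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;       /* line 583 */
        struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
        struct nvme_command *sqe = &cmdiu->sqe;

        if (drop_armed && sqe->common.opcode == drop_opcode &&
            drop_current_cnt++ < drop_amount)
                return 1;       /* caller silently discards the command */

        return 0;
}
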
614 struct fcloop_fcpreq *tfcp_req =
616 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
621 spin_lock_irqsave(&tfcp_req->reqlock, flags);
622 switch (tfcp_req->inistate) {
624 tfcp_req->inistate = INI_IO_ACTIVE;
630 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
634 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
639 if (likely(!check_for_drop(tfcp_req)))
640 ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
641 &tfcp_req->tgt_fcp_req,
647 fcloop_call_host_done(fcpreq, tfcp_req, ret);
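
fcloop_fcp_recv_work() (lines 614-647) is the work item queued by fcloop_fcp_req(). It advances the initiator state machine under reqlock, then hands the command to the target side, completing it immediately on error. Reconstruction consistent with the matched lines:

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
        struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
        unsigned long flags;
        int ret = 0;
        bool aborted = false;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
                tfcp_req->inistate = INI_IO_ACTIVE;     /* line 624 */
                break;
        case INI_IO_ABORTED:
                aborted = true;         /* abort won the race to the lock */
                break;
        default:
                spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
                WARN_ON(1);
                return;
        }
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (unlikely(aborted))
                ret = -ECANCELED;
        else if (likely(!check_for_drop(tfcp_req)))     /* line 639 */
                ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
                                &tfcp_req->tgt_fcp_req,
                                fcpreq->cmdaddr, fcpreq->cmdlen);

        if (ret)
                fcloop_call_host_done(fcpreq, tfcp_req, ret);
}
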
653 struct fcloop_fcpreq *tfcp_req =
659 spin_lock_irqsave(&tfcp_req->reqlock, flags);
660 fcpreq = tfcp_req->fcpreq;
661 switch (tfcp_req->inistate) {
668 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
672 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
676 fcloop_tfcp_req_put(tfcp_req);
680 if (tfcp_req->tport->targetport)
681 nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
682 &tfcp_req->tgt_fcp_req);
684 spin_lock_irqsave(&tfcp_req->reqlock, flags);
685 tfcp_req->fcpreq = NULL;
686 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
688 fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
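
fcloop_fcp_abort_recv_work() (lines 653-688) is the abort half of the state machine. If the I/O already completed, the only work left is dropping the reference taken by fcloop_fcp_abort(); otherwise the abort is propagated to the target port and the host request completed with -ECANCELED. Reconstruction:

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, abort_rcv_work);
        struct nvmefc_fcp_req *fcpreq;
        unsigned long flags;
        bool completed = false;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        fcpreq = tfcp_req->fcpreq;
        switch (tfcp_req->inistate) {
        case INI_IO_ABORTED:
                break;
        case INI_IO_COMPLETED:
                completed = true;
                break;
        default:
                spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
                WARN_ON(1);
                return;
        }
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (unlikely(completed)) {
                /* drop the reference taken in the abort downcall (line 676) */
                fcloop_tfcp_req_put(tfcp_req);
                return;
        }

        if (tfcp_req->tport->targetport)
                nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
                                        &tfcp_req->tgt_fcp_req);

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        tfcp_req->fcpreq = NULL;        /* line 685: sever host link */
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
}
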
699 struct fcloop_fcpreq *tfcp_req =
704 spin_lock_irqsave(&tfcp_req->reqlock, flags);
705 fcpreq = tfcp_req->fcpreq;
706 tfcp_req->inistate = INI_IO_COMPLETED;
707 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
709 fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
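
fcloop_tgt_fcprqst_done_work() (lines 699-709) runs when the target releases the request: it snapshots fcpreq and marks INI_IO_COMPLETED under the lock, then completes the host side with whatever status the abort path may have set (line 912). Sketch:

static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, tio_done_work);
        struct nvmefc_fcp_req *fcpreq;
        unsigned long flags;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        fcpreq = tfcp_req->fcpreq;
        tfcp_req->inistate = INI_IO_COMPLETED;  /* line 706 */
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
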
721 struct fcloop_fcpreq *tfcp_req;
726 tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
727 if (!tfcp_req)
731 inireq->tfcp_req = tfcp_req;
734 tfcp_req->fcpreq = fcpreq;
735 tfcp_req->tport = rport->targetport->private;
736 tfcp_req->inistate = INI_IO_START;
737 spin_lock_init(&tfcp_req->reqlock);
738 INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
739 INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
740 INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
741 kref_init(&tfcp_req->ref);
743 queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
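
Lines 721-743 are the allocation/initialization path, fcloop's .fcp_io entry point. GFP_ATOMIC on line 726 is needed because the transport may submit from atomic context. Reconstruction (the guard on rport->targetport is an assumption from mainline, implied by line 735's dereference):

static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
                struct nvme_fc_remote_port *remoteport,
                void *hw_queue_handle,
                struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_rport *rport = remoteport->private;
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;

        if (!rport->targetport)
                return -ECONNREFUSED;

        tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);      /* line 726 */
        if (!tfcp_req)
                return -ENOMEM;

        inireq->fcpreq = fcpreq;
        inireq->tfcp_req = tfcp_req;            /* line 731: link the two sides */
        spin_lock_init(&inireq->inilock);

        tfcp_req->fcpreq = fcpreq;
        tfcp_req->tport = rport->targetport->private;
        tfcp_req->inistate = INI_IO_START;
        spin_lock_init(&tfcp_req->reqlock);
        INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
        INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
        INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
        kref_init(&tfcp_req->ref);              /* the original I/O reference */

        queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);  /* line 743 */

        return 0;
}

Note that delivery is deferred to nvmet_wq rather than done inline, so the host-side submit path never re-enters the target code; everything after this point runs from the three work items initialized above.
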
806 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
813 spin_lock_irqsave(&tfcp_req->reqlock, flags);
814 fcpreq = tfcp_req->fcpreq;
815 active = tfcp_req->active;
816 aborted = tfcp_req->aborted;
817 tfcp_req->active = true;
818 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
826 spin_lock_irqsave(&tfcp_req->reqlock, flags);
827 tfcp_req->active = false;
828 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
877 tfcp_req->status = 0;
885 spin_lock_irqsave(&tfcp_req->reqlock, flags);
886 tfcp_req->active = false;
887 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
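
fcloop_fcp_op() (lines 806-887) is the target-side data-movement hook. The matched lines show the pattern: snapshot fcpreq/active/aborted and set active under reqlock, then clear active again around each completion. A condensed sketch; the WRITEDATA/READDATA/RSP op decoding is elided here (line 877 sits in the response case, where status is zeroed before completing):

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
                struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
        unsigned long flags;
        bool aborted, active;
        int ret = 0;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        active = tfcp_req->active;
        aborted = tfcp_req->aborted;
        tfcp_req->active = true;        /* line 817: claim the op slot */
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (unlikely(active))
                /* illegal: an op is already in flight */
                return -EALREADY;

        if (unlikely(aborted)) {
                /* target already aborting: fail the op back immediately */
                spin_lock_irqsave(&tfcp_req->reqlock, flags);
                tfcp_req->active = false;
                spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
                tgt_fcpreq->transferred_length = 0;
                tgt_fcpreq->fcp_error = -ECANCELED;
                tgt_fcpreq->done(tgt_fcpreq);
                return 0;
        }

        /* op decoding elided; the RSP case clears tfcp_req->status (line 877) */

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        tfcp_req->active = false;       /* lines 886-887: release the op slot */
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        tgt_fcpreq->done(tgt_fcpreq);
        return ret;
}
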
900 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
908 spin_lock_irqsave(&tfcp_req->reqlock, flags);
909 tfcp_req->aborted = true;
910 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
912 tfcp_req->status = NVME_SC_INTERNAL;
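
fcloop_tgt_fcp_abort() (lines 900-912) is the target-side abort hook. It only latches the aborted flag, so ops posted after the abort fail in fcloop_fcp_op(), and records NVME_SC_INTERNAL for the eventual completion on line 709. Reconstruction:

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
        unsigned long flags;

        /* only kills ops posted after the abort request */
        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        tfcp_req->aborted = true;               /* line 909 */
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        tfcp_req->status = NVME_SC_INTERNAL;    /* line 912 */

        /*
         * nothing more to do here: whether the io was active or not,
         * the transport/LLDD will eventually call req_release.
         */
}
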
925 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
927 queue_work(nvmet_wq, &tfcp_req->tio_done_work);
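
fcloop_fcp_req_release() (lines 925-927) is the final target-side downcall; fcloop defers the host-side completion to the tio_done_work item:

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        queue_work(nvmet_wq, &tfcp_req->tio_done_work);         /* line 927 */
}
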
950 struct fcloop_fcpreq *tfcp_req;
955 tfcp_req = inireq->tfcp_req;
956 if (tfcp_req)
957 fcloop_tfcp_req_get(tfcp_req);
960 if (!tfcp_req)
965 spin_lock_irqsave(&tfcp_req->reqlock, flags);
966 switch (tfcp_req->inistate) {
969 tfcp_req->inistate = INI_IO_ABORTED;
975 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
979 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
983 WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
989 fcloop_tfcp_req_put(tfcp_req);
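
Lines 950-989 are the initiator abort path (.fcp_abort). It takes its own reference under inilock via kref_get_unless_zero() (line 957) so it cannot race the final put, then either schedules abort_rcv_work, leaving the reference for the work item to drop, or, if the I/O already completed, drops it immediately on line 989. Reconstruction:

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
                struct nvme_fc_remote_port *remoteport,
                void *hw_queue_handle,
                struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;
        bool abortio = true;
        unsigned long flags;

        spin_lock(&inireq->inilock);
        tfcp_req = inireq->tfcp_req;
        if (tfcp_req)
                fcloop_tfcp_req_get(tfcp_req);  /* line 957 */
        spin_unlock(&inireq->inilock);

        if (!tfcp_req)
                /* io has already completed; nothing to abort */
                return;

        /* break the initiator/target relationship for the io */
        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
        case INI_IO_ACTIVE:
                tfcp_req->inistate = INI_IO_ABORTED;    /* line 969 */
                break;
        case INI_IO_COMPLETED:
                abortio = false;
                break;
        default:
                spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
                WARN_ON(1);
                return;
        }
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (abortio)
                /* leave the reference for abort_rcv_work to drop */
                WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
        else
                /* done callback already made; release the reference (line 989) */
                fcloop_tfcp_req_put(tfcp_req);
}
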