Lines Matching defs:fod (uses of struct nvmet_fc_fcp_iod, the FCP I/O descriptor "fod", in the NVMe over Fibre Channel target, drivers/nvme/target/fc.c)
151 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
186 return (fodptr - fodptr->queue->fod);
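
The queue embeds its array of FCP I/O descriptors as a flexible array member (fod[]), and an iod's index is recovered by subtracting the array base from the iod pointer; the queue itself is sized with struct_size() further down the listing. A minimal user-space sketch of that pattern, with hypothetical demo_* names in place of the driver's types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's queue and I/O descriptor. */
struct demo_queue;

struct demo_iod {
        struct demo_queue *queue;
        int active;
};

struct demo_queue {
        int sqsize;
        struct demo_iod iod[];          /* flexible array, like queue->fod[] */
};

/* Index of an iod within its owning queue, recovered by pointer math. */
static long demo_iodnum(struct demo_iod *p)
{
        return p - p->queue->iod;
}

int main(void)
{
        int sqsize = 4;
        /* User-space equivalent of kzalloc(struct_size(queue, fod, sqsize)). */
        struct demo_queue *q = calloc(1, sizeof(*q) + sqsize * sizeof(q->iod[0]));

        if (!q)
                return 1;
        q->sqsize = sqsize;
        for (int i = 0; i < sqsize; i++)
                q->iod[i].queue = q;
        printf("index of third iod: %ld\n", demo_iodnum(&q->iod[2]));
        free(q);
        return 0;
}
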
262 struct nvmet_fc_fcp_iod *fod);
640 struct nvmet_fc_fcp_iod *fod = queue->fod;
643 for (i = 0; i < queue->sqsize; fod++, i++) {
644 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
645 fod->tgtport = tgtport;
646 fod->queue = queue;
647 fod->active = false;
648 fod->abort = false;
649 fod->aborted = false;
650 fod->fcpreq = NULL;
651 list_add_tail(&fod->fcp_list, &queue->fod_list);
652 spin_lock_init(&fod->flock);
654 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
655 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
656 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
657 list_del(&fod->fcp_list);
658 for (fod--, i--; i >= 0; fod--, i--) {
659 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
660 sizeof(fod->rspiubuf),
662 fod->rspdma = 0L;
663 list_del(&fod->fcp_list);
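
If DMA-mapping the response buffer fails partway through the setup loop, the code walks backwards over the entries it has already initialized and undoes each one. A compilable user-space analogue of that unwind shape; the failing malloc stands in for fc_dma_map_single() and all demo_* names are made up:

#include <stdlib.h>

#define NITEMS 8

struct demo_item {
        void *buf;              /* stands in for the DMA-mapped rsp buffer */
};

/* Set up every item; on failure, undo all earlier ones and report an error. */
static int demo_setup_all(struct demo_item *item)
{
        int i;

        for (i = 0; i < NITEMS; item++, i++) {
                item->buf = malloc(64);
                if (!item->buf) {
                        /* Walk backwards over what was already set up. */
                        for (item--, i--; i >= 0; item--, i--) {
                                free(item->buf);
                                item->buf = NULL;
                        }
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        struct demo_item items[NITEMS] = { { NULL } };

        return demo_setup_all(items) ? 1 : 0;
}
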
675 struct nvmet_fc_fcp_iod *fod = queue->fod;
678 for (i = 0; i < queue->sqsize; fod++, i++) {
679 if (fod->rspdma)
680 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
681 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
688 struct nvmet_fc_fcp_iod *fod;
692 fod = list_first_entry_or_null(&queue->fod_list,
694 if (fod) {
695 list_del(&fod->fcp_list);
696 fod->active = true;
703 return fod;
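
Free iods live on a per-queue list: allocation pops the first entry and marks it active, and freeing pushes it back. A single-threaded sketch of the same free-list shape, using a plain singly linked list and hypothetical demo_* names in place of list_head and the queue spinlock:

#include <stdio.h>
#include <stddef.h>

struct demo_iod {
        struct demo_iod *next;
        int active;
};

struct demo_queue {
        struct demo_iod *free_list;     /* head of the free-iod list */
};

/* Pop the first free iod, or NULL if none is available right now. */
static struct demo_iod *demo_alloc_iod(struct demo_queue *q)
{
        struct demo_iod *iod = q->free_list;

        if (iod) {
                q->free_list = iod->next;
                iod->active = 1;
        }
        return iod;
}

/* Return an iod to the queue's free list once its command is finished. */
static void demo_free_iod(struct demo_queue *q, struct demo_iod *iod)
{
        iod->active = 0;
        iod->next = q->free_list;
        q->free_list = iod;
}

int main(void)
{
        struct demo_iod iods[2] = { { &iods[1], 0 }, { NULL, 0 } };
        struct demo_queue q = { .free_list = &iods[0] };
        struct demo_iod *iod = demo_alloc_iod(&q);

        printf("allocated iod %td\n", iod - iods);
        demo_free_iod(&q, iod);
        return 0;
}
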
712 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
721 nvmet_fc_handle_fcp_rqst(tgtport, fod);
727 struct nvmet_fc_fcp_iod *fod =
731 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
737 struct nvmet_fc_fcp_iod *fod)
739 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
740 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
744 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
745 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
749 fod->active = false;
750 fod->abort = false;
751 fod->aborted = false;
752 fod->writedataactive = false;
753 fod->fcpreq = NULL;
764 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
769 /* Re-use the fod for the next pending cmd that was deferred */
779 /* Save NVME CMD IO in fod */
780 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
785 fcpreq->nvmet_fc_private = fod;
786 fod->fcpreq = fcpreq;
787 fod->active = true;
794 * fod was originally allocated.
797 queue_work(queue->work_q, &fod->defer_work);
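
When a command arrives and no iod is free, it is parked on a deferred list; a finishing iod is then handed straight to the oldest parked command, and the saved CMD IU is copied into the iod before processing continues on the queue's workqueue. A sketch of that hand-off with hypothetical names and no locking:

#include <string.h>
#include <stddef.h>

struct demo_deferred {
        struct demo_deferred *next;
        char cmdiu[64];                 /* saved FCP CMD IU payload */
};

struct demo_iod {
        char cmdiu[64];
        int active;
};

struct demo_queue {
        struct demo_deferred *deferred; /* oldest-first list of parked cmds */
};

/*
 * On iod completion: if a command was parked because no iod was free,
 * hand the iod straight to it instead of returning it to the free list.
 * Returns the deferred entry the caller should now process, or NULL.
 */
struct demo_deferred *demo_reuse_iod(struct demo_queue *q,
                                     struct demo_iod *iod)
{
        struct demo_deferred *d = q->deferred;

        if (!d)
                return NULL;            /* nothing pending: free the iod */

        q->deferred = d->next;
        memcpy(iod->cmdiu, d->cmdiu, sizeof(iod->cmdiu));  /* save CMD IU */
        iod->active = 1;                /* iod stays busy for the new cmd */
        return d;
}
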
810 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
883 struct nvmet_fc_fcp_iod *fod = queue->fod;
897 for (i = 0; i < queue->sqsize; fod++, i++) {
898 if (fod->active) {
899 spin_lock(&fod->flock);
900 fod->abort = true;
906 if (fod->writedataactive) {
907 fod->aborted = true;
908 spin_unlock(&fod->flock);
910 &tgtport->fc_target_port, fod->fcpreq);
912 spin_unlock(&fod->flock);
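
Deleting a queue sweeps every active iod, sets its abort flag under the iod lock, and calls the LLDD's abort entry point only for iods that still have write data in flight; the rest notice the flag on their next completion. A simplified, lock-free sketch of that decision (demo_* names are hypothetical):

#include <stdbool.h>

struct demo_iod {
        bool active;
        bool abort;
        bool aborted;
        bool writedata_active;
};

/* Hypothetical stand-in for the LLDD's fcp_abort() entry point. */
static void demo_lldd_abort(struct demo_iod *iod)
{
        (void)iod;
}

void demo_delete_queue_sweep(struct demo_iod *iods, int n)
{
        for (int i = 0; i < n; i++) {
                struct demo_iod *iod = &iods[i];

                if (!iod->active)
                        continue;
                iod->abort = true;
                if (iod->writedata_active) {
                        /* Data still inbound: ask the LLDD to stop it now. */
                        iod->aborted = true;
                        demo_lldd_abort(iod);
                }
                /* Otherwise the flag is honoured at the next completion. */
        }
}
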
2091 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2096 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2100 fod->data_sg = sg;
2101 fod->data_sg_cnt = nent;
2102 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2103 ((fod->io_dir == NVMET_FCP_WRITE) ?
2106 fod->next_sg = fod->data_sg;
2115 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2117 if (!fod->data_sg || !fod->data_sg_cnt)
2120 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2121 ((fod->io_dir == NVMET_FCP_WRITE) ?
2123 sgl_free(fod->data_sg);
2124 fod->data_sg = NULL;
2125 fod->data_sg_cnt = 0;
2147 struct nvmet_fc_fcp_iod *fod)
2149 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
2150 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2156 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
2157 xfr_length = fod->req.transfer_len;
2159 xfr_length = fod->offset;
2180 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2181 if (!(rspcnt % fod->queue->ersp_ratio) ||
2183 xfr_length != fod->req.transfer_len ||
2186 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2190 fod->fcpreq->rspaddr = ersp;
2191 fod->fcpreq->rspdma = fod->rspdma;
2195 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
2198 rsn = atomic_inc_return(&fod->queue->rsn);
2201 fod->fcpreq->rsplen = sizeof(*ersp);
2204 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2205 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
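
The response path prefers the short zero-filled response and sends a full ERSP only when it must: every ersp_ratio-th response, when the transferred length differs from the expected length, or when the submission queue looks about 90% full (the status/result and fused-command checks are elided from this listing). A hedged sketch of that predicate with simplified parameters:

#include <stdbool.h>
#include <stdint.h>

/*
 * Decide between the zero-filled short response and a full ERSP.
 * Simplified: the checks elided from the listing above are folded away.
 */
bool demo_needs_full_ersp(uint32_t rspcnt, uint32_t ersp_ratio,
                          uint32_t xfr_length, uint32_t transfer_len,
                          bool sq_nearly_full)
{
        if (ersp_ratio && !(rspcnt % ersp_ratio))
                return true;            /* periodic full response */
        if (xfr_length != transfer_len)
                return true;            /* short or odd-length transfer */
        if (sq_nearly_full)
                return true;            /* let the host see sq_head soon */
        return false;
}
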
2212 struct nvmet_fc_fcp_iod *fod)
2214 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2217 nvmet_fc_free_tgt_pgs(fod);
2224 if (!fod->aborted)
2227 nvmet_fc_free_fcp_iod(fod->queue, fod);
2232 struct nvmet_fc_fcp_iod *fod)
2236 fod->fcpreq->op = NVMET_FCOP_RSP;
2237 fod->fcpreq->timeout = 0;
2239 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2241 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2243 nvmet_fc_abort_op(tgtport, fod);
2248 struct nvmet_fc_fcp_iod *fod, u8 op)
2250 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2251 struct scatterlist *sg = fod->next_sg;
2253 u32 remaininglen = fod->req.transfer_len - fod->offset;
2258 fcpreq->offset = fod->offset;
2285 fod->next_sg = sg;
2287 fod->next_sg = NULL;
2299 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
2302 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2305 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2312 fod->abort = true;
2315 spin_lock_irqsave(&fod->flock, flags);
2316 fod->writedataactive = false;
2317 spin_unlock_irqrestore(&fod->flock, flags);
2318 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2322 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
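
Each data operation covers at most one LLDD-sized chunk of the transfer, starting at the current offset; in the driver the offset is advanced from the completion handler, and the response can be piggy-backed on the final read-data op. A sketch of just the chunking arithmetic, folded into one hypothetical helper:

#include <stdbool.h>
#include <stdint.h>

struct demo_xfer {
        uint32_t transfer_len;  /* total bytes the command moves */
        uint32_t offset;        /* bytes already transferred */
};

/*
 * Build the next data operation: at most max_op_len bytes starting at the
 * current offset. Reports via *last whether it completes the transfer, so
 * the caller knows when a response may ride along with the data.
 */
uint32_t demo_next_data_op(struct demo_xfer *x, uint32_t max_op_len,
                           bool *last)
{
        uint32_t remaining = x->transfer_len - x->offset;
        uint32_t oplen = remaining < max_op_len ? remaining : max_op_len;

        x->offset += oplen;
        *last = (x->offset == x->transfer_len);
        return oplen;
}
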
2328 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2330 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2331 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2336 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2340 nvmet_fc_abort_op(tgtport, fod);
2351 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
2353 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2354 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2358 spin_lock_irqsave(&fod->flock, flags);
2359 abort = fod->abort;
2360 fod->writedataactive = false;
2361 spin_unlock_irqrestore(&fod->flock, flags);
2366 if (__nvmet_fc_fod_op_abort(fod, abort))
2370 spin_lock_irqsave(&fod->flock, flags);
2371 fod->abort = true;
2372 spin_unlock_irqrestore(&fod->flock, flags);
2374 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2378 fod->offset += fcpreq->transferred_length;
2379 if (fod->offset != fod->req.transfer_len) {
2380 spin_lock_irqsave(&fod->flock, flags);
2381 fod->writedataactive = true;
2382 spin_unlock_irqrestore(&fod->flock, flags);
2385 nvmet_fc_transfer_fcp_data(tgtport, fod,
2391 fod->req.execute(&fod->req);
2396 if (__nvmet_fc_fod_op_abort(fod, abort))
2400 nvmet_fc_abort_op(tgtport, fod);
2408 nvmet_fc_free_tgt_pgs(fod);
2409 nvmet_fc_free_fcp_iod(fod->queue, fod);
2413 fod->offset += fcpreq->transferred_length;
2414 if (fod->offset != fod->req.transfer_len) {
2416 nvmet_fc_transfer_fcp_data(tgtport, fod,
2424 nvmet_fc_free_tgt_pgs(fod);
2426 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2431 if (__nvmet_fc_fod_op_abort(fod, abort))
2433 nvmet_fc_free_fcp_iod(fod->queue, fod);
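
The op-done path is a small state machine keyed on which operation just finished: keep pulling write data or execute the command, keep pushing read data or send the response, or free the iod once the response (or a response piggy-backed on the last read chunk) has gone out, with abort taking precedence at every step. A compressed sketch of that branching with a hypothetical enum and stub callbacks:

#include <stdbool.h>

enum demo_op { DEMO_WRITEDATA, DEMO_READDATA, DEMO_READDATA_RSP, DEMO_RSP };

struct demo_fod {
        enum demo_op op;
        bool abort;
        unsigned int offset, transfer_len;
};

/* Hypothetical next-step helpers; stubs keep the sketch self-contained. */
static void demo_more_data(struct demo_fod *f)  { (void)f; }
static void demo_execute(struct demo_fod *f)    { (void)f; }
static void demo_send_rsp(struct demo_fod *f)   { (void)f; }
static void demo_release(struct demo_fod *f)    { (void)f; }

void demo_op_done(struct demo_fod *f)
{
        if (f->abort) {                         /* abort wins at every stage */
                demo_release(f);
                return;
        }

        switch (f->op) {
        case DEMO_WRITEDATA:
                if (f->offset != f->transfer_len)
                        demo_more_data(f);      /* pull the next write chunk */
                else
                        demo_execute(f);        /* all data in: run the cmd */
                break;
        case DEMO_READDATA:
                if (f->offset != f->transfer_len)
                        demo_more_data(f);      /* push the next read chunk */
                else
                        demo_send_rsp(f);       /* data out: send the rsp */
                break;
        case DEMO_READDATA_RSP:                 /* rsp rode on the last chunk */
        case DEMO_RSP:
                demo_release(f);                /* I/O done: recycle the iod */
                break;
        }
}
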
2444 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2446 nvmet_fc_fod_op_done(fod);
2454 struct nvmet_fc_fcp_iod *fod, int status)
2456 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2457 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2461 spin_lock_irqsave(&fod->flock, flags);
2462 abort = fod->abort;
2463 spin_unlock_irqrestore(&fod->flock, flags);
2467 fod->queue->sqhd = cqe->sq_head;
2470 nvmet_fc_abort_op(tgtport, fod);
2478 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2479 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2489 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2491 nvmet_fc_transfer_fcp_data(tgtport, fod,
2500 nvmet_fc_free_tgt_pgs(fod);
2502 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2509 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2510 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2512 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2521 struct nvmet_fc_fcp_iod *fod)
2523 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2536 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2539 fod->io_dir = NVMET_FCP_WRITE;
2543 fod->io_dir = NVMET_FCP_READ;
2547 fod->io_dir = NVMET_FCP_NODATA;
2552 fod->req.cmd = &fod->cmdiubuf.sqe;
2553 fod->req.cqe = &fod->rspiubuf.cqe;
2556 fod->req.port = tgtport->pe->port;
2559 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2561 fod->data_sg = NULL;
2562 fod->data_sg_cnt = 0;
2564 ret = nvmet_req_init(&fod->req,
2565 &fod->queue->nvme_cq,
2566 &fod->queue->nvme_sq,
2574 fod->req.transfer_len = xfrlen;
2577 atomic_inc(&fod->queue->sqtail);
2579 if (fod->req.transfer_len) {
2580 ret = nvmet_fc_alloc_tgt_pgs(fod);
2582 nvmet_req_complete(&fod->req, ret);
2586 fod->req.sg = fod->data_sg;
2587 fod->req.sg_cnt = fod->data_sg_cnt;
2588 fod->offset = 0;
2590 if (fod->io_dir == NVMET_FCP_WRITE) {
2592 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2602 fod->req.execute(&fod->req);
2606 nvmet_fc_abort_op(tgtport, fod);
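
Handling a received command follows a fixed order: initialize the nvmet request from the CMD IU, allocate and map the data pages if the command moves data, then either start pulling write data (the command executes once the data has arrived) or execute immediately for reads and no-data commands. A compressed flow sketch under the same ordering, with all demo_* helpers as hypothetical stubs:

#include <stdbool.h>

enum demo_dir { DEMO_NODATA, DEMO_WRITE, DEMO_READ };

struct demo_cmd {
        enum demo_dir dir;
        unsigned int xfrlen;
        bool data_mapped;
};

/* Hypothetical stubs standing in for the nvmet core and LLDD calls. */
static bool demo_req_init(struct demo_cmd *c)         { (void)c; return true; }
static bool demo_map_data(struct demo_cmd *c)         { c->data_mapped = true; return true; }
static void demo_pull_write_data(struct demo_cmd *c)  { (void)c; }
static void demo_execute(struct demo_cmd *c)          { (void)c; }
static void demo_abort(struct demo_cmd *c)            { (void)c; }

void demo_handle_cmd(struct demo_cmd *c)
{
        if (!demo_req_init(c))          /* bad command: already completed */
                return;

        if (c->xfrlen && !demo_map_data(c)) {
                demo_abort(c);          /* no memory for the data pages */
                return;
        }

        if (c->dir == DEMO_WRITE)
                demo_pull_write_data(c);        /* execute once data arrives */
        else
                demo_execute(c);                /* reads/no-data run now */
}
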
2664 struct nvmet_fc_fcp_iod *fod;
2682 * After successful fod allocation, the fod will inherit the
2684 * when the fod is freed.
2689 fod = nvmet_fc_alloc_fcp_iod(queue);
2690 if (fod) {
2693 fcpreq->nvmet_fc_private = fod;
2694 fod->fcpreq = fcpreq;
2696 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2733 /* defer processing till a fod becomes available */
2771 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2775 if (!fod || fod->fcpreq != fcpreq)
2779 queue = fod->queue;
2782 if (fod->active) {
2788 spin_lock(&fod->flock);
2789 fod->abort = true;
2790 fod->aborted = true;
2791 spin_unlock(&fod->flock);