Lines Matching defs:fod  (occurrences of the nvmet_fc_fcp_iod FCP I/O descriptor, "fod", in the NVMe-FC target, drivers/nvme/target/fc.c)
148 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
182 return (fodptr - fodptr->queue->fod);
251 struct nvmet_fc_fcp_iod *fod);
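
The queue embeds its per-command I/O descriptors as a C99 flexible array member (line 148), allocates them in one block together with the queue (struct_size() at line 799), and recovers a descriptor's index by plain pointer subtraction against the array base (line 182). A minimal userspace sketch of the same layout, with hypothetical names (io_queue, io_desc), not the driver's types:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct io_desc {
    int active;
};

struct io_queue {
    unsigned int sqsize;
    struct io_desc fod[];    /* flexible array member, like queue->fod[] */
};

/* one allocation for queue + descriptors; struct_size() additionally checks overflow */
static struct io_queue *io_queue_alloc(unsigned int sqsize)
{
    struct io_queue *q;

    q = calloc(1, sizeof(*q) + (size_t)sqsize * sizeof(q->fod[0]));
    if (q)
        q->sqsize = sqsize;
    return q;
}

/* analogue of line 182: a descriptor's index within its queue */
static ptrdiff_t io_desc_num(const struct io_queue *q, const struct io_desc *d)
{
    return d - q->fod;
}

int main(void)
{
    struct io_queue *q = io_queue_alloc(4);

    if (!q)
        return 1;
    printf("index of fod[2] = %td\n", io_desc_num(q, &q->fod[2]));
    free(q);
    return 0;
}
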
628 struct nvmet_fc_fcp_iod *fod = queue->fod;
631 for (i = 0; i < queue->sqsize; fod++, i++) {
632 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
633 fod->tgtport = tgtport;
634 fod->queue = queue;
635 fod->active = false;
636 fod->abort = false;
637 fod->aborted = false;
638 fod->fcpreq = NULL;
639 list_add_tail(&fod->fcp_list, &queue->fod_list);
640 spin_lock_init(&fod->flock);
642 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
643 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
644 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
645 list_del(&fod->fcp_list);
646 for (fod--, i--; i >= 0; fod--, i--) {
647 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
648 sizeof(fod->rspiubuf),
650 fod->rspdma = 0L;
651 list_del(&fod->fcp_list);
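
The iod-list setup around line 628 initializes every descriptor in a single forward pass, DMA-mapping each response buffer; if a mapping fails, the same pointer is walked backwards to unmap and unlink everything already set up (lines 644-651). A hedged userspace sketch of that forward-init / backward-unwind shape, with malloc standing in for fc_dma_map_single():

#include <stdlib.h>

struct entry {
    void *resource;    /* stands in for fod->rspdma */
};

/* set up n entries; on failure, release the ones already initialized */
static int prep_entries(struct entry *e, int n)
{
    int i;

    for (i = 0; i < n; e++, i++) {
        e->resource = malloc(64);    /* stand-in for the DMA mapping */
        if (!e->resource) {
            /* step back over entries i-1..0, mirroring lines 646-651 */
            for (e--, i--; i >= 0; e--, i--) {
                free(e->resource);
                e->resource = NULL;
            }
            return -1;
        }
    }
    return 0;
}
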
663 struct nvmet_fc_fcp_iod *fod = queue->fod;
666 for (i = 0; i < queue->sqsize; fod++, i++) {
667 if (fod->rspdma)
668 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
669 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
676 struct nvmet_fc_fcp_iod *fod;
680 fod = list_first_entry_or_null(&queue->fod_list,
682 if (fod) {
683 list_del(&fod->fcp_list);
684 fod->active = true;
691 return fod;
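
nvmet_fc_alloc_fcp_iod() is a free-list pop: take the first descriptor off queue->fod_list (list_first_entry_or_null), unlink it, mark it active, and return it, or return NULL when the pool is exhausted (lines 680-691). A minimal sketch with a singly linked free list and a pthread mutex in place of the queue lock (names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct iod {
    struct iod *next;
    int active;
};

struct queue {
    pthread_mutex_t lock;
    struct iod *free_list;    /* plays the role of queue->fod_list */
};

/* pop one descriptor, or NULL if none are free */
static struct iod *iod_alloc(struct queue *q)
{
    struct iod *iod;

    pthread_mutex_lock(&q->lock);
    iod = q->free_list;
    if (iod) {
        q->free_list = iod->next;
        iod->active = 1;
    }
    pthread_mutex_unlock(&q->lock);
    return iod;
}
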
700 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
709 nvmet_fc_handle_fcp_rqst(tgtport, fod);
715 struct nvmet_fc_fcp_iod *fod =
719 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
725 struct nvmet_fc_fcp_iod *fod)
727 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
728 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
732 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
733 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
737 fod->active = false;
738 fod->abort = false;
739 fod->aborted = false;
740 fod->writedataactive = false;
741 fod->fcpreq = NULL;
752 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
757 /* Re-use the fod for the next pending cmd that was deferred */
767 /* Save NVME CMD IO in fod */
768 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
773 fcpreq->nvmet_fc_private = fod;
774 fod->fcpreq = fcpreq;
775 fod->active = true;
782 * fod was originally allocated.
785 queue_work(queue->work_q, &fod->defer_work);
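
nvmet_fc_free_fcp_iod() syncs the response buffer, clears the descriptor's state, and then either returns it to the free list (line 752) or, when a command had been deferred because no fod was available, reuses the same descriptor immediately: the saved command IU is copied out of the deferred request (line 768) and the deferred work is kicked off (line 785). A hedged sketch of that free-or-reuse decision, with simplified lists and hypothetical names; the caller is assumed to hold the queue lock:

#include <stddef.h>
#include <string.h>

struct iod {
    struct iod *next;
    int active;
    unsigned char cmdbuf[64];
};

struct deferred_req {
    struct deferred_req *next;
    unsigned char cmdbuf[64];    /* command IU saved when the request was parked */
};

struct queue {
    struct iod *free_list;
    struct deferred_req *deferred;    /* requests that arrived with no free iod */
};

/* returns the deferred request now bound to the iod, or NULL if it was freed */
static struct deferred_req *iod_free_or_reuse(struct queue *q, struct iod *iod)
{
    struct deferred_req *d = q->deferred;

    iod->active = 0;
    if (!d) {
        /* nothing waiting: back onto the free list (line 752) */
        iod->next = q->free_list;
        q->free_list = iod;
        return NULL;
    }
    /* re-use the iod for the next pending command (lines 757-775) */
    q->deferred = d->next;
    memcpy(iod->cmdbuf, d->cmdbuf, sizeof(iod->cmdbuf));
    iod->active = 1;
    return d;    /* caller schedules processing, e.g. via a work item */
}
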
799 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
886 struct nvmet_fc_fcp_iod *fod = queue->fod;
900 for (i = 0; i < queue->sqsize; fod++, i++) {
901 if (fod->active) {
902 spin_lock(&fod->flock);
903 fod->abort = true;
909 if (fod->writedataactive) {
910 fod->aborted = true;
911 spin_unlock(&fod->flock);
913 &tgtport->fc_target_port, fod->fcpreq);
915 spin_unlock(&fod->flock);
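
On queue teardown (lines 886-915) every descriptor is scanned; active ones are flagged for abort under the per-fod spinlock, and the LLDD's abort callback is invoked only for descriptors that still have write data in flight. A simplified pthread sketch of that flag-under-lock, conditionally-notify step:

#include <pthread.h>
#include <stdbool.h>

struct iod {
    pthread_mutex_t flock;
    bool active;
    bool abort;
    bool aborted;
    bool writedataactive;
};

/* hypothetical callback standing in for the LLDD's fcp abort handler */
typedef void (*abort_fn)(struct iod *iod);

static void abort_active_iods(struct iod *fod, int n, abort_fn lldd_abort)
{
    int i;

    for (i = 0; i < n; fod++, i++) {
        if (!fod->active)
            continue;
        pthread_mutex_lock(&fod->flock);
        fod->abort = true;
        if (fod->writedataactive) {
            /* data still moving: mark aborted and tell the LLDD (line 913) */
            fod->aborted = true;
            pthread_mutex_unlock(&fod->flock);
            lldd_abort(fod);
        } else {
            pthread_mutex_unlock(&fod->flock);
        }
    }
}
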
2068 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2073 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2077 fod->data_sg = sg;
2078 fod->data_sg_cnt = nent;
2079 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2080 ((fod->io_dir == NVMET_FCP_WRITE) ?
2083 fod->next_sg = fod->data_sg;
2092 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2094 if (!fod->data_sg || !fod->data_sg_cnt)
2097 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2098 ((fod->io_dir == NVMET_FCP_WRITE) ?
2100 sgl_free(fod->data_sg);
2101 fod->data_sg = NULL;
2102 fod->data_sg_cnt = 0;
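
nvmet_fc_alloc_tgt_pgs() sizes a scatter-gather list from req.transfer_len and maps it with a DMA direction derived from the I/O direction: a WRITE command receives data into memory, so the buffers are a DMA destination, while a READ transmits them (lines 2079-2081); the free path bails out early if no pages were ever allocated (line 2094). A small sketch of those two details, with illustrative enums in place of the kernel DMA API; the real code also frees the scatterlist itself (sgl_free):

#include <stddef.h>

enum io_dir { IO_WRITE, IO_READ, IO_NODATA };
enum dma_dir { DMA_TO_DEV, DMA_FROM_DEV };

struct xfer {
    void *data_sg;    /* stands in for fod->data_sg */
    int data_sg_cnt;
    enum io_dir io_dir;
};

/* WRITE: target receives data (FROM_DEVICE); READ: target sends it (TO_DEVICE) */
static enum dma_dir xfer_dma_dir(const struct xfer *x)
{
    return x->io_dir == IO_WRITE ? DMA_FROM_DEV : DMA_TO_DEV;
}

/* free path guards against a command that never had a data phase */
static void xfer_free_pages(struct xfer *x,
                            void (*unmap)(void *sg, int cnt, enum dma_dir dir))
{
    if (!x->data_sg || !x->data_sg_cnt)
        return;
    unmap(x->data_sg, x->data_sg_cnt, xfer_dma_dir(x));
    x->data_sg = NULL;
    x->data_sg_cnt = 0;
}
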
2124 struct nvmet_fc_fcp_iod *fod)
2126 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
2127 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2133 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
2134 xfr_length = fod->req.transfer_len;
2136 xfr_length = fod->offset;
2157 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2158 if (!(rspcnt % fod->queue->ersp_ratio) ||
2160 xfr_length != fod->req.transfer_len ||
2163 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2167 fod->fcpreq->rspaddr = ersp;
2168 fod->fcpreq->rspdma = fod->rspdma;
2172 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
2175 rsn = atomic_inc_return(&fod->queue->rsn);
2178 fod->fcpreq->rsplen = sizeof(*ersp);
2181 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2182 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
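
nvmet_fc_prep_fcp_rsp() chooses between the cheap zero-length FCP response and a full extended response (ERSP): a full ERSP goes out every ersp_ratio-th completion, on any abnormal status, when the transferred length differs from what the command asked for, or when the submission queue is close to full (lines 2157-2178). A hedged sketch of the decision predicate only, with the inputs flattened into one struct and some of the driver's extra conditions (fused commands, fabrics opcodes) omitted:

#include <stdbool.h>
#include <stdint.h>

struct rsp_ctx {
    uint32_t rspcnt;              /* running count of responses on this queue */
    uint32_t ersp_ratio;          /* send a full ERSP every Nth completion; assumed non-zero */
    uint32_t xfr_length;          /* bytes actually transferred */
    uint32_t transfer_len;        /* bytes the command asked for */
    uint16_t cqe_status;          /* non-zero means error completion */
    bool queue_90percent_full;    /* sq_head is close to catching the tail */
};

/* true when the full extended response must be sent instead of rsp-only */
static bool need_full_ersp(const struct rsp_ctx *c)
{
    return !(c->rspcnt % c->ersp_ratio) ||
           c->cqe_status ||
           c->xfr_length != c->transfer_len ||
           c->queue_90percent_full;
}
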
2189 struct nvmet_fc_fcp_iod *fod)
2191 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2194 nvmet_fc_free_tgt_pgs(fod);
2201 if (!fod->aborted)
2204 nvmet_fc_free_fcp_iod(fod->queue, fod);
2209 struct nvmet_fc_fcp_iod *fod)
2213 fod->fcpreq->op = NVMET_FCOP_RSP;
2214 fod->fcpreq->timeout = 0;
2216 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2218 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2220 nvmet_fc_abort_op(tgtport, fod);
2225 struct nvmet_fc_fcp_iod *fod, u8 op)
2227 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2228 struct scatterlist *sg = fod->next_sg;
2230 u32 remaininglen = fod->req.transfer_len - fod->offset;
2235 fcpreq->offset = fod->offset;
2262 fod->next_sg = sg;
2264 fod->next_sg = NULL;
2276 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
2279 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2282 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2289 fod->abort = true;
2292 spin_lock_irqsave(&fod->flock, flags);
2293 fod->writedataactive = false;
2294 spin_unlock_irqrestore(&fod->flock, flags);
2295 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2299 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
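
nvmet_fc_transfer_fcp_data() builds one LLDD data operation per call: it starts at fod->next_sg, caps the operation at the LLDD's segment limit, records the operation's offset, and remembers where the next chunk begins (lines 2228-2264); a READ operation that covers the final bytes may carry the response with it (lines 2276-2279), and the offset itself only advances in the completion handler. A simplified sketch of the chunking arithmetic, assuming a plain byte limit instead of a real scatterlist:

#include <stdint.h>

struct data_xfer {
    uint32_t transfer_len;     /* total bytes for the command */
    uint32_t offset;           /* bytes already completed */
    uint32_t max_op_bytes;     /* per-operation limit derived from the LLDD */
};

/* length of the next data operation; does not advance the offset */
static uint32_t next_op_len(const struct data_xfer *x)
{
    uint32_t remaining = x->transfer_len - x->offset;

    return remaining < x->max_op_bytes ? remaining : x->max_op_bytes;
}

/* a READ op ending exactly at transfer_len may piggy-back the response */
static int op_carries_rsp(const struct data_xfer *x, uint32_t op_len)
{
    return x->offset + op_len == x->transfer_len;
}
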
2305 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2307 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2308 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2313 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2317 nvmet_fc_abort_op(tgtport, fod);
2328 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
2330 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2331 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2335 spin_lock_irqsave(&fod->flock, flags);
2336 abort = fod->abort;
2337 fod->writedataactive = false;
2338 spin_unlock_irqrestore(&fod->flock, flags);
2343 if (__nvmet_fc_fod_op_abort(fod, abort))
2347 spin_lock_irqsave(&fod->flock, flags);
2348 fod->abort = true;
2349 spin_unlock_irqrestore(&fod->flock, flags);
2351 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2355 fod->offset += fcpreq->transferred_length;
2356 if (fod->offset != fod->req.transfer_len) {
2357 spin_lock_irqsave(&fod->flock, flags);
2358 fod->writedataactive = true;
2359 spin_unlock_irqrestore(&fod->flock, flags);
2362 nvmet_fc_transfer_fcp_data(tgtport, fod,
2368 fod->req.execute(&fod->req);
2373 if (__nvmet_fc_fod_op_abort(fod, abort))
2377 nvmet_fc_abort_op(tgtport, fod);
2385 nvmet_fc_free_tgt_pgs(fod);
2386 nvmet_fc_free_fcp_iod(fod->queue, fod);
2390 fod->offset += fcpreq->transferred_length;
2391 if (fod->offset != fod->req.transfer_len) {
2393 nvmet_fc_transfer_fcp_data(tgtport, fod,
2401 nvmet_fc_free_tgt_pgs(fod);
2403 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2408 if (__nvmet_fc_fod_op_abort(fod, abort))
2410 nvmet_fc_free_fcp_iod(fod->queue, fod);
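
nvmet_fc_fod_op_done() is the per-operation completion handler: after the abort check, a finished WRITEDATA operation either queues the next chunk or, once offset reaches transfer_len, executes the NVMe request; a finished READDATA operation either continues the data phase or frees the pages and transmits the response; READDATA_RSP and RSP completions just release the fod (lines 2335-2410). A compact sketch of that continuation decision, with hypothetical enums standing in for the fcpreq op codes:

#include <stdbool.h>
#include <stdint.h>

enum op { OP_WRITEDATA, OP_READDATA, OP_READDATA_RSP, OP_RSP };
enum next_step { STEP_ABORT, STEP_MORE_DATA, STEP_EXECUTE, STEP_SEND_RSP, STEP_DONE };

struct op_state {
    enum op op;
    bool abort;
    uint32_t offset;          /* already includes the operation that just completed */
    uint32_t transfer_len;
};

/* decide what follows a completed data or response operation */
static enum next_step op_done(const struct op_state *s)
{
    if (s->abort)
        return STEP_ABORT;

    switch (s->op) {
    case OP_WRITEDATA:
        /* more host data to pull, or the command is ready to execute */
        return s->offset != s->transfer_len ? STEP_MORE_DATA : STEP_EXECUTE;
    case OP_READDATA:
        /* more data to push, or time to send the response frame */
        return s->offset != s->transfer_len ? STEP_MORE_DATA : STEP_SEND_RSP;
    case OP_READDATA_RSP:
    case OP_RSP:
    default:
        /* response already went out; just release the descriptor */
        return STEP_DONE;
    }
}
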
2421 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2423 nvmet_fc_fod_op_done(fod);
2431 struct nvmet_fc_fcp_iod *fod, int status)
2433 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2434 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2438 spin_lock_irqsave(&fod->flock, flags);
2439 abort = fod->abort;
2440 spin_unlock_irqrestore(&fod->flock, flags);
2444 fod->queue->sqhd = cqe->sq_head;
2447 nvmet_fc_abort_op(tgtport, fod);
2455 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2456 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2466 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2468 nvmet_fc_transfer_fcp_data(tgtport, fod,
2477 nvmet_fc_free_tgt_pgs(fod);
2479 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2486 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2487 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2489 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2498 struct nvmet_fc_fcp_iod *fod)
2500 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2513 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2516 fod->io_dir = NVMET_FCP_WRITE;
2520 fod->io_dir = NVMET_FCP_READ;
2524 fod->io_dir = NVMET_FCP_NODATA;
2529 fod->req.cmd = &fod->cmdiubuf.sqe;
2530 fod->req.cqe = &fod->rspiubuf.cqe;
2532 fod->req.port = tgtport->pe->port;
2535 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2537 fod->data_sg = NULL;
2538 fod->data_sg_cnt = 0;
2540 ret = nvmet_req_init(&fod->req,
2541 &fod->queue->nvme_cq,
2542 &fod->queue->nvme_sq,
2550 fod->req.transfer_len = xfrlen;
2553 atomic_inc(&fod->queue->sqtail);
2555 if (fod->req.transfer_len) {
2556 ret = nvmet_fc_alloc_tgt_pgs(fod);
2558 nvmet_req_complete(&fod->req, ret);
2562 fod->req.sg = fod->data_sg;
2563 fod->req.sg_cnt = fod->data_sg_cnt;
2564 fod->offset = 0;
2566 if (fod->io_dir == NVMET_FCP_WRITE) {
2568 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2578 fod->req.execute(&fod->req);
2582 nvmet_fc_abort_op(tgtport, fod);
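
nvmet_fc_handle_fcp_rqst() classifies the command IU into WRITE, READ, or NODATA, initializes the nvmet request, allocates and maps data pages when there is a transfer, and then either starts pulling write data from the host or executes the command immediately (lines 2513-2582). A hedged outline of that dispatch, with invented helper callbacks rather than the driver's functions:

#include <stdint.h>

enum io_dir { IO_WRITE, IO_READ, IO_NODATA };

struct fcp_cmd {
    enum io_dir io_dir;
    uint32_t xfrlen;
};

/* hypothetical callbacks standing in for the driver's helpers */
struct fcp_ops {
    int  (*alloc_pages)(struct fcp_cmd *c);       /* like nvmet_fc_alloc_tgt_pgs() */
    void (*start_write_xfer)(struct fcp_cmd *c);  /* pull data before executing    */
    void (*execute)(struct fcp_cmd *c);           /* run the NVMe command          */
    void (*complete_err)(struct fcp_cmd *c);      /* fail the request              */
};

static void handle_fcp_cmd(struct fcp_cmd *c, const struct fcp_ops *ops)
{
    if (c->xfrlen) {
        if (ops->alloc_pages(c)) {
            ops->complete_err(c);
            return;
        }
        if (c->io_dir == IO_WRITE) {
            /* data must arrive before the command can run */
            ops->start_write_xfer(c);
            return;
        }
    }
    /* READ and NODATA execute right away; READ data moves on completion */
    ops->execute(c);
}
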
2640 struct nvmet_fc_fcp_iod *fod;
2658 * After successful fod allocation, the fod will inherit the
2660 * when the fod is freed.
2665 fod = nvmet_fc_alloc_fcp_iod(queue);
2666 if (fod) {
2669 fcpreq->nvmet_fc_private = fod;
2670 fod->fcpreq = fcpreq;
2672 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2709 /* defer processing till a fod becomes available */
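
nvmet_fc_rcv_fcp_req() tries to take a free fod for the incoming command; on success the command IU is copied into the descriptor and processing is queued, otherwise the request is parked until nvmet_fc_free_fcp_iod() releases one (lines 2665-2709). A small sketch of that admit-or-defer decision; the list handling is simplified (the driver queues deferred requests at the tail) and the names are illustrative:

#include <stddef.h>
#include <string.h>

struct iod {
    struct iod *next;
    unsigned char cmdbuf[64];
};

struct pending_req {
    struct pending_req *next;
    const void *cmdiu;
    size_t cmdiu_len;
};

struct queue {
    struct iod *free_list;
    struct pending_req *deferred;    /* waiting for a descriptor to free up */
};

/* returns the iod to process now, or NULL if the request was deferred */
static struct iod *admit_or_defer(struct queue *q, struct pending_req *req)
{
    struct iod *iod = q->free_list;

    if (iod) {
        q->free_list = iod->next;
        memcpy(iod->cmdbuf, req->cmdiu,
               req->cmdiu_len < sizeof(iod->cmdbuf) ?
               req->cmdiu_len : sizeof(iod->cmdbuf));
        return iod;
    }
    /* no descriptor available: park the request until one is freed */
    req->next = q->deferred;
    q->deferred = req;
    return NULL;
}
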
2747 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2751 if (!fod || fod->fcpreq != fcpreq)
2755 queue = fod->queue;
2758 if (fod->active) {
2764 spin_lock(&fod->flock);
2765 fod->abort = true;
2766 fod->aborted = true;
2767 spin_unlock(&fod->flock);
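
nvmet_fc_rcv_fcp_abort() first checks that the LLDD's private pointer still refers to a live fod bound to this very fcpreq before flagging the exchange as aborting under the per-fod lock (lines 2747-2767). A tiny sketch of that validate-then-flag step, with a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdbool.h>

struct fcp_req;    /* opaque LLDD request, playing the role of nvmefc_tgt_fcp_req */

struct iod {
    pthread_mutex_t flock;
    struct fcp_req *fcpreq;
    bool active;
    bool abort;
    bool aborted;
};

/* returns false if the abort raced with completion and there is nothing to do */
static bool abort_exchange(struct iod *fod, struct fcp_req *fcpreq)
{
    /* the descriptor may already have been recycled for another exchange */
    if (!fod || fod->fcpreq != fcpreq || !fod->active)
        return false;

    pthread_mutex_lock(&fod->flock);
    fod->abort = true;
    fod->aborted = true;
    pthread_mutex_unlock(&fod->flock);
    return true;
}
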