Lines Matching refs:req

68 struct nvmet_req req;
195 return nvme_is_write(rsp->req.cmd) &&
196 rsp->req.transfer_len &&
202 return !nvme_is_write(rsp->req.cmd) &&
203 rsp->req.transfer_len &&
204 !rsp->req.cqe->status &&
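
The matches at 195-204 come from the two data-direction predicates in this file (which appears to be drivers/nvme/target/rdma.c): a write command with a non-zero transfer length needs an RDMA READ to pull data from the host, and a read that completed without error needs an RDMA WRITE to push data back. A sketch of how they read in context; the function names and the NVMET_RDMA_REQ_INLINE_DATA flag are taken from the upstream file, not from the matches above:

	static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
	{
		return nvme_is_write(rsp->req.cmd) &&
			rsp->req.transfer_len &&
			!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
	}

	static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
	{
		return !nvme_is_write(rsp->req.cmd) &&
			rsp->req.transfer_len &&
			!rsp->req.cqe->status &&
			!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
	}
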
408 r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
409 if (!r->req.cqe)
412 r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
413 sizeof(*r->req.cqe), DMA_TO_DEVICE);
418 r->req.p2p_client = &ndev->device->dev;
419 r->send_sge.length = sizeof(*r->req.cqe);
437 kfree(r->req.cqe);
446 sizeof(*r->req.cqe), DMA_TO_DEVICE);
447 kfree(r->req.cqe);
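
408-447 trace the lifetime of the per-response NVMe CQE: it is allocated, DMA-mapped so it can be posted as the send SGE of the response, and unmapped/freed on teardown (and freed on the mapping-error path). Roughly, with placeholder error labels:

	/* allocation side */
	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
	if (!r->req.cqe)
		goto out;		/* placeholder label */

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_cqe;	/* placeholder label: kfree(r->req.cqe) */

	r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);

	/* teardown side */
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
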
594 static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
597 struct nvme_command *cmd = req->cmd;
599 u8 pi_type = req->ns->pi_type;
602 bi = bdev_get_integrity(req->ns->bdev);
615 req->transfer_len += req->metadata_len;
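
594-615 belong to the T10-PI setup helper: the signature attributes handed to the RDMA layer are derived from the namespace's PI type and the block-integrity profile of the backing device, and when PRACT is set the HCA generates/verifies the PI, so the PRACT bit is cleared and the metadata length is folded into the transfer length. A trimmed sketch; the memset and the signature-domain programming are assumed from the upstream file and elided here:

	static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
					     struct ib_sig_attrs *sig_attrs)
	{
		struct nvme_command *cmd = req->cmd;
		u16 control = le16_to_cpu(cmd->rw.control);
		u8 pi_type = req->ns->pi_type;
		struct blk_integrity *bi;

		bi = bdev_get_integrity(req->ns->bdev);

		memset(sig_attrs, 0, sizeof(*sig_attrs));

		if (control & NVME_RW_PRINFO_PRACT) {
			/* ... program the memory signature domain only ... */
			/* clear PRACT: the HCA will generate/verify the PI */
			control &= ~NVME_RW_PRINFO_PRACT;
			cmd->rw.control = cpu_to_le16(control);
			/* PI is added by the HW */
			req->transfer_len += req->metadata_len;
		} else {
			/* ... program both wire and memory signature domains ... */
		}
	}
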
636 struct nvmet_req *req = &rsp->req;
639 if (req->metadata_len)
641 cm_id->port_num, req->sg, req->sg_cnt,
642 req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
643 addr, key, nvmet_data_dir(req));
646 req->sg, req->sg_cnt, 0, addr, key,
647 nvmet_data_dir(req));
655 struct nvmet_req *req = &rsp->req;
657 if (req->metadata_len)
659 cm_id->port_num, req->sg, req->sg_cnt,
660 req->metadata_sg, req->metadata_sg_cnt,
661 nvmet_data_dir(req));
664 req->sg, req->sg_cnt, nvmet_data_dir(req));
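
636-664 are the symmetric pair that builds and tears down the rdma_rw context for a request: when the request carries a metadata scatterlist (PI), the signature variants are used so the HCA can offload PI checking, otherwise the plain helpers are used. Reconstructed around the matches (local variable names inferred):

	static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr,
					  u32 key, struct ib_sig_attrs *sig_attrs)
	{
		struct rdma_cm_id *cm_id = rsp->queue->cm_id;
		struct nvmet_req *req = &rsp->req;
		int ret;

		if (req->metadata_len)
			ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
				cm_id->port_num, req->sg, req->sg_cnt,
				req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
				addr, key, nvmet_data_dir(req));
		else
			ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
					       req->sg, req->sg_cnt, 0, addr, key,
					       nvmet_data_dir(req));

		return ret;
	}

	static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
	{
		struct rdma_cm_id *cm_id = rsp->queue->cm_id;
		struct nvmet_req *req = &rsp->req;

		if (req->metadata_len)
			rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
				cm_id->port_num, req->sg, req->sg_cnt,
				req->metadata_sg, req->metadata_sg_cnt,
				nvmet_data_dir(req));
		else
			rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
					    req->sg, req->sg_cnt, nvmet_data_dir(req));
	}
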
676 if (rsp->req.sg != rsp->cmd->inline_sg)
677 nvmet_req_free_sgls(&rsp->req);
715 static void nvmet_rdma_queue_response(struct nvmet_req *req)
718 container_of(req, struct nvmet_rdma_rsp, req);
730 if (rsp->req.metadata_len)
765 nvmet_req_uninit(&rsp->req);
775 if (rsp->req.metadata_len)
780 nvmet_req_complete(&rsp->req, status);
782 rsp->req.execute(&rsp->req);
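
765-782 sit in the RDMA READ completion handler: a transport error uninitializes and drops the request; otherwise the PI status is checked when metadata was transferred, the rw context is destroyed, and the command is either completed with the PI error or finally executed. Condensed; nvmet_rdma_check_pi_status() and the rw.reg->mr access are taken from the upstream file:

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_rdma_release_rsp(rsp);
		/* ... log non-flush errors and fail the queue ... */
		return;
	}

	if (rsp->req.metadata_len)
		status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(status))
		nvmet_req_complete(&rsp->req, status);
	else
		rsp->req.execute(&rsp->req);
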
802 nvmet_req_uninit(&rsp->req);
819 rsp->req.cqe->status = cpu_to_le16(status << 1);
848 rsp->req.sg = rsp->cmd->inline_sg;
849 rsp->req.sg_cnt = sg_count;
854 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
858 if (!nvme_is_write(rsp->req.cmd)) {
859 rsp->req.error_loc =
875 rsp->req.transfer_len += len;
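
854-875 map an inline (in-capsule) data SGL: only host writes may carry inline data, the offset and length must fit inside the negotiated inline data area, and on success the pre-posted inline scatterlist (attached by the helper around 848-849) is used and the length added to the transfer length. Sketch; the inline_data_size bound check and the status codes are taken from the upstream file:

	static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
	{
		struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
		u64 off = le64_to_cpu(sgl->addr);
		u32 len = le32_to_cpu(sgl->length);

		if (!nvme_is_write(rsp->req.cmd)) {
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, opcode);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}

		if (off + len > rsp->queue->dev->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;

		/* no data command? */
		if (!len)
			return 0;

		nvmet_rdma_use_inline_sg(rsp, len, off);
		rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
		rsp->req.transfer_len += len;
		return 0;
	}
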
887 rsp->req.transfer_len = get_unaligned_le24(sgl->length);
890 if (!rsp->req.transfer_len)
893 if (rsp->req.metadata_len)
894 nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
896 ret = nvmet_req_alloc_sgls(&rsp->req);
913 rsp->req.transfer_len = 0;
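
887-913 map a keyed (remote) SGL: the transfer length comes from the 24-bit keyed-descriptor length, signature attributes are prepared when metadata is expected, the data/metadata scatterlists are allocated, and the rw context is initialized against the host address and rkey; on any failure the transfer length is reset before returning an error. Sketch (the invalidate handling and n_rdma accounting are inferred from the upstream file):

	static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
			struct nvme_keyed_sgl_desc *sgl, bool invalidate)
	{
		u64 addr = le64_to_cpu(sgl->addr);
		u32 key = get_unaligned_le32(sgl->key);
		struct ib_sig_attrs sig_attrs;
		int ret;

		rsp->req.transfer_len = get_unaligned_le24(sgl->length);

		/* no data command? */
		if (!rsp->req.transfer_len)
			return 0;

		if (rsp->req.metadata_len)
			nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);

		ret = nvmet_req_alloc_sgls(&rsp->req);
		if (unlikely(ret < 0))
			goto error_out;

		ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
		if (unlikely(ret < 0))
			goto error_out;
		rsp->n_rdma += ret;

		if (invalidate) {
			rsp->invalidate_rkey = key;
			rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
		}

		return 0;

	error_out:
		rsp->req.transfer_len = 0;
		return NVME_SC_INTERNAL;
	}
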
919 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
928 rsp->req.error_loc =
940 rsp->req.error_loc =
946 rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
967 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
969 rsp->req.execute(&rsp->req);
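
967-969 are the execute step after SGL mapping: a command that needs host data first posts the RDMA READ through the rw context and only calls ->execute() from the read-completion handler above, while everything else executes immediately. Roughly (the rdma_rw_ctx_post() call and the read_cqe name come from the upstream file):

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}
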
987 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
1004 nvmet_req_complete(&cmd->req, status);
1044 rsp->req.cmd = cmd->nvme_cmd;
1045 rsp->req.port = queue->port;
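
987-1045 are the receive path: the freshly received capsule is wired into the nvmet request (cmd and port), nvmet_req_init() binds it to the core with the RDMA fabrics ops, and a SGL-mapping failure is reported back through nvmet_req_complete(). A condensed view of how these matches connect (the map_sgl/execute calls in between are from the upstream file; note that the command handler names its rsp parameter "cmd"):

	/* recv completion: attach the capsule to the nvmet request */
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;

	/* command handling */
	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		/* no send WRs available: park on the queue's wait list */
	}
	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
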
1378 struct nvme_rdma_cm_req *req;
1380 req = (struct nvme_rdma_cm_req *)conn->private_data;
1381 if (!req || conn->private_data_len == 0)
1384 if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
1387 queue->host_qid = le16_to_cpu(req->qid);
1390 * req->hsqsize corresponds to our recv queue size plus 1
1391 * req->hrqsize corresponds to our send queue size
1393 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
1394 queue->send_queue_size = le16_to_cpu(req->hrqsize);
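
1378-1394 parse the private data of the RDMA CM connect request: the record format must be 1.0, the host-supplied queue ID is recorded, and the queue depths follow the comment quoted above, the recv queue is sized hsqsize + 1 while the send queue matches hrqsize. In context (the function name and the admin-queue depth check are taken from the upstream file):

	static int nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
			struct nvmet_rdma_queue *queue)
	{
		struct nvme_rdma_cm_req *req;

		req = (struct nvme_rdma_cm_req *)conn->private_data;
		if (!req || conn->private_data_len == 0)
			return NVME_RDMA_CM_INVALID_LEN;

		if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
			return NVME_RDMA_CM_INVALID_RECFMT;

		queue->host_qid = le16_to_cpu(req->qid);

		/*
		 * req->hsqsize corresponds to our recv queue size plus 1
		 * req->hrqsize corresponds to our send queue size
		 */
		queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
		queue->send_queue_size = le16_to_cpu(req->hrqsize);

		if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
			return NVME_RDMA_CM_INVALID_HSQSIZE;

		return 0;
	}
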
1978 static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
1986 container_of(req, struct nvmet_rdma_rsp, req);