Lines Matching refs:req
69 struct nvmet_req req;
196 return nvme_is_write(rsp->req.cmd) &&
197 rsp->req.transfer_len &&
203 return !nvme_is_write(rsp->req.cmd) &&
204 rsp->req.transfer_len &&
205 !rsp->req.cqe->status &&
409 r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
410 if (!r->req.cqe)
413 r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
414 sizeof(*r->req.cqe), DMA_TO_DEVICE);
419 r->req.p2p_client = &ndev->device->dev;
420 r->send_sge.length = sizeof(*r->req.cqe);
438 kfree(r->req.cqe);
447 sizeof(*r->req.cqe), DMA_TO_DEVICE);
448 kfree(r->req.cqe);
595 static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
598 struct nvme_command *cmd = req->cmd;
600 u8 pi_type = req->ns->pi_type;
603 bi = bdev_get_integrity(req->ns->bdev);
616 req->transfer_len += req->metadata_len;
637 struct nvmet_req *req = &rsp->req;
640 if (req->metadata_len)
642 cm_id->port_num, req->sg, req->sg_cnt,
643 req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
644 addr, key, nvmet_data_dir(req));
647 req->sg, req->sg_cnt, 0, addr, key,
648 nvmet_data_dir(req));
656 struct nvmet_req *req = &rsp->req;
658 if (req->metadata_len)
660 cm_id->port_num, req->sg, req->sg_cnt,
661 req->metadata_sg, req->metadata_sg_cnt,
662 nvmet_data_dir(req));
665 req->sg, req->sg_cnt, nvmet_data_dir(req));
677 if (rsp->req.sg != rsp->cmd->inline_sg)
678 nvmet_req_free_sgls(&rsp->req);
716 static void nvmet_rdma_queue_response(struct nvmet_req *req)
719 container_of(req, struct nvmet_rdma_rsp, req);
731 if (rsp->req.metadata_len)
766 nvmet_req_uninit(&rsp->req);
776 if (rsp->req.metadata_len)
781 nvmet_req_complete(&rsp->req, status);
783 rsp->req.execute(&rsp->req);
803 nvmet_req_uninit(&rsp->req);
820 rsp->req.cqe->status = cpu_to_le16(status << 1);
849 rsp->req.sg = rsp->cmd->inline_sg;
850 rsp->req.sg_cnt = sg_count;
855 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
859 if (!nvme_is_write(rsp->req.cmd)) {
860 rsp->req.error_loc =
876 rsp->req.transfer_len += len;
888 rsp->req.transfer_len = get_unaligned_le24(sgl->length);
891 if (!rsp->req.transfer_len)
894 if (rsp->req.metadata_len)
895 nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
897 ret = nvmet_req_alloc_sgls(&rsp->req);
914 rsp->req.transfer_len = 0;
920 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
929 rsp->req.error_loc =
941 rsp->req.error_loc =
947 rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
968 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
970 rsp->req.execute(&rsp->req);
988 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
1005 nvmet_req_complete(&cmd->req, status);
1045 rsp->req.cmd = cmd->nvme_cmd;
1046 rsp->req.port = queue->port;
1378 struct nvme_rdma_cm_req *req;
1380 req = (struct nvme_rdma_cm_req *)conn->private_data;
1381 if (!req || conn->private_data_len == 0)
1384 if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
1387 queue->host_qid = le16_to_cpu(req->qid);
1390 * req->hsqsize corresponds to our recv queue size plus 1
1391 * req->hrqsize corresponds to our send queue size
1393 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
1394 queue->send_queue_size = le16_to_cpu(req->hrqsize);
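The comment at source lines 1390-1391 explains how the CM private data sizes the target's queues: hsqsize is a 0's based count of host submission entries, so the target's receive queue needs hsqsize + 1 slots, while hrqsize sizes the send queue directly. Below is a minimal userspace sketch of that parsing, using a simplified stand-in for struct nvme_rdma_cm_req (the real layout is in include/linux/nvme-rdma.h); the field offsets and helper names here are illustrative assumptions, not the kernel's code.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for struct nvme_rdma_cm_req; all fields are
	 * little-endian on the wire. */
	struct cm_req {
		uint16_t recfmt;	/* record format, 0 == NVME_RDMA_CM_FMT_1_0 */
		uint16_t qid;		/* host-assigned queue ID */
		uint16_t hrqsize;	/* host receive queue size */
		uint16_t hsqsize;	/* host send queue size, 0's based */
		uint16_t cntlid;
		uint8_t  rsvd[22];
	};

	static uint16_t le16(const uint8_t *p)
	{
		return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
	}

	/* Mirrors the sizing logic around source line 1393: the target's recv
	 * queue must hold hsqsize + 1 commands, its send queue hrqsize
	 * responses. */
	static int parse_queue_sizes(const uint8_t *priv, size_t len,
				     int *recv_qsize, int *send_qsize)
	{
		if (!priv || len < sizeof(struct cm_req))
			return -1;
		if (le16(priv + offsetof(struct cm_req, recfmt)) != 0)
			return -1;	/* unknown record format */

		*recv_qsize = le16(priv + offsetof(struct cm_req, hsqsize)) + 1;
		*send_qsize = le16(priv + offsetof(struct cm_req, hrqsize));
		return 0;
	}

	int main(void)
	{
		uint8_t priv[32] = { 0 };
		int recv_qsize, send_qsize;

		priv[offsetof(struct cm_req, hsqsize)] = 127;	/* hsqsize = 127 */
		priv[offsetof(struct cm_req, hrqsize)] = 128;	/* hrqsize = 128 */

		if (!parse_queue_sizes(priv, sizeof(priv), &recv_qsize, &send_qsize))
			printf("recv=%d send=%d\n", recv_qsize, send_qsize);
		return 0;
	}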
1978 static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
1986 container_of(req, struct nvmet_rdma_rsp, req);
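Taken together, the matches show the idiom behind most of these references: struct nvmet_rdma_rsp embeds a generic struct nvmet_req (line 69), the core layer completes and executes commands through that embedded member, and the RDMA transport recovers its own response object with container_of (lines 719 and 1986). The following self-contained sketch of that embed-and-recover pattern uses hypothetical simplified types in place of the real nvmet structures.

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified stand-ins for the real nvmet types (hypothetical). */
	struct nvmet_req {
		int transfer_len;
	};

	struct nvmet_rdma_rsp {
		int send_sge_length;
		struct nvmet_req req;	/* embedded generic request, as on line 69 */
	};

	/* container_of as used in the kernel: recover the outer struct from a
	 * pointer to one of its members. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Mirrors nvmet_rdma_queue_response(): the core hands the transport only
	 * the generic request, and container_of gives back the RDMA response. */
	static void queue_response(struct nvmet_req *req)
	{
		struct nvmet_rdma_rsp *rsp =
			container_of(req, struct nvmet_rdma_rsp, req);

		printf("send_sge_length=%d transfer_len=%d\n",
		       rsp->send_sge_length, rsp->req.transfer_len);
	}

	int main(void)
	{
		struct nvmet_rdma_rsp rsp = {
			.send_sge_length = 16,
			.req = { .transfer_len = 4096 },
		};

		queue_response(&rsp.req);
		return 0;
	}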