Lines matching refs: req (identifier cross-reference; the nvme_rdma_* names indicate the Linux NVMe over RDMA host driver, drivers/nvme/host/rdma.c)

63 	struct nvme_request	req;
288 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
290 kfree(req->sqe.data);
298 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
303 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
304 if (!req->sqe.data)
309 req->metadata_sgl = (void *)nvme_req(rq) +
313 req->queue = queue;
314 nvme_req(rq)->cmd = req->sqe.data;
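
The hits above (lines 288-314) cover per-request setup and teardown: exit_request frees the command buffer that init_request allocated, and init_request also places the metadata SGL area behind the data SGL inside the blk-mq PDU. A minimal sketch of that init/exit pairing, assuming only generic blk-mq and slab APIs; "my_rdma_request" is a simplified stand-in, not the real nvme_rdma_request layout.

#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/nvme.h>

/* Simplified request PDU: lives directly behind struct request. */
struct my_rdma_request {
        void *sqe_data;         /* NVMe command buffer, posted via RDMA SEND */
};

static int my_init_request(struct request *rq)
{
        struct my_rdma_request *req = blk_mq_rq_to_pdu(rq);

        /* One nvme_command per request; DMA-mapped later, at submission time. */
        req->sqe_data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
        if (!req->sqe_data)
                return -ENOMEM;
        return 0;
}

static void my_exit_request(struct request *rq)
{
        struct my_rdma_request *req = blk_mq_rq_to_pdu(rq);

        kfree(req->sqe_data);
}
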
1153 static void nvme_rdma_end_request(struct nvme_rdma_request *req)
1155 struct request *rq = blk_mq_rq_from_pdu(req);
1157 if (!refcount_dec_and_test(&req->ref))
1159 if (!nvme_try_complete_req(rq, req->status, req->result))
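
Lines 1153-1159, together with the refcount_set at line 1527, show a two-completion pattern: each request holds one reference for the RDMA send completion and one for the receive of the NVMe response, and only the second drop hands the request back to the block layer. A minimal sketch of that pattern, using the public blk_mq_complete_request() as a stand-in for the driver's nvme_try_complete_req()-based finishing:

#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/refcount.h>

struct my_rdma_request {
        refcount_t ref;                 /* set to 2 at submission */
        __le16 status;                  /* NVMe completion status */
        union nvme_result result;
};

static void my_end_request(struct my_rdma_request *req)
{
        struct request *rq = blk_mq_rq_from_pdu(req);

        /* First completion (send or recv) only drops its reference. */
        if (!refcount_dec_and_test(&req->ref))
                return;

        /* Second completion: status/result are already stashed; finish rq. */
        blk_mq_complete_request(rq);
}
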
1185 struct nvme_rdma_request *req =
1191 nvme_rdma_end_request(req);
1195 struct nvme_rdma_request *req)
1202 .ex.invalidate_rkey = req->mr->rkey,
1205 req->reg_cqe.done = nvme_rdma_inv_rkey_done;
1206 wr.wr_cqe = &req->reg_cqe;
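
When the target does not invalidate the remote key for us (see lines 1702-1716 further down), lines 1195-1206 post a local-invalidate work request against the registered MR, routing its completion through a struct ib_cqe. A sketch of that posting, assuming only standard RDMA verbs:

#include <rdma/ib_verbs.h>

static int my_inv_rkey(struct ib_qp *qp, struct ib_mr *mr,
                       struct ib_cqe *cqe,
                       void (*done)(struct ib_cq *cq, struct ib_wc *wc))
{
        struct ib_send_wr wr = {
                .opcode             = IB_WR_LOCAL_INV,
                .next               = NULL,
                .num_sge            = 0,
                .send_flags         = IB_SEND_SIGNALED,
                .ex.invalidate_rkey = mr->rkey,
        };

        cqe->done = done;       /* e.g. an *_inv_rkey_done() handler */
        wr.wr_cqe = cqe;

        return ib_post_send(qp, &wr, NULL);
}
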
1213 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1216 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1217 req->metadata_sgl->nents, rq_dma_dir(rq));
1218 sg_free_table_chained(&req->metadata_sgl->sg_table,
1222 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1224 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
1230 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1238 if (req->use_sig_mr)
1241 if (req->mr) {
1242 ib_mr_pool_put(queue->qp, pool, req->mr);
1243 req->mr = NULL;
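
Lines 1213-1243 are the teardown mirror of the mapping path: the MR (regular or signature) goes back to its per-QP pool, then the metadata and data scatterlists are DMA-unmapped and their chained SG tables freed. A sketch of that ordering with the state passed in explicitly so it does not depend on the driver's request layout; the first-chunk count mirrors NVME_INLINE_SG_CNT but is an assumption here:

#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

#define MY_INLINE_SG_CNT 2      /* assumed inline first-chunk size */

static void my_unmap_data(struct ib_device *ibdev, struct ib_qp *qp,
                          struct list_head *mr_pool, struct ib_mr **mr,
                          struct sg_table *sgt, int nents,
                          enum dma_data_direction dir)
{
        if (*mr) {
                ib_mr_pool_put(qp, mr_pool, *mr);
                *mr = NULL;
        }

        ib_dma_unmap_sg(ibdev, sgt->sgl, nents, dir);
        sg_free_table_chained(sgt, MY_INLINE_SG_CNT);
}
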
1261 struct nvme_rdma_request *req, struct nvme_command *c,
1265 struct ib_sge *sge = &req->sge[1];
1270 for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
1282 req->num_sge += count;
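
Lines 1261-1282 build the inline path for small writes: each DMA-mapped data segment becomes an extra ib_sge on the SEND (after sge[0], which carries the command), using the PD's local_dma_lkey, and the accumulated length lands in the command's offset-type SGL descriptor. A sketch of the SGE fill loop; returning the total length stands in for the descriptor update:

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static u32 my_fill_inline_sges(struct ib_pd *pd, struct scatterlist *sgl,
                               int count, struct ib_sge *sge)
{
        struct scatterlist *sg;
        u32 len = 0;
        int i;

        for_each_sg(sgl, sg, count, i) {
                sge->addr   = sg_dma_address(sg);
                sge->length = sg_dma_len(sg);
                sge->lkey   = pd->local_dma_lkey;       /* local access only */
                len += sge->length;
                sge++;
        }
        return len;     /* goes into the inline data SGL descriptor */
}
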
1287 struct nvme_rdma_request *req, struct nvme_command *c)
1291 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
1292 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
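
Lines 1287-1292 cover the single-segment case: the command's keyed SGL data-block descriptor takes the bus address and 24-bit length of the one DMA segment directly, with no memory registration. A sketch of that descriptor fill; the rkey parameter is a stand-in (the matching key line is not among the hits, but in practice it is a PD-wide rkey when the device offers one):

#include <asm/unaligned.h>
#include <linux/nvme.h>
#include <linux/scatterlist.h>

static void my_map_single(struct nvme_command *c, struct scatterlist *sg,
                          u32 rkey)
{
        struct nvme_keyed_sgl_desc *desc = &c->common.dptr.ksgl;

        desc->addr = cpu_to_le64(sg_dma_address(sg));
        put_unaligned_le24(sg_dma_len(sg), desc->length);
        put_unaligned_le32(rkey, desc->key);
        desc->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
}
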
1299 struct nvme_rdma_request *req, struct nvme_command *c,
1305 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
1306 if (WARN_ON_ONCE(!req->mr))
1313 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
1316 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
1317 req->mr = NULL;
1323 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1325 req->reg_cqe.done = nvme_rdma_memreg_done;
1326 memset(&req->reg_wr, 0, sizeof(req->reg_wr));
1327 req->reg_wr.wr.opcode = IB_WR_REG_MR;
1328 req->reg_wr.wr.wr_cqe = &req->reg_cqe;
1329 req->reg_wr.wr.num_sge = 0;
1330 req->reg_wr.mr = req->mr;
1331 req->reg_wr.key = req->mr->rkey;
1332 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
1336 sg->addr = cpu_to_le64(req->mr->iova);
1337 put_unaligned_le24(req->mr->length, sg->length);
1338 put_unaligned_le32(req->mr->rkey, sg->key);
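
Lines 1299-1338 are the general fast-registration path: take an MR from the QP's rdma_mrs pool, lay the DMA-mapped scatterlist onto it with ib_map_mr_sg(), refresh the key so a stale remote key cannot match, and build an IB_WR_REG_MR work request that will be chained ahead of the SEND; the command's keyed SGL then carries mr->iova, mr->length and mr->rkey. A sketch under those assumptions (4k page granularity and the access flags are taken as given):

#include <linux/sizes.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

static int my_map_sg_fr(struct ib_qp *qp, struct scatterlist *sgl, int count,
                        struct ib_cqe *reg_cqe, struct ib_reg_wr *reg_wr,
                        struct ib_mr **out_mr)
{
        struct ib_mr *mr;
        int nr;

        mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
        if (WARN_ON_ONCE(!mr))
                return -EAGAIN;

        /* Map the scatterlist onto the MR's page list. */
        nr = ib_map_mr_sg(mr, sgl, count, NULL, SZ_4K);
        if (nr < count) {
                ib_mr_pool_put(qp, &qp->rdma_mrs, mr);
                return nr < 0 ? nr : -EINVAL;
        }

        /* Fresh key for every registration. */
        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

        memset(reg_wr, 0, sizeof(*reg_wr));
        reg_wr->wr.opcode = IB_WR_REG_MR;
        reg_wr->wr.wr_cqe = reg_cqe;    /* e.g. a *_memreg_done() handler */
        reg_wr->wr.num_sge = 0;
        reg_wr->mr = mr;
        reg_wr->key = mr->rkey;
        reg_wr->access = IB_ACCESS_LOCAL_WRITE |
                         IB_ACCESS_REMOTE_READ |
                         IB_ACCESS_REMOTE_WRITE;

        *out_mr = mr;
        /* mr->iova/length/rkey go into the command's keyed SGL descriptor. */
        return 0;
}
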
1403 struct nvme_rdma_request *req, struct nvme_command *c,
1406 struct nvme_rdma_sgl *sgl = &req->data_sgl;
1407 struct ib_reg_wr *wr = &req->reg_wr;
1408 struct request *rq = blk_mq_rq_from_pdu(req);
1414 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
1415 if (WARN_ON_ONCE(!req->mr))
1418 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
1419 req->metadata_sgl->sg_table.sgl, pi_count, NULL,
1425 req->mr->sig_attrs, ns->pi_type);
1426 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
1428 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1430 req->reg_cqe.done = nvme_rdma_sig_done;
1433 wr->wr.wr_cqe = &req->reg_cqe;
1436 wr->mr = req->mr;
1437 wr->key = req->mr->rkey;
1442 sg->addr = cpu_to_le64(req->mr->iova);
1443 put_unaligned_le24(req->mr->length, sg->length);
1444 put_unaligned_le32(req->mr->rkey, sg->key);
1450 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
1451 req->mr = NULL;
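
Lines 1403-1451 extend the same idea to protection information: a signature MR from the QP's sig_mrs pool maps the data and integrity scatterlists in one ib_map_mr_sg_pi() call, so a single rkey covers both, and mr->sig_attrs carries the DIF domains and check mask before an integrity registration WR is chained in. A sketch of the mapping step only; configuring sig_attrs and the NVMe PI fields is driver policy and is left as a comment (ib_map_mr_sg_pi() returning 0 on success is the verbs convention the error check assumes):

#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <rdma/ib_verbs.h>
#include <rdma/mr_pool.h>

static int my_map_sg_pi(struct ib_qp *qp,
                        struct scatterlist *data_sgl, int count,
                        struct scatterlist *meta_sgl, int pi_count,
                        struct ib_mr **out_mr)
{
        struct ib_mr *mr;
        int nr;

        mr = ib_mr_pool_get(qp, &qp->sig_mrs);
        if (WARN_ON_ONCE(!mr))
                return -EAGAIN;

        nr = ib_map_mr_sg_pi(mr, data_sgl, count, NULL,
                             meta_sgl, pi_count, NULL, SZ_4K);
        if (unlikely(nr)) {
                ib_mr_pool_put(qp, &qp->sig_mrs, mr);
                return nr < 0 ? nr : -EINVAL;
        }

        /*
         * Here the real driver fills mr->sig_attrs (T10-DIF domains, check
         * mask) and posts an IB_WR_REG_MR_INTEGRITY registration ahead of
         * the SEND, mirroring the non-PI fast-registration case.
         */
        *out_mr = mr;
        return 0;
}
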
1460 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1463 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
1464 ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
1465 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
1470 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
1471 req->data_sgl.sg_table.sgl);
1473 *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
1474 req->data_sgl.nents, rq_dma_dir(rq));
1481 req->metadata_sgl->sg_table.sgl =
1482 (struct scatterlist *)(req->metadata_sgl + 1);
1483 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
1485 req->metadata_sgl->sg_table.sgl,
1492 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
1493 rq->bio, req->metadata_sgl->sg_table.sgl);
1495 req->metadata_sgl->sg_table.sgl,
1496 req->metadata_sgl->nents,
1507 sg_free_table_chained(&req->metadata_sgl->sg_table,
1510 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1513 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
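
Before any of the SGL strategies run, lines 1460-1496 gather the request's segments into a chained scatterlist and DMA-map them toward the HCA, with the integrity bio taking the same route through blk_rq_map_integrity_sg() and the matching unwinding at lines 1507-1513. A sketch of the data-side mapping, assuming the caller points sgt->sgl at an inline array (the driver uses the space right after the request PDU) and a first-chunk count standing in for NVME_INLINE_SG_CNT; the metadata path is omitted:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

#define MY_INLINE_SG_CNT 2

static int my_dma_map_data(struct ib_device *ibdev, struct request *rq,
                           struct sg_table *sgt, int *nents, int *count)
{
        int ret;

        ret = sg_alloc_table_chained(sgt, blk_rq_nr_phys_segments(rq),
                                     sgt->sgl, MY_INLINE_SG_CNT);
        if (ret)
                return -ENOMEM;

        /* Collapse the bio segments into the scatterlist, then DMA-map it. */
        *nents = blk_rq_map_sg(rq->q, rq, sgt->sgl);
        *count = ib_dma_map_sg(ibdev, sgt->sgl, *nents, rq_dma_dir(rq));
        if (unlikely(*count <= 0)) {
                sg_free_table_chained(sgt, MY_INLINE_SG_CNT);
                return -EIO;
        }
        return 0;
}
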
1520 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1526 req->num_sge = 1;
1527 refcount_set(&req->ref, 2); /* send and recv completions */
1538 if (req->use_sig_mr) {
1539 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
1548 ret = nvme_rdma_map_sg_inline(queue, req, c, count);
1553 ret = nvme_rdma_map_sg_single(queue, req, c);
1558 ret = nvme_rdma_map_sg_fr(queue, req, c, count);
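
Lines 1520-1558 then pick the strategy per command: num_sge starts at 1 for the command itself, the refcount at 2 for the two completions, and the request goes down the signature-MR, inline, single-descriptor or fast-registration path. The checks below are simplified stand-ins for the driver's actual capability tests (inline data size, global rkey support, write direction), reduced to a pure decision function:

#include <linux/types.h>

enum my_map_strategy {
        MY_MAP_SIG_MR,          /* PI: data + metadata behind one signature MR */
        MY_MAP_INLINE,          /* small write: data rides inside the SEND */
        MY_MAP_SINGLE,          /* one segment: keyed SGL, no registration */
        MY_MAP_FR,              /* general case: fast memory registration */
};

static enum my_map_strategy my_pick_strategy(bool use_sig_mr, bool is_write,
                                             int count, int max_inline_segs,
                                             bool has_global_rkey)
{
        if (use_sig_mr)
                return MY_MAP_SIG_MR;
        if (is_write && count <= max_inline_segs)
                return MY_MAP_INLINE;
        if (count == 1 && has_global_rkey)
                return MY_MAP_SINGLE;
        return MY_MAP_FR;
}
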
1574 struct nvme_rdma_request *req =
1580 nvme_rdma_end_request(req);
1686 struct nvme_rdma_request *req;
1696 req = blk_mq_rq_to_pdu(rq);
1698 req->status = cqe->status;
1699 req->result = cqe->result;
1702 if (unlikely(!req->mr ||
1703 wc->ex.invalidate_rkey != req->mr->rkey)) {
1706 req->mr ? req->mr->rkey : 0);
1709 } else if (req->mr) {
1712 ret = nvme_rdma_inv_rkey(queue, req);
1716 req->mr->rkey, ret);
1723 nvme_rdma_end_request(req);
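
Lines 1686-1723 handle the receive completion: the CQE's status and result are stashed in the request, and if the target used Send with Invalidate the completion must name the currently registered rkey; otherwise the host posts its own local invalidate (lines 1195-1206 above) before the request can finish. A sketch of just that check; the return convention (0 done, 1 local invalidate needed, negative fatal) is an assumption of the sketch:

#include <rdma/ib_verbs.h>

static int my_check_remote_inv(struct ib_wc *wc, struct ib_mr *mr)
{
        if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
                /* Target invalidated for us; it must name our current rkey. */
                if (unlikely(!mr || wc->ex.invalidate_rkey != mr->rkey))
                        return -EPROTO; /* fatal: error-recover the queue */
                return 0;
        }
        /* No remote invalidation: post IB_WR_LOCAL_INV before completing. */
        return 1;
}
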
1936 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1937 struct nvme_rdma_queue *queue = req->queue;
1945 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1946 struct nvme_rdma_queue *queue = req->queue;
1984 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1985 struct nvme_rdma_qe *sqe = &req->sqe;
1999 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
2002 err = ib_dma_mapping_error(dev, req->sqe.dma);
2020 req->use_sig_mr = true;
2022 req->use_sig_mr = false;
2036 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
2037 req->mr ? &req->reg_wr.wr : NULL);
2054 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
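
Lines 1984-2054 are the submission side: the per-request command buffer is DMA-mapped with ib_dma_map_single() each time, and the SEND carrying it is posted behind the registration WR when one was built (lines 2036-2037), with the mapping undone on failure. A sketch of that pattern; signalling policy, DMA syncing and the inline-data SGEs of the real driver are omitted:

#include <linux/nvme.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int my_post_cmd(struct ib_qp *qp, struct ib_device *dev,
                       struct ib_pd *pd, struct nvme_command *cmd,
                       struct ib_cqe *send_cqe, struct ib_send_wr *first,
                       u64 *dma_out)
{
        struct ib_send_wr wr;
        struct ib_sge sge;
        u64 dma;
        int ret;

        dma = ib_dma_map_single(dev, cmd, sizeof(*cmd), DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(dev, dma)))
                return -ENOMEM;

        sge.addr   = dma;
        sge.length = sizeof(*cmd);
        sge.lkey   = pd->local_dma_lkey;

        memset(&wr, 0, sizeof(wr));
        wr.opcode     = IB_WR_SEND;
        wr.wr_cqe     = send_cqe;
        wr.sg_list    = &sge;
        wr.num_sge    = 1;
        wr.send_flags = IB_SEND_SIGNALED;

        if (first)
                first->next = &wr;      /* REG_MR first, then the SEND */
        else
                first = &wr;

        ret = ib_post_send(qp, first, NULL);
        if (unlikely(ret)) {
                ib_dma_unmap_single(dev, dma, sizeof(*cmd), DMA_TO_DEVICE);
                return ret;
        }

        *dma_out = dma;         /* caller unmaps at completion (cf. line 2107) */
        return 0;
}
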
2066 static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
2068 struct request *rq = blk_mq_rq_from_pdu(req);
2072 ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
2099 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2100 struct nvme_rdma_queue *queue = req->queue;
2103 if (req->use_sig_mr)
2104 nvme_rdma_check_pi_status(req);
2107 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
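
Lines 2066-2107 close the loop at completion time: when a signature MR was used, ib_check_mr_status() reports whether the HCA saw a guard, reference-tag or application-tag mismatch before the command buffer is unmapped and the request completed. A sketch of that check; translating the error type into a specific NVMe status (e.g. NVME_SC_GUARD_CHECK) is driver policy and only indicated in the comment:

#include <rdma/ib_verbs.h>

static int my_check_pi_status(struct ib_mr *mr)
{
        struct ib_mr_status mr_status;
        int ret;

        ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
        if (ret)
                return ret;     /* could not query: treat as internal error */

        if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
                /*
                 * mr_status.sig_err.err_type distinguishes guard, reftag and
                 * apptag failures; the real driver maps these onto the
                 * corresponding NVMe protection-check status codes.
                 */
                return -EILSEQ;
        }
        return 0;
}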