Lines matching references to req in the NVMe RDMA host driver

63 	struct nvme_request	req;
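
Taken together, the field references in the matches below imply a per-request context roughly like the following sketch; the field order, the array bound, and the comments are reconstructions from these matches, not verbatim source:

struct nvme_rdma_request {
	struct nvme_request	req;		/* core NVMe request; nvme_req(rq) expects it first */
	struct ib_mr		*mr;		/* registered MR; NULL on the inline/single-SGL paths */
	struct nvme_rdma_qe	sqe;		/* command buffer (sqe.data) and its DMA address (sqe.dma) */
	union nvme_result	result;		/* completion result copied from the CQE */
	__le16			status;		/* completion status copied from the CQE */
	refcount_t		ref;		/* 2 = waiting on both send and recv completions */
	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];	/* bound assumed */
	u32			num_sge;
	struct ib_reg_wr	reg_wr;		/* MR registration work request */
	struct ib_cqe		reg_cqe;	/* completion context for REG_MR / LOCAL_INV WRs */
	struct nvme_rdma_queue	*queue;
	struct nvme_rdma_sgl	data_sgl;
	struct nvme_rdma_sgl	*metadata_sgl;	/* only set when the queue supports PI */
	bool			use_sig_mr;	/* route through the signature (PI) MR path */
};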
291 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
293 kfree(req->sqe.data);
301 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
306 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
307 if (!req->sqe.data)
312 req->metadata_sgl = (void *)nvme_req(rq) +
316 req->queue = queue;
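
The exit/init matches at 291-316 sketch the per-request setup. A minimal reconstruction follows; the queue parameter (derived from the tag set in the driver), the error code, and the NVME_RDMA_DATA_SGL_SIZE macro name are assumptions:

/* Sketch only: the driver derives queue from the tag set / hctx index. */
static int nvme_rdma_init_request_sketch(struct request *rq,
		struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
	if (!req->sqe.data)
		return -ENOMEM;

	/*
	 * The metadata SGL is carved out right behind the request PDU and the
	 * space reserved for the data SGL (macro name assumed).
	 */
	if (queue->pi_support)
		req->metadata_sgl = (void *)nvme_req(rq) +
			sizeof(struct nvme_rdma_request) +
			NVME_RDMA_DATA_SGL_SIZE;

	req->queue = queue;
	return 0;
}

static void nvme_rdma_exit_request_sketch(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	kfree(req->sqe.data);		/* matches the kfree at line 293 */
}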
1240 static void nvme_rdma_end_request(struct nvme_rdma_request *req)
1242 struct request *rq = blk_mq_rq_from_pdu(req);
1244 if (!refcount_dec_and_test(&req->ref))
1246 if (!nvme_try_complete_req(rq, req->status, req->result))
1272 struct nvme_rdma_request *req =
1278 nvme_rdma_end_request(req);
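
Lines 1240-1278 show the two-reference completion model: the send completion and the receive (or local-invalidate) completion each drop one reference, and only the last one completes the block request. A sketch of that helper, with the comments added here:

static void nvme_rdma_end_request(struct nvme_rdma_request *req)
{
	struct request *rq = blk_mq_rq_from_pdu(req);

	/* Both the send and the recv side must get here before completion. */
	if (!refcount_dec_and_test(&req->ref))
		return;
	/* If blk-mq did not take over the completion, finish it right here. */
	if (!nvme_try_complete_req(rq, req->status, req->result))
		nvme_rdma_complete_rq(rq);
}

The caller at 1272-1278 recovers req from the invalidate completion (via its reg_cqe) before calling this helper.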
1282 struct nvme_rdma_request *req)
1289 .ex.invalidate_rkey = req->mr->rkey,
1292 req->reg_cqe.done = nvme_rdma_inv_rkey_done;
1293 wr.wr_cqe = &req->reg_cqe;
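
Lines 1282-1293 build the local-invalidate work request used when the target did not invalidate the rkey remotely. A sketch; the opcode, send flag, and posting call around the two listed assignments are assumptions:

static int nvme_rdma_inv_rkey_sketch(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req)
{
	struct ib_send_wr wr = {
		.opcode			= IB_WR_LOCAL_INV,	/* assumed */
		.num_sge		= 0,
		.send_flags		= IB_SEND_SIGNALED,	/* assumed */
		.ex.invalidate_rkey	= req->mr->rkey,
	};

	req->reg_cqe.done = nvme_rdma_inv_rkey_done;
	wr.wr_cqe = &req->reg_cqe;

	return ib_post_send(queue->qp, &wr, NULL);
}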
1301 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1310 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1311 req->metadata_sgl->nents, rq_dma_dir(rq));
1312 sg_free_table_chained(&req->metadata_sgl->sg_table,
1316 if (req->use_sig_mr)
1319 if (req->mr) {
1320 ib_mr_pool_put(queue->qp, pool, req->mr);
1321 req->mr = NULL;
1324 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1326 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
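
Lines 1301-1326 are the unmap/teardown path: metadata SGL first, then the MR back to its pool (the signature pool when use_sig_mr is set), then the data SGL. A sketch; the integrity check, the early return, the device field path, and the metadata SG count constant are assumptions:

static void nvme_rdma_unmap_data_sketch(struct nvme_rdma_queue *queue,
		struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct ib_device *ibdev = queue->device->dev;	/* field path assumed */
	struct list_head *pool = &queue->qp->rdma_mrs;

	if (!blk_rq_nr_phys_segments(rq))
		return;

	if (blk_integrity_rq(rq)) {
		ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
				req->metadata_sgl->nents, rq_dma_dir(rq));
		sg_free_table_chained(&req->metadata_sgl->sg_table,
				      NVME_INLINE_METADATA_SG_CNT);
	}

	if (req->use_sig_mr)
		pool = &queue->qp->sig_mrs;

	if (req->mr) {
		ib_mr_pool_put(queue->qp, pool, req->mr);
		req->mr = NULL;
	}

	ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
			rq_dma_dir(rq));
	sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
}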
1341 struct nvme_rdma_request *req, struct nvme_command *c,
1345 struct ib_sge *sge = &req->sge[1];
1350 for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
1362 req->num_sge += count;
1367 struct nvme_rdma_request *req, struct nvme_command *c)
1371 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
1372 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
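
Lines 1341-1362 are the inline path (small writes carried in extra SGEs on the send) and 1367-1372 the single keyed-SGL descriptor path. A sketch of the inline case; the descriptor's address/type fields and the lkey source are assumptions here:

static int nvme_rdma_map_sg_inline_sketch(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c,
		int count)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
	struct ib_sge *sge = &req->sge[1];	/* sge[0] carries the command */
	struct scatterlist *sgl;
	u32 len = 0;
	int i;

	/* One ib_sge per mapped data segment, under the local DMA lkey. */
	for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
		sge->addr = sg_dma_address(sgl);
		sge->length = sg_dma_len(sgl);
		sge->lkey = queue->device->pd->local_dma_lkey;
		len += sge->length;
		sge++;
	}

	/* In-capsule (offset-addressed) data block descriptor; the offset
	 * value used by the driver is not visible in the matches. */
	sg->addr = 0;
	sg->length = cpu_to_le32(len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;

	req->num_sge += count;
	return 0;
}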
1379 struct nvme_rdma_request *req, struct nvme_command *c,
1385 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
1386 if (WARN_ON_ONCE(!req->mr))
1393 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
1396 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
1397 req->mr = NULL;
1403 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1405 req->reg_cqe.done = nvme_rdma_memreg_done;
1406 memset(&req->reg_wr, 0, sizeof(req->reg_wr));
1407 req->reg_wr.wr.opcode = IB_WR_REG_MR;
1408 req->reg_wr.wr.wr_cqe = &req->reg_cqe;
1409 req->reg_wr.wr.num_sge = 0;
1410 req->reg_wr.mr = req->mr;
1411 req->reg_wr.key = req->mr->rkey;
1412 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
1416 sg->addr = cpu_to_le64(req->mr->iova);
1417 put_unaligned_le24(req->mr->length, sg->length);
1418 put_unaligned_le32(req->mr->rkey, sg->key);
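
Lines 1379-1418 are the fast-registration path: take an MR from the pool, map the data SG list into it, bump the rkey, build an IB_WR_REG_MR work request (chained in front of the send, see line 2109), and describe the region to the target as a keyed SGL. A sketch; the page size, access flags, and type byte are assumptions:

static int nvme_rdma_map_sg_fr_sketch(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c,
		int count)
{
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
	int nr;

	req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
	if (WARN_ON_ONCE(!req->mr))
		return -EAGAIN;

	/* Page size assumed; the driver aligns to the controller page size. */
	nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
			  SZ_4K);
	if (unlikely(nr < count)) {
		ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
		req->mr = NULL;
		return nr < 0 ? nr : -EINVAL;
	}

	/* Fresh rkey for every registration so a stale key cannot be reused. */
	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));

	req->reg_cqe.done = nvme_rdma_memreg_done;
	memset(&req->reg_wr, 0, sizeof(req->reg_wr));
	req->reg_wr.wr.opcode = IB_WR_REG_MR;
	req->reg_wr.wr.wr_cqe = &req->reg_cqe;
	req->reg_wr.wr.num_sge = 0;
	req->reg_wr.mr = req->mr;
	req->reg_wr.key = req->mr->rkey;
	req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			     IB_ACCESS_REMOTE_READ |
			     IB_ACCESS_REMOTE_WRITE;

	/* Keyed SGL descriptor the target uses for RDMA, with invalidate hint. */
	sg->addr = cpu_to_le64(req->mr->iova);
	put_unaligned_le24(req->mr->length, sg->length);
	put_unaligned_le32(req->mr->rkey, sg->key);
	sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_INVALIDATE;

	return 0;
}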
1483 struct nvme_rdma_request *req, struct nvme_command *c,
1486 struct nvme_rdma_sgl *sgl = &req->data_sgl;
1487 struct ib_reg_wr *wr = &req->reg_wr;
1488 struct request *rq = blk_mq_rq_from_pdu(req);
1494 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
1495 if (WARN_ON_ONCE(!req->mr))
1498 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
1499 req->metadata_sgl->sg_table.sgl, pi_count, NULL,
1505 req->mr->sig_attrs, ns->pi_type);
1506 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
1508 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1510 req->reg_cqe.done = nvme_rdma_sig_done;
1513 wr->wr.wr_cqe = &req->reg_cqe;
1516 wr->mr = req->mr;
1517 wr->key = req->mr->rkey;
1522 sg->addr = cpu_to_le64(req->mr->iova);
1523 put_unaligned_le24(req->mr->length, sg->length);
1524 put_unaligned_le32(req->mr->rkey, sg->key);
1530 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
1531 req->mr = NULL;
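
Lines 1483-1531 are the protection-information variant: a signature MR from the sig_mrs pool is mapped over both the data and metadata SG lists with ib_map_mr_sg_pi(), the signature attributes and protection checks are programmed into it, and the registration uses an integrity WR. A condensed sketch; the integrity-profile setup, WR opcode, access flags, page size, and error codes are assumptions:

static int nvme_rdma_map_sg_pi_sketch(struct nvme_rdma_queue *queue,
		struct nvme_rdma_request *req, struct nvme_command *c,
		int count, int pi_count)
{
	struct nvme_rdma_sgl *sgl = &req->data_sgl;
	struct ib_reg_wr *wr = &req->reg_wr;
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
	int nr;

	req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
	if (WARN_ON_ONCE(!req->mr))
		return -EAGAIN;

	nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
			     req->metadata_sgl->sg_table.sgl, pi_count, NULL,
			     SZ_4K);
	if (unlikely(nr)) {
		ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
		req->mr = NULL;
		return nr < 0 ? nr : -EINVAL;
	}

	/*
	 * nvme_rdma_set_sig_attrs() is called here with the block integrity
	 * profile, c, req->mr->sig_attrs and the namespace pi_type (see
	 * lines 1505-1506); that lookup is elided in this sketch.
	 */
	nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);

	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));

	req->reg_cqe.done = nvme_rdma_sig_done;
	memset(wr, 0, sizeof(*wr));
	wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;	/* assumed */
	wr->wr.wr_cqe = &req->reg_cqe;
	wr->mr = req->mr;
	wr->key = req->mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE;

	sg->addr = cpu_to_le64(req->mr->iova);
	put_unaligned_le24(req->mr->length, sg->length);
	put_unaligned_le32(req->mr->rkey, sg->key);
	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;

	return 0;
}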
1540 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1546 req->num_sge = 1;
1547 refcount_set(&req->ref, 2); /* send and recv completions */
1554 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
1555 ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
1556 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
1561 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
1562 req->data_sgl.sg_table.sgl);
1564 count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
1565 req->data_sgl.nents, rq_dma_dir(rq));
1572 req->metadata_sgl->sg_table.sgl =
1573 (struct scatterlist *)(req->metadata_sgl + 1);
1574 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
1576 req->metadata_sgl->sg_table.sgl,
1583 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
1584 rq->bio, req->metadata_sgl->sg_table.sgl);
1586 req->metadata_sgl->sg_table.sgl,
1587 req->metadata_sgl->nents,
1595 if (req->use_sig_mr) {
1596 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
1605 ret = nvme_rdma_map_sg_inline(queue, req, c, count);
1610 ret = nvme_rdma_map_sg_single(queue, req, c);
1615 ret = nvme_rdma_map_sg_fr(queue, req, c, count);
1624 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1625 req->metadata_sgl->nents, rq_dma_dir(rq));
1628 sg_free_table_chained(&req->metadata_sgl->sg_table,
1631 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1634 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
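
Lines 1540-1634 are the data-mapping entry point that wires the above together: it primes num_sge and the two-completion refcount, builds a chained scatterlist in the space right after the request PDU, DMA-maps it, and then picks one of the mapping strategies. A condensed sketch; the metadata branch is summarized in a comment, the error unwinding at 1624-1634 is omitted, and the inline/single eligibility checks are simplified assumptions:

static int nvme_rdma_map_data_sketch(struct nvme_rdma_queue *queue,
		struct request *rq, struct nvme_command *c)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct ib_device *ibdev = queue->device->dev;	/* field path assumed */
	int pi_count = 0;
	int count, ret;

	req->num_sge = 1;			/* sge[0]: the command itself */
	refcount_set(&req->ref, 2);		/* send and recv completions */

	/* (zero-length commands get a NULL SGL descriptor; elided here) */

	/* The data scatterlist lives directly behind the request PDU. */
	req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
	ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
			blk_rq_nr_phys_segments(rq),
			req->data_sgl.sg_table.sgl, NVME_INLINE_SG_CNT);
	if (ret)
		return -ENOMEM;

	req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
					    req->data_sgl.sg_table.sgl);
	count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
			      req->data_sgl.nents, rq_dma_dir(rq));
	if (unlikely(count <= 0)) {
		sg_free_table_chained(&req->data_sgl.sg_table,
				      NVME_INLINE_SG_CNT);
		return -EIO;
	}

	/*
	 * When blk_integrity_rq(rq), the metadata SGL is allocated and
	 * DMA-mapped the same way (lines 1572-1587), producing pi_count.
	 */

	if (req->use_sig_mr)
		return nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);

	/* Simplified eligibility checks: the driver also looks at the queue
	 * type, inline-data support, and the PD's global rkey. */
	if (count <= queue->device->num_inline_segments &&
	    rq_data_dir(rq) == WRITE)
		return nvme_rdma_map_sg_inline(queue, req, c, count);
	if (count == 1)
		return nvme_rdma_map_sg_single(queue, req, c);

	return nvme_rdma_map_sg_fr(queue, req, c, count);
}

On failure the real function unwinds the DMA mappings and chained tables it created, as the matches at 1624-1634 show.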
1642 struct nvme_rdma_request *req =
1648 nvme_rdma_end_request(req);
1754 struct nvme_rdma_request *req;
1764 req = blk_mq_rq_to_pdu(rq);
1766 req->status = cqe->status;
1767 req->result = cqe->result;
1770 if (unlikely(!req->mr ||
1771 wc->ex.invalidate_rkey != req->mr->rkey)) {
1774 req->mr ? req->mr->rkey : 0);
1777 } else if (req->mr) {
1780 ret = nvme_rdma_inv_rkey(queue, req);
1784 req->mr->rkey, ret);
1791 nvme_rdma_end_request(req);
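
Lines 1754-1791 handle the NVMe completion pulled off the receive queue: copy status and result, then either validate a remote invalidation carried in the work completion, or post a local invalidate and let its completion finish the request. A sketch; the surrounding function signature, the logging, and the error-recovery behavior are assumptions:

static void nvme_rdma_process_cqe_sketch(struct nvme_rdma_queue *queue,
		struct request *rq, struct nvme_completion *cqe,
		struct ib_wc *wc)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

	req->status = cqe->status;
	req->result = cqe->result;

	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		/*
		 * The target invalidated our rkey remotely; it must match the
		 * MR registered for this request, otherwise recover.
		 */
		if (unlikely(!req->mr ||
			     wc->ex.invalidate_rkey != req->mr->rkey)) {
			pr_err("bogus remote invalidation for rkey %#x\n",
			       req->mr ? req->mr->rkey : 0);
			/* the driver starts controller error recovery here */
		}
	} else if (req->mr) {
		int ret;

		/* No remote invalidation: invalidate the rkey locally first. */
		ret = nvme_rdma_inv_rkey(queue, req);
		if (unlikely(ret < 0))
			pr_err("queueing INV WR for rkey %#x failed (%d)\n",
			       req->mr->rkey, ret);
		/* the local invalidation completion will end the request */
		return;
	}

	nvme_rdma_end_request(req);
}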
2004 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2005 struct nvme_rdma_queue *queue = req->queue;
2017 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2018 struct nvme_rdma_queue *queue = req->queue;
2056 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2057 struct nvme_rdma_qe *sqe = &req->sqe;
2071 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
2074 err = ib_dma_mapping_error(dev, req->sqe.dma);
2092 req->use_sig_mr = true;
2094 req->use_sig_mr = false;
2108 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
2109 req->mr ? &req->reg_wr.wr : NULL);
2124 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
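
Lines 2056-2124 belong to the submission path: the command buffer allocated at init time is DMA-mapped for the send, use_sig_mr is decided per command, the data is mapped, and the send is posted with the registration WR chained in front of it whenever an MR is in use. A condensed sketch; queue-readiness checks, DMA syncs, command setup, the device field path, and the exact PI eligibility test are elided or simplified assumptions:

static blk_status_t nvme_rdma_queue_rq_sketch(struct nvme_rdma_queue *queue,
		struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_qe *sqe = &req->sqe;
	struct nvme_command *c = sqe->data;
	struct ib_device *dev = queue->device->dev;	/* field path assumed */
	blk_status_t ret;
	int err;

	/* Map the command buffer for the send WR. */
	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
					 sizeof(struct nvme_command),
					 DMA_TO_DEVICE);
	err = ib_dma_mapping_error(dev, req->sqe.dma);
	if (unlikely(err))
		return BLK_STS_RESOURCE;

	/* (NVMe command setup from rq happens here in the driver) */

	/* Simplified: protected I/O on a PI-capable queue uses the sig MR. */
	if (queue->pi_support && blk_integrity_rq(rq))
		req->use_sig_mr = true;
	else
		req->use_sig_mr = false;

	err = nvme_rdma_map_data(queue, rq, c);
	if (unlikely(err < 0)) {
		ret = (err == -ENOMEM || err == -EAGAIN) ?
			BLK_STS_RESOURCE : BLK_STS_IOERR;
		goto unmap_qe;
	}

	/* Chain the REG_MR WR ahead of the send when an MR was registered. */
	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
				  req->mr ? &req->reg_wr.wr : NULL);
	if (unlikely(err)) {
		nvme_rdma_unmap_data(queue, rq);
		ret = BLK_STS_IOERR;
		goto unmap_qe;
	}

	return BLK_STS_OK;

unmap_qe:
	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
			    DMA_TO_DEVICE);
	return ret;
}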
2136 static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
2138 struct request *rq = blk_mq_rq_from_pdu(req);
2142 ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
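
Lines 2136-2142 check the signature MR after a protected transfer completes. A sketch of how ib_check_mr_status() feeds back into the NVMe status; the specific status-code mapping shown here is an assumption:

static void nvme_rdma_check_pi_status_sketch(struct nvme_rdma_request *req)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		/* Could not query the MR: fail the command with a PI error. */
		nvme_req(rq)->status = NVME_SC_INVALID_PI;
		return;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
			break;
		case IB_SIG_BAD_REFTAG:
			nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
			break;
		case IB_SIG_BAD_APPTAG:
			nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
			break;
		}
	}
}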
2169 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2170 struct nvme_rdma_queue *queue = req->queue;
2173 if (req->use_sig_mr)
2174 nvme_rdma_check_pi_status(req);
2177 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
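
Finally, lines 2169-2177 belong to the completion callback, which checks PI status when the signature MR was used, releases the data mappings, and unmaps the command buffer. A short sketch, with the device field path and the final core-NVMe completion call assumed:

static void nvme_rdma_complete_rq_sketch(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;
	struct ib_device *ibdev = queue->device->dev;	/* field path assumed */

	if (req->use_sig_mr)
		nvme_rdma_check_pi_status(req);

	nvme_rdma_unmap_data(queue, rq);
	ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
			    DMA_TO_DEVICE);
	nvme_complete_rq(rq);	/* core completion; assumed */
}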