Lines Matching defs:ibdev
All entries below are occurrences of the struct ib_device pointer ibdev in the NVMe over Fabrics RDMA host driver (drivers/nvme/host/rdma.c); the leading number on each entry is the line number within that file.
176 static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
179 ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
183 static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
190 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
191 if (ib_dma_mapping_error(ibdev, qe->dma)) {
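A minimal sketch of the capsule-buffer pair these matches come from, assuming the kernel's struct nvme_rdma_qe { struct ib_cqe cqe; void *data; u64 dma; } layout; only the ib_dma_* calls above are verbatim, the allocation bookkeeping is reconstructed:

static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
                size_t capsule_size, enum dma_data_direction dir)
{
        ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
        kfree(qe->data);
}

static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
                size_t capsule_size, enum dma_data_direction dir)
{
        qe->data = kzalloc(capsule_size, GFP_KERNEL);
        if (!qe->data)
                return -ENOMEM;

        qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
        if (ib_dma_mapping_error(ibdev, qe->dma)) {
                /* Mapping failed: drop the buffer so free_qe is never
                 * called on a half-initialized entry. */
                kfree(qe->data);
                qe->data = NULL;
                return -ENOMEM;
        }

        return 0;
}

Checking ib_dma_mapping_error() immediately after ib_dma_map_single() is mandatory; the returned handle is not a plain pointer and cannot be NULL-tested.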
200 static void nvme_rdma_free_ring(struct ib_device *ibdev,
207 nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
211 static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
228 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
235 nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
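The ring helpers iterate the per-entry helpers above; the matched line 235 shows the partial-failure unwind, passing the loop index i so only the entries already mapped get freed. A sketch under those assumptions (variable names outside the matched lines are reconstructed):

static void nvme_rdma_free_ring(struct ib_device *ibdev,
                struct nvme_rdma_qe *ring, size_t ib_queue_size,
                size_t capsule_size, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < ib_queue_size; i++)
                nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
        kfree(ring);
}

static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
                size_t ib_queue_size, size_t capsule_size,
                enum dma_data_direction dir)
{
        struct nvme_rdma_qe *ring;
        int i;

        ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
        if (!ring)
                return NULL;

        for (i = 0; i < ib_queue_size; i++) {
                if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
                        goto out_free_ring;
        }
        return ring;

out_free_ring:
        /* Unwind only the i entries that were successfully mapped. */
        nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
        return NULL;
}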
424 struct ib_device *ibdev;
430 ibdev = dev->dev;
444 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
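Lines 424-444 sit in the queue teardown path. A condensed sketch, assuming this is the driver's IB-resource destructor and that QP/CQ teardown precedes freeing the ring they post receives into; the capsule size and direction mirror a response ring of NVMe completions:

static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
        struct nvme_rdma_device *dev = queue->device;
        struct ib_device *ibdev = dev->dev;

        /* Quiesce the QP and CQ first so nothing can still post to or
         * complete into the response ring (details elided). */
        rdma_destroy_qp(queue->cm_id);
        ib_cq_pool_put(queue->ib_cq, queue->cq_size);

        nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
                        sizeof(struct nvme_completion), DMA_FROM_DEVICE);
}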
450 static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
455 max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
457 max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
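PI-capable (protection information) registrations draw from a separate device attribute, typically smaller than the ordinary fast-registration limit. A sketch; the min_t() clamp against the driver's NVME_RDMA_MAX_SEGMENTS and the -1 are assumptions about the unmatched lines:

static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
{
        u32 max_page_list_len;

        if (pi_support)
                max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
        else
                max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;

        return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
}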
462 static int nvme_rdma_create_cq(struct ib_device *ibdev,
471 comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
475 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
478 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
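CQ creation spreads I/O queues across the device's completion vectors by queue index, and picks one of two allocation paths. The matched lines confirm the vector formula and both calls; the poll-context choice is a reconstruction:

static int nvme_rdma_create_cq(struct ib_device *ibdev,
                struct nvme_rdma_queue *queue)
{
        int comp_vector, idx = nvme_rdma_queue_idx(queue);

        /* idx == 0 (admin) and idx == 1 (first I/O queue) both land on
         * vector 0; higher I/O queues spread across the remaining vectors. */
        comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;

        if (nvme_rdma_poll_queue(queue))
                /* Polling queues need their own directly polled CQ. */
                queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
                                comp_vector, IB_POLL_DIRECT);
        else
                /* Interrupt-driven queues can share a pooled CQ. */
                queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
                                comp_vector, IB_POLL_SOFTIRQ);

        if (IS_ERR(queue->ib_cq))
                return PTR_ERR(queue->ib_cq);
        return 0;
}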
491 struct ib_device *ibdev;
502 ibdev = queue->device->dev;
507 ret = nvme_rdma_create_cq(ibdev, queue);
515 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
527 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
558 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
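Lines 491-558 come from the queue's IB resource constructor. A condensed sketch of the order the matches imply (device, then CQ, then response ring, then MR pool); QP creation and the PI MR pool are elided, helper arguments outside the matched lines are assumptions, and the + 1 on pages_per_mr plausibly reserves an extra entry for a misaligned first page:

static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
{
        struct ib_device *ibdev = queue->device->dev;
        int pages_per_mr, ret;

        ret = nvme_rdma_create_cq(ibdev, queue);
        if (ret)
                return ret;

        /* QP creation elided; queue->qp must exist before the MR pool. */

        queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
                        sizeof(struct nvme_completion), DMA_FROM_DEVICE);
        if (!queue->rsp_ring) {
                ret = -ENOMEM;
                goto out_destroy_cq;
        }

        pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
        ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
                        queue->queue_size, IB_MR_TYPE_MEM_REG,
                        pages_per_mr, 0);
        if (ret)
                goto out_free_ring;

        return 0;

out_free_ring:
        nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
                        sizeof(struct nvme_completion), DMA_FROM_DEVICE);
out_destroy_cq:
        nvme_rdma_destroy_cq(queue);    /* pairs with how the CQ was allocated */
        return ret;
}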
1211 static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
1216 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1222 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1232 struct ib_device *ibdev = dev->dev;
1246 nvme_rdma_dma_unmap_req(ibdev, rq);
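The unmap side (lines 1211-1246): when the request carries integrity data, the PI metadata SGL is unmapped first, then the data SGL; nvme_rdma_unmap_data() wraps this after releasing the registered MR. A sketch, with the integrity guard and chained-table teardown assumed:

static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
{
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

        if (blk_integrity_rq(rq)) {
                ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
                                req->metadata_sgl->nents, rq_dma_dir(rq));
                sg_free_table_chained(&req->metadata_sgl->sg_table,
                                NVME_INLINE_METADATA_SG_CNT);
        }

        ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
                        rq_dma_dir(rq));
        sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
}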
1457 static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
1473 *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
1494 *pi_count = ib_dma_map_sg(ibdev,
1510 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1522 struct ib_device *ibdev = dev->dev;
1534 ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
1566 nvme_rdma_dma_unmap_req(ibdev, rq);
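The matching map side (lines 1457-1510): ib_dma_map_sg() returns the number of mapped entries, so a result <= 0 is a failure. The listing confirms the data mapping, the conditional metadata mapping, and the data-SGL unwind when the metadata mapping fails; SGL table setup and return codes are assumptions:

static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
                int *count, int *pi_count)
{
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        int ret;

        /* Chained sg_table allocation and blk_rq_map_sg() elided. */

        *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
                        req->data_sgl.nents, rq_dma_dir(rq));
        if (unlikely(*count <= 0))
                return -EIO;

        if (blk_integrity_rq(rq)) {
                *pi_count = ib_dma_map_sg(ibdev,
                                req->metadata_sgl->sg_table.sgl,
                                req->metadata_sgl->nents,
                                rq_dma_dir(rq));
                if (unlikely(*pi_count <= 0)) {
                        ret = -EIO;
                        goto out_unmap_sg;
                }
        }

        return 0;

out_unmap_sg:
        ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
                        rq_dma_dir(rq));
        return ret;
}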
1731 struct ib_device *ibdev = queue->device->dev;
1748 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1761 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
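Lines 1731-1761 are the receive-completion handler. Both sync calls use DMA_FROM_DEVICE because the capsule is a pure receive buffer: sync-for-cpu hands it to the CPU before the completion is parsed, sync-for-device hands it back before the buffer is re-posted. A condensed sketch; error paths and helper names beyond the matched lines are assumptions:

static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct nvme_rdma_qe *qe =
                container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
        struct nvme_rdma_queue *queue = wc->qp->qp_context;
        struct ib_device *ibdev = queue->device->dev;
        struct nvme_completion *cqe = qe->data;
        const size_t len = sizeof(struct nvme_completion);

        /* Give the CPU a coherent view of the just-arrived capsule. */
        ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);

        nvme_rdma_process_nvme_rsp(queue, cqe, wc);

        /* Return ownership to the device before re-posting the buffer. */
        ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);

        nvme_rdma_post_recv(queue, qe);         /* error handling elided */
}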
2101 struct ib_device *ibdev = queue->device->dev;
2107 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
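The final match unmaps the command capsule (sqe) when a request completes. The listing truncates the call before its direction argument, which falls on the next source line and is left as-is above; a sketch of the completion path, with the enclosing function and the DMA_TO_DEVICE direction (matching how a host-to-device command capsule would have been mapped) explicitly assumed:

static void nvme_rdma_complete_rq(struct request *rq)
{
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_queue *queue = req->queue;
        struct ib_device *ibdev = queue->device->dev;

        nvme_rdma_unmap_data(queue, rq);
        ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
                        DMA_TO_DEVICE);         /* direction assumed */
        nvme_complete_rq(rq);
}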