/kernel/linux/linux-5.10/drivers/infiniband/core/umem_odp.c

    in ib_init_umem_odp():
      52  static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
      57          umem_odp->umem.is_odp = 1;
      58          mutex_init(&umem_odp->umem_mutex);
      60          if (!umem_odp->is_implicit_odp) {
      61                  size_t page_size = 1UL << umem_odp->page_shift;
      66                  start = ALIGN_DOWN(umem_odp->umem.address, page_size);
      67                  if (check_add_overflow(umem_odp->umem.address,
      68                                         (unsigned long)umem_odp->umem.length,
      75                  ndmas = (end - start) >> umem_odp->page_shift;
      80                  umem_odp ...                                   [match truncated]
    other matches:
     122  struct ib_umem_odp *umem_odp;                                  [local, ib_umem_odp_alloc_implicit()]
     229  struct ib_umem_odp *umem_odp;                                  [local, ib_umem_odp_get()]
     266  ib_umem_odp_release(struct ib_umem_odp *umem_odp)              [argument]
     302  ib_umem_odp_map_dma_single_page(struct ib_umem_odp *umem_odp,
                  unsigned int dma_index, struct page *page,
                  u64 access_mask)                                       [arguments]
     477  ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp,
                  u64 virt, u64 bound)                                   [arguments]
          [further matches omitted]
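The ib_init_umem_odp() hits above are the bounds math for an explicit ODP region: the user address is aligned down to the ODP page size, the end address is computed with an overflow check, and the page count (ndmas) is derived from the aligned span. The sketch below is a minimal userspace model of that arithmetic; it assumes the elided lines round the end address up to page_size and fail with -EOVERFLOW/-EINVAL, and align_down(), align_up(), add_overflows() and odp_num_dma_pages() are illustrative helper names, not kernel APIs.

    /*
     * Userspace sketch of the bounds math in ib_init_umem_odp() above.
     * ALIGN_DOWN()/ALIGN() and check_add_overflow() are replaced by plain
     * C helpers; 'a' must be a power of two, as the ODP page size is.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long align_down(unsigned long x, unsigned long a)
    {
            return x & ~(a - 1);
    }

    static unsigned long align_up(unsigned long x, unsigned long a)
    {
            return (x + a - 1) & ~(a - 1);
    }

    /* Returns true on unsigned wrap-around, like check_add_overflow(). */
    static bool add_overflows(unsigned long a, unsigned long b, unsigned long *sum)
    {
            *sum = a + b;
            return *sum < a;
    }

    static long odp_num_dma_pages(unsigned long address, unsigned long length,
                                  unsigned int page_shift)
    {
            unsigned long page_size = 1UL << page_shift;
            unsigned long start, end;

            start = align_down(address, page_size);
            if (add_overflows(address, length, &end))
                    return -1;              /* -EOVERFLOW in the kernel */
            end = align_up(end, page_size);
            if (end < page_size)            /* rounding up wrapped around */
                    return -1;

            return (long)((end - start) >> page_shift);
    }

    int main(void)
    {
            /* 6000 bytes starting mid-page span two 4 KiB pages: prints 2. */
            printf("%ld\n", odp_num_dma_pages(0x1234, 6000, 12));
            return 0;
    }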
/kernel/linux/linux-5.10/drivers/infiniband/core/Makefile

      44  ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o

/kernel/linux/linux-6.6/drivers/infiniband/core/umem_odp.c

    in ib_init_umem_odp():
      50  static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
      55          umem_odp->umem.is_odp = 1;
      56          mutex_init(&umem_odp->umem_mutex);
      58          if (!umem_odp->is_implicit_odp) {
      59                  size_t page_size = 1UL << umem_odp->page_shift;
      64                  start = ALIGN_DOWN(umem_odp->umem.address, page_size);
      65                  if (check_add_overflow(umem_odp->umem.address,
      66                                         (unsigned long)umem_odp->umem.length,
      73                  ndmas = (end - start) >> umem_odp->page_shift;
      78                  umem_odp ...                                   [match truncated]
    other matches:
     120  struct ib_umem_odp *umem_odp;                                  [local, ib_umem_odp_alloc_implicit()]
     227  struct ib_umem_odp *umem_odp;                                  [local, ib_umem_odp_get()]
     263  ib_umem_odp_release(struct ib_umem_odp *umem_odp)              [argument]
     296  ib_umem_odp_map_dma_single_page(struct ib_umem_odp *umem_odp,
                  unsigned int dma_index, struct page *page,
                  u64 access_mask)                                       [arguments]
     471  ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp,
                  u64 virt, u64 bound)                                   [arguments]
          [further matches omitted]
/kernel/linux/linux-6.6/drivers/infiniband/core/Makefile

      44  ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o

/kernel/linux/linux-5.10/include/rdma/ib_umem_odp.h

      53  static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
      55          return umem_odp->notifier.interval_tree.start;
      59  static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
      61          return umem_odp->notifier.interval_tree.last + 1;
      64  static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
      66          return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
      67                          umem_odp->page_shift;
      94  void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
      96  int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u6...   [match truncated]
     111  ib_umem_odp_release(struct ib_umem_odp *umem_odp)              [argument]
          [further matches omitted]
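The three inline helpers matched above derive everything from the mmu interval notifier embedded in the umem: ib_umem_start() and ib_umem_end() read the interval bounds (the tree stores an inclusive 'last', hence the +1), and ib_umem_odp_num_pages() shifts the span by page_shift. Below is a self-contained model of that arithmetic with the notifier interval reduced to a plain start/last pair; the struct and function names here are illustrative only.

    /*
     * Simplified model of the ib_umem_start()/ib_umem_end()/
     * ib_umem_odp_num_pages() helpers above.  Only the interval-tree
     * start/last fields used by the header are kept.
     */
    #include <stddef.h>
    #include <stdio.h>

    struct odp_model {
            unsigned long start;        /* notifier.interval_tree.start */
            unsigned long last;         /* notifier.interval_tree.last (inclusive) */
            unsigned int page_shift;
    };

    static unsigned long umem_start(const struct odp_model *o)
    {
            return o->start;
    }

    static unsigned long umem_end(const struct odp_model *o)
    {
            return o->last + 1;         /* 'last' is inclusive, end is exclusive */
    }

    static size_t umem_num_pages(const struct odp_model *o)
    {
            return (umem_end(o) - umem_start(o)) >> o->page_shift;
    }

    int main(void)
    {
            /* A 64 KiB region tracked at 4 KiB granularity: prints 16. */
            struct odp_model o = { 0x10000, 0x1ffff, 12 };

            printf("%zu\n", umem_num_pages(&o));
            return 0;
    }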
/kernel/linux/linux-6.6/include/rdma/ib_umem_odp.h

      53  static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
      55          return umem_odp->notifier.interval_tree.start;
      59  static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
      61          return umem_odp->notifier.interval_tree.last + 1;
      64  static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
      66          return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
      67                          umem_odp->page_shift;
      94  void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
      96  int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u6...   [match truncated]
     111  ib_umem_odp_release(struct ib_umem_odp *umem_odp)              [argument]
          [further matches omitted]
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/odp.c

    in mlx5_ib_invalidate_range():
     288          struct ib_umem_odp *umem_odp = ...                     [local, match truncated]
     303          mutex_lock(&umem_odp->umem_mutex);
     306           * If npages is zero then umem_odp->private may not be setup yet. This
     309          if (!umem_odp->npages)
     311          mr = umem_odp->private;
     313          start = max_t(u64, ib_umem_start(umem_odp), range->start);
     314          end = min_t(u64, ib_umem_end(umem_odp), range->end);
     322          for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
     323                  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp...   [match truncated]
    other matches:
     535          struct ib_umem_odp *umem_odp;                          [local, mlx5_ib_alloc_implicit_mr()]
     740          struct ib_umem_odp *umem_odp;                          [local, pagefault_implicit_mr()]
          [further matches omitted]
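The mlx5_ib_invalidate_range() hits show the driver clamping the invalidation range reported by the mmu notifier to the umem's own bounds, then walking it one ODP page at a time and turning each address into an index relative to ib_umem_start(). The standalone sketch below models only that clamp-and-index loop; the per-page MTT/DMA teardown the real driver performs at each idx is out of scope and replaced by a printf, and the struct and function names are illustrative.

    /*
     * Userspace sketch of the range clamp and per-page indexing seen in
     * mlx5_ib_invalidate_range() above.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct odp_region {
            uint64_t start;             /* ib_umem_start() */
            uint64_t end;               /* ib_umem_end(), exclusive */
            unsigned int page_shift;
    };

    static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    static void invalidate_range(const struct odp_region *r,
                                 uint64_t range_start, uint64_t range_end)
    {
            uint64_t page_size = UINT64_C(1) << r->page_shift;
            uint64_t start, end, addr;

            /* Clamp the notifier range to the region, as the driver does. */
            start = max_u64(r->start, range_start);
            end = min_u64(r->end, range_end);

            for (addr = start; addr < end; addr += page_size) {
                    uint64_t idx = (addr - r->start) >> r->page_shift;

                    printf("invalidate page idx %" PRIu64 " (va 0x%" PRIx64 ")\n",
                           idx, addr);
            }
    }

    int main(void)
    {
            struct odp_region r = { 0x100000, 0x110000, 12 };  /* 16 pages */

            /* Notifier range overlaps only the last two pages (idx 14, 15). */
            invalidate_range(&r, 0x10e000, 0x200000);
            return 0;
    }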
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/odp.c

    in mlx5_ib_invalidate_range():
     230          struct ib_umem_odp *umem_odp = ...                     [local, match truncated]
     244          mutex_lock(&umem_odp->umem_mutex);
     247           * If npages is zero then umem_odp->private may not be setup yet. This
     250          if (!umem_odp->npages)
     252          mr = umem_odp->private;
     254          start = max_t(u64, ib_umem_start(umem_odp), range->start);
     255          end = min_t(u64, ib_umem_end(umem_odp), range->end);
     263          for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
     264                  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp...   [match truncated]
    other matches:
     485          struct ib_umem_odp *umem_odp;                          [local, mlx5_ib_alloc_implicit_mr()]
     622          struct ib_umem_odp *umem_odp;                          [local, pagefault_implicit_mr()]
          [further matches omitted]