Lines matching refs:umem in include/rdma/ib_umem.h
/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}
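As a usage illustration (not part of the header): hardware is typically programmed with page-aligned DMA addresses, so a driver still needs the byte offset of the mapping's start within the first page, which is what ib_umem_offset() returns. The my_mr structure and helper below are hypothetical.

#include <rdma/ib_umem.h>

/* Hypothetical driver MR bookkeeping, for illustration only */
struct my_mr {
	struct ib_umem *umem;
	unsigned int page_offset;	/* start of user data within the first page */
};

static void my_mr_init_offset(struct my_mr *mr)
{
	/* HW sees page-aligned DMA addresses; the user data begins
	 * ib_umem_offset() bytes into the first page.
	 */
	mr->page_offset = ib_umem_offset(mr->umem);
}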
static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) / pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
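To make the block count concrete: a umem with iova 0x201000 and length 0x3000, mapped with 2 MiB blocks, spans ALIGN_DOWN(0x201000, 2M) = 0x200000 up to ALIGN(0x204000, 2M) = 0x400000, i.e. a single DMA block. The sketch below shows the typical sizing pattern; my_alloc_page_list() is hypothetical, while ib_umem_num_dma_blocks() and kcalloc() are the real kernel APIs.

#include <linux/slab.h>
#include <rdma/ib_umem.h>

/* Hypothetical: allocate one u64 slot per DMA block of the umem */
static u64 *my_alloc_page_list(struct ib_umem *umem, unsigned long pgsz,
			       size_t *nblocks)
{
	*nblocks = ib_umem_num_dma_blocks(umem, pgsz);
	return kcalloc(*nblocks, sizeof(u64), GFP_KERNEL);
}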
static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem, unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
}
/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 *
 * The returned DMA blocks are aligned to pgsz and cover the range
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)			\
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);		\
	     __rdma_block_iter_next(biter);)
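A minimal sketch of the intended call pattern for the macro, assuming the hardware supports 4 KiB, 64 KiB and 2 MiB block sizes (the bitmap is illustrative) and that the caller provides a large-enough pas[] array. ib_umem_find_best_pgsz() is declared further down in this header and rdma_block_iter_dma_address() comes from rdma/ib_verbs.h; my_fill_page_list() itself is hypothetical.

#include <linux/sizes.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical: fill a HW page-address list, one entry per DMA block */
static int my_fill_page_list(struct ib_umem *umem, u64 virt, u64 *pas,
			     size_t npas)
{
	struct ib_block_iter biter;
	unsigned long pgsz;
	size_t i = 0;

	/* Largest block size the (illustrative) HW supports that fits the umem */
	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_64K | SZ_2M, virt);
	if (!pgsz)
		return -EINVAL;

	rdma_umem_for_each_dma_block(umem, &biter, pgsz) {
		if (i >= npas)
			return -ENOSPC;
		pas[i++] = rdma_block_iter_dma_address(&biter);
	}
	return 0;
}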
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
/* Inline stubs used when CONFIG_INFINIBAND_USER_MEM is disabled */
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length) { return -EINVAL; }
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt) { return 0; }
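Putting the declared entry points together: the sketch below pins a user buffer, picks a DMA block size, counts the blocks and drops the pin. ib_umem_get() is not among the matched lines above; its (device, addr, size, access) form is assumed here to match the same version of the header, and my_count_dma_blocks() is hypothetical. With CONFIG_INFINIBAND_USER_MEM disabled, the stubs above make the same code fail cleanly (ib_umem_find_best_pgsz() returns 0).

#include <linux/err.h>
#include <rdma/ib_umem.h>

/* Hypothetical helper: pin a user range, report how many PAGE_SIZE DMA
 * blocks it occupies, then release the pin again.
 */
static long my_count_dma_blocks(struct ib_device *ibdev, u64 start, u64 length,
				u64 virt, int access)
{
	struct ib_umem *umem;
	unsigned long pgsz;
	long nblocks;

	umem = ib_umem_get(ibdev, start, length, access);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	pgsz = ib_umem_find_best_pgsz(umem, PAGE_SIZE, virt);
	if (!pgsz) {
		ib_umem_release(umem);
		return -EINVAL;
	}

	nblocks = ib_umem_num_dma_blocks(umem, pgsz);
	ib_umem_release(umem);
	return nblocks;
}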