/kernel/linux/linux-5.10/net/xdp/
xdp_umem.c: pinning, accounting and lifetime of AF_XDP user memory. The indexed hits start with the teardown helpers:

    static void xdp_umem_unpin_pages(struct xdp_umem *umem)
    {
            unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

            kfree(umem->pgs);
            umem->pgs = NULL;
    }

    static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
    {
            if (umem->user) {
                    atomic_long_sub(umem->npgs, &umem->user->locked_vm);
                    free_uid(umem->user);
            }
    }

Further matches: xdp_umem_addr_unmap(), xdp_umem_addr_map(), xdp_umem_release(), xdp_umem_release_deferred(), xdp_get_umem(), xdp_put_umem(), xdp_umem_pin_pages(), xdp_umem_account_pages(), xdp_umem_reg() and xdp_umem_create().
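xdp_umem_reg() is reached from userspace through the XDP_UMEM_REG socket option (handled by xsk_setsockopt() in net/xdp/xsk.c). A minimal sketch of that path, assuming a kernel built with CONFIG_XDP_SOCKETS; the sizes are arbitrary and the fallback #defines cover older libc headers:

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/mman.h>
    #include <sys/socket.h>
    #include <linux/if_xdp.h>

    #ifndef AF_XDP
    #define AF_XDP 44
    #endif
    #ifndef SOL_XDP
    #define SOL_XDP 283
    #endif

    int main(void)
    {
            size_t size = 4 * 1024 * 1024;          /* 2048 frames of 2 KiB */
            int fd = socket(AF_XDP, SOCK_RAW, 0);   /* needs CAP_NET_RAW */
            void *area;

            if (fd < 0)
                    return 1;

            /* page-aligned backing store for the umem */
            area = mmap(NULL, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (area == MAP_FAILED)
                    return 1;

            struct xdp_umem_reg mr = {
                    .addr = (uintptr_t)area,
                    .len = size,
                    .chunk_size = 2048,
                    .headroom = 0,
            };

            /* this setsockopt() lands in xdp_umem_reg() shown above */
            if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)))
                    perror("XDP_UMEM_REG");

            close(fd);
            return 0;
    }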
xsk_buff_pool.c: creation of the buffer pool that fronts a umem. xp_create_and_assign_umem() copies the umem's geometry into the pool:

    pool = kvzalloc(struct_size(pool, free_heads, umem->chunks), GFP_KERNEL);
    ...
    pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
    ...
    pool->chunk_mask = ~((u64)umem->chunk_size - 1);
    pool->addrs_cnt = umem->size;
    pool->heads_cnt = umem->chunks;
    pool->free_heads_cnt = umem->chunks;
    pool->headroom = umem->headroom;
    pool->chunk_size = umem->chunk_size;
    pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;

Further matches: xp_assign_dev_shared() and xp_create_dma_map().
/kernel/linux/linux-6.6/net/xdp/
xdp_umem.c: the same teardown helpers as the 5.10 copy, with the page array now freed via kvfree():

    static void xdp_umem_unpin_pages(struct xdp_umem *umem)
    {
            unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

            kvfree(umem->pgs);
            umem->pgs = NULL;
    }

Further matches: xdp_umem_unaccount_pages(), xdp_umem_addr_unmap(), xdp_umem_addr_map(), xdp_umem_release(), xdp_umem_release_deferred(), xdp_get_umem(), xdp_put_umem(), xdp_umem_pin_pages(), xdp_umem_account_pages(), xdp_umem_reg() and xdp_umem_create().
xsk_buff_pool.c: the 6.6 copy additionally sizes its lookup structures for unaligned chunk mode before copying the umem's geometry:

    bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
    ...
    entries = unaligned ? umem->chunks : 0;
    ...
    pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
    ...
    pool->chunk_mask = ~((u64)umem->chunk_size - 1);
    pool->addrs_cnt = umem->size;
    pool->heads_cnt = umem->chunks;
    pool->free_heads_cnt = umem->chunks;
    pool->headroom = umem->headroom;
    pool->chunk_size = umem->chunk_size;

Further matches: xp_assign_dev_shared() and xp_create_dma_map().
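The chunk_mask assignment above works because chunk_size is a power of two in aligned mode: masking any umem address with ~(chunk_size - 1) yields the base of its containing chunk. A standalone demonstration of the arithmetic (values are illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
            uint64_t chunk_size = 2048;               /* power of two, aligned mode */
            uint64_t chunk_mask = ~(chunk_size - 1);  /* same formula as xp_create_and_assign_umem() */
            uint64_t addr = 5 * 2048 + 300;           /* 300 bytes into chunk 5 */

            printf("chunk base: %" PRIu64 "\n", addr & chunk_mask);       /* 10240 */
            printf("offset:     %" PRIu64 "\n", addr & (chunk_size - 1)); /* 300 */
            return 0;
    }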
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c: nouveau's userspace-visible memory objects (nvkm_umem). nvkm_umem_search() resolves a client handle to a referenced memory object:

    list_for_each_entry(umem, &master->umem, head) {
            if (umem->object.object == handle) {
                    memory = nvkm_memory_ref(umem->memory);
                    break;
            }
    }
    ...
    umem = nvkm_umem(object);
    if (!umem->priv || client->super)
            memory = nvkm_memory_ref(umem->memory);

Further matches: nvkm_umem_unmap(), nvkm_umem_map(), nvkm_umem_dtor() and nvkm_umem_new().
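The search helper is an instance of a common kernel pattern: walk a per-client list under a lock and hand the caller a counted reference. A minimal sketch of the pattern; struct my_obj and my_obj_search() are invented names, not nouveau's API:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/refcount.h>
    #include <linux/types.h>

    struct my_obj {
            struct list_head head;
            u64 handle;
            refcount_t refs;
    };

    /* Hypothetical lookup: return a referenced object or NULL. */
    static struct my_obj *my_obj_search(struct list_head *list,
                                        spinlock_t *lock, u64 handle)
    {
            struct my_obj *obj, *found = NULL;

            spin_lock(lock);
            list_for_each_entry(obj, list, head) {
                    if (obj->handle == handle) {
                            refcount_inc(&obj->refs);  /* caller owns a ref */
                            found = obj;
                            break;
                    }
            }
            spin_unlock(lock);
            return found;
    }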
/kernel/linux/linux-5.10/drivers/infiniband/core/
umem.c: the RDMA core's user-memory pinning layer. __ib_umem_release() unwinds ib_umem_get() step by step: DMA-unmap, unpin (dirtying pages if the umem was writable), free the scatter table:

    static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem,
                                  int dirty)
    {
            struct sg_page_iter sg_iter;
            struct page *page;

            if (umem->nmap > 0)
                    ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
                                    DMA_BIDIRECTIONAL);

            for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
                    page = sg_page_iter_page(&sg_iter);
                    unpin_user_pages_dirty_lock(&page, 1,
                                                umem->writable && dirty);
            }

            sg_free_table(&umem->sg_head);
    }

Further matches: ib_umem_find_best_pgsz(), ib_umem_get(), ib_umem_release() and ib_umem_copy_from().
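Drivers consume this layer in a standard sequence: pin the user range, pick the largest hardware-supported page size, then walk the range in DMA blocks of that size. A minimal sketch of that sequence under this tree's ib_umem API; MY_SUPPORTED_PGSZ and my_write_pte() are invented stand-ins for device specifics:

    #include <linux/sizes.h>
    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    #define MY_SUPPORTED_PGSZ (SZ_4K | SZ_2M)   /* hypothetical HW capability */

    static void my_write_pte(dma_addr_t addr) { /* program a device PTE here */ }

    static int my_reg_user_mr(struct ib_device *dev, u64 start, u64 len,
                              u64 virt, int access)
    {
            struct ib_block_iter biter;
            struct ib_umem *umem;
            unsigned long pgsz;

            umem = ib_umem_get(dev, start, len, access);  /* pins + DMA-maps */
            if (IS_ERR(umem))
                    return PTR_ERR(umem);

            pgsz = ib_umem_find_best_pgsz(umem, MY_SUPPORTED_PGSZ, virt);
            if (!pgsz) {
                    ib_umem_release(umem);
                    return -EINVAL;  /* no supported page size fits this range */
            }

            rdma_umem_for_each_dma_block(umem, &biter, pgsz)
                    my_write_pte(rdma_block_iter_dma_address(&biter));

            /* keep umem until deregistration, then ib_umem_release(umem) */
            return 0;
    }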
umem_odp.c: on-demand-paging umems, which fault pages in instead of pinning them up front. ib_init_umem_odp() aligns the range and rejects overflow:

    umem_odp->umem.is_odp = 1;
    ...
    start = ALIGN_DOWN(umem_odp->umem.address, page_size);
    if (check_add_overflow(umem_odp->umem.address,
                           (unsigned long)umem_odp->umem.length,
                           &end))
            return -EOVERFLOW;

Further matches: ib_umem_odp_alloc_implicit() (whose kernel-doc reads "Allocate a parent implicit ODP umem") and ib_umem_odp_alloc_child().
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c: same handle lookup as the 5.10 copy, minus the dropped client->super privilege check, so nvkm_umem_search() now takes the reference unconditionally:

    umem = nvkm_umem(object);
    memory = nvkm_memory_ref(umem->memory);

Further matches: nvkm_umem_unmap(), nvkm_umem_map(), nvkm_umem_dtor() and nvkm_umem_new().
/kernel/linux/linux-6.6/drivers/infiniband/core/
umem.c: the 6.6 release path works on the append scatter table instead of sg_head:

    bool make_dirty = umem->writable && dirty;
    ...
    ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
                               DMA_BIDIRECTIONAL, 0);

    for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
            unpin_user_page_range_dirty_lock(sg_page(sg),
                    DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

    sg_free_append_table(&umem->sgt_append);

The kernel-doc on ib_umem_find_best_pgsz() notes that it returns 0 if the umem requires page sizes not supported by the given bitmap. Further matches: ib_umem_get(), ib_umem_release() and ib_umem_copy_from().
umem_odp.c: structurally identical to the 5.10 copy. ib_init_umem_odp() marks the umem (umem_odp->umem.is_odp = 1), aligns the range and overflow-checks address + length; ib_umem_odp_alloc_implicit() and ib_umem_odp_alloc_child() build the implicit parent and child ODP umems.
umem_dmabuf.c: umems backed by dma-buf attachments instead of pinned pages. ib_umem_dmabuf_map_pages() modifies the imported sg list in place to match the umem's address and length:

    start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
    end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
                PAGE_SIZE);
    ...
    umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
    umem_dmabuf->umem.sgt_append.sgt.nents = nmap;

Further matches: ib_umem_dmabuf_get(), which allocates the wrapper and initializes umem = &umem_dmabuf->umem.
/kernel/linux/linux-6.6/include/rdma/
ib_umem.h: inline helpers over struct ib_umem, which is embedded as the umem member of struct ib_umem_dmabuf. Representative hits:

    static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
    {
            return container_of(umem, struct ib_umem_dmabuf, umem);
    }

    /* Returns the offset of the umem start relative to the first page. */
    static inline int ib_umem_offset(struct ib_umem *umem)
    {
            return umem->address & ~PAGE_MASK;
    }

    static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
                                                   unsigned long pgsz)
    {
            return (sg_dma_address(umem->sgt_append.sgt.sgl) +
                    ib_umem_offset(umem)) & (pgsz - 1);
    }

Further matches: ib_umem_num_dma_blocks(), ib_umem_num_pages(), __rdma_umem_block_iter_start(), ib_umem_find_best_pgoff() and the !CONFIG_INFINIBAND_USER_MEM stubs for ib_umem_release(), ib_umem_copy_from(), ib_umem_find_best_pgsz() and ib_umem_find_best_pgoff().
/kernel/linux/linux-5.10/tools/lib/bpf/
xsk.c: libbpf's AF_XDP helpers. struct xsk_socket keeps a struct xsk_umem *umem member, and the file exports the umem's descriptor:

    int xsk_umem__fd(const struct xsk_umem *umem)
    {
            return umem ? umem->fd : -EINVAL;
    }

xsk_create_umem_rings() sizes the fill and completion rings via setsockopt() (&umem->config.fill_size, &umem->config.comp_size) and then mmaps them. Further matches: xsk_umem__create_v0_0_4(), xsk_get_ctx(), xsk_put_ctx(), xsk_create_ctx(), xsk_socket__create_shared(), xsk_socket__create(), xsk_umem__delete() and xsk_socket__delete().
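A minimal consumer of these helpers, assuming libbpf's xsk.h from this tree; the interface name, queue number and buffer size are placeholders, and passing NULL configs selects the library defaults:

    #include <stdlib.h>
    #include <unistd.h>
    #include <bpf/xsk.h>

    #define NUM_FRAMES 4096
    #define FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE

    int main(void)
    {
            struct xsk_ring_prod fq, tx;
            struct xsk_ring_cons cq, rx;
            struct xsk_umem *umem;
            struct xsk_socket *xsk;
            void *bufs;

            if (posix_memalign(&bufs, getpagesize(), NUM_FRAMES * FRAME_SIZE))
                    return 1;

            /* register the buffer area as a umem; default ring sizes */
            if (xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
                                 &fq, &cq, NULL))
                    return 1;

            /* bind a socket to queue 0 of "eth0" (placeholder) */
            if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
                    return 1;

            xsk_socket__delete(xsk);
            xsk_umem__delete(umem);
            free(bufs);
            return 0;
    }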
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/
xsk.c: the same AF_XDP helper library, relocated into the BPF selftests after its removal from libbpf; sockets are now keyed by ifindex rather than ifname. Matches mirror the 5.10 libbpf copy: xsk_umem__fd(), xsk_create_umem_rings(), xsk_umem__create(), xsk_get_ctx(), xsk_put_ctx(), xsk_create_ctx(), xsk_socket__create_shared(), xsk_socket__create(), xsk_umem__delete() and xsk_socket__delete().
xskxceiver.c: the AF_XDP transmit/receive selftest. Per its header comments, each worker thread creates one AF_XDP socket connected to a unique umem, and one negative test sets the 'len' field of tx descriptors to an invalid value (beyond the umem frame size). Representative helper:

    static u64 umem_size(struct xsk_umem_info *umem)
    {
            return umem->num_frames * umem->frame_size;
    }

Further matches: is_umem_valid(), xsk_configure_umem(), umem_alloc_buffer(), umem_reset_alloc(), __xsk_configure_socket(), ifobj_zc_avail(), pkt_set(), pkt_get_buffer_len(), pkt_stream_generate(), pkt_stream_clone(), __pkt_stream_replace_half(), pkt_stream_receive_half(), pkt_get_addr(), is_offset_correct(), is_frag_valid(), receive_pkts(), __send_pkts(), xsk_configure_socket(), xsk_populate_fill_ring(), testapp_invalid_desc() and testapp_invalid_desc_mb().
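xsk_populate_fill_ring() drives the producer-ring API from xsk.h: reserve slots, write frame addresses, publish. A sketch of that pattern; the ring and frame-array names are illustrative:

    #include <bpf/xsk.h>   /* or the selftests' local xsk.h */

    /* Hand every free frame to the kernel so the driver can RX into it.
     * fq is the umem's fill ring, created by xsk_umem__create(). */
    static void populate_fill_ring(struct xsk_ring_prod *fq,
                                   const __u64 *frame_addrs,
                                   unsigned int nframes)
    {
            __u32 idx;
            unsigned int i;

            /* reserve slots; a full ring would return less than nframes */
            if (xsk_ring_prod__reserve(fq, nframes, &idx) != nframes)
                    return;

            for (i = 0; i < nframes; i++)
                    *xsk_ring_prod__fill_addr(fq, idx + i) = frame_addrs[i];

            xsk_ring_prod__submit(fq, nframes);  /* publish to the kernel */
    }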
/kernel/linux/linux-5.10/include/rdma/
ib_umem.h: the 5.10 helpers compute sizes from the umem's iova and length:

    /* Returns the offset of the umem start relative to the first page. */
    static inline int ib_umem_offset(struct ib_umem *umem)
    {
            return umem->address & ~PAGE_MASK;
    }

    static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
                                                unsigned long pgsz)
    {
            return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
                             ALIGN_DOWN(umem->iova, pgsz))) / pgsz;
    }

    static inline size_t ib_umem_num_pages(struct ib_umem *umem)
    {
            return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
    }

Further matches: __rdma_umem_block_iter_start() and the !CONFIG_INFINIBAND_USER_MEM stubs for ib_umem_release(), ib_umem_copy_from() and ib_umem_find_best_pgsz().
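The block-count formula is easy to sanity-check in isolation: it rounds the interval [iova, iova + length) out to pgsz boundaries and divides. A userspace replica of the math (values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* Same rounding as the kernel's ALIGN()/ALIGN_DOWN() macros. */
    static uint64_t align_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
    static uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }

    int main(void)
    {
            uint64_t iova = 0x1234, length = 0x6000, pgsz = 0x1000;

            /* mirrors ib_umem_num_dma_blocks(): pages spanned by the range */
            uint64_t blocks = (align_up(iova + length, pgsz) -
                               align_down(iova, pgsz)) / pgsz;

            printf("%llu DMA blocks\n", (unsigned long long)blocks); /* 7 */
            return 0;
    }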
/kernel/linux/linux-5.10/drivers/infiniband/sw/siw/
siw_mem.c: the soft-iWARP driver builds its own umem out of page chunks. siw_umem_release() frees chunk by chunk, then settles the pinned-page accounting against the owning mm:

    void siw_umem_release(struct siw_umem *umem, bool dirty)
    {
            struct mm_struct *mm_s = umem->owning_mm;
            int i, num_pages = umem->num_pages;

            for (i = 0; num_pages; i++) {
                    ...
                    siw_free_plist(&umem->page_chunk[i], to_free,
                                   umem->writable && dirty);
                    kfree(umem->page_chunk[i].plist);
                    ...
            }
            atomic64_sub(umem->num_pages, &mm_s->pinned_vm);
            ...
            kfree(umem->page_chunk);
            kfree(umem);
    }

Further matches: siw_free_mem() (which calls siw_umem_release(mem->umem, true)) and siw_umem_get().
/kernel/linux/linux-5.10/samples/bpf/
xdpsock_user.c: the AF_XDP sample application. struct xsk_umem_info wraps the umem together with its fill and completion rings, and the socket info points back at it:

    struct xsk_umem *umem = xsks[0]->umem->umem;   /* in xdpsock_cleanup() */
    ...
    (void)xsk_umem__delete(umem);

    static void gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
    {
            memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,
                   sizeof(pkt_data));
    }

Further matches: xsk_configure_umem(), xsk_populate_fill_ring(), xsk_configure_socket(), complete_tx_l2fwd() and main().
/kernel/linux/linux-6.6/drivers/infiniband/sw/siw/
siw_mem.c: the same hits as the 5.10 copy. siw_umem_release() walks the page chunks, subtracts umem->num_pages from mm->pinned_vm and frees the umem; further matches are siw_free_mem() and siw_umem_get().
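Both siw copies follow the kernel's standard long-term pinning etiquette: charge mm->pinned_vm against RLIMIT_MEMLOCK, pin with FOLL_LONGTERM, and undo both on failure or release. A condensed sketch of the acquire side; my_pin_range() is an invented name that only mirrors the shape of siw_umem_get(), not its API:

    #include <linux/mm.h>
    #include <linux/sched/signal.h>

    /* Hypothetical helper: pin a user range long-term, charging current->mm. */
    static long my_pin_range(unsigned long start, int npages,
                             struct page **pages, bool writable)
    {
            unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
            long pinned;

            /* charge first so a racing pin cannot blow past the rlimit */
            if (atomic64_add_return(npages, &current->mm->pinned_vm) >
                rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT) {
                    atomic64_sub(npages, &current->mm->pinned_vm);
                    return -ENOMEM;
            }

            pinned = pin_user_pages_fast(start, npages, gup_flags, pages);
            if (pinned != npages) {
                    if (pinned > 0)
                            unpin_user_pages(pages, pinned);
                    atomic64_sub(npages, &current->mm->pinned_vm);
                    return pinned < 0 ? pinned : -EFAULT;
            }
            return pinned;
    }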
/kernel/linux/linux-6.6/drivers/infiniband/hw/mana/
wq.c: the MANA driver's work-queue creation is a straightforward ib_umem_get() consumer:

    umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
                       ...);
    if (IS_ERR(umem)) {
            err = PTR_ERR(umem);
            ibdev_dbg(&mdev->ib_dev,
                      "Failed to get umem for create wq, err %d\n", err);
            ...
    }

    wq->umem = umem;
    ...
    err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);

The error path and mana_ib_destroy_wq() both unwind with ib_umem_release().
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/
mr.c: memory-region registration. mlx4_ib_get_dma_mr() sets mr->umem = NULL (a DMA MR has no user memory behind it), while mlx4_ib_umem_write_mtt() and mlx4_ib_umem_calc_optimal_mtt_size() walk the umem's scatter list:

    *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);

    for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
            ...
    }

mlx4_ib_reg_user_mr() starts from mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags) and checks the result with IS_ERR().
doorbell.c: user doorbell pages are single-page umems, deduplicated per user page via struct mlx4_ib_user_db_page. mlx4_ib_db_map_user() pins the page containing the doorbell and computes its bus address:

    page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
                             PAGE_SIZE, 0);
    if (IS_ERR(page->umem)) {
            err = PTR_ERR(page->umem);
            ...
    }
    ...
    db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);

mlx4_ib_db_unmap_user() releases it with ib_umem_release(db->u.user_page->umem).
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx4/
mr.c: as in 5.10, except the scatter walk uses the append table, for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i). The registration path is unchanged:

    mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
    if (IS_ERR(mr->umem)) {
            err = PTR_ERR(mr->umem);
            ...
    }

    shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

Further matches: mlx4_ib_get_dma_mr(), mlx4_ib_umem_write_mtt() and mlx4_ib_umem_calc_optimal_mtt_size().
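Writing hardware translation entries from a DMA-mapped umem has a common shape: for each DMA segment, emit one entry per device page of size 1 << shift. A sketch of that shape against the 6.6 append-table layout; my_write_entries() and its emit() callback are illustrative, not the mlx4 API:

    #include <linux/scatterlist.h>
    #include <rdma/ib_umem.h>

    /* Hypothetical: emit one device-page entry per (1 << shift) bytes of
     * each DMA segment, as mlx4_ib_umem_write_mtt() does for MTTs. */
    static void my_write_entries(struct ib_umem *umem, int shift,
                                 void (*emit)(u64 dma_addr))
    {
            struct scatterlist *sg;
            int i;

            for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
                    u64 dma = sg_dma_address(sg);
                    unsigned int len = sg_dma_len(sg);
                    u64 off;

                    for (off = 0; off < len; off += 1ULL << shift)
                            emit(dma + off);
            }
    }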
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/
doorbell.c: same pattern as the mlx4 copy. mlx5_ib_db_map_user() pins one page with ib_umem_get() and sets db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); mlx5_ib_db_unmap_user() releases it.
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/
doorbell.c: the 6.6 copy reads the bus address from the append table:

    db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
              (virt & ~PAGE_MASK);

Otherwise unchanged: mlx5_ib_db_map_user() pins the doorbell page, mlx5_ib_db_unmap_user() releases it with ib_umem_release().
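The doorbell case condenses the umem API to three steps: pin the single page containing the address, read the first segment's bus address, add the sub-page offset back. A hedged sketch against the 6.6 layout; my_map_doorbell() is an invented helper, not the mlx5 API:

    #include <linux/err.h>
    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical helper mirroring mlx*_ib_db_map_user(): pin the page
     * containing user address virt and return its DMA address. */
    static int my_map_doorbell(struct ib_device *dev, unsigned long virt,
                               struct ib_umem **umem_out, dma_addr_t *dma_out)
    {
            struct ib_umem *umem;

            umem = ib_umem_get(dev, virt & PAGE_MASK, PAGE_SIZE, 0);
            if (IS_ERR(umem))
                    return PTR_ERR(umem);

            /* one page -> one scatter entry; add offset within the page */
            *dma_out = sg_dma_address(umem->sgt_append.sgt.sgl) +
                       (virt & ~PAGE_MASK);
            *umem_out = umem;   /* release with ib_umem_release() on teardown */
            return 0;
    }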