/third_party/node/deps/openssl/openssl/crypto/rand/
rand_pool.c | Allocates and manages the RAND_POOL buffer that OpenSSL's entropy sources fill ("Allocate memory and initialize a new random pool"). Matches: ossl_rand_pool_new(), ossl_rand_pool_attach(), ossl_rand_pool_free(), ossl_rand_pool_buffer(), ossl_rand_pool_entropy(), ossl_rand_pool_length(), ossl_rand_pool_detach(), ossl_rand_pool_reattach(), ossl_rand_pool_entropy_available(), ossl_rand_pool_entropy_needed(), rand_pool_grow(), ossl_rand_pool_bytes_needed(), ossl_rand_pool_bytes_remaining(), ossl_rand_pool_add(), ossl_rand_pool_add_begin(), ossl_rand_pool_add_end() [all...]
/third_party/openssl/crypto/rand/ |
rand_pool.c | Second, identical copy of the OpenSSL source above; the same RAND_POOL functions match. [all...]
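Both copies implement the same fill pattern: an entropy source asks the pool how many bytes it still needs, writes straight into the pool's buffer between ossl_rand_pool_add_begin() and ossl_rand_pool_add_end(), and declares how much entropy those bytes carry. A minimal sketch of that flow against the prototypes above; read_entropy_source() is a hypothetical platform hook, and the include path assumes the internal-header layout of recent OpenSSL:

    #include <stddef.h>
    #include "crypto/rand_pool.h"  /* internal OpenSSL header for RAND_POOL */

    /* Hypothetical platform hook: writes up to len random bytes into buf,
     * returns how many bytes it actually produced. */
    extern size_t read_entropy_source(unsigned char *buf, size_t len);

    static size_t fill_pool(RAND_POOL *pool)
    {
        /* entropy_factor == 1: we promise full-entropy bytes. */
        size_t needed = ossl_rand_pool_bytes_needed(pool, 1);

        while (needed > 0) {
            /* Reserve space directly inside the pool's buffer. */
            unsigned char *buf = ossl_rand_pool_add_begin(pool, needed);
            size_t got;

            if (buf == NULL)
                break;                     /* pool cannot grow further */

            got = read_entropy_source(buf, needed);
            /* Commit the bytes, claiming 8 bits of entropy per byte. */
            ossl_rand_pool_add_end(pool, got, 8 * got);
            if (got == 0)
                break;                     /* source dried up */
            needed = ossl_rand_pool_bytes_needed(pool, 1);
        }
        return ossl_rand_pool_entropy_available(pool);
    }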
/kernel/linux/linux-6.6/net/xdp/ |
xsk_buff_pool.c | AF_XDP buffer pool implementation. Matches: socket list handling (xp_add_xsk(), xp_del_xsk()), pool lifetime (xp_create_and_assign_umem(), xp_alloc_tx_descs(), xp_destroy(), xp_get_pool(), xp_put_pool(), xp_release_deferred()), device binding (xp_assign_dev(), xp_assign_dev_shared(), xp_clear_dev(), xp_disable_drv_zc(), xp_set_rxq_info()), DMA handling (xp_find_dma_map(), xp_dma_map(), xp_dma_unmap(), xp_init_dma_info(), xp_dma_sync_for_device_slow()), and allocation (__xp_alloc(), xp_alloc(), xp_alloc_batch(), xp_alloc_new_from_fq(), xp_alloc_reused(), xp_can_alloc(), xp_raw_get_data(), xp_raw_get_dma(), plus the address checks xp_addr_crosses_non_contig_pg(), xp_check_unaligned(), xp_check_aligned()) [all...]
/kernel/linux/linux-5.10/net/xdp/ |
xsk_buff_pool.c | The 5.10 version of the same AF_XDP buffer pool: largely the same function set, with the device binding split as __xp_assign_dev()/xp_assign_dev() and without the 6.6 batch allocators (xp_alloc_batch(), xp_alloc_new_from_fq(), xp_alloc_reused()) [all...]
/kernel/linux/linux-6.6/net/core/ |
page_pool.c | Core page_pool recycling allocator. Matches: the statistics macros (alloc_stat_inc, recycle_stat_inc, recycle_stat_add) and page_pool_get_stats() ("fetch page pool stats"), setup (page_pool_init(), page_pool_create()), the allocation fast/slow paths (page_pool_refill_alloc_cache(), __page_pool_get_cached(), page_pool_dma_sync_for_device(), page_pool_dma_map(), page_pool_set_pp_info(), __page_pool_alloc_page_order(), __page_pool_alloc_pages_slow(), page_pool_alloc_pages()), recycling (page_pool_return_page(), page_pool_recycle_in_ring(), page_pool_recycle_in_cache(), __page_pool_put_page(), page_pool_put_defragged_page(), page_pool_put_page_bulk()), the fragment API (page_pool_drain_frag(), page_pool_free_frag(), page_pool_alloc_frag()), and teardown (page_pool_inflight(), page_pool_empty_ring(), page_pool_free(), page_pool_empty_alloc_cache_once(), page_pool_scrub(), page_pool_release(), page_pool_release_retry(), page_pool_use_xdp_mem(), page_pool_unlink_napi(), page_pool_destroy(), page_pool_update_nid()) [all...]
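For a driver, the API above reduces to create, alloc, recycle, destroy. A rough sketch (the params struct has grown fields across versions and the header was split into net/page_pool/*.h in 6.6, so take the include and field set as illustrative):

    #include <net/page_pool/helpers.h>   /* <net/page_pool.h> before the split */

    /* Create a small recycling pool for one RX ring. */
    static struct page_pool *rx_pool_create(struct device *dev)
    {
        struct page_pool_params pp = {
            .order     = 0,                 /* single pages */
            .pool_size = 256,               /* ptr_ring entries */
            .nid       = NUMA_NO_NODE,
            .dev       = dev,
            .dma_dir   = DMA_FROM_DEVICE,
            .flags     = PP_FLAG_DMA_MAP,   /* pool maps pages for us */
        };

        return page_pool_create(&pp);       /* ERR_PTR() on failure */
    }

    static void rx_alloc_and_drop(struct page_pool *pool)
    {
        struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

        if (!page)
            return;
        /* ... hand page_pool_get_dma_addr(page) to the NIC; on a drop
         * path, give the page straight back for recycling: */
        page_pool_put_full_page(pool, page, false /* not from NAPI */);
    }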
/kernel/linux/linux-5.10/drivers/staging/android/ion/ |
ion_page_pool.c | ION memory allocator page-pool helpers: ion_page_pool_alloc_pages(), ion_page_pool_free_pages(), the high/low free-list bookkeeping (ion_page_pool_add(), ion_page_pool_remove(), ion_page_pool_total()), the ion_page_pool_alloc()/ion_page_pool_free() pair, shrinker integration (ion_page_pool_shrink()), and lifetime (ion_page_pool_create(), ion_page_pool_destroy()) [all...]
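The helpers above form a simple free-list cache in front of the buddy allocator; an ION heap keeps one pool per page order and falls back to alloc_pages() on a miss. A sketch of heap-side usage (ION lived in staging and was later removed, so this is historical):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include "ion.h"   /* staging driver's private header */

    static struct ion_page_pool *order0_pool;

    static int heap_pool_init(void)
    {
        /* One pool caching zeroed order-0 pages. */
        order0_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
        return order0_pool ? 0 : -ENOMEM;
    }

    static struct page *heap_alloc_page(void)
    {
        /* Served from the pool's cached high/low lists when possible. */
        return ion_page_pool_alloc(order0_pool);
    }

    static void heap_free_page(struct page *page)
    {
        /* Caches the page in the pool instead of freeing it. */
        ion_page_pool_free(order0_pool, page);
    }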
/kernel/linux/linux-5.10/drivers/net/ethernet/ti/ |
k3-cppi-desc-pool.c | TI K3 CPPI5 descriptor pool API: a gen_pool over DMA-coherent memory. Matches: k3_cppi_desc_pool_destroy() (which WARNs when gen_pool_size() != gen_pool_avail(), i.e. descriptors are still outstanding), k3_cppi_desc_pool_create_name(), k3_cppi_desc_pool_virt2dma(), k3_cppi_desc_pool_dma2virt(), k3_cppi_desc_pool_alloc(), k3_cppi_desc_pool_free(), k3_cppi_desc_pool_avail() [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/ti/ |
k3-cppi-desc-pool.c | The 6.6 copy of the same descriptor-pool API; identical function set at slightly shifted line offsets. [all...]
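Typical driver usage of this API: create a named pool of fixed-size descriptors, allocate one, translate its CPU address to the DMA address the queue hardware expects, and free it on completion. A sketch under assumed sizes (TX_DESC_NUM and TX_DESC_SIZE are invented):

    #include <linux/err.h>
    #include "k3-cppi-desc-pool.h"

    #define TX_DESC_NUM  512
    #define TX_DESC_SIZE 128   /* hypothetical HW descriptor size */

    static int tx_ring_init(struct device *dev,
                            struct k3_cppi_desc_pool **out)
    {
        struct k3_cppi_desc_pool *pool;
        void *desc;
        dma_addr_t desc_dma;

        pool = k3_cppi_desc_pool_create_name(dev, TX_DESC_NUM,
                                             TX_DESC_SIZE, "tx-descs");
        if (IS_ERR(pool))
            return PTR_ERR(pool);

        desc = k3_cppi_desc_pool_alloc(pool);   /* CPU-visible slot */
        if (desc) {
            desc_dma = k3_cppi_desc_pool_virt2dma(pool, desc);
            /* ... fill desc, push desc_dma onto the HW queue ... */
            k3_cppi_desc_pool_free(pool, desc);
        }

        *out = pool;
        return 0;
    }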
/kernel/linux/linux-5.10/net/core/ |
page_pool.c | The 5.10 page_pool: same architecture as the 6.6 version above, without the stats machinery or the fragment API. Matches: page_pool_init(), page_pool_create(), page_pool_refill_alloc_cache(), __page_pool_get_cached(), page_pool_dma_sync_for_device(), __page_pool_alloc_pages_slow(), page_pool_alloc_pages(), page_pool_inflight(), page_pool_release_page(), page_pool_return_page(), page_pool_recycle_in_ring(), page_pool_recycle_in_cache(), pool_page_reusable(), page_pool_put_page(), page_pool_empty_ring(), page_pool_free(), page_pool_empty_alloc_cache_once(), page_pool_scrub(), page_pool_release(), page_pool_release_retry(), page_pool_use_xdp_mem(), page_pool_destroy(), page_pool_update_nid() [all...]
/kernel/linux/linux-5.10/drivers/md/ |
dm-thin.c | Device-mapper thin-provisioning target. A pool device ties together a metadata device and a data device, and the pool runs in various modes ordered by degradation for comparisons. Matches span the whole file: the struct pool definition and mode handling (get_pool_mode(), set_pool_mode(), notify_of_pool_mode_change(), metadata_operation_failed(), abort_transaction()), bio-prison cell helpers (bio_detain(), cell_release(), cell_visit_release(), cell_release_no_holder(), cell_error_with_code(), cell_error(), cell_success(), cell_requeue()), the pool table (__pool_table_insert(), __pool_table_remove(), __pool_table_lookup(), __pool_table_lookup_metadata_dev()), block mapping and copy scheduling (get_bio_block(), get_bio_block_range(), remap(), issue(), schedule_copy(), schedule_zero(), schedule_external_copy(), alloc_data_block(), commit()), discard passdown (process_discard_cell_no_passdown(), break_up_discard_bio(), process_prepared_discard_passdown_pt1()/pt2(), disable_passdown_if_not_supported()), the worker (do_worker(), do_waker(), do_no_space_timeout(), process_deferred_bios(), process_thin_deferred_bios(), process_thin_deferred_cells(), sort_cells()), pool lifecycle (pool_create(), __pool_destroy(), __pool_inc(), __pool_dec(), __pool_find(), pool_ctr(), pool_map(), maybe_resize_data_dev(), maybe_resize_metadata_dev(), pool_preresume(), pool_resume(), pool_presuspend(), pool_presuspend_undo(), pool_postsuspend(), pool_message() and its process_*_mesg() handlers, pool_status(), pool_io_hints()), and the thin target hooks (thin_endio(), thin_iterate_devices(), thin_io_hints()) [all...]
/kernel/linux/linux-6.6/drivers/md/ |
dm-thin.c | The 6.6 copy of the thin-provisioning target; essentially the same match set as the 5.10 entry above, with disable_passdown_if_not_supported() renamed disable_discard_passdown_if_not_supported() and the message handlers taking unsigned int argc [all...]
/third_party/skia/tests/ |
SkBlockAllocatorTest.cpp | Skia unit tests for SkBlockAllocator. Matches: BlockAllocatorTestAccess::ScratchBlockSize() plus the test utilities block_count(), get_block(), total_size(), add_block(), alloc_byte() (all templated over SkSBlockAllocator<N>& pool), DEF_TEST cases that stack-allocate an SkSBlockAllocator<256> pool, and run_owning_block_test()/run_owning_block_tests() [all...]
/kernel/linux/linux-6.6/mm/ |
mempool.c | Memory buffer pool support; such pools are mostly used to guarantee forward progress for allocations under memory pressure. Matches: the debug poisoning helpers (poison_error(), __check_element(), check_element(), poison_element(), kasan_poison_element(), kasan_unpoison_element()), element bookkeeping (add_element(), remove_element()), and the public API (mempool_exit(), mempool_destroy(), mempool_init_node(), mempool_init(), mempool_create_node(), mempool_resize(), mempool_alloc(), mempool_free()) [all...]
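The point of a mempool is its reserve: mempool_alloc() with a sleeping gfp mask falls back to the preallocated elements instead of failing, so I/O paths can always make forward progress. The classic pairing with a slab cache, as a sketch (struct my_io and the sizes are invented):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct my_io { int sector; char payload[64]; };

    static struct kmem_cache *io_cache;
    static mempool_t *io_pool;

    static int io_pool_init(void)
    {
        io_cache = KMEM_CACHE(my_io, 0);
        if (!io_cache)
            return -ENOMEM;

        /* Keep at least 16 elements in reserve; alloc/free go through
         * the slab cache. */
        io_pool = mempool_create(16, mempool_alloc_slab,
                                 mempool_free_slab, io_cache);
        if (!io_pool) {
            kmem_cache_destroy(io_cache);
            return -ENOMEM;
        }
        return 0;
    }

    static void io_submit_example(void)
    {
        /* With GFP_NOIO this may sleep but will not fail: it falls back
         * to the reserved elements when the allocator cannot deliver. */
        struct my_io *io = mempool_alloc(io_pool, GFP_NOIO);

        /* ... use io ... */
        mempool_free(io, io_pool);
    }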
/third_party/skia/third_party/externals/microhttpd/src/microhttpd/ |
memorypool.c | microhttpd's per-connection memory pool; pools are not reentrant and must not be shared between threads. struct MemoryPool holds a pointer to the pool's memory, the size of the pool, and a flag recording whether it was malloc'ed or mmapped (VirtualAlloc'ed for W32). Matches: MHD_pool_create() (max = maximum size of the pool), MHD_pool_destroy(), MHD_pool_allocate() (with a from_end flag), MHD_pool_reallocate(), MHD_pool_reset() [all...]
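This is a bump allocator: MHD_pool_allocate() carves from the front or the back of one contiguous region, MHD_pool_reallocate() can only grow the most recent front allocation, and MHD_pool_reset() recycles everything except one block the caller wants to keep. A sketch against the matched prototypes (return-value details vary a little between microhttpd versions):

    #include "memorypool.h"   /* microhttpd-internal header */

    static void pool_demo(void)
    {
        /* One pool backing a single connection, 32 KiB max. */
        struct MemoryPool *pool = MHD_pool_create(32 * 1024);
        char *hdrs, *body;

        if (NULL == pool)
            return;

        /* from_end == MHD_NO: transient data at the front. */
        hdrs = MHD_pool_allocate(pool, 512, MHD_NO);
        /* from_end == MHD_YES: long-lived data at the back. */
        body = MHD_pool_allocate(pool, 1024, MHD_YES);
        (void) hdrs;

        /* Between requests, keep only `body`; it may be moved to the
         * start of the pool, so use the returned pointer. */
        body = MHD_pool_reset(pool, body, 1024);

        MHD_pool_destroy(pool);
    }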
/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/ |
rxe_pool.c | Soft-RoCE (rxe) typed object pools. Matches: pool_name(), index setup (rxe_pool_init_index()), rxe_pool_init(), teardown (rxe_pool_release(), rxe_pool_put(), rxe_pool_cleanup()), index/key management (alloc_index(), insert_index(), insert_key(), rxe_add_key(), rxe_drop_key(), rxe_add_index(), rxe_drop_index()), allocation (rxe_alloc(), rxe_add_to_pool(), rxe_elem_release()), and lookup (rxe_pool_get_index(), rxe_pool_get_key()) [all...]
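Objects in this driver come out of typed pools: rxe_alloc() hands back a zeroed, kref-counted element, and rxe_add_index()/rxe_drop_index() make it reachable through rxe_pool_get_index(). A rough sketch of the 5.10-era pattern, using the QP pool as the example (rxe_drop_ref() is assumed here to be the header's kref_put wrapper around rxe_elem_release()):

    #include "rxe.h"
    #include "rxe_pool.h"

    static struct rxe_qp *qp_alloc_sketch(struct rxe_dev *rxe)
    {
        struct rxe_qp *qp = rxe_alloc(&rxe->qp_pool);

        if (!qp)
            return NULL;          /* pool already at max_elem */

        rxe_add_index(qp);        /* reachable via rxe_pool_get_index() */
        return qp;
    }

    static void qp_free_sketch(struct rxe_qp *qp)
    {
        rxe_drop_index(qp);
        rxe_drop_ref(qp);         /* kref_put -> rxe_elem_release() */
    }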
/kernel/linux/linux-5.10/mm/ |
mempool.c | The 5.10 copy of the mempool implementation; the same function set as the 6.6 entry above, with check_element()/poison_element() also covering mempool_free_slab pools [all...]
zbud.c | zbud compressed-memory allocator: stores up to two ("buddied") compressed objects per page, with NCHUNKS_ORDER of 6 giving 63 freelists per pool. Per the struct zbud_pool kernel-doc, the lock protects all pool fields and the first|last_chunk fields of any zbud page in the pool, pages_nr counts zbud pages, and ops are set at pool creation time. Matches: the zpool glue (zbud_zpool_evict(), zbud_zpool_create(), zbud_zpool_destroy(), zbud_zpool_malloc(), zbud_zpool_free(), zbud_zpool_shrink(), zbud_zpool_map(), zbud_zpool_unmap(), zbud_zpool_total_size()) and the native API (zbud_create_pool(), zbud_destroy_pool(), zbud_alloc(), zbud_free(), zbud_reclaim_page(), zbud_map(), zbud_unmap(), zbud_get_pool_size()) [all...]
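Because each zbud page holds at most two objects, the API works in opaque handles: allocate against a size, map the handle to get a usable address, unmap before freeing. A sketch with the 5.10 signatures (the eviction callback here just refuses, which makes zbud_reclaim_page() a no-op):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/zbud.h>

    static int demo_evict(struct zbud_pool *pool, unsigned long handle)
    {
        return -EINVAL;   /* sketch: never allow eviction */
    }

    static const struct zbud_ops demo_ops = { .evict = demo_evict };

    static void zbud_demo(void)
    {
        struct zbud_pool *pool = zbud_create_pool(GFP_KERNEL, &demo_ops);
        unsigned long handle;
        void *vaddr;

        if (!pool)
            return;

        /* Room for a ~200-byte compressed object (rounded to chunks). */
        if (zbud_alloc(pool, 200, GFP_KERNEL, &handle) == 0) {
            vaddr = zbud_map(pool, handle);   /* usable address */
            memset(vaddr, 0, 200);
            zbud_unmap(pool, handle);
            zbud_free(pool, handle);
        }
        zbud_destroy_pool(pool);
    }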
/third_party/mesa3d/src/gallium/frontends/nine/ |
threadpool.c | Gallium Nine's swapchain thread pool. threadpool_worker() is the worker loop: it holds pool->m, waits on pool->new_work while pool->workqueue is empty and the pool is not shutting down, then pops a task from pool->workqueue. Also matches: _mesa_threadpool_create(), _mesa_threadpool_destroy(), _mesa_threadpool_queue_task(), _mesa_threadpool_wait_for_task() [all...]
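The worker loop above is a textbook condition-variable consumer: sleep on new_work while the queue is empty, re-check shutdown after wakeup, pop one task. Usage is equally small; a sketch based on the matched prototypes, assuming create takes the owning swapchain the way destroy does:

    #include "threadpool.h"

    static void present_work(void *data)
    {
        /* ... blocking work done off the main thread ... */
    }

    static void demo(struct NineSwapChain9 *swapchain, void *frame)
    {
        struct threadpool *pool = _mesa_threadpool_create(swapchain);
        struct threadpool_task *task;

        if (!pool)
            return;

        /* Queue one task; the worker picks it up under pool->m. */
        task = _mesa_threadpool_queue_task(pool, present_work, frame);

        /* Block until that task has finished, then reclaim it. */
        _mesa_threadpool_wait_for_task(pool, &task);

        _mesa_threadpool_destroy(swapchain, pool);
    }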
/kernel/linux/linux-5.10/include/net/ |
xdp_sock_drv.h | Driver-facing AF_XDP API. Declares the real implementations (xsk_tx_completed(), xsk_tx_peek_desc(), xsk_tx_release(), the need-wakeup setters/clearers and xsk_uses_need_wakeup(), xsk_pool_get_headroom(), xsk_pool_get_chunk_size(), xsk_pool_get_rx_frame_size(), xsk_pool_set_rxq_info(), xsk_pool_dma_map()/xsk_pool_dma_unmap(), xsk_buff_alloc(), xsk_buff_can_alloc(), xsk_buff_raw_get_dma(), xsk_buff_raw_get_data(), xsk_buff_dma_sync_for_cpu(), xsk_buff_raw_dma_sync_for_device()) and provides empty inline stubs for each when CONFIG_XDP_SOCKETS is disabled [all...]
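These are the calls a zero-copy driver makes; the stubbed copies at the bottom of the header let the same driver compile with CONFIG_XDP_SOCKETS disabled. A condensed TX path as a sketch (ring programming elided; in a real driver xsk_tx_completed() runs from the hardware-completion path, not inline):

    #include <net/xdp_sock_drv.h>

    /* Move up to `budget` descriptors from the XSK TX ring toward the
     * hardware, then tell the socket layer what was consumed. */
    static int my_xsk_xmit(struct xsk_buff_pool *pool, int budget)
    {
        struct xdp_desc desc;
        int sent = 0;

        while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
            dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

            xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
            /* ... write dma + desc.len into the HW TX ring ... */
            sent++;
        }
        if (sent) {
            xsk_tx_release(pool);          /* commit the peeked entries */
            xsk_tx_completed(pool, sent);  /* normally after completion IRQ */
        }
        return sent;
    }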
/kernel/linux/linux-5.10/sound/core/seq/ |
seq_memory.c | ALSA sequencer event memory pool. Matches: capacity checks (snd_seq_pool_available(), snd_seq_output_ok()), cell management (free_cell(), snd_seq_cell_free(), snd_seq_cell_alloc(), snd_seq_event_dup()), poll support (snd_seq_pool_poll_wait()), and pool lifetime (snd_seq_pool_init(), snd_seq_pool_mark_closing(), snd_seq_pool_done(), snd_seq_pool_new(), snd_seq_pool_delete(), snd_seq_info_pool()) [all...]
/kernel/linux/linux-6.6/sound/core/seq/ |
seq_memory.c | The 6.6 copy of the sequencer memory pool; identical match set at shifted line offsets. [all...]
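Cells are fixed-size and chained off pool->free; a variable-length event simply occupies a chain of cells. A sketch of the internal flow based on the matched prototypes, assuming snd_seq_pool_new() takes the initial cell count:

    #include "seq_memory.h"   /* sound/core/seq internal header */

    static struct snd_seq_pool *make_pool(void)
    {
        struct snd_seq_pool *pool = snd_seq_pool_new(500); /* 500 cells */

        if (pool && snd_seq_pool_init(pool) < 0) {
            snd_seq_pool_delete(&pool);   /* also NULLs the pointer */
            return NULL;
        }
        return pool;
    }

    static int enqueue_copy(struct snd_seq_pool *pool,
                            struct snd_seq_event *ev)
    {
        struct snd_seq_event_cell *cell;
        int err;

        /* nonblock = 1: -EAGAIN instead of sleeping when the pool is
         * full, so the file/mutex arguments go unused here. */
        err = snd_seq_event_dup(pool, ev, &cell, 1, NULL, NULL);
        if (err < 0)
            return err;

        /* ... normally the cell is linked onto a queue here ... */
        snd_seq_cell_free(cell);   /* back onto pool->free */
        return 0;
    }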
/third_party/ffmpeg/libavfilter/ |
framepool.c | libavfilter's FFFramePool, a recycling pool for AVFrame buffers. Matches: the video pool initializer (sets type = AVMEDIA_TYPE_VIDEO, width, height, format, align, and fills linesizes via av_image_fill_linesizes()), the audio pool initializer, the config getters ff_frame_pool_get_video_config() and ff_frame_pool_get_audio_config(), frame allocation ff_frame_pool_get(), and teardown ff_frame_pool_uninit() [all...]
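Filters use the pool so per-frame buffers are recycled rather than reallocated; a frame taken from the pool returns its buffers when its references drain. A sketch for the video case (the allocator-callback signature changed across FFmpeg releases; av_buffer_allocz is assumed to match the variant in use, and the 1080p parameters are invented):

    #include "libavfilter/framepool.h"   /* libavfilter-internal header */
    #include "libavutil/buffer.h"
    #include "libavutil/error.h"
    #include "libavutil/frame.h"
    #include "libavutil/pixfmt.h"

    static int filter_init_pool(FFFramePool **pool)
    {
        /* Pool of 1920x1080 YUV420P frames, 32-byte aligned. */
        *pool = ff_frame_pool_video_init(av_buffer_allocz, 1920, 1080,
                                         AV_PIX_FMT_YUV420P, 32);
        return *pool ? 0 : AVERROR(ENOMEM);
    }

    static AVFrame *filter_get_frame(FFFramePool *pool)
    {
        /* Frame data comes from pooled buffers; unref recycles them. */
        return ff_frame_pool_get(pool);
    }

    static void filter_uninit(FFFramePool **pool)
    {
        ff_frame_pool_uninit(pool);   /* frees once all refs drain */
    }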
/third_party/mesa3d/src/panfrost/vulkan/ |
panvk_mempool.c | Panfrost Vulkan memory pool. In "owned" mode a single parent owns the entire pool, the pool owns all its BOs (exposed via panvk_pool_get_bo_handles()), and freeing happens at whole-pool granularity; this is useful for streaming uploads, where the batch owns the pool. In "unowned" mode the pool is freestanding and does not track created BOs. Matches: panvk_pool_alloc_backing() (which picks a free BO from the BO pool when one of the right size is available), panvk_pool_alloc_aligned(), panvk_pool_init(), panvk_pool_reset(), panvk_pool_cleanup(), panvk_pool_get_bo_handles() [all...]
/foundation/communication/dsoftbus/components/nstackx/fillp/src/public/src/ |
dympool.c | FillP's dynamic memory pool (dsoftbus/nstackx). DympCreatePool() validates initSize/maxSize/itemSize (logging "Error to create pool" on bad arguments), allocates the DympoolType, and records the item create/destroy callbacks. Also matches: DympDestroyPool(), DympSetConsSafe(), DympSetProdSafe(), DympExpandMemory(), DympAskMoreMemory(), DympAlloc() [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/ |
ttm_page_alloc_dma.c | A simple DMA page pool loosely based on dmapool.c, with up to nine pools (per caching type and flags, including a huge-page pool). struct dma_pool records the pool type, a lock taken with the irqsave/irqrestore variants because the allocator may be called from interrupt context, shrink/grow statistics (nfrees, nrefills), and the pool's name; the pool limits are handled without locks. Matches: page helpers (ttm_set_pages_caching(), __ttm_dma_free_page(), __ttm_dma_alloc_page()), freeing (ttm_pool_update_free_locked(), ttm_dma_page_put(), ttm_dma_pages_put(), ttm_dma_page_pool_free(), ttm_dma_free_pool()), setup and lookup (ttm_dma_pool_release(), ttm_dma_pool_init(), ttm_dma_find_pool()), allocation (ttm_dma_handle_caching_state_failure(), ttm_dma_pool_alloc_new_pages(), ttm_dma_page_pool_fill_locked(), ttm_dma_pool_get_pages()), and the TTM hooks (ttm_dma_populate(), ttm_dma_unpopulate(), ttm_dma_page_alloc_debugfs()) [all...]