
Searched refs:pool (Results 51 - 75 of 1459) sorted by relevance


/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_arg.c
8 /* modify-header arg pool */
18 /* argument pool area */
23 struct mutex mutex; /* protect arg pool */
31 static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool) in dr_arg_pool_alloc_objs() argument
43 pool->dmn->info.caps.log_header_modify_argument_granularity; in dr_arg_pool_alloc_objs()
46 max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity, in dr_arg_pool_alloc_objs()
49 min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc, in dr_arg_pool_alloc_objs()
52 if (pool->log_chunk_size > object_range) { in dr_arg_pool_alloc_objs()
53 mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n", in dr_arg_pool_alloc_objs()
54 pool in dr_arg_pool_alloc_objs()
97 dr_arg_pool_get_arg_obj(struct dr_arg_pool *pool) dr_arg_pool_get_arg_obj() argument
122 dr_arg_pool_put_arg_obj(struct dr_arg_pool *pool, struct mlx5dr_arg_obj *arg_obj) dr_arg_pool_put_arg_obj() argument
133 struct dr_arg_pool *pool; dr_arg_pool_create() local
156 dr_arg_pool_destroy(struct dr_arg_pool *pool) dr_arg_pool_destroy() argument
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gt/
intel_gt_buffer_pool.c
14 bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz) in bucket_for_size() argument
24 if (n >= ARRAY_SIZE(pool->cache_list)) in bucket_for_size()
25 n = ARRAY_SIZE(pool->cache_list) - 1; in bucket_for_size()
27 return &pool->cache_list[n]; in bucket_for_size()
37 static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep) in pool_free_older_than() argument
44 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { in pool_free_older_than()
45 struct list_head *list = &pool->cache_list[n]; in pool_free_older_than()
50 if (spin_trylock_irq(&pool->lock)) { in pool_free_older_than()
73 spin_unlock_irq(&pool->lock); in pool_free_older_than()
89 struct intel_gt_buffer_pool *pool in pool_free_work() local
102 struct intel_gt_buffer_pool *pool = node->pool; pool_retire() local
139 node_create(struct intel_gt_buffer_pool *pool, size_t sz, enum i915_map_type type) node_create() argument
174 struct intel_gt_buffer_pool *pool = &gt->buffer_pool; intel_gt_get_buffer_pool() local
222 struct intel_gt_buffer_pool *pool = &gt->buffer_pool; intel_gt_init_buffer_pool() local
233 struct intel_gt_buffer_pool *pool = &gt->buffer_pool; intel_gt_flush_buffer_pool() local
243 struct intel_gt_buffer_pool *pool = &gt->buffer_pool; intel_gt_fini_buffer_pool() local
[all...]
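
The bucket selection at line 14 is worth unpacking. A standalone sketch of the same idea, assuming 4 KiB pages and a four-bucket cache_list (the real bucket count comes from ARRAY_SIZE(pool->cache_list)):

#include <stddef.h>

#define PAGE_SHIFT  12   /* assumption: 4 KiB pages */
#define NUM_BUCKETS 4    /* assumption: illustrative bucket count */

static size_t bucket_index(size_t sz)
{
    size_t pages = sz >> PAGE_SHIFT;
    size_t n = 0;

    while (pages >>= 1)          /* floor(log2(page count)) */
        n++;
    if (n >= NUM_BUCKETS)
        n = NUM_BUCKETS - 1;     /* clamp oversized buffers into the last bucket */
    return n;
}

So a one-page buffer lands in bucket 0, two pages in bucket 1, and anything past the last power of two shares the final bucket.
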
/kernel/linux/linux-6.6/mm/
zbud.c
62 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
66 * 63 freelists per pool.
78 * struct zbud_pool - stores metadata for each zbud pool
79 * @lock: protects all pool fields and first|last_chunk fields of any
80 * zbud page in the pool
86 * @pages_nr: number of zbud pages in the pool.
88 * This structure is allocated at pool creation time and maintains metadata
89 * pertaining to a particular zbud pool.
107 * @buddy: links the zbud page into the unbuddied/buddied lists in the pool
194 * zbud_create_pool() - create a new zbud pool
202 struct zbud_pool *pool; zbud_create_pool() local
222 zbud_destroy_pool(struct zbud_pool *pool) zbud_destroy_pool() argument
246 zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, unsigned long *handle) zbud_alloc() argument
311 zbud_free(struct zbud_pool *pool, unsigned long handle) zbud_free() argument
353 zbud_map(struct zbud_pool *pool, unsigned long handle) zbud_map() argument
363 zbud_unmap(struct zbud_pool *pool, unsigned long handle) zbud_unmap() argument
374 zbud_get_pool_size(struct zbud_pool *pool) zbud_get_pool_size() argument
388 zbud_zpool_destroy(void *pool) zbud_zpool_destroy() argument
393 zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle) zbud_zpool_malloc() argument
398 zbud_zpool_free(void *pool, unsigned long handle) zbud_zpool_free() argument
403 zbud_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) zbud_zpool_map() argument
408 zbud_zpool_unmap(void *pool, unsigned long handle) zbud_zpool_unmap() argument
413 zbud_zpool_total_size(void *pool) zbud_zpool_total_size() argument
[all...]
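
The zbud entry points above compose into a short lifecycle: alloc a handle, map it for access, unmap promptly, free, destroy. A minimal sketch assuming the 6.6 single-argument form of zbud_create_pool(); note that in 6.6 these functions are static and normally reached through the zbud_zpool_* wrappers listed above, so read this as call-order illustration rather than linkable code:

static int zbud_demo(void)
{
    struct zbud_pool *pool;
    unsigned long handle;
    char *vaddr;
    int err;

    pool = zbud_create_pool(GFP_KERNEL);
    if (!pool)
        return -ENOMEM;

    err = zbud_alloc(pool, 1024, GFP_KERNEL, &handle);
    if (!err) {
        vaddr = zbud_map(pool, handle);   /* mappings are short-lived */
        vaddr[0] = 0;                     /* use the buddied allocation */
        zbud_unmap(pool, handle);
        zbud_free(pool, handle);
    }

    zbud_destroy_pool(pool);
    return err;
}
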
/foundation/filemanagement/dfs_service/services/distributedfiledaemon/test/unittest/network/
session_pool_test.cpp
135 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); in HWTEST_F() local
139 pool->OccupySession(TEST_SESSION_ID, 1); in HWTEST_F()
161 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); in HWTEST_F() local
165 pool->occupySession_.erase(TEST_SESSION_ID); in HWTEST_F()
166 pool->occupySession_.insert(make_pair(TEST_SESSION_ID, LINK_TYPE_AP)); in HWTEST_F()
167 bool flag = pool->FindSession(TEST_SESSION_ID); in HWTEST_F()
169 pool->occupySession_.erase(TEST_SESSION_ID); in HWTEST_F()
191 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); in HWTEST_F() local
195 pool->occupySession_.erase(TEST_SESSION_ID_TWO); in HWTEST_F()
196 bool flag = pool in HWTEST_F()
221 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
249 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
281 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
307 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
353 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
379 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
426 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
466 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
494 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
520 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
552 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
583 shared_ptr<SessionPool> pool = make_shared<SessionPool>(kernelTalker); HWTEST_F() local
[all...]
/third_party/ffmpeg/libavutil/
buffer.c
263 AVBufferPool *pool = av_mallocz(sizeof(*pool)); in av_buffer_pool_init2() local
264 if (!pool) in av_buffer_pool_init2()
267 ff_mutex_init(&pool->mutex, NULL); in av_buffer_pool_init2()
269 pool->size = size; in av_buffer_pool_init2()
270 pool->opaque = opaque; in av_buffer_pool_init2()
271 pool->alloc2 = alloc; in av_buffer_pool_init2()
272 pool->alloc = av_buffer_alloc; // fallback in av_buffer_pool_init2()
273 pool->pool_free = pool_free; in av_buffer_pool_init2()
275 atomic_init(&pool in av_buffer_pool_init2()
282 AVBufferPool *pool = av_mallocz(sizeof(*pool)); av_buffer_pool_init() local
296 buffer_pool_flush(AVBufferPool *pool) buffer_pool_flush() argument
311 buffer_pool_free(AVBufferPool *pool) buffer_pool_free() argument
324 AVBufferPool *pool; av_buffer_pool_uninit() local
342 AVBufferPool *pool = buf->pool; pool_release_buffer() local
358 pool_alloc_buffer(AVBufferPool *pool) pool_alloc_buffer() argument
387 av_buffer_pool_get(AVBufferPool *pool) av_buffer_pool_get() argument
[all...]
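
AVBufferPool is public FFmpeg API, so its intended use can be shown directly. A small sketch (the 4096-byte size is arbitrary; NULL selects the default allocator; recent FFmpeg takes size_t sizes):

#include <libavutil/buffer.h>

int pool_demo(void)
{
    AVBufferPool *pool = av_buffer_pool_init(4096, NULL);
    AVBufferRef *buf;

    if (!pool)
        return -1;

    buf = av_buffer_pool_get(pool);  /* recycles a returned buffer if one is free */
    if (buf) {
        buf->data[0] = 0;            /* use the 4096-byte buffer */
        av_buffer_unref(&buf);       /* hands it back to the pool */
    }

    /* teardown is deferred until every outstanding buffer is released */
    av_buffer_pool_uninit(&pool);
    return 0;
}
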
/kernel/linux/linux-6.6/arch/arm64/kvm/hyp/nvhe/
page_alloc.c
16 * Example buddy-tree for a 4-pages physically contiguous pool:
27 * Example of requests on this pool:
28 * __find_buddy_nocheck(pool, page 0, order 0) => page 1
29 * __find_buddy_nocheck(pool, page 0, order 1) => page 2
30 * __find_buddy_nocheck(pool, page 1, order 0) => page 0
31 * __find_buddy_nocheck(pool, page 2, order 0) => page 3
33 static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool, in __find_buddy_nocheck() argument
42 * Don't return a page outside the pool range -- it belongs to in __find_buddy_nocheck()
45 if (addr < pool->range_start || addr >= pool in __find_buddy_nocheck()
52 __find_buddy_avail(struct hyp_pool *pool, struct hyp_page *p, unsigned short order) __find_buddy_avail() argument
93 __hyp_attach_page(struct hyp_pool *pool, struct hyp_page *p) __hyp_attach_page() argument
130 __hyp_extract_page(struct hyp_pool *pool, struct hyp_page *p, unsigned short order) __hyp_extract_page() argument
153 __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p) __hyp_put_page() argument
166 hyp_put_page(struct hyp_pool *pool, void *addr) hyp_put_page() argument
175 hyp_get_page(struct hyp_pool *pool, void *addr) hyp_get_page() argument
198 hyp_alloc_pages(struct hyp_pool *pool, unsigned short order) hyp_alloc_pages() argument
223 hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages, unsigned int reserved_pages) hyp_pool_init() argument
[all...]
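
The buddy lookup that the comment block illustrates reduces to a single XOR (the truncated line 45 additionally rejects buddies outside the pool range). Checking it against every example above, in PAGE_SIZE units:

/*   page 0, order 0 -> 0 ^ 1 = page 1
 *   page 0, order 1 -> 0 ^ 2 = page 2
 *   page 1, order 0 -> 1 ^ 1 = page 0
 *   page 2, order 0 -> 2 ^ 1 = page 3 */
static unsigned long buddy_addr(unsigned long addr, unsigned short order)
{
    return addr ^ (PAGE_SIZE << order);  /* flip the order-th page bit */
}
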
/kernel/linux/linux-6.6/include/net/
xsk_buff_pool.h
29 struct xsk_buff_pool *pool; member
65 /* For performance reasons, each buff pool has its own array of dma_pages
103 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
105 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
107 int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
108 void xp_destroy(struct xsk_buff_pool *pool);
109 void xp_get_pool(struct xsk_buff_pool *pool);
110 bool xp_put_pool(struct xsk_buff_pool *pool);
111 void xp_clear_dev(struct xsk_buff_pool *pool);
112 void xp_add_xsk(struct xsk_buff_pool *pool, struc
118 xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool, u64 addr) xp_init_xskb_addr() argument
125 xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool, dma_addr_t *dma_pages, u64 addr) xp_init_xskb_dma() argument
161 xp_dma_sync_for_device(struct xsk_buff_pool *pool, dma_addr_t dma, size_t size) xp_dma_sync_for_device() argument
177 xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool, u64 addr, u32 len) xp_desc_crosses_non_contig_pg() argument
194 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr) xp_aligned_extract_addr() argument
215 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr) xp_aligned_extract_idx() argument
[all...]
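
In aligned chunk mode every chunk is a power of two, so the two extract helpers above amount to a mask and a shift. A standalone sketch; the chunk_mask/chunk_shift parameters are assumptions standing in for the pool's fields of the same spirit:

static inline unsigned long long aligned_extract_addr(unsigned long long addr,
                                                      unsigned long long chunk_mask)
{
    return addr & chunk_mask;            /* round down to the chunk start */
}

static inline unsigned int aligned_extract_idx(unsigned long long addr,
                                               unsigned long long chunk_mask,
                                               unsigned int chunk_shift)
{
    return aligned_extract_addr(addr, chunk_mask) >> chunk_shift;  /* chunk number */
}
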
/kernel/linux/linux-5.10/include/linux/
genalloc.h
16 * available. If new memory is added to the pool a lock has to be
46 * @pool: the pool being allocated from
52 void *data, struct gen_pool *pool,
56 * General purpose special memory pool descriptor.
60 struct list_head chunks; /* list of chunks in this pool */
70 * General purpose special memory pool chunk descriptor.
73 struct list_head next_chunk; /* next chunk in pool */
97 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
101 static inline int gen_pool_add_virt(struct gen_pool *pool, unsigne argument
119 gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size, int nid) gen_pool_add() argument
128 gen_pool_alloc_owner(struct gen_pool *pool, size_t size, void **owner) gen_pool_alloc_owner() argument
135 gen_pool_alloc_algo(struct gen_pool *pool, size_t size, genpool_algo_t algo, void *data) gen_pool_alloc_algo() argument
151 gen_pool_alloc(struct gen_pool *pool, size_t size) gen_pool_alloc() argument
169 gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) gen_pool_free() argument
[all...]
/kernel/linux/linux-6.6/include/linux/
genalloc.h
16 * available. If new memory is added to the pool a lock has to be
46 * @pool: the pool being allocated from
52 void *data, struct gen_pool *pool,
56 * General purpose special memory pool descriptor.
60 struct list_head chunks; /* list of chunks in this pool */
70 * General purpose special memory pool chunk descriptor.
73 struct list_head next_chunk; /* next chunk in pool */
97 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
101 static inline int gen_pool_add_virt(struct gen_pool *pool, unsigne argument
119 gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size, int nid) gen_pool_add() argument
128 gen_pool_alloc_owner(struct gen_pool *pool, size_t size, void **owner) gen_pool_alloc_owner() argument
135 gen_pool_alloc_algo(struct gen_pool *pool, size_t size, genpool_algo_t algo, void *data) gen_pool_alloc_algo() argument
151 gen_pool_alloc(struct gen_pool *pool, size_t size) gen_pool_alloc() argument
169 gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) gen_pool_free() argument
[all...]
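
Both genalloc.h copies above (5.10 and 6.6) declare the same public API, whose typical use is: create a pool, seed it with a chunk of special memory, then alloc/free from it. A sketch where the SRAM virtual address is a caller-supplied placeholder:

static int genpool_demo(unsigned long sram_vaddr)
{
    struct gen_pool *pool;
    unsigned long addr;

    pool = gen_pool_create(3, -1);       /* 8-byte granularity, any NUMA node */
    if (!pool)
        return -ENOMEM;

    if (gen_pool_add(pool, sram_vaddr, SZ_64K, -1)) {  /* seed one chunk */
        gen_pool_destroy(pool);
        return -ENOMEM;
    }

    addr = gen_pool_alloc(pool, 256);    /* carve 256 bytes out of the chunk */
    if (addr)
        gen_pool_free(pool, addr, 256);

    gen_pool_destroy(pool);              /* all allocations must be freed first */
    return 0;
}
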
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/display/dc/dcn302/
dcn302_resource.c
705 static bool dcn302_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) in dcn302_dwbc_create() argument
708 uint32_t pipe_count = pool->res_cap->num_dwb; in dcn302_dwbc_create()
720 pool->dwbc[i] = &dwbc30->base; in dcn302_dwbc_create()
740 static bool dcn302_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) in dcn302_mmhubbub_create() argument
743 uint32_t pipe_count = pool->res_cap->num_dwb; in dcn302_mmhubbub_create()
755 pool->mcif_wb[i] = &mcif_wb30->base; in dcn302_mmhubbub_create()
951 static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) in init_soc_bounding_box() argument
963 loaded_ip->max_num_otg = pool->pipe_count; in init_soc_bounding_box()
964 loaded_ip->max_num_dpp = pool->pipe_count; in init_soc_bounding_box()
985 static void dcn302_resource_destruct(struct resource_pool *pool) in dcn302_resource_destruct() argument
1112 dcn302_destroy_resource_pool(struct resource_pool **pool) dcn302_destroy_resource_pool() argument
1193 dcn302_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct resource_pool *pool) dcn302_resource_construct() argument
1503 struct resource_pool *pool = kzalloc(sizeof(struct resource_pool), GFP_KERNEL); dcn302_create_resource_pool() local
[all...]
/third_party/mesa3d/src/intel/vulkan/
genX_query.c
54 anv_query_address(struct anv_query_pool *pool, uint32_t query) in anv_query_address() argument
57 .bo = pool->bo, in anv_query_address()
58 .offset = query * pool->stride, in anv_query_address()
82 /* Query pool slots are made up of some number of 64-bit values packed in CreateQueryPool()
94 VK_MULTIALLOC_DECL(&ma, struct anv_query_pool, pool, 1); in CreateQueryPool()
174 pool->type = pCreateInfo->queryType; in CreateQueryPool()
175 pool->pipeline_statistics = pipeline_statistics; in CreateQueryPool()
176 pool->stride = uint64s_per_slot * sizeof(uint64_t); in CreateQueryPool()
177 pool->slots = pCreateInfo->queryCount; in CreateQueryPool()
179 if (pool in CreateQueryPool()
301 khr_perf_query_availability_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass) khr_perf_query_availability_offset() argument
307 khr_perf_query_data_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end) khr_perf_query_data_offset() argument
314 khr_perf_query_availability_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass) khr_perf_query_availability_address() argument
322 khr_perf_query_data_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end) khr_perf_query_data_address() argument
378 intel_perf_query_data_offset(struct anv_query_pool *pool, bool end) intel_perf_query_data_offset() argument
397 query_slot(struct anv_query_pool *pool, uint32_t query) query_slot() argument
403 query_is_available(struct anv_query_pool *pool, uint32_t query) query_is_available() argument
421 wait_for_available(struct anv_device *device, struct anv_query_pool *pool, uint32_t query) wait_for_available() argument
659 emit_zero_queries(struct anv_cmd_buffer *cmd_buffer, struct mi_builder *b, struct anv_query_pool *pool, uint32_t first_index, uint32_t num_queries) emit_zero_queries() argument
863 emit_perf_intel_query(struct anv_cmd_buffer *cmd_buffer, struct anv_query_pool *pool, struct mi_builder *b, struct anv_address query_addr, bool end) emit_perf_intel_query() argument
[all...]
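
Line 176 sets pool->stride to uint64s_per_slot * sizeof(uint64_t), so the slot addressing that anv_query_address() builds its anv_address from is a single multiply:

/* Sketch of the slot layout: query N occupies one fixed-stride slot
 * inside the pool's BO. */
static uint64_t query_slot_offset(uint32_t query, uint32_t stride)
{
    return (uint64_t)query * stride;   /* slot N starts at N * stride bytes */
}
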
anv_allocator.c
85 * At the next level we can use various sub-allocators. The state pool is a
86 * pool of smaller, fixed size objects, which operates much like the block
87 * pool. It uses a free list for freeing objects, but when it runs out of
88 * space it just allocates a new block from the block pool. This allocator is
193 /* Assert that we only ever grow the pool */ in anv_state_table_expand_range()
206 /* Just leak the old map until we destroy the pool. We can't munmap it in anv_state_table_expand_range()
237 /* The block pool is always initialized to a nonzero size and this function in anv_state_table_grow()
298 /* We allocated the first block outside the pool so we have to grow in anv_state_table_add()
299 * the pool. pool_state->next acts a mutex: threads who try to in anv_state_table_add()
365 anv_block_pool_expand_range(struct anv_block_pool *pool,
369 anv_block_pool_init(struct anv_block_pool *pool, struct anv_device *device, const char *name, uint64_t start_address, uint32_t initial_size) anv_block_pool_init() argument
444 anv_block_pool_finish(struct anv_block_pool *pool) anv_block_pool_finish() argument
461 anv_block_pool_expand_range(struct anv_block_pool *pool, uint32_t center_bo_offset, uint32_t size) anv_block_pool_expand_range() argument
581 anv_block_pool_map(struct anv_block_pool *pool, int32_t offset, uint32_t size) anv_block_pool_map() argument
628 anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state, uint32_t contiguous_size) anv_block_pool_grow() argument
748 anv_block_pool_alloc_new(struct anv_block_pool *pool, struct anv_block_state *pool_state, uint32_t block_size, uint32_t *padding) anv_block_pool_alloc_new() argument
801 anv_block_pool_alloc(struct anv_block_pool *pool, uint32_t block_size, uint32_t *padding) anv_block_pool_alloc() argument
821 anv_block_pool_alloc_back(struct anv_block_pool *pool, uint32_t block_size) anv_block_pool_alloc_back() argument
837 anv_state_pool_init(struct anv_state_pool *pool, struct anv_device *device, const char *name, uint64_t base_address, int32_t start_offset, uint32_t block_size) anv_state_pool_init() argument
879 anv_state_pool_finish(struct anv_state_pool *pool) anv_state_pool_finish() argument
887 anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool, struct anv_block_pool *block_pool, uint32_t state_size, uint32_t block_size, uint32_t *padding) anv_fixed_size_state_pool_alloc_new() argument
951 anv_state_pool_return_blocks(struct anv_state_pool *pool, uint32_t chunk_offset, uint32_t count, uint32_t block_size) anv_state_pool_return_blocks() argument
991 anv_state_pool_return_chunk(struct anv_state_pool *pool, uint32_t chunk_offset, uint32_t chunk_size, uint32_t small_size) anv_state_pool_return_chunk() argument
1033 anv_state_pool_alloc_no_vg(struct anv_state_pool *pool, uint32_t size, uint32_t align) anv_state_pool_alloc_no_vg() argument
1125 anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align) anv_state_pool_alloc() argument
1136 anv_state_pool_alloc_back(struct anv_state_pool *pool) anv_state_pool_alloc_back() argument
1168 anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state) anv_state_pool_free_no_vg() argument
1184 anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state) anv_state_pool_free() argument
1293 anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool, struct anv_state_pool *parent, uint32_t count, uint32_t size, uint32_t alignment) anv_state_reserved_pool_init() argument
1308 anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool) anv_state_reserved_pool_finish() argument
1320 anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool) anv_state_reserved_pool_alloc() argument
1326 anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool, struct anv_state state) anv_state_reserved_pool_free() argument
1333 anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device, const char *name) anv_bo_pool_init() argument
1348 anv_bo_pool_finish(struct anv_bo_pool *pool) anv_bo_pool_finish() argument
1367 anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size, struct anv_bo **bo_out) anv_bo_pool_alloc() argument
1405 anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo) anv_bo_pool_free() argument
1423 anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool) anv_scratch_pool_init() argument
1429 anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool) anv_scratch_pool_finish() argument
1447 anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool, gl_shader_stage stage, unsigned per_thread_scratch) anv_scratch_pool_alloc() argument
1512 anv_scratch_pool_get_surf(struct anv_device *device, struct anv_scratch_pool *pool, unsigned per_thread_scratch) anv_scratch_pool_get_surf() argument
[all...]
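
The comment at lines 85-88 describes a classic fixed-size sub-allocator: recycle freed objects through a free list, and only cut fresh objects from a block handed out by the block pool when the list is empty. A self-contained sketch of that idea (names are illustrative, not the driver's; the real code grabs a new block instead of failing):

#include <stddef.h>

struct free_node { struct free_node *next; };

struct fixed_pool {
    struct free_node *free_list; /* recycled objects */
    char *block;                 /* current block from the block pool */
    size_t used, block_size, obj_size;
};

static void *fixed_pool_alloc(struct fixed_pool *p)
{
    if (p->free_list) {                    /* reuse before growing */
        struct free_node *n = p->free_list;
        p->free_list = n->next;
        return n;
    }
    if (p->used + p->obj_size > p->block_size)
        return NULL;                       /* real code allocates a new block here */
    void *obj = p->block + p->used;
    p->used += p->obj_size;                /* bump within the current block */
    return obj;
}

static void fixed_pool_free(struct fixed_pool *p, void *obj)
{
    struct free_node *n = obj;
    n->next = p->free_list;                /* push back onto the free list */
    p->free_list = n;
}
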
/third_party/node/deps/undici/src/lib/
balanced-pool.js
14 } = require('./pool-base')
15 const Pool = require('./pool')
70 if (this[kClients].find((pool) => (
71 pool[kUrl].origin === upstreamOrigin &&
72 pool.closed !== true &&
73 pool.destroyed !== true
77 const pool = this[kFactory](upstreamOrigin, Object.assign({}, this[kOptions]))
79 this[kAddClient](pool)
80 pool.on('connect', () => {
81 pool[kWeigh
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/display/dc/dcn303/
dcn303_resource.c
648 static bool dcn303_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) in dcn303_dwbc_create() argument
651 uint32_t pipe_count = pool->res_cap->num_dwb; in dcn303_dwbc_create()
663 pool->dwbc[i] = &dwbc30->base; in dcn303_dwbc_create()
683 static bool dcn303_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) in dcn303_mmhubbub_create() argument
686 uint32_t pipe_count = pool->res_cap->num_dwb; in dcn303_mmhubbub_create()
698 pool->mcif_wb[i] = &mcif_wb30->base; in dcn303_mmhubbub_create()
878 static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) in init_soc_bounding_box() argument
890 loaded_ip->max_num_otg = pool->pipe_count; in init_soc_bounding_box()
891 loaded_ip->max_num_dpp = pool->pipe_count; in init_soc_bounding_box()
911 static void dcn303_resource_destruct(struct resource_pool *pool) in dcn303_resource_destruct() argument
1038 dcn303_destroy_resource_pool(struct resource_pool **pool) dcn303_destroy_resource_pool() argument
1116 dcn303_resource_construct( uint8_t num_virtual_links, struct dc *dc, struct resource_pool *pool) dcn303_resource_construct() argument
1415 struct resource_pool *pool = kzalloc(sizeof(struct resource_pool), GFP_KERNEL); dcn303_create_resource_pool() local
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/ttm/tests/
ttm_pool_test.c
79 struct ttm_pool *pool; in ttm_pool_pre_populated() local
87 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); in ttm_pool_pre_populated()
88 KUNIT_ASSERT_NOT_NULL(test, pool); in ttm_pool_pre_populated()
90 ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); in ttm_pool_pre_populated()
92 err = ttm_pool_alloc(pool, tt, &simple_ctx); in ttm_pool_pre_populated()
95 ttm_pool_free(pool, tt); in ttm_pool_pre_populated()
98 return pool; in ttm_pool_pre_populated()
141 struct ttm_pool *pool; in ttm_pool_alloc_basic() local
151 pool in ttm_pool_alloc_basic()
202 struct ttm_pool *pool; ttm_pool_alloc_basic_dma_addr() local
242 struct ttm_pool *pool; ttm_pool_alloc_order_caching_match() local
270 struct ttm_pool *pool; ttm_pool_alloc_caching_mismatch() local
304 struct ttm_pool *pool; ttm_pool_alloc_order_mismatch() local
340 struct ttm_pool *pool; ttm_pool_free_dma_alloc() local
371 struct ttm_pool *pool; ttm_pool_free_no_dma_alloc() local
399 struct ttm_pool *pool; ttm_pool_fini_basic() local
[all...]
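
Condensing the lifecycle these tests exercise, with arguments mirroring the ttm_pool_init() call in ttm_pool_pre_populated() (use_dma_alloc true, use_dma32 false):

static int ttm_pool_demo(struct device *dev, struct ttm_tt *tt,
                         struct ttm_operation_ctx *ctx)
{
    struct ttm_pool pool;
    int err;

    ttm_pool_init(&pool, dev, NUMA_NO_NODE, true, false);
    err = ttm_pool_alloc(&pool, tt, ctx);   /* back tt with pages */
    if (!err)
        ttm_pool_free(&pool, tt);           /* pages return to the pool's cache */
    ttm_pool_fini(&pool);                   /* drains anything still cached */
    return err;
}
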
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c
24 spinlock_t counter_pool_lock; /* Protects counter pool allocations */
54 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_init() local
62 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_init()
63 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
89 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
99 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_fini() local
104 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_fini()
105 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_fini()
115 const struct mlxsw_sp_counter_pool *pool = priv; in mlxsw_sp_counter_pool_occ_get() local
117 return atomic_read(&pool in mlxsw_sp_counter_pool_occ_get()
124 struct mlxsw_sp_counter_pool *pool; mlxsw_sp_counter_pool_init() local
169 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; mlxsw_sp_counter_pool_fini() local
186 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; mlxsw_sp_counter_alloc() local
227 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; mlxsw_sp_counter_free() local
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlxsw/
spectrum_cnt.c
24 spinlock_t counter_pool_lock; /* Protects counter pool allocations */
54 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_init() local
62 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_init()
63 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
89 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_init()
99 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; in mlxsw_sp_counter_sub_pools_fini() local
104 for (i = 0; i < pool->sub_pools_count; i++) { in mlxsw_sp_counter_sub_pools_fini()
105 sub_pool = &pool->sub_pools[i]; in mlxsw_sp_counter_sub_pools_fini()
115 const struct mlxsw_sp_counter_pool *pool = priv; in mlxsw_sp_counter_pool_occ_get() local
117 return atomic_read(&pool in mlxsw_sp_counter_pool_occ_get()
124 struct mlxsw_sp_counter_pool *pool; mlxsw_sp_counter_pool_init() local
169 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; mlxsw_sp_counter_pool_fini() local
186 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; mlxsw_sp_counter_alloc() local
227 struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; mlxsw_sp_counter_free() local
[all...]
/third_party/ffmpeg/libavcodec/
get_buffer.c
57 FramePool *pool = (FramePool*)data; in frame_pool_free() local
60 for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++) in frame_pool_free()
61 av_buffer_pool_uninit(&pool->pools[i]); in frame_pool_free()
68 FramePool *pool = av_mallocz(sizeof(*pool)); in frame_pool_alloc() local
71 if (!pool) in frame_pool_alloc()
74 buf = av_buffer_create((uint8_t*)pool, sizeof(*pool), in frame_pool_alloc()
77 av_freep(&pool); in frame_pool_alloc()
86 FramePool *pool in update_frame_pool() local
203 FramePool *pool = (FramePool*)avctx->internal->pool->data; audio_get_buffer() local
248 FramePool *pool = (FramePool*)s->internal->pool->data; video_get_buffer() local
[all...]
/kernel/linux/linux-5.10/net/rds/
ib_rdma.c
198 struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool) in rds_ib_reuse_mr() argument
204 spin_lock_irqsave(&pool->clean_lock, flags); in rds_ib_reuse_mr()
205 ret = llist_del_first(&pool->clean_list); in rds_ib_reuse_mr()
206 spin_unlock_irqrestore(&pool->clean_lock, flags); in rds_ib_reuse_mr()
209 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_reuse_mr()
275 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_teardown_mr() local
277 atomic_sub(pinned, &pool->free_pinned); in rds_ib_teardown_mr()
281 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all) in rds_ib_flush_goal() argument
285 item_count = atomic_read(&pool in rds_ib_flush_goal()
342 rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **ibmr_ret) rds_ib_flush_mr_pool() argument
440 rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool) rds_ib_try_reuse_ibmr() argument
479 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work); rds_ib_mr_pool_flush_worker() local
487 struct rds_ib_mr_pool *pool = ibmr->pool; rds_ib_free_mr() local
634 rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) rds_ib_destroy_mr_pool() argument
646 struct rds_ib_mr_pool *pool; rds_ib_create_mr_pool() local
[all...]
/kernel/linux/linux-6.6/net/rds/
ib_rdma.c
198 struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool) in rds_ib_reuse_mr() argument
204 spin_lock_irqsave(&pool->clean_lock, flags); in rds_ib_reuse_mr()
205 ret = llist_del_first(&pool->clean_list); in rds_ib_reuse_mr()
206 spin_unlock_irqrestore(&pool->clean_lock, flags); in rds_ib_reuse_mr()
209 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_reuse_mr()
275 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_teardown_mr() local
277 atomic_sub(pinned, &pool->free_pinned); in rds_ib_teardown_mr()
281 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all) in rds_ib_flush_goal() argument
285 item_count = atomic_read(&pool in rds_ib_flush_goal()
342 rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **ibmr_ret) rds_ib_flush_mr_pool() argument
440 rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool) rds_ib_try_reuse_ibmr() argument
479 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work); rds_ib_mr_pool_flush_worker() local
487 struct rds_ib_mr_pool *pool = ibmr->pool; rds_ib_free_mr() local
634 rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) rds_ib_destroy_mr_pool() argument
646 struct rds_ib_mr_pool *pool; rds_ib_create_mr_pool() local
[all...]
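
The reuse fast path shown in both ib_rdma.c copies pops one MR off a clean llist under a spinlock: llist_del_first() tolerates only a single concurrent consumer, which is exactly what clean_lock serializes. A sketch of that pattern (the llnode member name is an assumption):

static struct rds_ib_mr *reuse_mr_sketch(struct rds_ib_mr_pool *pool)
{
    struct llist_node *node;
    unsigned long flags;

    spin_lock_irqsave(&pool->clean_lock, flags);
    node = llist_del_first(&pool->clean_list);  /* pop one clean MR, if any */
    spin_unlock_irqrestore(&pool->clean_lock, flags);

    return node ? llist_entry(node, struct rds_ib_mr, llnode) : NULL;
}
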
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/lib/
crypto.c
13 * (for example, TLS) after last revalidation in a pool or a bulk.
19 #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)
33 int num_deks; /* the total number of keys in this pool */
34 int avail_deks; /* the number of available keys in this pool */
35 int in_use_deks; /* the number of being used keys in this pool */
288 mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool) in mlx5_crypto_dek_bulk_create() argument
290 struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv; in mlx5_crypto_dek_bulk_create()
291 struct mlx5_core_dev *mdev = pool->mdev; in mlx5_crypto_dek_bulk_create()
313 err = mlx5_crypto_create_dek_bulk(mdev, pool in mlx5_crypto_dek_bulk_create()
334 mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool) mlx5_crypto_dek_pool_add_bulk() argument
357 mlx5_crypto_dek_pool_remove_bulk(struct mlx5_crypto_dek_pool *pool, struct mlx5_crypto_dek_bulk *bulk, bool delay) mlx5_crypto_dek_pool_remove_bulk() argument
370 mlx5_crypto_dek_pool_pop(struct mlx5_crypto_dek_pool *pool, u32 *obj_offset) mlx5_crypto_dek_pool_pop() argument
420 mlx5_crypto_dek_need_sync(struct mlx5_crypto_dek_pool *pool) mlx5_crypto_dek_need_sync() argument
426 mlx5_crypto_dek_free_locked(struct mlx5_crypto_dek_pool *pool, struct mlx5_crypto_dek *dek) mlx5_crypto_dek_free_locked() argument
454 mlx5_crypto_dek_pool_push(struct mlx5_crypto_dek_pool *pool, struct mlx5_crypto_dek *dek) mlx5_crypto_dek_pool_push() argument
479 mlx5_crypto_dek_bulk_reset_synced(struct mlx5_crypto_dek_pool *pool, struct mlx5_crypto_dek_bulk *bulk) mlx5_crypto_dek_bulk_reset_synced() argument
510 mlx5_crypto_dek_bulk_handle_avail(struct mlx5_crypto_dek_pool *pool, struct mlx5_crypto_dek_bulk *bulk, struct list_head *destroy_list) mlx5_crypto_dek_bulk_handle_avail() argument
524 mlx5_crypto_dek_pool_splice_destroy_list(struct mlx5_crypto_dek_pool *pool, struct list_head *list, struct list_head *head) mlx5_crypto_dek_pool_splice_destroy_list() argument
533 mlx5_crypto_dek_pool_free_wait_keys(struct mlx5_crypto_dek_pool *pool) mlx5_crypto_dek_pool_free_wait_keys() argument
548 mlx5_crypto_dek_pool_reset_synced(struct mlx5_crypto_dek_pool *pool) mlx5_crypto_dek_pool_reset_synced() argument
591 struct mlx5_crypto_dek_pool *pool = mlx5_crypto_dek_sync_work_fn() local
672 struct mlx5_crypto_dek_pool *pool = mlx5_crypto_dek_destroy_work_fn() local
684 struct mlx5_crypto_dek_pool *pool; mlx5_crypto_dek_pool_create() local
707 mlx5_crypto_dek_pool_destroy(struct mlx5_crypto_dek_pool *pool) mlx5_crypto_dek_pool_destroy() argument
[all...]
/third_party/mesa3d/src/asahi/lib/
pool.c
28 #include "pool.h"
32 * into the pool and copy there */
37 agx_pool_alloc_backing(struct agx_pool *pool, size_t bo_sz) in agx_pool_alloc_backing() argument
39 struct agx_bo *bo = agx_bo_create(pool->dev, bo_sz, in agx_pool_alloc_backing()
40 pool->create_flags); in agx_pool_alloc_backing()
42 util_dynarray_append(&pool->bos, struct agx_bo *, bo); in agx_pool_alloc_backing()
43 pool->transient_bo = bo; in agx_pool_alloc_backing()
44 pool->transient_offset = 0; in agx_pool_alloc_backing()
50 agx_pool_init(struct agx_pool *pool, struct agx_device *dev, in agx_pool_init() argument
53 memset(pool, in agx_pool_init()
63 agx_pool_cleanup(struct agx_pool *pool) agx_pool_cleanup() argument
73 agx_pool_get_bo_handles(struct agx_pool *pool, uint32_t *handles) agx_pool_get_bo_handles() argument
82 agx_pool_alloc_aligned(struct agx_pool *pool, size_t sz, unsigned alignment) agx_pool_alloc_aligned() argument
109 agx_pool_upload(struct agx_pool *pool, const void *data, size_t sz) agx_pool_upload() argument
115 agx_pool_upload_aligned(struct agx_pool *pool, const void *data, size_t sz, unsigned alignment) agx_pool_upload_aligned() argument
[all...]
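
agx_pool_alloc_aligned() is a bump allocator: align the transient offset, and fall back to a fresh backing BO when the current one is exhausted. A sketch returning just the offset (TRANSIENT_SLAB_SIZE is a made-up minimum BO size, the bo->size field is assumed, and the real function returns a richer CPU/GPU pointer handle):

static unsigned alloc_aligned_sketch(struct agx_pool *pool,
                                     size_t sz, unsigned alignment)
{
    unsigned offset;

    pool->transient_offset = ALIGN_POT(pool->transient_offset, alignment);

    if (pool->transient_offset + sz > pool->transient_bo->size) {
        /* exhausted: grab a new backing BO; it starts at offset 0 */
        agx_pool_alloc_backing(pool, MAX2(sz, TRANSIENT_SLAB_SIZE));
    }

    offset = pool->transient_offset;
    pool->transient_offset += sz;        /* bump past the allocation */
    return offset;
}
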
/third_party/mesa3d/src/gallium/drivers/iris/
iris_border_color.c
36 * To work around this, we maintain a single "border color pool" BO
40 * wasting a lot of space in the pool.
71 struct iris_border_color_pool *pool) in iris_init_border_color_pool()
73 simple_mtx_init(&pool->lock, mtx_plain); in iris_init_border_color_pool()
75 pool->ht = _mesa_hash_table_create(NULL, color_hash, color_equals); in iris_init_border_color_pool()
77 pool->bo = iris_bo_alloc(bufmgr, "border colors", in iris_init_border_color_pool()
80 pool->map = iris_bo_map(NULL, pool->bo, MAP_WRITE); in iris_init_border_color_pool()
83 pool->insert_point = BC_ALIGNMENT; in iris_init_border_color_pool()
86 ASSERTED uint32_t black_offset = iris_upload_border_color(pool, in iris_init_border_color_pool()
70 iris_init_border_color_pool(struct iris_bufmgr *bufmgr, struct iris_border_color_pool *pool) iris_init_border_color_pool() argument
91 iris_destroy_border_color_pool(struct iris_border_color_pool *pool) iris_destroy_border_color_pool() argument
105 iris_upload_border_color(struct iris_border_color_pool *pool, union pipe_color_union *color) iris_upload_border_color() argument
[all...]
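
iris_upload_border_color() deduplicates through a hash table keyed on the color bytes, so each distinct border color occupies one slot in the shared BO. A sketch of that idea using the mesa hash-table helpers; the fixed BC_ALIGNMENT stride and locking details are simplified relative to the real code:

static uint32_t upload_border_color_sketch(struct iris_border_color_pool *pool,
                                           union pipe_color_union *color)
{
    uint32_t hash = color_hash(color);
    struct hash_entry *entry =
        _mesa_hash_table_search_pre_hashed(pool->ht, hash, color);
    if (entry)
        return (uintptr_t)entry->data;        /* already uploaded: reuse offset */

    uint32_t offset = pool->insert_point;
    void *dst = (char *)pool->map + offset;
    memcpy(dst, color, sizeof(*color));       /* upload into the pool BO */
    pool->insert_point += BC_ALIGNMENT;       /* fixed-stride slots */

    /* key on the uploaded copy so the key outlives the caller's stack */
    _mesa_hash_table_insert_pre_hashed(pool->ht, hash, dst,
                                       (void *)(uintptr_t)offset);
    return offset;
}
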
/kernel/linux/linux-5.10/kernel/
workqueue.c
3 * kernel/workqueue.c - generic async execution with shared worker pool
19 * executed in process context. The worker pool is shared and
61 * A bound pool is either associated or disassociated with its CPU.
68 * be executing on any CPU. The pool behaves as an unbound one.
90 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
121 * L: pool->lock protected. Access with pool->lock held.
123 * X: During normal operation, modification requires pool->lock and should
125 * cpu or grabbing pool->lock is enough for read access. If
149 raw_spinlock_t lock; /* the pool loc
201 struct worker_pool *pool; /* I: the associated pool */ global() member
533 worker_pool_assign_id(struct worker_pool *pool) worker_pool_assign_id() argument
767 __need_more_worker(struct worker_pool *pool) __need_more_worker() argument
780 need_more_worker(struct worker_pool *pool) need_more_worker() argument
786 may_start_working(struct worker_pool *pool) may_start_working() argument
792 keep_working(struct worker_pool *pool) keep_working() argument
799 need_to_create_worker(struct worker_pool *pool) need_to_create_worker() argument
805 too_many_workers(struct worker_pool *pool) too_many_workers() argument
819 first_idle_worker(struct worker_pool *pool) first_idle_worker() argument
836 wake_up_worker(struct worker_pool *pool) wake_up_worker() argument
881 struct worker_pool *pool; wq_worker_sleeping() local
963 struct worker_pool *pool = worker->pool; worker_set_flags() local
988 struct worker_pool *pool = worker->pool; worker_clr_flags() local
1038 find_worker_executing_work(struct worker_pool *pool, struct work_struct *work) find_worker_executing_work() argument
1251 struct worker_pool *pool; try_to_grab_pending() local
1341 struct worker_pool *pool = pwq->pool; insert_work() local
1783 struct worker_pool *pool = worker->pool; worker_enter_idle() local
1823 struct worker_pool *pool = worker->pool; worker_leave_idle() local
1856 worker_attach_to_pool(struct worker *worker, struct worker_pool *pool) worker_attach_to_pool() argument
1888 struct worker_pool *pool = worker->pool; worker_detach_from_pool() local
1919 create_worker(struct worker_pool *pool) create_worker() argument
1981 struct worker_pool *pool = worker->pool; destroy_worker() local
2001 struct worker_pool *pool = from_timer(pool, t, idle_timer); idle_worker_timeout() local
2049 struct worker_pool *pool = from_timer(pool, t, mayday_timer); pool_mayday_timeout() local
2145 struct worker_pool *pool = worker->pool; manage_workers() local
2180 struct worker_pool *pool = worker->pool; global() variable
2245 wake_up_worker(pool); global() variable
2373 struct worker_pool *pool = worker->pool; worker_thread() local
2507 struct worker_pool *pool = pwq->pool; rescuer_thread() local
2746 struct worker_pool *pool = pwq->pool; flush_workqueue_prep_pwqs() local
2991 struct worker_pool *pool; start_flush_work() local
3436 init_worker_pool(struct worker_pool *pool) init_worker_pool() argument
3520 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); rcu_free_pool() local
3528 wq_manager_inactive(struct worker_pool *pool) wq_manager_inactive() argument
3550 put_unbound_pool(struct worker_pool *pool) put_unbound_pool() argument
3619 struct worker_pool *pool; get_unbound_pool() local
3691 struct worker_pool *pool = pwq->pool; pwq_unbound_release_workfn() local
3780 init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, struct worker_pool *pool) init_pwq() argument
3822 struct worker_pool *pool; alloc_unbound_pwq() local
4595 struct worker_pool *pool; work_busy() local
4688 pr_cont_pool_info(struct worker_pool *pool) pr_cont_pool_info() argument
4712 struct worker_pool *pool = pwq->pool; show_pwq() local
4791 struct worker_pool *pool; show_workqueue_state() local
4881 struct worker_pool *pool = worker->pool; wq_worker_comm() local
4924 struct worker_pool *pool; unbind_workers() local
4981 rebind_workers(struct worker_pool *pool) rebind_workers() argument
5050 restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) restore_unbound_workers_cpumask() argument
5070 struct worker_pool *pool; workqueue_prepare_cpu() local
5083 struct worker_pool *pool; workqueue_online_cpu() local
5802 struct worker_pool *pool; wq_watchdog_timer_fn() local
5985 struct worker_pool *pool; workqueue_init_early() local
6051 struct worker_pool *pool; workqueue_init() local
[all...]
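
The sizing policy driving this file shows up in the small predicates near lines 767-805: wake or create workers only while work is pending and nothing is running, and cull idle workers once they clearly outnumber busy ones. A paraphrase under 5.10 field names, not verbatim kernel code:

static bool need_more_worker_sketch(struct worker_pool *pool)
{
    /* pending work, and no worker currently running to pick it up */
    return !list_empty(&pool->worklist) && !atomic_read(&pool->nr_running);
}

static bool too_many_workers_sketch(struct worker_pool *pool, int max_idle_ratio)
{
    int nr_idle = pool->nr_idle;
    int nr_busy = pool->nr_workers - nr_idle;

    /* keep a couple of idle workers around as slack before culling */
    return nr_idle > 2 && (nr_idle - 2) * max_idle_ratio >= nr_busy;
}
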
/kernel/linux/linux-5.10/include/trace/events/
page_pool.h
16 TP_PROTO(const struct page_pool *pool,
19 TP_ARGS(pool, inflight, hold, release),
22 __field(const struct page_pool *, pool)
30 __entry->pool = pool;
34 __entry->cnt = pool->destroy_cnt;
38 __entry->pool, __entry->inflight, __entry->hold,
44 TP_PROTO(const struct page_pool *pool,
47 TP_ARGS(pool, page, release),
50 __field(const struct page_pool *, pool)
[all...]

