
Searched refs:pools (Results 1 - 25 of 61) sorted by relevance


/kernel/linux/linux-5.10/tools/testing/selftests/drivers/net/mlxsw/
sharedbuffer_configuration.py
28 def __init__(self, pools):
30 for pool in pools:
72 # The threshold type of pools 4, 8, 9 and 10 cannot be changed
110 # The threshold type of pools 4, 8, 9 and 10 cannot be changed
187 pools = PoolList()
190 pools.append(Pool(pooldict))
191 return pools
194 def do_check_pools(dlname, pools, vp):
195 for pool in pools:
219 def check_pools(dlname, pools)
[all...]
/kernel/linux/linux-6.6/tools/testing/selftests/drivers/net/mlxsw/
sharedbuffer_configuration.py
28 def __init__(self, pools):
30 for pool in pools:
72 # The threshold type of pools 4, 8, 9 and 10 cannot be changed
110 # The threshold type of pools 4, 8, 9 and 10 cannot be changed
187 pools = PoolList()
190 pools.append(Pool(pooldict))
191 return pools
194 def do_check_pools(dlname, pools, vp):
195 for pool in pools:
219 def check_pools(dlname, pools)
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_arg.c
28 struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX]; member
201 arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]); in mlx5dr_arg_get_obj()
226 dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj); in mlx5dr_arg_put_obj()
245 pool_mgr->pools[i] = dr_arg_pool_create(dmn, i); in mlx5dr_arg_mgr_create()
246 if (!pool_mgr->pools[i]) in mlx5dr_arg_mgr_create()
254 dr_arg_pool_destroy(pool_mgr->pools[i]); in mlx5dr_arg_mgr_create()
262 struct dr_arg_pool **pools; in mlx5dr_arg_mgr_destroy() local
268 pools = mgr->pools; in mlx5dr_arg_mgr_destroy()
270 dr_arg_pool_destroy(pools[ in mlx5dr_arg_mgr_destroy()
[all...]
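
The dr_arg.c hits show a pool manager holding an array of pools indexed by log chunk size, created in a loop and unwound in reverse order when a creation fails. Below is a minimal userspace sketch of that create/unwind pattern; the names (arg_pool, arg_mgr_create, MAX_LOG_CHUNK) are illustrative stand-ins, not the mlx5 API.

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_LOG_CHUNK 4  /* illustrative counterpart of DR_ARG_CHUNK_SIZE_MAX */

    struct arg_pool {
        int log_chunk_size;  /* objects from this pool hold 1 << log_chunk_size entries */
    };

    struct arg_mgr {
        struct arg_pool *pools[MAX_LOG_CHUNK];
    };

    /* Create one pool per supported chunk size; destroy what was built on failure. */
    static struct arg_mgr *arg_mgr_create(void)
    {
        struct arg_mgr *mgr = calloc(1, sizeof(*mgr));
        int i;

        if (!mgr)
            return NULL;
        for (i = 0; i < MAX_LOG_CHUNK; i++) {
            mgr->pools[i] = calloc(1, sizeof(*mgr->pools[i]));
            if (!mgr->pools[i])
                goto free_pools;
            mgr->pools[i]->log_chunk_size = i;
        }
        return mgr;

    free_pools:
        while (i--)  /* reverse unwind, like the error path in mlx5dr_arg_mgr_create() */
            free(mgr->pools[i]);
        free(mgr);
        return NULL;
    }

    static void arg_mgr_destroy(struct arg_mgr *mgr)
    {
        int i;

        for (i = 0; i < MAX_LOG_CHUNK; i++)
            free(mgr->pools[i]);
        free(mgr);
    }

    int main(void)
    {
        struct arg_mgr *mgr = arg_mgr_create();

        if (!mgr)
            return 1;
        /* a request of a given size is served from pools[size], as in mlx5dr_arg_get_obj() */
        printf("pool 2 serves %d-entry objects\n", 1 << mgr->pools[2]->log_chunk_size);
        arg_mgr_destroy(mgr);
        return 0;
    }
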
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
pool.c
25 if (!xsk->pools) { in mlx5e_xsk_get_pools()
26 xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS, in mlx5e_xsk_get_pools()
27 sizeof(*xsk->pools), GFP_KERNEL); in mlx5e_xsk_get_pools()
28 if (unlikely(!xsk->pools)) in mlx5e_xsk_get_pools()
41 kfree(xsk->pools); in mlx5e_xsk_put_pools()
42 xsk->pools = NULL; in mlx5e_xsk_put_pools()
54 xsk->pools[ix] = pool; in mlx5e_xsk_add_pool()
60 xsk->pools[ix] = NULL; in mlx5e_xsk_remove_pool()
pool.h
12 if (!xsk || !xsk->pools) in mlx5e_xsk_get_pool()
18 return xsk->pools[ix]; in mlx5e_xsk_get_pool()
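
The xsk pool.c/pool.h hits lazily allocate one array slot per channel, index it by queue id, and return NULL when either the array or the slot is absent. A minimal userspace sketch of that lazy-array pattern, with calloc standing in for kcalloc and MAX_CHANNELS for MLX5E_MAX_NUM_CHANNELS:

    #include <stdlib.h>

    #define MAX_CHANNELS 64  /* stand-in for MLX5E_MAX_NUM_CHANNELS */

    struct xsk_state {
        void **pools;    /* one slot per channel, allocated on first use */
        int refcount;    /* pairs the get/put calls */
    };

    static int xsk_get_pools(struct xsk_state *xsk)
    {
        if (!xsk->pools) {
            xsk->pools = calloc(MAX_CHANNELS, sizeof(*xsk->pools));
            if (!xsk->pools)
                return -1;
        }
        xsk->refcount++;
        return 0;
    }

    static void xsk_put_pools(struct xsk_state *xsk)
    {
        if (--xsk->refcount == 0) {
            free(xsk->pools);
            xsk->pools = NULL;
        }
    }

    /* Adding and removing a pool is plain slot assignment, as in mlx5e_xsk_add_pool()
     * and mlx5e_xsk_remove_pool() (remove just writes NULL back). */
    static void xsk_add_pool(struct xsk_state *xsk, unsigned int ix, void *pool)
    {
        xsk->pools[ix] = pool;
    }

    static void *xsk_get_pool(struct xsk_state *xsk, unsigned int ix)
    {
        if (!xsk->pools)
            return NULL;  /* no channel has registered a pool yet */
        return xsk->pools[ix];
    }

    int main(void)
    {
        struct xsk_state xsk = { 0 };
        int dummy_pool = 42;

        if (xsk_get_pools(&xsk))
            return 1;
        xsk_add_pool(&xsk, 3, &dummy_pool);
        if (xsk_get_pool(&xsk, 3) != (void *)&dummy_pool)
            return 1;
        xsk_put_pools(&xsk);
        return 0;
    }
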
/kernel/linux/linux-5.10/drivers/staging/android/ion/
ion_system_heap.c
44 struct ion_page_pool *pools[NUM_ORDERS]; member
51 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; in alloc_buffer_page()
68 pool = heap->pools[order_to_index(order)]; in free_buffer_page()
186 pool = sys_heap->pools[i]; in ion_system_heap_shrink()
215 static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) in ion_system_heap_destroy_pools() argument
220 if (pools[i]) in ion_system_heap_destroy_pools()
221 ion_page_pool_destroy(pools[i]); in ion_system_heap_destroy_pools()
224 static int ion_system_heap_create_pools(struct ion_page_pool **pools) in ion_system_heap_create_pools() argument
238 pools[i] = pool; in ion_system_heap_create_pools()
244 ion_system_heap_destroy_pools(pools); in ion_system_heap_create_pools()
[all...]
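
ion_system_heap.c keeps one page pool per supported allocation order and tears every pool down again if any creation fails. A standalone sketch of that create/destroy pairing; NUM_ORDERS, the order values and the pool contents are simplified placeholders.

    #include <stdlib.h>

    #define NUM_ORDERS 3
    static const unsigned int orders[NUM_ORDERS] = { 8, 4, 0 };  /* illustrative, highest order first */

    struct page_pool {
        unsigned int order;
    };

    /* Destroy whatever exists; safe on a partially filled, zero-initialized array,
     * matching the if (pools[i]) check in ion_system_heap_destroy_pools(). */
    static void destroy_pools(struct page_pool **pools)
    {
        int i;

        for (i = 0; i < NUM_ORDERS; i++)
            if (pools[i])
                free(pools[i]);
    }

    /* Create one pool per order; unwind everything on the first failure. */
    static int create_pools(struct page_pool **pools)
    {
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
            struct page_pool *pool = calloc(1, sizeof(*pool));

            if (!pool)
                goto err;
            pool->order = orders[i];
            pools[i] = pool;
        }
        return 0;
    err:
        destroy_pools(pools);
        return -1;
    }

    int main(void)
    {
        struct page_pool *pools[NUM_ORDERS] = { NULL };  /* zeroed, like the kzalloc'd heap */

        if (create_pools(pools))
            return 1;
        destroy_pools(pools);
        return 0;
    }
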
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/
ttm_page_alloc_dma.c
28 * over the DMA pools:
70 * The pool structure. There are up to nine pools:
79 * @pools: The 'struct device->dma_pools' link.
85 * @dev: The device that is associated with these pools.
97 struct list_head pools; /* The 'struct device->dma_pools link */ member
142 * DMA pools. Guarded by _mutex->lock.
143 * @pools: The link to 'struct ttm_pool_manager->pools'
148 struct list_head pools; member
154 * struct ttm_pool_manager - Holds memory pools fo
164 struct list_head pools; member
[all...]
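
ttm_page_alloc_dma.c links each DMA pool into lists through embedded list_head members, including a per-device list reachable as device->dma_pools. A minimal sketch of that intrusive-list layout, with a tiny hand-rolled list standing in for <linux/list.h>:

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal doubly-linked intrusive list, standing in for <linux/list.h>. */
    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *n, struct list_head *head)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dma_pool_stub {
        struct list_head pools;   /* link in the owning device's list */
        const char *name;
    };

    struct device_pools_stub {
        struct list_head pools;   /* list head: all pools of one device */
    };

    int main(void)
    {
        struct device_pools_stub dev;
        struct dma_pool_stub a = { .name = "wc" }, b = { .name = "cached" };
        struct list_head *it;

        INIT_LIST_HEAD(&dev.pools);
        list_add(&a.pools, &dev.pools);
        list_add(&b.pools, &dev.pools);

        /* Walk the device's pools, as the TTM shrinker and debugfs code do. */
        for (it = dev.pools.next; it != &dev.pools; it = it->next)
            printf("pool: %s\n", container_of(it, struct dma_pool_stub, pools)->name);
        return 0;
    }
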
ttm_page_alloc.c
96 * struct ttm_pool_manager - Holds memory pools for fst allocation
106 * @pools: All pool objects in use.
114 struct ttm_page_pool pools[NUM_POOLS]; member
243 return &_manager->pools[pool_index]; in ttm_get_pool()
399 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; in ttm_pool_shrink_scan()
422 pool = &_manager->pools[i]; in ttm_pool_shrink_count()
1022 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); in ttm_page_alloc_fini()
1181 p = &_manager->pools[i]; in ttm_page_alloc_debugfs()
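
ttm_page_alloc.c keeps a fixed array of NUM_POOLS pools, and its shrinker starts each pass at a rotating offset so no single pool is always drained first. A sketch of that round-robin selection, with the pool reduced to a page counter:

    #include <stdio.h>

    #define NUM_POOLS 4

    struct page_pool_stub {
        unsigned int npages;
    };

    static struct page_pool_stub pools[NUM_POOLS] = { {10}, {10}, {10}, {10} };
    static unsigned int start_pool;  /* rotates so every pool takes a fair share of freeing */

    /* Free up to nr_to_free pages, visiting pools round-robin from a moving offset,
     * as ttm_pool_shrink_scan() does with (i + pool_offset) % NUM_POOLS. */
    static unsigned int shrink_scan(unsigned int nr_to_free)
    {
        unsigned int pool_offset = start_pool++ % NUM_POOLS;
        unsigned int freed = 0, i;

        for (i = 0; i < NUM_POOLS && freed < nr_to_free; i++) {
            struct page_pool_stub *pool = &pools[(i + pool_offset) % NUM_POOLS];
            unsigned int take = pool->npages < (nr_to_free - freed) ?
                                pool->npages : (nr_to_free - freed);

            pool->npages -= take;
            freed += take;
        }
        return freed;
    }

    int main(void)
    {
        printf("freed %u pages\n", shrink_scan(15));
        printf("freed %u pages\n", shrink_scan(15));
        return 0;
    }
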
/kernel/linux/linux-5.10/arch/sparc/kernel/
iommu-common.c
82 spin_lock_init(&(iommu->pools[i].lock)); in iommu_tbl_pool_init()
83 iommu->pools[i].start = start; in iommu_tbl_pool_init()
84 iommu->pools[i].hint = start; in iommu_tbl_pool_init()
86 iommu->pools[i].end = start - 1; in iommu_tbl_pool_init()
131 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
161 pool = &(iommu->pools[0]); in iommu_tbl_range_alloc()
193 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
237 p = &tbl->pools[pool_nr]; in get_pool()
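
iommu-common.c splits the IOMMU table into IOMMU_NR_POOLS contiguous ranges, each carrying its own start, allocation hint and inclusive end, so allocations from different CPUs work in different regions. A sketch of that initialization, assuming the pool count divides the table size evenly; the per-pool locks and the separate large pool are omitted.

    #include <stdio.h>

    #define NR_POOLS 4  /* stand-in for IOMMU_NR_POOLS */

    struct iommu_pool_stub {
        unsigned long start;  /* first entry owned by this pool */
        unsigned long hint;   /* where the next search begins */
        unsigned long end;    /* last entry owned by this pool (inclusive) */
    };

    struct iommu_table_stub {
        unsigned long table_size;
        struct iommu_pool_stub pools[NR_POOLS];
    };

    /* Split [0, table_size) into NR_POOLS contiguous ranges, mirroring iommu_tbl_pool_init(). */
    static void tbl_pool_init(struct iommu_table_stub *tbl, unsigned long table_size)
    {
        unsigned long pool_size = table_size / NR_POOLS;
        unsigned long start = 0;
        int i;

        tbl->table_size = table_size;
        for (i = 0; i < NR_POOLS; i++) {
            tbl->pools[i].start = start;
            tbl->pools[i].hint = start;
            start += pool_size;             /* offset for the next pool */
            tbl->pools[i].end = start - 1;  /* inclusive end, as in the kernel code */
        }
    }

    int main(void)
    {
        struct iommu_table_stub tbl;
        int i;

        tbl_pool_init(&tbl, 1024);
        for (i = 0; i < NR_POOLS; i++)
            printf("pool %d: %lu..%lu\n", i, tbl.pools[i].start, tbl.pools[i].end);
        return 0;
    }
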
/kernel/linux/linux-6.6/arch/sparc/kernel/
iommu-common.c
82 spin_lock_init(&(iommu->pools[i].lock)); in iommu_tbl_pool_init()
83 iommu->pools[i].start = start; in iommu_tbl_pool_init()
84 iommu->pools[i].hint = start; in iommu_tbl_pool_init()
86 iommu->pools[i].end = start - 1; in iommu_tbl_pool_init()
131 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
161 pool = &(iommu->pools[0]); in iommu_tbl_range_alloc()
193 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
237 p = &tbl->pools[pool_nr]; in get_pool()
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
pool.c
25 if (!xsk->pools) { in mlx5e_xsk_get_pools()
26 xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS, in mlx5e_xsk_get_pools()
27 sizeof(*xsk->pools), GFP_KERNEL); in mlx5e_xsk_get_pools()
28 if (unlikely(!xsk->pools)) in mlx5e_xsk_get_pools()
41 kfree(xsk->pools); in mlx5e_xsk_put_pools()
42 xsk->pools = NULL; in mlx5e_xsk_put_pools()
54 xsk->pools[ix] = pool; in mlx5e_xsk_add_pool()
60 xsk->pools[ix] = NULL; in mlx5e_xsk_remove_pool()
pool.h
12 if (!xsk || !xsk->pools) in mlx5e_xsk_get_pool()
18 return xsk->pools[ix]; in mlx5e_xsk_get_pool()
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/libcxgb/
libcxgb_ppm.c
348 struct cxgbi_ppm_pool *pools; in ppm_alloc_cpu_pool() local
350 unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3; in ppm_alloc_cpu_pool()
367 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; in ppm_alloc_cpu_pool()
368 pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool)); in ppm_alloc_cpu_pool()
370 if (!pools) in ppm_alloc_cpu_pool()
374 struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu); in ppm_alloc_cpu_pool()
384 return pools; in ppm_alloc_cpu_pool()
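
ppm_alloc_cpu_pool() sizes each per-CPU pool as the pool header plus a bitmap of unsigned longs and hands that size to __alloc_percpu(). Userspace has no per-cpu allocator, so the sketch below only reproduces the size computation for a header with a flexible bitmap tail; the struct and field names are illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    /* Illustrative stand-in for struct cxgbi_ppm_pool: a header followed by a bitmap. */
    struct cpu_pool {
        unsigned int base;     /* first index covered by this pool */
        unsigned int next;     /* allocation hint */
        unsigned long bmap[];  /* one bit per pool entry */
    };

    #define BITS_PER_LONG_WORD (sizeof(unsigned long) * CHAR_BIT)

    /* Allocate one pool covering nr_entries indices, mirroring the size formula
     * alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap_words. */
    static struct cpu_pool *cpu_pool_alloc(unsigned int nr_entries)
    {
        size_t bmap_words = (nr_entries + BITS_PER_LONG_WORD - 1) / BITS_PER_LONG_WORD;
        size_t alloc_sz = sizeof(struct cpu_pool) + sizeof(unsigned long) * bmap_words;

        return calloc(1, alloc_sz);
    }

    int main(void)
    {
        struct cpu_pool *pool = cpu_pool_alloc(1000);

        if (!pool)
            return 1;
        printf("allocated pool, bitmap word 0 starts at %lu\n", pool->bmap[0]);
        free(pool);
        return 0;
    }
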
/kernel/linux/linux-5.10/mm/
dmapool.c
50 struct list_head pools; member
81 list_for_each_entry(pool, &dev->dma_pools, pools) { in show_pools()
105 static DEVICE_ATTR(pools, 0444, show_pools, NULL);
116 * Given one of these pools, dma_pool_alloc()
169 INIT_LIST_HEAD(&retval->pools); in dma_pool_create()
183 list_add(&retval->pools, &dev->dma_pools); in dma_pool_create()
191 list_del(&retval->pools); in dma_pool_create()
277 list_del(&pool->pools); in dma_pool_destroy()
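
dmapool.c strings every dma_pool of a device onto dev->dma_pools and exposes them through the device's 'pools' attribute; drivers only see the create/alloc/free/destroy calls. A hedged kernel-style usage sketch (not a standalone program): 'my_desc' and 'mydev_setup' are placeholders, and error handling is trimmed to the minimum.

    #include <linux/device.h>
    #include <linux/dmapool.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/types.h>

    struct my_desc { u32 ctrl; u32 len; u64 addr; };  /* placeholder hardware descriptor */

    static int mydev_setup(struct device *dev)
    {
        struct dma_pool *pool;
        struct my_desc *desc;
        dma_addr_t dma;

        /* A pool of fixed-size, 16-byte-aligned coherent blocks; dma_pool_create()
         * links it into dev->dma_pools, which is what the 'pools' attribute walks. */
        pool = dma_pool_create("mydev_desc", dev, sizeof(struct my_desc), 16, 0);
        if (!pool)
            return -ENOMEM;

        desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
        if (!desc) {
            dma_pool_destroy(pool);
            return -ENOMEM;
        }

        /* ... program 'dma' into the device, fill 'desc' from the CPU ... */

        dma_pool_free(pool, desc, dma);
        dma_pool_destroy(pool);  /* unlinks the pool: the list_del(&pool->pools) above */
        return 0;
    }
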
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/libcxgb/
libcxgb_ppm.c
348 struct cxgbi_ppm_pool *pools; in ppm_alloc_cpu_pool() local
350 unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3; in ppm_alloc_cpu_pool()
367 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; in ppm_alloc_cpu_pool()
368 pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool)); in ppm_alloc_cpu_pool()
370 if (!pools) in ppm_alloc_cpu_pool()
374 struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu); in ppm_alloc_cpu_pool()
384 return pools; in ppm_alloc_cpu_pool()
/kernel/linux/linux-6.6/mm/
dmapool.c
60 struct list_head pools; member
80 list_for_each_entry(pool, &dev->dma_pools, pools) { in pools_show()
92 static DEVICE_ATTR_RO(pools);
210 * Given one of these pools, dma_pool_alloc()
267 INIT_LIST_HEAD(&retval->pools); in dma_pool_create()
280 list_add(&retval->pools, &dev->dma_pools); in dma_pool_create()
288 list_del(&retval->pools); in dma_pool_create()
370 list_del(&pool->pools); in dma_pool_destroy()
/kernel/linux/linux-5.10/drivers/atm/
zatm.c
613 /* prepare free buffer pools */ in start_rx()
1038 unsigned long pools; in zatm_int() local
1041 pools = zin(RQA); in zatm_int()
1042 EVENT("RQA (0x%08x)\n",pools,0); in zatm_int()
1043 for (i = 0; pools; i++) { in zatm_int()
1044 if (pools & 1) { in zatm_int()
1048 pools >>= 1; in zatm_int()
1052 unsigned long pools; in zatm_int() local
1054 pools = zin(RQU); in zatm_int()
1056 dev->number,pools); in zatm_int()
1268 int pools,vccs,rx; in zatm_start() local
[all...]
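
In zatm_int() the RQA and RQU registers deliver a bitmask of receive buffer pools needing service, and the handler walks the set bits with a shift-and-test loop. A standalone sketch of that bit walk, with a constant in place of the register read:

    #include <stdio.h>

    /* Service every pool whose bit is set, as zatm_int() does with the RQA mask. */
    static void service_pools(unsigned long pools)
    {
        int i;

        for (i = 0; pools; i++) {
            if (pools & 1)
                printf("refilling pool %d\n", i);
            pools >>= 1;  /* shift the mask so bit 0 is always the current pool */
        }
    }

    int main(void)
    {
        service_pools(0x25UL);  /* pretend pools 0, 2 and 5 need refilling */
        return 0;
    }
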
/kernel/linux/linux-5.10/arch/powerpc/kernel/
iommu.c
61 * The hash is important to spread CPUs across all the pools. For example,
63 * with 4 pools all primary threads would map to the same pool.
213 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
241 pool = &(tbl->pools[0]); in iommu_range_alloc()
260 /* Now try scanning all the other pools */ in iommu_range_alloc()
263 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
381 p = &tbl->pools[pool_nr]; in get_pool()
718 p = &tbl->pools[i]; in iommu_init_table()
1064 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_take_ownership()
1079 spin_unlock(&tbl->pools[ in iommu_take_ownership()
[all...]
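
The powerpc iommu.c comment explains why the CPU-to-pool mapping is hashed: with SMT-4 numbering, a plain cpu % nr_pools would send every primary thread to pool 0. The sketch below contrasts the two mappings using a 32-bit multiplicative hash in the spirit of the kernel's hash_32(); the SMT width and pool count are illustrative.

    #include <stdio.h>
    #include <stdint.h>

    #define NR_POOLS 4   /* pools per IOMMU table */
    #define SMT_WIDTH 4  /* threads per core, so primary threads are cpus 0, 4, 8, ... */

    /* 32-bit multiplicative hash in the spirit of the kernel's hash_32(). */
    static unsigned int hash32(uint32_t val, unsigned int bits)
    {
        return (val * 0x61C88647u) >> (32 - bits);
    }

    int main(void)
    {
        unsigned int cpu;

        puts("cpu  modulo  hashed");
        for (cpu = 0; cpu < 16; cpu += SMT_WIDTH)  /* primary thread of each core */
            printf("%3u  %6u  %6u\n", cpu,
                   cpu % NR_POOLS,     /* every primary thread lands in pool 0 */
                   hash32(cpu, 2));    /* 2 bits of hash -> one of NR_POOLS pools */
        return 0;
    }
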
/kernel/linux/linux-5.10/drivers/soc/ti/
knav_qmss.h
191 * @pools: list of descriptor pools in the region
203 struct list_head pools; member
207 * struct knav_pool: qmss pools
304 struct list_head pools; member
363 list_for_each_entry(pool, &kdev->pools, list)
/kernel/linux/linux-6.6/drivers/soc/ti/
knav_qmss.h
191 * @pools: list of descriptor pools in the region
203 struct list_head pools; member
207 * struct knav_pool: qmss pools
304 struct list_head pools; member
363 list_for_each_entry(pool, &kdev->pools, list)
/kernel/linux/linux-6.6/drivers/net/ethernet/freescale/dpaa2/
dpaa2-xsk.c
162 pools_params->pools[curr_bp].priority_mask |= (1 << j); in dpaa2_xsk_set_bp_per_qdbin()
163 if (!pools_params->pools[curr_bp].priority_mask) in dpaa2_xsk_set_bp_per_qdbin()
166 pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid; in dpaa2_xsk_set_bp_per_qdbin()
167 pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size; in dpaa2_xsk_set_bp_per_qdbin()
168 pools_params->pools[curr_bp++].backup_pool = 0; in dpaa2_xsk_set_bp_per_qdbin()
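
dpaa2-xsk.c builds an array of buffer-pool descriptors, one per pool actually in use, recording which queue priorities draw from it along with its id and buffer size. A simplified standalone sketch of building such a table from a per-queue pool assignment; the struct and field names are illustrative, not the DPAA2 firmware interface.

    #include <stdio.h>

    #define NUM_QUEUES 8
    #define MAX_POOLS  4

    struct pool_cfg {
        unsigned int pool_id;
        unsigned int buffer_size;
        unsigned char priority_mask;  /* bit j set => queue priority j draws from this pool */
    };

    int main(void)
    {
        /* Which pool each queue priority uses (illustrative assignment). */
        const unsigned int queue_pool[NUM_QUEUES] = { 0, 0, 1, 1, 2, 2, 2, 0 };
        struct pool_cfg cfg[MAX_POOLS];
        unsigned int curr = 0, i, j;

        for (i = 0; i < MAX_POOLS; i++) {
            unsigned char mask = 0;

            /* Collect every queue priority served by pool i, as the qdbin loop does. */
            for (j = 0; j < NUM_QUEUES; j++)
                if (queue_pool[j] == i)
                    mask |= 1 << j;
            if (!mask)
                continue;              /* pool unused: no table entry, like the mask check above */

            cfg[curr].priority_mask = mask;
            cfg[curr].pool_id = i;
            cfg[curr].buffer_size = 2048;  /* stand-in for priv->rx_buf_size */
            curr++;
        }

        for (i = 0; i < curr; i++)
            printf("pool %u: mask 0x%02x size %u\n",
                   cfg[i].pool_id, cfg[i].priority_mask, cfg[i].buffer_size);
        return 0;
    }
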
/kernel/linux/linux-6.6/arch/powerpc/kernel/
iommu.c
100 * The hash is important to spread CPUs across all the pools. For example,
102 * with 4 pools all primary threads would map to the same pool.
252 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
280 pool = &(tbl->pools[0]); in iommu_range_alloc()
299 /* Now try scanning all the other pools */ in iommu_range_alloc()
302 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
429 p = &tbl->pools[pool_nr]; in get_pool()
755 p = &tbl->pools[i]; in iommu_init_table()
1122 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_take_ownership()
1132 spin_unlock(&tbl->pools[ in iommu_take_ownership()
[all...]
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ice/
ice.h
518 struct xsk_buff_pool **pools = ring->vsi->xsk_pools; in ice_xsk_pool() local
524 if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] || in ice_xsk_pool()
528 return pools[qid]; in ice_xsk_pool()
/kernel/linux/linux-5.10/arch/sparc/include/asm/
iommu-common.h
26 struct iommu_pool pools[IOMMU_NR_POOLS]; member
/kernel/linux/linux-6.6/arch/sparc/include/asm/
iommu-common.h
26 struct iommu_pool pools[IOMMU_NR_POOLS]; member

Completed in 20 milliseconds
