
Searched refs:slabs (Results 1 - 8 of 8) sorted by relevance

/third_party/mesa3d/src/gallium/auxiliary/pipebuffer/
pb_slab.c
38 /* Slabs with allocation candidates. Typically, slabs in this list should
46 * Due to a race in new slab allocation, additional slabs in this list
49 struct list_head slabs; member
54 pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry) in pb_slab_reclaim() argument
64 struct pb_slab_group *group = &slabs->groups[entry->group_index]; in pb_slab_reclaim()
65 list_addtail(&slab->head, &group->slabs); in pb_slab_reclaim()
70 slabs->slab_free(slabs->priv, slab); in pb_slab_reclaim()
77 pb_slabs_reclaim_locked(struct pb_slabs *slabs) in pb_slabs_reclaim_locked() argument
81 LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs in pb_slabs_reclaim_locked()
99 pb_slabs_reclaim_all_locked(struct pb_slabs *slabs) pb_slabs_reclaim_all_locked() argument
119 pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bool reclaim_all) pb_slab_alloc_reclaimed() argument
193 pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap) pb_slab_alloc() argument
205 pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry) pb_slab_free() argument
219 pb_slabs_reclaim(struct pb_slabs *slabs) pb_slabs_reclaim() argument
234 pb_slabs_init(struct pb_slabs *slabs, unsigned min_order, unsigned max_order, unsigned num_heaps, bool allow_three_fourth_allocations, void *priv, slab_can_reclaim_fn *can_reclaim, slab_alloc_fn *slab_alloc, slab_free_fn *slab_free) pb_slabs_init() argument
283 pb_slabs_deinit(struct pb_slabs *slabs) pb_slabs_deinit() argument
[all...]
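
A note on the reclaim flow these hits outline: a freed entry goes back onto its slab's free list, a previously full slab is re-linked into its group's candidate list (pb_slab.c:65), and a fully idle slab is handed back to the driver through slabs->slab_free (pb_slab.c:70). Below is a minimal sketch of that logic as it would sit inside pb_slab.c; the free/num_free/num_entries fields are assumptions not visible in the hits above.

/* Hedged sketch of the reclaim step (pb_slab.c-internal: pb_slab_group
 * is defined in that file, not in the public header). */
static void
reclaim_entry_sketch(struct pb_slabs *slabs, struct pb_slab_entry *entry)
{
   struct pb_slab *slab = entry->slab;

   /* Move the entry from the reclaim list back to its slab's free list. */
   list_del(&entry->head);
   list_addtail(&entry->head, &slab->free);
   slab->num_free++;

   /* A slab that was full becomes an allocation candidate again. */
   if (slab->num_free == 1) {
      struct pb_slab_group *group = &slabs->groups[entry->group_index];
      list_addtail(&slab->head, &group->slabs);
   }

   /* A completely idle slab is returned to the backing allocator. */
   if (slab->num_free == slab->num_entries) {
      list_del(&slab->head);
      slabs->slab_free(slabs->priv, slab);
   }
}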
pb_slab.h
32 * from larger buffers (called "slabs").
138 pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bool reclaim_all);
141 pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
144 pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry);
147 pb_slabs_reclaim(struct pb_slabs *slabs);
150 pb_slabs_init(struct pb_slabs *slabs,
159 pb_slabs_deinit(struct pb_slabs *slabs);
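
The header block above is the entire public surface: pb_slabs_init() takes the entry-size range as powers of two (min_order..max_order), a heap count, the three-fourth-size toggle, and three driver callbacks; suballocations then go through pb_slab_alloc()/pb_slab_free(). A hedged usage sketch follows; the driver_* callbacks and the example orders are made up for illustration, with parameter lists inferred from the pb_slabs_init() signature shown above.

#include "pb_slab.h"

/* Hypothetical driver callbacks (definitions omitted in this sketch);
 * a real slab_alloc_fn must create a slab and populate its free list. */
static bool driver_can_reclaim(void *priv, struct pb_slab_entry *entry);
static struct pb_slab *driver_slab_alloc(void *priv, unsigned heap,
                                         unsigned entry_size,
                                         unsigned group_index);
static void driver_slab_free(void *priv, struct pb_slab *slab);

static void
example_usage(void *driver_priv)
{
   struct pb_slabs slabs;

   /* Orders 8..12: entry sizes from 256 B up to 1 << 12 = 4 KB. */
   if (!pb_slabs_init(&slabs, 8, 12, /*num_heaps=*/1,
                      /*allow_three_fourth_allocations=*/true,
                      driver_priv, driver_can_reclaim,
                      driver_slab_alloc, driver_slab_free))
      return;

   struct pb_slab_entry *entry = pb_slab_alloc(&slabs, 1024, /*heap=*/0);
   if (entry)
      pb_slab_free(&slabs, entry);   /* queued, reclaimed once idle */

   pb_slabs_deinit(&slabs);
}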
pb_bufmgr_slab.c
94 * It adds/removes slabs as needed in order to meet the allocation/destruction
119 * Partial slabs
121 * Full slabs are not stored in any list. Empty slabs are destroyed
124 struct list_head slabs; member
131 * Wrapper around several slabs, therefore capable of handling buffers of
208 list_addtail(&slab->head, &mgr->slabs); in pb_slab_buffer_destroy()
291 * Called when we ran out of free slabs.
353 /* Add this slab to the list of partial slabs */ in pb_slab_create()
354 list_addtail(&slab->head, &mgr->slabs); in pb_slab_create()
[all...]
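
The hits above spell out this manager's list policy: only partial slabs live on mgr->slabs, full slabs sit in no list, and empty slabs are destroyed rather than cached. A sketch of the destroy-side bookkeeping under that policy; the mgr/slab types are trimmed stand-ins, not the real pb_bufmgr_slab.c layout.

#include <stdbool.h>
#include "util/list.h"

struct slab_mgr_sketch {
   struct list_head slabs;      /* partial slabs only */
};

struct slab_sketch {
   struct list_head head;
   unsigned num_free;
   unsigned num_buffers;
};

static void
buffer_destroy_sketch(struct slab_mgr_sketch *mgr, struct slab_sketch *slab)
{
   bool was_full = (slab->num_free == 0);

   slab->num_free++;

   /* A full slab is in no list; its first freed buffer makes it partial
    * again, so hook it back into the manager's list. */
   if (was_full)
      list_addtail(&slab->head, &mgr->slabs);

   /* An empty slab is destroyed rather than cached. */
   if (slab->num_free == slab->num_buffers) {
      list_del(&slab->head);
      /* ...release the slab's backing storage here... */
   }
}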
/third_party/mesa3d/src/gallium/frontends/nine/
nine_helpers.c
37 pool->slabs = REALLOC(pool->slabs, in nine_range_pool_more()
42 pool->free = pool->slabs[pool->num_slabs++] = r; in nine_range_pool_more()
nine_helpers.h
175 struct nine_range **slabs; member
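
The nine frontend keeps a flat, growable array of slab pointers: nine_range_pool_more() extends it with REALLOC and parks the newest slab both in the array and at the head of the free list. A sketch of that grow pattern using plain realloc(); the struct layout beyond the slabs/num_slabs/free fields is an assumption, so nine_range stays opaque here.

#include <stdbool.h>
#include <stdlib.h>

struct nine_range;   /* opaque here; the real type lives in nine_helpers.h */

struct range_pool_sketch {
   struct nine_range **slabs;
   unsigned num_slabs;
   struct nine_range *free;
};

static bool
pool_more_sketch(struct range_pool_sketch *pool, struct nine_range *r)
{
   /* Grow the bookkeeping array by one slot. */
   struct nine_range **grown =
      realloc(pool->slabs, (pool->num_slabs + 1) * sizeof(*grown));
   if (!grown)
      return false;

   pool->slabs = grown;
   /* The new slab becomes the last array entry and the free-list head. */
   pool->free = pool->slabs[pool->num_slabs++] = r;
   return true;
}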
/third_party/mesa3d/src/gallium/drivers/zink/
zink_bo.c
93 struct pb_slabs *slabs = &bo_slabs[i]; in get_slabs() local
95 if (size <= 1ULL << (slabs->min_order + slabs->num_orders - 1)) in get_slabs()
96 return slabs; in get_slabs()
559 //struct pb_slabs *slabs = ((flags & RADEON_FLAG_ENCRYPTED) && screen->info.has_tmz_support) ? in zink_bo_create()
561 struct pb_slabs *slabs = screen->pb.bo_slabs; in zink_bo_create() local
563 struct pb_slabs *last_slab = &slabs[NUM_SLAB_ALLOCATORS - 1]; in zink_bo_create()
566 /* Sub-allocate small buffers from slabs. */ in zink_bo_create()
576 /* Always use slabs for sizes less than 4 KB because the kernel aligns in zink_bo_create()
596 struct pb_slabs *slabs in zink_bo_create() local
1177 struct pb_slabs *slabs = screen->pb.bo_slabs; bo_slab_alloc() local
[all...]
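
zink's get_slabs() (and the identical loops in iris and amdgpu below) walks an array of pb_slabs allocators and returns the first whose largest entry covers the request: an allocator with min_order m and num_orders n serves entries up to 1 << (m + n - 1) bytes. With m = 8 and n = 5, for instance, the cap is 1 << 12 = 4096, so a 3 KB buffer is suballocated there while a 5 KB one falls through to the next allocator. A sketch of that selection loop; the count parameter stands in for NUM_SLAB_ALLOCATORS.

#include <stdint.h>
#include "pb_slab.h"

/* Return the first allocator whose largest entry order covers the size,
 * or NULL when the request must go to the regular BO path. */
static struct pb_slabs *
pick_slabs_sketch(struct pb_slabs *bo_slabs, unsigned count, uint64_t size)
{
   for (unsigned i = 0; i < count; i++) {
      struct pb_slabs *slabs = &bo_slabs[i];
      uint64_t max_entry =
         1ULL << (slabs->min_order + slabs->num_orders - 1);
      if (size <= max_entry)
         return slabs;
   }
   return NULL;
}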
/third_party/mesa3d/src/gallium/drivers/iris/
iris_bufmgr.c
578 struct pb_slabs *slabs = &bufmgr->bo_slabs[i]; in get_slabs() local
580 if (size <= 1ull << (slabs->min_order + slabs->num_orders - 1)) in get_slabs()
581 return slabs; in get_slabs()
671 struct pb_slabs *slabs = bufmgr->bo_slabs; in iris_slab_alloc() local
676 1 << (slabs[i].min_order + slabs[i].num_orders - 1); in iris_slab_alloc()
799 /* Always use slabs for sizes less than 4 KB because the kernel aligns in alloc_bo_from_slabs()
820 struct pb_slabs *slabs = get_slabs(bufmgr, alloc_size); in alloc_bo_from_slabs() local
821 entry = pb_slab_alloc(slabs, alloc_size in alloc_bo_from_slabs()
[all...]
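
iris gates the slab path the same way: the cap computed from the last bo_slabs entry decides whether a request is slab-sized at all, and per the comment at iris_bufmgr.c:799 anything under 4 KB always goes through slabs because the kernel would align it up anyway. A sketch of that gate, reusing pick_slabs_sketch() from the zink block above; the flag and alignment handling of the real alloc_bo_from_slabs() is omitted.

/* Sketch of the slab gate in alloc_bo_from_slabs(); assumes
 * pick_slabs_sketch() from the previous sketch. */
static struct pb_slab_entry *
try_slab_alloc_sketch(struct pb_slabs *bo_slabs, unsigned count,
                      uint64_t size, unsigned heap)
{
   uint64_t max_slab_entry =
      1ULL << (bo_slabs[count - 1].min_order +
               bo_slabs[count - 1].num_orders - 1);

   /* Too big for any slab entry: fall back to a dedicated BO. */
   if (size > max_slab_entry)
      return NULL;

   struct pb_slabs *slabs = pick_slabs_sketch(bo_slabs, count, size);
   return pb_slab_alloc(slabs, (unsigned)size, heap);
}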
/third_party/mesa3d/src/gallium/winsys/amdgpu/drm/
amdgpu_bo.c
643 struct pb_slabs *slabs = &ws->bo_slabs[i]; in get_slabs() local
645 if (size <= 1 << (slabs->min_order + slabs->num_orders - 1)) in get_slabs()
646 return slabs; in get_slabs()
666 struct pb_slabs *slabs; in amdgpu_bo_slab_destroy() local
670 slabs = get_slabs(ws, bo->base.size); in amdgpu_bo_slab_destroy()
677 pb_slab_free(slabs, &bo->u.slab.entry); in amdgpu_bo_slab_destroy()
798 /* Wasted alignment due to slabs with 3/4 allocations being aligned to a power of two. */ in amdgpu_bo_slab_alloc()
1359 /* Sub-allocate small buffers from slabs. */ in amdgpu_bo_create()
1364 /* Always use slabs for sizes less than 4 KB because the kernel aligns in amdgpu_bo_create()
1384 struct pb_slabs *slabs = get_slabs(ws, alloc_size); amdgpu_bo_create() local
[all...]
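
amdgpu runs the lookup in reverse on destruction: amdgpu_bo_slab_destroy() recovers the owning allocator from the buffer's size with the same get_slabs() walk, then returns the entry via pb_slab_free(). A minimal sketch; the ws/bo stand-in types keep only the fields the hits above actually show (bo_slabs, base.size, u.slab.entry).

#include <stdint.h>
#include "pb_slab.h"

struct ws_sketch {
   struct pb_slabs bo_slabs[4];
   unsigned num_slab_allocators;
};

struct slab_bo_sketch {
   struct { uint64_t size; } base;
   struct { struct { struct pb_slab_entry entry; } slab; } u;
};

static void
slab_bo_destroy_sketch(struct ws_sketch *ws, struct slab_bo_sketch *bo)
{
   /* Same selection walk as get_slabs() above, keyed on the BO size. */
   for (unsigned i = 0; i < ws->num_slab_allocators; i++) {
      struct pb_slabs *slabs = &ws->bo_slabs[i];
      if (bo->base.size <=
          1ULL << (slabs->min_order + slabs->num_orders - 1)) {
         /* Queue the entry for reclaim; the slab is freed once idle. */
         pb_slab_free(slabs, &bo->u.slab.entry);
         return;
      }
   }
}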

Completed in 8 milliseconds