Lines matching refs:slabs in Mesa's pipebuffer slab allocator (pb_slab.c)

38 /* Slabs with allocation candidates. Typically, slabs in this list should
46 * Due to a race in new slab allocation, additional slabs in this list
49 struct list_head slabs;
54 pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
64 struct pb_slab_group *group = &slabs->groups[entry->group_index];
65 list_addtail(&slab->head, &group->slabs);
70 slabs->slab_free(slabs->priv, slab);
77 pb_slabs_reclaim_locked(struct pb_slabs *slabs)
81 LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs->reclaim, head) {
82 if (slabs->can_reclaim(slabs->priv, entry)) {
83 pb_slab_reclaim(slabs, entry);
99 pb_slabs_reclaim_all_locked(struct pb_slabs *slabs)
102 LIST_FOR_EACH_ENTRY_SAFE(entry, next, &slabs->reclaim, head) {
103 if (slabs->can_reclaim(slabs->priv, entry)) {
104 pb_slab_reclaim(slabs, entry);
119 pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bool reclaim_all)
121 unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
132 if (slabs->allow_three_fourths_allocations && size <= entry_size * 3 / 4) {
137 assert(order < slabs->min_order + slabs->num_orders);
138 assert(heap < slabs->num_heaps);
140 group_index = (heap * slabs->num_orders + (order - slabs->min_order)) *
141 (1 + slabs->allow_three_fourths_allocations) + three_fourths;
142 group = &slabs->groups[group_index];
144 simple_mtx_lock(&slabs->mutex);
149 if (list_is_empty(&group->slabs) ||
150 list_is_empty(&list_entry(group->slabs.next, struct pb_slab, head)->free)) {
152 pb_slabs_reclaim_all_locked(slabs);
154 pb_slabs_reclaim_locked(slabs);
157 /* Remove slabs without free entries. */
158 while (!list_is_empty(&group->slabs)) {
159 slab = list_entry(group->slabs.next, struct pb_slab, head);
166 if (list_is_empty(&group->slabs)) {
172 * slabs for the same group, but that doesn't hurt correctness.
174 simple_mtx_unlock(&slabs->mutex);
175 slab = slabs->slab_alloc(slabs->priv, heap, entry_size, group_index);
178 simple_mtx_lock(&slabs->mutex);
180 list_add(&slab->head, &group->slabs);
187 simple_mtx_unlock(&slabs->mutex);
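
The group selection above (lines 121-142) maps a request to a bucket by power-of-two order, heap, and optionally a 3/4-sized variant. The following standalone sketch mirrors that arithmetic with made-up parameters (min_order = 8, num_orders = 5) so the mapping can be checked with concrete numbers; util_logbase2_ceil() is replaced by a plain loop so the snippet compiles on its own.

   #include <stdbool.h>

   /* Hypothetical stand-alone version of the group selection done in
    * pb_slab_alloc_reclaimed(); all parameter values are made up. */
   static unsigned
   example_group_index(unsigned size, unsigned heap)
   {
      const unsigned min_order = 8;           /* assumed */
      const unsigned num_orders = 5;          /* assumed: orders 8..12 */
      const bool allow_three_fourths = true;  /* assumed */

      unsigned order = min_order;
      while ((1u << order) < size)            /* stand-in for util_logbase2_ceil() */
         order++;

      unsigned entry_size = 1u << order;
      bool three_fourths = false;
      if (allow_three_fourths && size <= entry_size * 3 / 4)
         three_fourths = true;   /* the real path also shrinks entry_size to 3/4 here */

      /* Same packing as lines 140-141: heap-major, then order, then the
       * three-fourths variant. E.g. size = 3072, heap = 1 yields 19. */
      return (heap * num_orders + (order - min_order)) *
             (1 + allow_three_fourths) + three_fourths;
   }
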
193 pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
195 return pb_slab_alloc_reclaimed(slabs, size, heap, false);
205 pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
207 simple_mtx_lock(&slabs->mutex);
208 list_addtail(&entry->head, &slabs->reclaim);
209 simple_mtx_unlock(&slabs->mutex);
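
Taken together, pb_slab_alloc() and pb_slab_free() are the caller-facing pair: the returned entry is typically embedded in the caller's own buffer struct, and freeing only queues the entry on slabs->reclaim (line 208) until can_reclaim() later approves reuse. A minimal sketch, assuming pb_slab_alloc() returns struct pb_slab_entry * (the return type is not among the matched lines) and using a hypothetical my_buffer wrapper plus the (ptr, type, member) form of container_of():

   #include "pb_slab.h"        /* struct pb_slabs, pb_slab_alloc(), ... */
   #include "util/macros.h"    /* container_of() */

   struct my_buffer {                   /* hypothetical caller-side wrapper */
      struct pb_slab_entry entry;       /* handed out by pb_slab_alloc() */
      /* ...driver-specific state... */
   };

   static struct my_buffer *
   my_buffer_create(struct pb_slabs *slabs, unsigned size, unsigned heap)
   {
      struct pb_slab_entry *entry = pb_slab_alloc(slabs, size, heap);
      if (!entry)
         return NULL;                   /* the slab_alloc callback failed */
      return container_of(entry, struct my_buffer, entry);
   }

   static void
   my_buffer_destroy(struct pb_slabs *slabs, struct my_buffer *buf)
   {
      /* Only appends the entry to slabs->reclaim; the slot is actually
       * reused once can_reclaim() reports it idle. */
      pb_slab_free(slabs, &buf->entry);
   }
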
214 * This may end up freeing some slabs and is therefore useful to try to reclaim
219 pb_slabs_reclaim(struct pb_slabs *slabs)
221 simple_mtx_lock(&slabs->mutex);
222 pb_slabs_reclaim_locked(slabs);
223 simple_mtx_unlock(&slabs->mutex);
226 /* Initialize the slabs manager.
234 pb_slabs_init(struct pb_slabs *slabs,
248 slabs->min_order = min_order;
249 slabs->num_orders = max_order - min_order + 1;
250 slabs->num_heaps = num_heaps;
251 slabs->allow_three_fourths_allocations = allow_three_fourth_allocations;
253 slabs->priv = priv;
254 slabs->can_reclaim = can_reclaim;
255 slabs->slab_alloc = slab_alloc;
256 slabs->slab_free = slab_free;
258 list_inithead(&slabs->reclaim);
260 num_groups = slabs->num_orders * slabs->num_heaps *
262 slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
263 if (!slabs->groups)
267 struct pb_slab_group *group = &slabs->groups[i];
268 list_inithead(&group->slabs);
271 (void) simple_mtx_init(&slabs->mutex, mtx_plain);
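
The assignments above show what pb_slabs_init() expects from its caller: a size range, a heap count, and three callbacks whose shapes match the calls visible in this file (can_reclaim(priv, entry), slab_alloc(priv, heap, entry_size, group_index), slab_free(priv, slab)). A hedged setup sketch, assuming the parameter order follows these assignments and that pb_slabs_init() returns false on allocation failure; all my_* names are hypothetical:

   #include "pb_slab.h"

   static bool
   my_can_reclaim(void *priv, struct pb_slab_entry *entry)
   {
      /* e.g. return true once the fence guarding this entry has signaled */
      return true;
   }

   static struct pb_slab *
   my_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
                 unsigned group_index)
   {
      /* Allocate backing storage for one slab and fill slab->free with
       * entries of entry_size bytes; return NULL on failure. */
      return NULL;
   }

   static void
   my_slab_free(void *priv, struct pb_slab *slab)
   {
      /* Release whatever my_slab_alloc() created for this slab. */
   }

   static bool
   my_slabs_create(struct pb_slabs *slabs, void *screen)
   {
      return pb_slabs_init(slabs,
                           8, 20,      /* min_order..max_order: 256 B..1 MiB */
                           1,          /* num_heaps */
                           true,       /* allow 3/4-sized entry buckets */
                           screen,     /* priv passed back to the callbacks */
                           my_can_reclaim, my_slab_alloc, my_slab_free);
   }
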
278 * This will free all allocated slabs and internal structures, even if some
283 pb_slabs_deinit(struct pb_slabs *slabs)
288 while (!list_is_empty(&slabs->reclaim)) {
290 list_entry(slabs->reclaim.next, struct pb_slab_entry, head);
291 pb_slab_reclaim(slabs, entry);
294 FREE(slabs->groups);
295 simple_mtx_destroy(&slabs->mutex);
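
Note that the shutdown loop above (lines 288-291) hands every entry still sitting on slabs->reclaim straight to pb_slab_reclaim(); unlike the reclaim helpers, it does not appear to consult can_reclaim() first, which matches the comment about freeing slabs even if some are outstanding. A matching teardown sketch for the init example above:

   static void
   my_slabs_destroy(struct pb_slabs *slabs)
   {
      /* Drains whatever is left on slabs->reclaim, then frees the group
       * array and destroys the mutex (lines 288-295). */
      pb_slabs_deinit(slabs);
   }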