Lines Matching refs:pool

48     * - a pointer to the child pool to which this element belongs, or
62 /* Next page in the same child pool. */
83 * pool has been destroyed). Mark the element as freed and free the whole page
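
For orientation, a minimal sketch of the per-element header that these comments describe. The field layout is an assumption reconstructed from the usage visible in this listing, not copied from slab.h:

#include <stdint.h>

/* Assumed layout: this header sits immediately in front of every element. */
struct slab_element_header {
   /* Links the element into a child pool's free or migrated list. */
   struct slab_element_header *next;
   /* Either a pointer to the owning child pool, or a pointer to the
    * parent pool with the lowest bit set once the child pool has been
    * destroyed (the element is then "orphaned"). */
   intptr_t owner;
};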
99 * Create a parent pool for the allocation of same-sized objects.
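
As a usage sketch of the call documented here: slab_create_parent(parent, item_size, num_items) takes the payload size and the number of elements carved out of each page. The element type and counts below are illustrative, not from the source:

#include "util/slab.h"

struct my_object {   /* hypothetical element type */
   int id;
   float data[4];
};

static struct slab_parent_pool parent;

static void init_allocator(void)
{
   /* Every child pool of this parent hands out my_object-sized
    * elements; each page holds 64 of them. */
   slab_create_parent(&parent, sizeof(struct my_object), 64);
}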
123 * Create a child pool linked to the given parent.
125 void slab_create_child(struct slab_child_pool *pool,
128 pool->parent = parent;
129 pool->pages = NULL;
130 pool->free = NULL;
131 pool->migrated = NULL;
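
Note that the constructor only records the parent and leaves the pages, free, and migrated lists empty; the first page is allocated lazily on the first slab_alloc. A per-thread setup sketch, continuing the hypothetical init_allocator above:

/* One child pool per thread (or per context): the alloc/free fast
 * paths then run without ever taking the parent mutex. */
static void init_thread_context(struct slab_child_pool *child)
{
   slab_create_child(child, &parent);
}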
135 * Destroy the child pool.
137 * Pages associated with the pool will be orphaned. They are eventually freed
140 void slab_destroy_child(struct slab_child_pool *pool)
142 if (!pool->parent)
145 simple_mtx_lock(&pool->parent->mutex);
147 while (pool->pages) {
148 struct slab_page_header *page = pool->pages;
149 pool->pages = page->u.next;
150 p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);
152 for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
153 struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
158 while (pool->migrated) {
159 struct slab_element_header *elt = pool->migrated;
160 pool->migrated = elt->next;
164 simple_mtx_unlock(&pool->parent->mutex);
166 while (pool->free) {
167 struct slab_element_header *elt = pool->free;
168 pool->free = elt->next;
173 pool->parent = NULL;
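
Putting the lifetime rules together, a teardown sketch (assuming slab_destroy_parent is the counterpart that eventually reclaims the orphaned pages, as the comment at line 137 says):

static void shutdown_allocator(struct slab_child_pool *child)
{
   /* Marks any still-live elements as parent-owned (lowest owner bit
    * set) and gives up the child's pages and element lists. */
   slab_destroy_child(child);
   /* Releases every page, including the ones orphaned above. */
   slab_destroy_parent(&parent);
}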
177 slab_add_new_page(struct slab_child_pool *pool)
180 pool->parent->num_elements * pool->parent->element_size);
185 for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
186 struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
187 elt->owner = (intptr_t)pool;
190 elt->next = pool->free;
191 pool->free = elt;
195 page->u.next = pool->pages;
196 pool->pages = page;
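
slab_get_element itself is not among the matched lines; a plausible reconstruction, assuming elements are stored contiguously right after the page header in slots of element_size bytes, would be:

#include <stdint.h>

/* Assumed helper: element `index` lives at a fixed offset past the
 * page header, each slot being parent->element_size bytes wide. */
static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header *)
          ((uint8_t *)&page[1] + parent->element_size * index);
}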
202 * Allocate an object from the child pool. Single-threaded (i.e. the caller
203 * must ensure that no operation happens on the same child pool in another thread).
207 slab_alloc(struct slab_child_pool *pool)
211 if (!pool->free) {
213 * different child pool.
215 simple_mtx_lock(&pool->parent->mutex);
216 pool->free = pool->migrated;
217 pool->migrated = NULL;
218 simple_mtx_unlock(&pool->parent->mutex);
221 if (!pool->free && !slab_add_new_page(pool))
225 elt = pool->free;
226 pool->free = elt->next;
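
In use, allocation is a plain pop from the child's free list; only the migrated-list grab above touches the parent mutex. A sketch, reusing the hypothetical my_object and child pool from earlier:

#include <stdbool.h>

static bool make_object(struct slab_child_pool *child)
{
   struct my_object *obj = slab_alloc(child);
   if (!obj)
      return false;   /* a new page could not be allocated */
   obj->id = 42;
   return true;
}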
238 slab_zalloc(struct slab_child_pool *pool)
240 void *r = slab_alloc(pool);
242 memset(r, 0, pool->parent->item_size);
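
The memset clears item_size bytes, which is presumably the caller-visible payload size, as opposed to the element_size slot that includes the header. Usage is the same as slab_alloc (sketch):

#include <assert.h>

static struct my_object *make_zeroed_object(struct slab_child_pool *child)
{
   /* Same fast path as slab_alloc, plus zeroing of the payload. */
   struct my_object *obj = slab_zalloc(child);
   assert(!obj || obj->id == 0);
   return obj;
}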
248 * must ensure that no operation happens on the same child pool in another thread).
251 * Freeing an object in a different child pool from the one where it was
252 * allocated is allowed, as long as the pools belong to the same parent. No
255 void slab_free(struct slab_child_pool *pool, void *ptr)
263 if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
267 elt->next = pool->free;
268 pool->free = elt;
273 if (pool->parent)
274 simple_mtx_lock(&pool->parent->mutex);
276 /* Note: we _must_ re-read elt->owner here because the owning child pool may have been destroyed by another thread in the meantime. */
285 if (pool->parent)
286 simple_mtx_unlock(&pool->parent->mutex);
288 if (pool->parent)
289 simple_mtx_unlock(&pool->parent->mutex);
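
The net effect of the slow path: an element freed through a foreign child pool is pushed onto the owner's migrated list under the parent mutex, or, if the owner was already destroyed, its page is released once the last element comes back. A sketch of the permitted cross-thread pattern, with hypothetical child_a and child_b belonging to the same parent:

/* Thread A allocates from its own child pool... */
struct my_object *obj = slab_alloc(&child_a);

/* ...and thread B may release it through its own pool. The element
 * lands on child_a's migrated list and is reused on child_a's next
 * free-list refill in slab_alloc. */
slab_free(&child_b, obj);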