Lines Matching defs:pool

63 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
67 * 63 freelists per pool.
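
For orientation, the chunk arithmetic behind those two lines works out as in the sketch below for a 4 KiB page. The macro names mirror mm/zbud.c, but the snippet is an illustration rather than a verbatim copy; in particular, ZHDR_CHUNKS is shown as a plain 1 here, whereas the real code derives it from the aligned size of the zbud header.

    /* Illustrative sketch of the chunking math, assuming PAGE_SHIFT == 12 (4 KiB pages). */
    #define NCHUNKS_ORDER   6                               /* 2^6 => 64-byte granularity */
    #define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)    /* 12 - 6 = 6                 */
    #define CHUNK_SIZE      (1 << CHUNK_SHIFT)              /* 64-byte chunks             */
    #define ZHDR_CHUNKS     1                               /* header occupies one chunk  */
    #define NCHUNKS         ((PAGE_SIZE >> CHUNK_SHIFT) - ZHDR_CHUNKS)  /* 64 - 1 = 63 freelists */
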
77 * struct zbud_pool - stores metadata for each zbud pool
78 * @lock: protects all pool fields and first|last_chunk fields of any
79 * zbud page in the pool
87 * @pages_nr: number of zbud pages in the pool.
89 * pool creation time.
91 * This structure is allocated at pool creation time and maintains metadata
92 * pertaining to a particular zbud pool.
110 * @buddy: links the zbud page into the unbuddied/buddied lists in the pool
111 * @lru: links the zbud page into the lru list in the pool
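
Putting the kernel-doc fragments above together with the field accesses that show up later in these matches (pool->lock, pool->unbuddied[i], pool->buddied, pool->lru, pool->pages_nr, pool->ops, pool->zpool, pool->zpool_ops), the two structures can be sketched roughly as below. This is a reconstruction for orientation only; field order, exact types and the chunk-count members of zbud_header are assumptions, not a copy of the source.

    /* Rough reconstruction from the matches in this listing; treat as a sketch. */
    struct zbud_pool {
        spinlock_t lock;                     /* protects pool fields and per-page chunk counts */
        struct list_head unbuddied[NCHUNKS]; /* freelists indexed by number of free chunks */
        struct list_head buddied;            /* pages that already hold two buddies */
        struct list_head lru;                /* zbud pages in reclaim order */
        u64 pages_nr;                        /* number of zbud pages in the pool */
        const struct zbud_ops *ops;          /* user-defined ops supplied at pool creation time */
        struct zpool *zpool;                 /* set by the zpool glue (zbud_zpool_create) */
        const struct zpool_ops *zpool_ops;   /* eviction callback used by zbud_zpool_evict */
    };

    struct zbud_header {
        struct list_head buddy;              /* links the page into unbuddied/buddied lists */
        struct list_head lru;                /* links the page into the pool LRU list */
        unsigned int first_chunks;           /* size of the first buddy, in chunks (assumed) */
        unsigned int last_chunks;            /* size of the last buddy, in chunks (assumed) */
    };
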
129 static int zbud_zpool_evict(struct zbud_pool *pool, unsigned long handle)
131 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
132 return pool->zpool_ops->evict(pool->zpool, handle);
145 struct zbud_pool *pool;
147 pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
148 if (pool) {
149 pool->zpool = zpool;
150 pool->zpool_ops = zpool_ops;
152 return pool;
155 static void zbud_zpool_destroy(void *pool)
157 zbud_destroy_pool(pool);
160 static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
163 return zbud_alloc(pool, size, gfp, handle);
165 static void zbud_zpool_free(void *pool, unsigned long handle)
167 zbud_free(pool, handle);
170 static int zbud_zpool_shrink(void *pool, unsigned int pages,
177 ret = zbud_reclaim_page(pool, 8);
189 static void *zbud_zpool_map(void *pool, unsigned long handle,
192 return zbud_map(pool, handle);
194 static void zbud_zpool_unmap(void *pool, unsigned long handle)
196 zbud_unmap(pool, handle);
199 static u64 zbud_zpool_total_size(void *pool)
201 return zbud_get_pool_size(pool) * PAGE_SIZE;
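
The zbud_zpool_* functions above are thin shims adapting the zbud API to the generic zpool interface. In mm/zbud.c they are collected into a zpool driver and registered at module init; the sketch below shows that wiring in shortened form (the real init path also creates a debugfs directory, omitted here, and the exact struct zpool_driver layout varies slightly between kernel versions).

    #include <linux/zpool.h>
    #include <linux/module.h>

    /* Shortened sketch of the driver table wiring the shims above into zpool. */
    static struct zpool_driver zbud_zpool_driver = {
        .type       = "zbud",
        .owner      = THIS_MODULE,
        .create     = zbud_zpool_create,
        .destroy    = zbud_zpool_destroy,
        .malloc     = zbud_zpool_malloc,
        .free       = zbud_zpool_free,
        .shrink     = zbud_zpool_shrink,
        .map        = zbud_zpool_map,
        .unmap      = zbud_zpool_unmap,
        .total_size = zbud_zpool_total_size,
    };

    static int __init init_zbud(void)
    {
        zpool_register_driver(&zbud_zpool_driver);
        return 0;
    }

    static void __exit exit_zbud(void)
    {
        zpool_unregister_driver(&zbud_zpool_driver);
    }

    module_init(init_zbud);
    module_exit(exit_zbud);
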
300 * zbud_create_pool() - create a new zbud pool
301 * @gfp: gfp flags when allocating the zbud pool structure
302 * @ops: user-defined operations for the zbud pool
304 * Return: pointer to the new zbud pool or NULL if the metadata allocation
309 struct zbud_pool *pool;
312 pool = kzalloc(sizeof(struct zbud_pool), gfp);
313 if (!pool)
315 spin_lock_init(&pool->lock);
317 INIT_LIST_HEAD(&pool->unbuddied[i]);
318 INIT_LIST_HEAD(&pool->buddied);
319 INIT_LIST_HEAD(&pool->lru);
320 pool->pages_nr = 0;
321 pool->ops = ops;
322 return pool;
326 * zbud_destroy_pool() - destroys an existing zbud pool
327 * @pool: the zbud pool to be destroyed
329 * The pool should be emptied before this function is called.
331 void zbud_destroy_pool(struct zbud_pool *pool)
333 kfree(pool);
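
A minimal lifecycle sketch for zbud_create_pool()/zbud_destroy_pool(), assuming the old exported <linux/zbud.h> header (later kernels dropped it when zbud became reachable only through zpool). my_zbud_init() and my_zbud_exit() are hypothetical names; passing NULL ops keeps the example short, at the cost of zbud_reclaim_page() having no eviction callback to invoke (see the reclaim sketch further down).

    #include <linux/zbud.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static struct zbud_pool *my_pool;

    static int my_zbud_init(void)
    {
        /* gfp here only covers the pool metadata allocation itself */
        my_pool = zbud_create_pool(GFP_KERNEL, NULL);
        if (!my_pool)
            return -ENOMEM;
        return 0;
    }

    static void my_zbud_exit(void)
    {
        /* every handle must have been freed before destroying the pool */
        zbud_destroy_pool(my_pool);
    }
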
338 * @pool: zbud pool from which to allocate
340 * @gfp: gfp flags used if the pool needs to grow
343 * This function will attempt to find a free region in the pool large enough to
346 * allocated and added to the pool to satisfy the request.
349 * as zbud pool pages.
352 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
355 int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
368 spin_lock(&pool->lock);
372 if (!list_empty(&pool->unbuddied[i])) {
373 zhdr = list_first_entry(&pool->unbuddied[i],
385 spin_unlock(&pool->lock);
389 spin_lock(&pool->lock);
390 pool->pages_nr++;
403 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
406 list_add(&zhdr->buddy, &pool->buddied);
412 list_add(&zhdr->lru, &pool->lru);
415 spin_unlock(&pool->lock);
422 * @pool: pool in which the allocation resided
430 void zbud_free(struct zbud_pool *pool, unsigned long handle)
435 spin_lock(&pool->lock);
446 spin_unlock(&pool->lock);
457 pool->pages_nr--;
461 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
464 spin_unlock(&pool->lock);
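
Tying the allocation and free paths above together, the caller's side might look like the hypothetical my_store()/my_drop() pair below; the gfp policy is the caller's choice as long as __GFP_HIGHMEM stays out of it, and filling the buddy goes through zbud_map()/zbud_unmap(), sketched further down.

    #include <linux/zbud.h>
    #include <linux/gfp.h>

    /* Hypothetical wrapper: allocate room for @len bytes, hand back the handle. */
    static int my_store(struct zbud_pool *pool, size_t len, unsigned long *out)
    {
        unsigned long handle;
        int ret;

        /* GFP_KERNEL is fine (no __GFP_HIGHMEM) and lets the pool grow by a page */
        ret = zbud_alloc(pool, len, GFP_KERNEL, &handle);
        if (ret)
            return ret;     /* negative errno, e.g. -EINVAL or -ENOMEM */

        /* the data itself is copied in via zbud_map()/zbud_unmap() */
        *out = handle;
        return 0;
    }

    static void my_drop(struct zbud_pool *pool, unsigned long handle)
    {
        zbud_free(pool, handle);    /* handle must not be mapped or reused after this */
    }
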
468 * zbud_reclaim_page() - evicts allocations from a pool page and frees it
469 * @pool: pool from which a page will attempt to be evicted
482 * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call
483 * the user-defined eviction handler with the pool and handle as arguments.
502 int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries)
508 spin_lock(&pool->lock);
509 if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
511 spin_unlock(&pool->lock);
515 zhdr = list_last_entry(&pool->lru, struct zbud_header, lru);
530 spin_unlock(&pool->lock);
534 ret = pool->ops->evict(pool, first_handle);
539 ret = pool->ops->evict(pool, last_handle);
544 spin_lock(&pool->lock);
552 pool->pages_nr--;
553 spin_unlock(&pool->lock);
559 list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
562 list_add(&zhdr->buddy, &pool->buddied);
566 list_add(&zhdr->lru, &pool->lru);
568 spin_unlock(&pool->lock);
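
The reclaim path above drives a user-supplied eviction callback. The sketch below shows how a user of the old zbud API might wire that up; my_writeback(), my_evict_handler() and my_shrink() are hypothetical, and the retry count of 8 simply mirrors the zpool shrink wrapper shown earlier. (In-tree, zswap is the user that supplies such a handler.)

    #include <linux/zbud.h>
    #include <linux/printk.h>

    /* Hypothetical write-back of the data behind @handle (details elided). */
    static int my_writeback(struct zbud_pool *pool, unsigned long handle)
    {
        /* e.g. map the handle, decompress, write to the backing store */
        return 0;
    }

    /* Called by zbud_reclaim_page() for each buddy in the victim page. */
    static int my_evict_handler(struct zbud_pool *pool, unsigned long handle)
    {
        int ret = my_writeback(pool, handle);

        if (ret)
            return ret;     /* non-zero tells zbud to try the next LRU page */

        /* on success the handler is expected to free the handle */
        zbud_free(pool, handle);
        return 0;
    }

    /* Passed as the ops argument to zbud_create_pool(). */
    static struct zbud_ops my_ops = {
        .evict = my_evict_handler,
    };

    static void my_shrink(struct zbud_pool *pool)
    {
        if (zbud_reclaim_page(pool, 8))
            pr_warn("zbud reclaim failed\n");
    }
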
574 * @pool: pool in which the allocation resides
584 void *zbud_map(struct zbud_pool *pool, unsigned long handle)
591 * @pool: pool in which the allocation resides
594 void zbud_unmap(struct zbud_pool *pool, unsigned long handle)
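
A short sketch of the map/unmap contract: the pointer returned by zbud_map() is only valid until the matching zbud_unmap(). copy_out() below is a hypothetical helper.

    #include <linux/zbud.h>
    #include <linux/string.h>

    /* Hypothetical read-back of @len bytes stored behind @handle. */
    static void copy_out(struct zbud_pool *pool, unsigned long handle,
                         void *dst, size_t len)
    {
        void *src = zbud_map(pool, handle);     /* pointer into the zbud page */

        memcpy(dst, src, len);
        zbud_unmap(pool, handle);               /* @src must not be used after this */
    }
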
599 * zbud_get_pool_size() - gets the zbud pool size in pages
600 * @pool: pool whose size is being queried
602 * Returns: size in pages of the given pool. The pool lock need not be
605 u64 zbud_get_pool_size(struct zbud_pool *pool)
607 return pool->pages_nr;
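
Finally, a one-line usage sketch for zbud_get_pool_size(); converting the page count to bytes is exactly what the zbud_zpool_total_size() wrapper near the top of this listing does. my_report_pool_size() is a hypothetical name.

    #include <linux/zbud.h>
    #include <linux/mm.h>       /* PAGE_SIZE */
    #include <linux/printk.h>

    static void my_report_pool_size(struct zbud_pool *pool)
    {
        /* safe without the pool lock, per the comment above */
        u64 pages = zbud_get_pool_size(pool);

        pr_info("zbud: %llu pages (%llu bytes)\n",
                (unsigned long long)pages,
                (unsigned long long)(pages * PAGE_SIZE));
    }
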