Lines matching refs: pool (z3fold allocator, mm/z3fold.c)

47  * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
52 * be 63, or 62, respectively, freelists per pool.
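
The 63-or-62 figure on line 52 follows directly from NCHUNKS_ORDER. A quick worked example of that chunk arithmetic, written as standalone C so it can be checked in userspace; the 4 KiB page size and the one-chunk header footprint are assumptions, not taken from the matched lines:

/* Sketch of the freelist-count arithmetic behind lines 47 and 52. */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assume 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define NCHUNKS_ORDER	6			/* 2^6 = 64 chunks per page */
#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1UL << CHUNK_SHIFT)	/* PAGE_SIZE / 64 = 64 bytes */

int main(void)
{
	unsigned long hdr_chunks = 1;	/* header footprint; 2 with CONFIG_DEBUG_SPINLOCK */
	unsigned long nchunks = (PAGE_SIZE >> CHUNK_SHIFT) - hdr_chunks;

	printf("chunk size %lu bytes, %lu freelists per pool\n",
	       CHUNK_SIZE, nchunks);		/* prints 64 and 63 */
	return 0;
}
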
86 unsigned long pool; /* back link */
95 * pool
100 * @pool: pointer to the containing pool
114 struct z3fold_pool *pool;
126 * struct z3fold_pool - stores metadata for each z3fold pool
127 * @name: pool name
128 * @lock: protects pool unbuddied lists
129 * @stale_lock: protects pool stale page list
134 * @pages_nr: number of z3fold pages in the pool.
140 * This structure is allocated at pool creation time and maintains metadata
141 * pertaining to a particular z3fold pool.
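
The @name, @lock, @stale_lock and @pages_nr lines above all document members of the same structure. A condensed sketch of that structure, limited to the fields that actually appear in this listing; the member order and the omission of everything else are assumptions:

/* Condensed sketch of struct z3fold_pool; the real structure has more members. */
struct z3fold_pool {
	const char *name;			/* pool name */
	spinlock_t lock;			/* protects the unbuddied lists */
	spinlock_t stale_lock;			/* protects the stale page list */
	struct list_head __percpu *unbuddied;	/* NCHUNKS freelists per CPU */
	struct list_head stale;			/* pages queued for freeing */
	atomic64_t pages_nr;			/* z3fold pages in the pool */
	struct kmem_cache *c_handle;		/* cache for handle slots */
	struct workqueue_struct *compact_wq;	/* runs per-page compaction work */
	struct workqueue_struct *release_wq;	/* runs free_pages_work() */
	struct work_struct work;		/* drains the stale list */
};
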
194 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
197 struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
203 slots->pool = (unsigned long)pool;
212 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
292 if (test_bit(HANDLES_NOFREE, &slots->pool)) {
310 struct z3fold_pool *pool = slots_to_pool(slots);
314 kmem_cache_free(pool->c_handle, slots);
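
Lines 86, 203, 212 and 292 show that the slots back link is a plain unsigned long holding the pool pointer with a couple of flag bits packed into its low bits. A minimal sketch of that encode/decode pattern; the mask value, the bit number and the slots_set_pool() helper are assumptions introduced here for illustration:

/* Pointer-plus-flags back link, as used by slots->pool (sketch). */
#define HANDLE_FLAG_MASK	0x03UL	/* assumed width of the flag bits */
#define HANDLES_NOFREE		0	/* assumed bit number, used with test_bit() */

static inline void slots_set_pool(struct z3fold_buddy_slots *slots,
				  struct z3fold_pool *pool)
{
	/* The pool allocation is at least word-aligned, so the low bits stay free. */
	slots->pool = (unsigned long)pool;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}
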
320 struct z3fold_pool *pool, gfp_t gfp)
334 slots = alloc_slots(pool, gfp);
343 zhdr->pool = pool;
434 return zhdr->pool;
440 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
445 spin_lock(&pool->lock);
446 spin_unlock(&pool->lock);
451 spin_lock(&pool->stale_lock);
452 list_add(&zhdr->buddy, &pool->stale);
453 queue_work(pool->release_wq, &pool->work);
454 spin_unlock(&pool->stale_lock);
456 atomic64_dec(&pool->pages_nr);
471 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
473 spin_lock(&pool->lock);
475 spin_unlock(&pool->lock);
493 struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
495 spin_lock(&pool->stale_lock);
496 while (!list_empty(&pool->stale)) {
497 struct z3fold_header *zhdr = list_first_entry(&pool->stale,
504 spin_unlock(&pool->stale_lock);
508 spin_lock(&pool->stale_lock);
510 spin_unlock(&pool->stale_lock);
538 static inline void add_to_unbuddied(struct z3fold_pool *pool,
547 unbuddied = this_cpu_ptr(pool->unbuddied);
548 spin_lock(&pool->lock);
550 spin_unlock(&pool->lock);
596 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
632 new_zhdr = __z3fold_alloc(pool, sz, false);
668 add_to_unbuddied(pool, new_zhdr);
678 add_to_unbuddied(pool, new_zhdr);
736 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
748 spin_lock(&pool->lock);
750 spin_unlock(&pool->lock);
771 add_to_unbuddied(pool, zhdr);
785 static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
796 unbuddied = this_cpu_ptr(pool->unbuddied);
807 spin_lock(&pool->lock);
811 spin_unlock(&pool->lock);
820 spin_unlock(&pool->lock);
835 * list while pool lock was held, and then we've taken
851 unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
852 spin_lock(&pool->lock);
859 spin_unlock(&pool->lock);
865 spin_unlock(&pool->lock);
882 zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
890 add_to_unbuddied(pool, zhdr);
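
Lines 796-865 sketch the lookup order inside __z3fold_alloc: the local CPU's unbuddied freelists are searched first, and only then the other CPUs' lists. A stripped-down sketch of that fallback, with hypothetical helper names (find_unbuddied, grab_unbuddied); the page locking, kref handling, migration-disable and retry logic of the real function are omitted:

/* Detach the first header with at least 'chunks' free chunks.
 * Caller holds pool->lock. */
static struct z3fold_header *grab_unbuddied(struct list_head *unbuddied, int chunks)
{
	struct z3fold_header *zhdr;
	int i;

	for (i = chunks; i < NCHUNKS; i++) {
		zhdr = list_first_entry_or_null(&unbuddied[i],
						struct z3fold_header, buddy);
		if (zhdr) {
			list_del_init(&zhdr->buddy);
			return zhdr;
		}
	}
	return NULL;
}

static struct z3fold_header *find_unbuddied(struct z3fold_pool *pool, int chunks)
{
	struct z3fold_header *zhdr;
	int cpu;

	/* Fast path: this CPU's freelists (the real code pins the CPU first). */
	spin_lock(&pool->lock);
	zhdr = grab_unbuddied(this_cpu_ptr(pool->unbuddied), chunks);
	spin_unlock(&pool->lock);
	if (zhdr)
		return zhdr;

	/* Slow path: fall back to every other CPU's freelists. */
	for_each_online_cpu(cpu) {
		spin_lock(&pool->lock);
		zhdr = grab_unbuddied(per_cpu_ptr(pool->unbuddied, cpu), chunks);
		spin_unlock(&pool->lock);
		if (zhdr)
			return zhdr;
	}
	return NULL;
}
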
901 * z3fold_create_pool() - create a new z3fold pool
902 * @name: pool name
903 * @gfp: gfp flags when allocating the z3fold pool structure
905 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
910 struct z3fold_pool *pool = NULL;
913 pool = kzalloc(sizeof(struct z3fold_pool), gfp);
914 if (!pool)
916 pool->c_handle = kmem_cache_create("z3fold_handle",
919 if (!pool->c_handle)
921 spin_lock_init(&pool->lock);
922 spin_lock_init(&pool->stale_lock);
923 pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
925 if (!pool->unbuddied)
929 per_cpu_ptr(pool->unbuddied, cpu);
933 INIT_LIST_HEAD(&pool->stale);
934 atomic64_set(&pool->pages_nr, 0);
935 pool->name = name;
936 pool->compact_wq = create_singlethread_workqueue(pool->name);
937 if (!pool->compact_wq)
939 pool->release_wq = create_singlethread_workqueue(pool->name);
940 if (!pool->release_wq)
942 INIT_WORK(&pool->work, free_pages_work);
943 return pool;
946 destroy_workqueue(pool->compact_wq);
948 free_percpu(pool->unbuddied);
950 kmem_cache_destroy(pool->c_handle);
952 kfree(pool);
958 * z3fold_destroy_pool() - destroys an existing z3fold pool
959 * @pool: the z3fold pool to be destroyed
961 * The pool should be emptied before this function is called.
963 static void z3fold_destroy_pool(struct z3fold_pool *pool)
965 kmem_cache_destroy(pool->c_handle);
968 * We need to destroy pool->compact_wq before pool->release_wq,
969 * as any pending work on pool->compact_wq will call
970 * queue_work(pool->release_wq, &pool->work).
976 destroy_workqueue(pool->compact_wq);
977 destroy_workqueue(pool->release_wq);
978 free_percpu(pool->unbuddied);
979 kfree(pool);
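
Lines 901-979 cover the pool lifecycle: z3fold_create_pool() allocates the metadata, the handle cache, the per-CPU unbuddied lists and the two workqueues, and z3fold_destroy_pool() tears them down (compact_wq strictly before release_wq, per the comment at lines 968-970). Both functions are file-local, so real callers go through the zpool wrappers listed further down; the sketch below only illustrates the calling convention, with hypothetical caller names:

/* Hypothetical lifecycle sketch for a z3fold pool. */
static struct z3fold_pool *my_pool;

static int my_init(void)
{
	my_pool = z3fold_create_pool("my_z3fold", GFP_KERNEL);
	if (!my_pool)
		return -ENOMEM;		/* metadata allocation failed */
	return 0;
}

static void my_exit(void)
{
	/* The pool should be emptied before this point (line 961). */
	z3fold_destroy_pool(my_pool);
}
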
986 * @pool: z3fold pool from which to allocate
988 * @gfp: gfp flags used if the pool needs to grow
991 * This function will attempt to find a free region in the pool large enough to
994 * allocated and added to the pool to satisfy the request.
997 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
1000 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
1019 zhdr = __z3fold_alloc(pool, size, can_sleep);
1039 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
1044 atomic64_inc(&pool->pages_nr);
1070 add_to_unbuddied(pool, zhdr);
1073 spin_lock(&pool->lock);
1075 spin_unlock(&pool->lock);
1084 * @pool: pool in which the allocation resided
1092 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
1112 atomic64_dec(&pool->pages_nr);
1160 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1166 * @pool: pool in which the allocation resides
1174 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1216 * @pool: pool in which the allocation resides
1219 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1239 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1240 * @pool: pool whose size is being queried
1242 * Returns: size in pages of the given pool.
1244 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1246 return atomic64_read(&pool->pages_nr);
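
Lines 986-1246 document the per-allocation interface: z3fold_alloc() hands back an opaque handle, z3fold_map()/z3fold_unmap() bracket any access to the data, z3fold_free() releases the handle, and z3fold_get_pool_size() reports the page count. A sketch of one allocation round-trip, assuming the caller may sleep and that size fits within a page; error handling is abbreviated and the helper name is hypothetical:

/* Hypothetical round-trip: store a buffer in the pool and release it. */
static int my_store_once(struct z3fold_pool *pool, const void *src, size_t size)
{
	unsigned long handle;
	void *dst;
	int ret;

	ret = z3fold_alloc(pool, size, GFP_KERNEL, &handle);
	if (ret)
		return ret;	/* e.g. -EINVAL or -ENOMEM, per line 997 */

	dst = z3fold_map(pool, handle);	/* kernel address of the allocation */
	memcpy(dst, src, size);
	z3fold_unmap(pool, handle);

	pr_debug("pool now holds %llu pages\n",
		 (unsigned long long)z3fold_get_pool_size(pool));

	z3fold_free(pool, handle);
	return 0;
}
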
1252 struct z3fold_pool *pool;
1270 pool = zhdr_to_pool(zhdr);
1271 spin_lock(&pool->lock);
1274 spin_unlock(&pool->lock);
1289 struct z3fold_pool *pool;
1296 pool = zhdr_to_pool(zhdr);
1336 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1347 struct z3fold_pool *pool;
1350 pool = zhdr_to_pool(zhdr);
1359 add_to_unbuddied(pool, zhdr);
1379 static void z3fold_zpool_destroy(void *pool)
1381 z3fold_destroy_pool(pool);
1384 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1387 return z3fold_alloc(pool, size, gfp, handle);
1389 static void z3fold_zpool_free(void *pool, unsigned long handle)
1391 z3fold_free(pool, handle);
1394 static void *z3fold_zpool_map(void *pool, unsigned long handle,
1397 return z3fold_map(pool, handle);
1399 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1401 z3fold_unmap(pool, handle);
1404 static u64 z3fold_zpool_total_size(void *pool)
1406 return z3fold_get_pool_size(pool) * PAGE_SIZE;
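
The z3fold_zpool_* wrappers on lines 1379-1406 adapt the pool API to the kernel's generic zpool interface. A sketch of how such wrappers are wired into a struct zpool_driver and registered; the exact field set varies across kernel versions, and the create/reclaim callbacks that do not appear in this listing are omitted:

/* Illustrative zpool driver wiring for the wrappers above. */
static struct zpool_driver z3fold_zpool_driver = {
	.type		= "z3fold",
	.owner		= THIS_MODULE,
	.destroy	= z3fold_zpool_destroy,
	.malloc		= z3fold_zpool_malloc,
	.free		= z3fold_zpool_free,
	.map		= z3fold_zpool_map,
	.unmap		= z3fold_zpool_unmap,
	.total_size	= z3fold_zpool_total_size,
};

/* Registered with the zpool layer at module init, e.g.:
 *	zpool_register_driver(&z3fold_zpool_driver);
 */

MODULE_ALIAS("zpool-z3fold");
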