Lines matching defs:pool (mm/z3fold.c)
51 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
56 * be 63, or 62, respectively, freelists per pool.
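
The arithmetic behind this comment, as a sketch: with NCHUNKS_ORDER = 6 and a 4K page, chunks are 64 bytes, and the z3fold header consumes the first chunk (or two, if the header outgrows one chunk), which is where the "63, or 62" freelists come from. The macro shapes below are reconstructed from this comment in the style z3fold.c uses, not quoted from it:

	#define NCHUNKS_ORDER	6
	#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
	#define CHUNK_SIZE	(1 << CHUNK_SHIFT)	/* 64 bytes on a 4K page */
	/* the header occupies the first chunk(s), hence 63 or 62 freelists */
	#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
	#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
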
76 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
93 unsigned long pool; /* back link */
102 * pool
107 * @pool: pointer to the containing pool
121 struct z3fold_pool *pool;
133 * struct z3fold_pool - stores metadata for each z3fold pool
134 * @name: pool name
135 * @lock: protects pool unbuddied/lru lists
136 * @stale_lock: protects pool stale page list
143 * @pages_nr: number of z3fold pages in the pool.
146 * pool creation time.
152 * This structure is allocated at pool creation time and maintains metadata
153 * pertaining to a particular z3fold pool.
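
Pieced together from the matches throughout this listing, the structure behind this kernel-doc plausibly looks as follows; field order, and any field that never matched "pool" here, is an assumption rather than a quote:

	struct z3fold_pool {
		const char *name;
		spinlock_t lock;		/* protects unbuddied/lru lists */
		spinlock_t stale_lock;		/* protects the stale page list */
		struct list_head __percpu *unbuddied; /* NCHUNKS freelists per cpu */
		struct list_head lru;
		struct list_head stale;
		atomic64_t pages_nr;
		struct kmem_cache *c_handle;	/* cache for buddy-slots objects */
		const struct z3fold_ops *ops;
		struct workqueue_struct *compact_wq;
		struct workqueue_struct *release_wq;
		struct work_struct work;	/* stale page release work */
		struct inode *inode;		/* backs page migration */
		struct zpool *zpool;
		const struct zpool_ops *zpool_ops;
	};
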
210 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
215 slots = kmem_cache_zalloc(pool->c_handle,
221 slots->pool = (unsigned long)pool;
230 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
322 if (test_bit(HANDLES_NOFREE, &slots->pool)) {
340 struct z3fold_pool *pool = slots_to_pool(slots);
344 kmem_cache_free(pool->c_handle, slots);
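
Lines 221, 230 and 322 show the back-link trick: the pool pointer and per-slots flag bits share a single word, since the slab-allocated slots are aligned and the low bits of the address are always zero. A minimal user-space illustration of the same tagging scheme (the mask width here is an assumption):

	#include <assert.h>
	#include <stdint.h>

	#define HANDLE_FLAG_MASK ((uintptr_t)3)	/* low-bit flag mask; width assumed */
	#define HANDLES_NOFREE   0		/* flag bit number, as on line 322 */

	int main(void)
	{
		_Alignas(4) static int pool;	/* 4-byte alignment frees the low 2 bits */
		uintptr_t tagged = (uintptr_t)&pool | ((uintptr_t)1 << HANDLES_NOFREE);

		/* slots_to_pool() (line 230) recovers the pointer by masking off flags */
		assert((int *)(tagged & ~HANDLE_FLAG_MASK) == &pool);
		return 0;
	}
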
377 static int z3fold_register_migration(struct z3fold_pool *pool)
379 pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
380 if (IS_ERR(pool->inode)) {
381 pool->inode = NULL;
385 pool->inode->i_mapping->private_data = pool;
386 pool->inode->i_mapping->a_ops = &z3fold_aops;
390 static void z3fold_unregister_migration(struct z3fold_pool *pool)
392 if (pool->inode)
393 iput(pool->inode);
398 struct z3fold_pool *pool, gfp_t gfp)
412 slots = alloc_slots(pool, gfp);
427 zhdr->pool = pool;
519 return zhdr->pool;
525 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
530 spin_lock(&pool->lock);
533 spin_unlock(&pool->lock);
538 spin_lock(&pool->stale_lock);
539 list_add(&zhdr->buddy, &pool->stale);
540 queue_work(pool->release_wq, &pool->work);
541 spin_unlock(&pool->stale_lock);
564 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
566 spin_lock(&pool->lock);
568 spin_unlock(&pool->lock);
576 struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);
578 spin_lock(&pool->stale_lock);
579 while (!list_empty(&pool->stale)) {
580 struct z3fold_header *zhdr = list_first_entry(&pool->stale,
587 spin_unlock(&pool->stale_lock);
591 spin_lock(&pool->stale_lock);
593 spin_unlock(&pool->stale_lock);
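
The fragments above (lines 576-593) trace a classic deferred-release pattern: the release path parks headers on pool->stale under stale_lock and kicks pool->work (lines 538-541), and the worker drains that list, dropping the lock around the actual page free so the free path may sleep. A condensed sketch of the drain loop; everything between the matched lines is reconstructed, not quoted:

	static void free_pages_work(struct work_struct *w)
	{
		struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

		spin_lock(&pool->stale_lock);
		while (!list_empty(&pool->stale)) {
			struct z3fold_header *zhdr = list_first_entry(&pool->stale,
							struct z3fold_header, buddy);

			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);	/* freeing may sleep */
			free_z3fold_page(virt_to_page(zhdr), false);
			spin_lock(&pool->stale_lock);
		}
		spin_unlock(&pool->stale_lock);
	}
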
621 static inline void add_to_unbuddied(struct z3fold_pool *pool,
626 struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
629 spin_lock(&pool->lock);
631 spin_unlock(&pool->lock);
633 put_cpu_ptr(pool->unbuddied);
677 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
713 new_zhdr = __z3fold_alloc(pool, sz, false);
749 add_to_unbuddied(pool, new_zhdr);
760 atomic64_dec(&pool->pages_nr);
762 add_to_unbuddied(pool, new_zhdr);
821 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
833 spin_lock(&pool->lock);
835 spin_unlock(&pool->lock);
838 atomic64_dec(&pool->pages_nr);
851 atomic64_dec(&pool->pages_nr);
860 add_to_unbuddied(pool, zhdr);
874 static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
884 unbuddied = get_cpu_ptr(pool->unbuddied);
895 spin_lock(&pool->lock);
900 spin_unlock(&pool->lock);
902 put_cpu_ptr(pool->unbuddied);
909 spin_unlock(&pool->lock);
916 put_cpu_ptr(pool->unbuddied);
924 * list while pool lock was held, and then we've taken
931 put_cpu_ptr(pool->unbuddied);
940 unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
941 spin_lock(&pool->lock);
948 spin_unlock(&pool->lock);
954 spin_unlock(&pool->lock);
971 zhdr->slots = alloc_slots(pool,
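
The matches from __z3fold_alloc (lines 874-971) outline a two-level lookup: the local CPU's size-indexed unbuddied freelists are tried first, then every other CPU's. In outline only; take_first_fit() is a hypothetical helper standing in for the open-coded freelist walk (which takes pool->lock around each list removal), and the retry dance hinted at by the comment on line 924 is elided:

	static struct z3fold_header *alloc_sketch(struct z3fold_pool *pool, int chunks)
	{
		struct z3fold_header *zhdr;
		int cpu;

		/* fast path: this CPU's size-indexed freelists */
		zhdr = take_first_fit(get_cpu_ptr(pool->unbuddied), chunks, pool);
		put_cpu_ptr(pool->unbuddied);
		if (zhdr)
			return zhdr;

		/* slow path: try every other CPU's freelists */
		for_each_online_cpu(cpu) {
			zhdr = take_first_fit(per_cpu_ptr(pool->unbuddied, cpu),
					      chunks, pool);
			if (zhdr)
				return zhdr;
		}
		return NULL;
	}
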
981 * z3fold_create_pool() - create a new z3fold pool
982 * @name: pool name
983 * @gfp: gfp flags when allocating the z3fold pool structure
984 * @ops: user-defined operations for the z3fold pool
986 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
992 struct z3fold_pool *pool = NULL;
995 pool = kzalloc(sizeof(struct z3fold_pool), gfp);
996 if (!pool)
998 pool->c_handle = kmem_cache_create("z3fold_handle",
1001 if (!pool->c_handle)
1003 spin_lock_init(&pool->lock);
1004 spin_lock_init(&pool->stale_lock);
1005 pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
1006 if (!pool->unbuddied)
1010 per_cpu_ptr(pool->unbuddied, cpu);
1014 INIT_LIST_HEAD(&pool->lru);
1015 INIT_LIST_HEAD(&pool->stale);
1016 atomic64_set(&pool->pages_nr, 0);
1017 pool->name = name;
1018 pool->compact_wq = create_singlethread_workqueue(pool->name);
1019 if (!pool->compact_wq)
1021 pool->release_wq = create_singlethread_workqueue(pool->name);
1022 if (!pool->release_wq)
1024 if (z3fold_register_migration(pool))
1026 INIT_WORK(&pool->work, free_pages_work);
1027 pool->ops = ops;
1028 return pool;
1031 destroy_workqueue(pool->release_wq);
1033 destroy_workqueue(pool->compact_wq);
1035 free_percpu(pool->unbuddied);
1037 kmem_cache_destroy(pool->c_handle);
1039 kfree(pool);
1045 * z3fold_destroy_pool() - destroys an existing z3fold pool
1046 * @pool: the z3fold pool to be destroyed
1048 * The pool should be emptied before this function is called.
1050 static void z3fold_destroy_pool(struct z3fold_pool *pool)
1052 kmem_cache_destroy(pool->c_handle);
1055 * We need to destroy pool->compact_wq before pool->release_wq,
1056 * as any pending work on pool->compact_wq will call
1057 * queue_work(pool->release_wq, &pool->work).
1063 destroy_workqueue(pool->compact_wq);
1064 destroy_workqueue(pool->release_wq);
1065 z3fold_unregister_migration(pool);
1066 free_percpu(pool->unbuddied);
1067 kfree(pool);
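
Typical lifecycle of these two entry points, as a sketch. Note that in mainline both functions are static and are normally reached through the zpool wrappers at the end of this file, so the direct calls here are purely illustrative:

	static int z3fold_selftest(void)	/* hypothetical caller */
	{
		struct z3fold_pool *pool;

		pool = z3fold_create_pool("selftest", GFP_KERNEL, NULL);
		if (!pool)
			return -ENOMEM;

		/* ... z3fold_alloc()/z3fold_free() traffic goes here ... */

		/* the pool must be empty again before destruction (line 1048) */
		z3fold_destroy_pool(pool);
		return 0;
	}
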
1072 * @pool: z3fold pool from which to allocate
1074 * @gfp: gfp flags used if the pool needs to grow
1077 * This function will attempt to find a free region in the pool large enough to
1080 * allocated and added to the pool to satisfy the request.
1083 * as z3fold pool pages.
1086 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
1089 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
1108 zhdr = __z3fold_alloc(pool, size, can_sleep);
1114 atomic64_dec(&pool->pages_nr);
1129 spin_lock(&pool->stale_lock);
1130 zhdr = list_first_entry_or_null(&pool->stale,
1139 spin_unlock(&pool->stale_lock);
1143 spin_unlock(&pool->stale_lock);
1152 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
1157 atomic64_inc(&pool->pages_nr);
1165 __SetPageMovable(page, pool->inode->i_mapping);
1169 __SetPageMovable(page, pool->inode->i_mapping);
1184 add_to_unbuddied(pool, zhdr);
1187 spin_lock(&pool->lock);
1192 list_add(&page->lru, &pool->lru);
1195 spin_unlock(&pool->lock);
1204 * @pool: pool in which the allocation resided
1212 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
1230 spin_lock(&pool->lock);
1232 spin_unlock(&pool->lock);
1235 atomic64_dec(&pool->pages_nr);
1263 atomic64_dec(&pool->pages_nr);
1277 spin_lock(&pool->lock);
1279 spin_unlock(&pool->lock);
1288 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
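
An allocation round trip through the two functions above, as a sketch (assumes a pool created as shown earlier; z3fold_map()/z3fold_unmap() are defined further down at lines 1502 and 1547):

	static int roundtrip(struct z3fold_pool *pool, const void *src, size_t len)
	{
		unsigned long handle;
		void *buf;
		int ret;

		ret = z3fold_alloc(pool, len, GFP_KERNEL, &handle);
		if (ret)	/* nonzero errno per the kernel-doc above */
			return ret;

		buf = z3fold_map(pool, handle);		/* pin and address the object */
		memcpy(buf, src, len);
		z3fold_unmap(pool, handle);

		z3fold_free(pool, handle);		/* page release may be deferred */
		return 0;
	}
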
1293 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1294 * @pool: pool from which a page will attempt to be evicted
1307 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1308 * call the user-defined eviction handler with the pool and handle as
1328 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
1338 slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);
1340 spin_lock(&pool->lock);
1341 if (!pool->ops || !pool->ops->evict || retries == 0) {
1342 spin_unlock(&pool->lock);
1346 if (list_empty(&pool->lru)) {
1347 spin_unlock(&pool->lock);
1350 list_for_each_prev(pos, &pool->lru) {
1378 atomic64_dec(&pool->pages_nr);
1391 atomic64_dec(&pool->pages_nr);
1406 spin_unlock(&pool->lock);
1439 ret = pool->ops->evict(pool, middle_handle);
1444 ret = pool->ops->evict(pool, first_handle);
1449 ret = pool->ops->evict(pool, last_handle);
1457 atomic64_dec(&pool->pages_nr);
1460 spin_lock(&pool->lock);
1461 list_add(&page->lru, &pool->lru);
1462 spin_unlock(&pool->lock);
1469 kmem_cache_free(pool->c_handle, slots);
1470 atomic64_dec(&pool->pages_nr);
1475 * free. Take the global pool lock then to be able
1478 spin_lock(&pool->lock);
1479 list_add(&page->lru, &pool->lru);
1480 spin_unlock(&pool->lock);
1485 /* We started off locked so we need to lock the pool back */
1486 spin_lock(&pool->lock);
1488 spin_unlock(&pool->lock);
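
The eviction contract the kernel-doc above describes, sketched as a user-supplied handler; my_writeback() is a hypothetical storage backend, and exactly which side frees the handle follows the z3fold_ops contract rather than anything quoted here:

	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
	{
		void *data = z3fold_map(pool, handle);
		int err = my_writeback(data);	/* hypothetical backend */

		z3fold_unmap(pool, handle);
		if (err)
			return -EAGAIN;	/* page goes back on the pool LRU */

		z3fold_free(pool, handle);
		return 0;
	}
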
1494 * @pool: pool in which the allocation resides
1502 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
1544 * @pool: pool in which the allocation resides
1547 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
1567 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1568 * @pool: pool whose size is being queried
1570 * Returns: size in pages of the given pool.
1572 static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1574 return atomic64_read(&pool->pages_nr);
1580 struct z3fold_pool *pool;
1599 pool = zhdr_to_pool(zhdr);
1600 spin_lock(&pool->lock);
1605 spin_unlock(&pool->lock);
1620 struct z3fold_pool *pool;
1629 pool = zhdr_to_pool(zhdr);
1668 spin_lock(&pool->lock);
1669 list_add(&newpage->lru, &pool->lru);
1670 spin_unlock(&pool->lock);
1674 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1685 struct z3fold_pool *pool;
1688 pool = zhdr_to_pool(zhdr);
1695 atomic64_dec(&pool->pages_nr);
1698 spin_lock(&pool->lock);
1699 list_add(&page->lru, &pool->lru);
1700 spin_unlock(&pool->lock);
1715 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
1717 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
1718 return pool->zpool_ops->evict(pool->zpool, handle);
1731 struct z3fold_pool *pool;
1733 pool = z3fold_create_pool(name, gfp,
1735 if (pool) {
1736 pool->zpool = zpool;
1737 pool->zpool_ops = zpool_ops;
1739 return pool;
1742 static void z3fold_zpool_destroy(void *pool)
1744 z3fold_destroy_pool(pool);
1747 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
1750 return z3fold_alloc(pool, size, gfp, handle);
1752 static void z3fold_zpool_free(void *pool, unsigned long handle)
1754 z3fold_free(pool, handle);
1757 static int z3fold_zpool_shrink(void *pool, unsigned int pages,
1764 ret = z3fold_reclaim_page(pool, 8);
1776 static void *z3fold_zpool_map(void *pool, unsigned long handle,
1779 return z3fold_map(pool, handle);
1781 static void z3fold_zpool_unmap(void *pool, unsigned long handle)
1783 z3fold_unmap(pool, handle);
1786 static u64 z3fold_zpool_total_size(void *pool)
1788 return z3fold_get_pool_size(pool) * PAGE_SIZE;
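
These wrappers plug z3fold into the generic zpool layer. The registration glue, reconstructed from the wrapper names above (mainline versions may list additional ops):

	static struct zpool_driver z3fold_zpool_driver = {
		.type =		"z3fold",
		.owner =	THIS_MODULE,
		.create =	z3fold_zpool_create,
		.destroy =	z3fold_zpool_destroy,
		.malloc =	z3fold_zpool_malloc,
		.free =		z3fold_zpool_free,
		.shrink =	z3fold_zpool_shrink,
		.map =		z3fold_zpool_map,
		.unmap =	z3fold_zpool_unmap,
		.total_size =	z3fold_zpool_total_size,
	};

	/* registered at module init with zpool_register_driver() */
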