Lines matching refs: pool (mm/zsmalloc.c)

302 static int zs_register_migration(struct zs_pool *pool);
303 static void zs_unregister_migration(struct zs_pool *pool);
307 static void kick_deferred_free(struct zs_pool *pool);
308 static void init_deferred_free(struct zs_pool *pool);
309 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
313 static int zs_register_migration(struct zs_pool *pool) { return 0; }
314 static void zs_unregister_migration(struct zs_pool *pool) {}
318 static void kick_deferred_free(struct zs_pool *pool) {}
319 static void init_deferred_free(struct zs_pool *pool) {}
320 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
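
The five prototypes in the first block and the five one-line stubs in the second are the two halves of a config-gated pair; the preprocessor guard itself does not reference the pool, so it does not show up in this listing. A sketch of the layout, assuming CONFIG_COMPACTION is the guard, as is usual for the migration helpers:

#ifdef CONFIG_COMPACTION
static int zs_register_migration(struct zs_pool *pool);
static void zs_unregister_migration(struct zs_pool *pool);
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
static int zs_register_migration(struct zs_pool *pool) { return 0; }
static void zs_unregister_migration(struct zs_pool *pool) {}
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif
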
323 static int create_cache(struct zs_pool *pool)
325 pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
327 if (!pool->handle_cachep)
330 pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
332 if (!pool->zspage_cachep) {
333 kmem_cache_destroy(pool->handle_cachep);
334 pool->handle_cachep = NULL;
341 static void destroy_cache(struct zs_pool *pool)
343 kmem_cache_destroy(pool->handle_cachep);
344 kmem_cache_destroy(pool->zspage_cachep);
347 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
349 return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
353 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
355 kmem_cache_free(pool->handle_cachep, (void *)handle);
358 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
360 return kmem_cache_alloc(pool->zspage_cachep,
364 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
366 kmem_cache_free(pool->zspage_cachep, zspage);
395 static void zs_zpool_destroy(void *pool)
397 zs_destroy_pool(pool);
400 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
403 *handle = zs_malloc(pool, size, gfp);
406 static void zs_zpool_free(void *pool, unsigned long handle)
408 zs_free(pool, handle);
411 static void *zs_zpool_map(void *pool, unsigned long handle,
429 return zs_map_object(pool, handle, zs_mm);
431 static void zs_zpool_unmap(void *pool, unsigned long handle)
433 zs_unmap_object(pool, handle);
436 static u64 zs_zpool_total_size(void *pool)
438 return zs_get_total_pages(pool) << PAGE_SHIFT;
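
The zs_zpool_* functions above are thin adapters from the zpool callback interface onto the native zs_* API; zs_zpool_total_size() also converts the page count from zs_get_total_pages() into bytes by shifting by PAGE_SHIFT. A minimal sketch of how such adapters are typically collected into a driver table follows; the zpool_driver field names and zpool_register_driver() are assumptions taken from the zpool interface, not lines from this file:

/* Sketch (assumed zpool_driver layout): wiring the wrappers into an ops table */
static struct zpool_driver zs_zpool_driver = {
	.type       = "zsmalloc",
	.owner      = THIS_MODULE,
	/* .create = ..., present in the real driver but not matched by this search */
	.destroy    = zs_zpool_destroy,
	.malloc     = zs_zpool_malloc,
	.free       = zs_zpool_free,
	.map        = zs_zpool_map,
	.unmap      = zs_zpool_unmap,
	.total_size = zs_zpool_total_size,
};

/* registered once at init time, e.g. zpool_register_driver(&zs_zpool_driver); */
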
529 * zsmalloc divides the pool into various size classes where each
589 struct zs_pool *pool = s->private;
604 class = pool->size_class[i];
645 static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
652 pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);
654 debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
658 static void zs_pool_stat_destroy(struct zs_pool *pool)
660 debugfs_remove_recursive(pool->stat_dentry);
672 static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
676 static inline void zs_pool_stat_destroy(struct zs_pool *pool)
686 * the pool (not yet implemented). This function returns fullness
931 static void __free_zspage(struct zs_pool *pool, struct size_class *class,
956 cache_free_zspage(pool, zspage);
960 &pool->pages_allocated);
963 static void free_zspage(struct zs_pool *pool, struct size_class *class,
970 kick_deferred_free(pool);
975 __free_zspage(pool, class, zspage);
1059 static struct zspage *alloc_zspage(struct zs_pool *pool,
1065 struct zspage *zspage = cache_alloc_zspage(pool, gfp);
1083 cache_free_zspage(pool, zspage);
1227 unsigned long zs_get_total_pages(struct zs_pool *pool)
1229 return atomic_long_read(&pool->pages_allocated);
1235 * @pool: pool from which the object was allocated
1248 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1281 class = pool->size_class[class_idx];
1307 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1323 class = pool->size_class[class_idx];
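
A handle returned by zs_malloc() is opaque, so object memory is only reachable through the zs_map_object()/zs_unmap_object() pair above. A minimal usage sketch, assuming the ZS_MM_RW mapping mode from the zsmalloc header and a caller that already holds a valid handle, source buffer and length:

/* Sketch: read-modify-write an object in place. The object may straddle two
 * pages, and the mapping pins the CPU, so do not sleep before unmapping. */
void *obj = zs_map_object(pool, handle, ZS_MM_RW);

memcpy(obj, src_buf, len);	/* len must not exceed the size passed to zs_malloc() */
zs_unmap_object(pool, handle);
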
1348 * @pool: zsmalloc pool to use
1358 size_t zs_huge_class_size(struct zs_pool *pool)
1407 * zs_malloc - Allocate block of given size from pool.
1408 * @pool: pool to allocate from
1416 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
1426 handle = cache_alloc_handle(pool, gfp);
1432 class = pool->size_class[get_size_class_index(size)];
1448 zspage = alloc_zspage(pool, class, gfp);
1450 cache_free_handle(pool, handle);
1461 &pool->pages_allocated);
1465 SetZsPageMovable(pool, zspage);
1497 void zs_free(struct zs_pool *pool, unsigned long handle)
1519 class = pool->size_class[class_idx];
1533 free_zspage(pool, class, zspage);
1538 cache_free_handle(pool, handle);
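
Together with zs_create_pool()/zs_destroy_pool() near the end of the file, zs_malloc() and zs_free() form the core of the exported API. A minimal end-to-end sketch, assuming kernel context and that this vintage of zs_malloc() reports failure as a zero handle (zs_example() and len are placeholder names):

#include <linux/zsmalloc.h>

static int zs_example(size_t len)
{
	struct zs_pool *pool = zs_create_pool("example");
	unsigned long handle;

	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, len, GFP_KERNEL);
	if (!handle) {			/* assumed: failure reported as handle == 0 */
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* fill the object via the zs_map_object()/zs_unmap_object() pair shown above */

	pr_info("pool now holds %lu pages\n", zs_get_total_pages(pool));

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}
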
1652 static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
1853 static void putback_zspage_deferred(struct zs_pool *pool,
1861 schedule_work(&pool->free_work);
1865 static inline void zs_pool_dec_isolated(struct zs_pool *pool)
1867 VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
1868 atomic_long_dec(&pool->isolated_pages);
1870 * Checking pool->destroying must happen after atomic_long_dec()
1871 * for pool->isolated_pages above. Paired with the smp_mb() in
1875 if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
1876 wake_up_all(&pool->migration_wait);
1904 struct zs_pool *pool;
1927 pool = mapping->private_data;
1928 class = pool->size_class[class_idx];
1948 atomic_long_inc(&pool->isolated_pages);
1961 struct zs_pool *pool;
1990 pool = mapping->private_data;
1991 class = pool->size_class[class_idx];
2052 * Also, we ensure that everyone can see pool->destroying before
2055 putback_zspage_deferred(pool, class, zspage);
2056 zs_pool_dec_isolated(pool);
2089 struct zs_pool *pool;
2102 pool = mapping->private_data;
2103 class = pool->size_class[class_idx];
2112 putback_zspage_deferred(pool, class, zspage);
2113 zs_pool_dec_isolated(pool);
2124 static int zs_register_migration(struct zs_pool *pool)
2126 pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
2127 if (IS_ERR(pool->inode)) {
2128 pool->inode = NULL;
2132 pool->inode->i_mapping->private_data = pool;
2133 pool->inode->i_mapping->a_ops = &zsmalloc_aops;
2137 static bool pool_isolated_are_drained(struct zs_pool *pool)
2139 return atomic_long_read(&pool->isolated_pages) == 0;
2143 static void wait_for_isolated_drain(struct zs_pool *pool)
2147 * We're in the process of destroying the pool, so there are no
2152 wait_event(pool->migration_wait,
2153 pool_isolated_are_drained(pool));
2156 static void zs_unregister_migration(struct zs_pool *pool)
2158 pool->destroying = true;
2161 * pool->destroying. Thus pool->isolated_pages will either be 0 in which
2162 * case we don't care, or it will be > 0 and pool->destroying will
2166 wait_for_isolated_drain(pool); /* This can block */
2167 flush_work(&pool->free_work);
2168 iput(pool->inode);
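
The comments quoted from zs_pool_dec_isolated() and zs_unregister_migration() describe a pair of memory barriers, but the barrier statements themselves do not reference the pool and so are missing from this listing. A sketch of how the pairing typically reads; the exact primitives (smp_mb()/smp_mb__after_atomic()) are assumptions to check against the full source:

/* Teardown side (zs_unregister_migration): publish destroying, then wait. */
pool->destroying = true;
smp_mb();				/* assumed: pairs with the barrier below */
wait_for_isolated_drain(pool);		/* blocks until isolated_pages reaches 0 */

/* Migration side (zs_pool_dec_isolated): drop the count, then check destroying. */
atomic_long_dec(&pool->isolated_pages);
smp_mb__after_atomic();			/* assumed: pairs with the smp_mb() above */
if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
	wake_up_all(&pool->migration_wait);
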
2183 struct zs_pool *pool = container_of(work, struct zs_pool,
2187 class = pool->size_class[i];
2203 class = pool->size_class[class_idx];
2205 __free_zspage(pool, pool->size_class[class_idx], zspage);
2210 static void kick_deferred_free(struct zs_pool *pool)
2212 schedule_work(&pool->free_work);
2215 static void init_deferred_free(struct zs_pool *pool)
2217 INIT_WORK(&pool->free_work, async_free_zspage);
2220 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
2226 __SetPageMovable(page, pool->inode->i_mapping);
2252 static unsigned long __zs_compact(struct zs_pool *pool,
2275 if (!migrate_zspage(pool, class, &cc))
2287 free_zspage(pool, class, src_zspage);
2303 unsigned long zs_compact(struct zs_pool *pool)
2310 class = pool->size_class[i];
2315 pages_freed += __zs_compact(pool, class);
2317 atomic_long_add(pages_freed, &pool->stats.pages_compacted);
2323 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
2325 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
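
zs_compact() walks every size class and migrates objects out of sparsely used zspages, and the cumulative page count is published through zs_pool_stats(). A minimal sketch of triggering compaction by hand and reporting the totals, assuming pages_compacted is the atomic counter seen in the atomic_long_add() above and that zs_compact() returns the pages freed by the call:

static void zs_example_compact(struct zs_pool *pool)
{
	struct zs_pool_stats stats;
	unsigned long freed;

	freed = zs_compact(pool);	/* pages freed by this invocation */
	zs_pool_stats(pool, &stats);	/* snapshot of the cumulative counters */

	pr_info("compaction freed %lu pages (total compacted: %ld)\n",
		freed, atomic_long_read(&stats.pages_compacted));
}
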
2333 struct zs_pool *pool = container_of(shrinker, struct zs_pool,
2341 pages_freed = zs_compact(pool);
2352 struct zs_pool *pool = container_of(shrinker, struct zs_pool,
2356 class = pool->size_class[i];
2368 static void zs_unregister_shrinker(struct zs_pool *pool)
2370 unregister_shrinker(&pool->shrinker);
2373 static int zs_register_shrinker(struct zs_pool *pool)
2375 pool->shrinker.scan_objects = zs_shrinker_scan;
2376 pool->shrinker.count_objects = zs_shrinker_count;
2377 pool->shrinker.batch = 0;
2378 pool->shrinker.seeks = DEFAULT_SEEKS;
2380 return register_shrinker(&pool->shrinker);
2384 * zs_create_pool - Creates an allocation pool to work from.
2385 * @name: pool name to be created
2390 * On success, a pointer to the newly created pool is returned,
2396 struct zs_pool *pool;
2399 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
2400 if (!pool)
2403 init_deferred_free(pool);
2405 pool->name = kstrdup(name, GFP_KERNEL);
2406 if (!pool->name)
2410 init_waitqueue_head(&pool->migration_wait);
2413 if (create_cache(pool))
2465 pool->size_class[i] = prev_class;
2479 pool->size_class[i] = class;
2488 zs_pool_stat_create(pool, name);
2490 if (zs_register_migration(pool))
2495 * defragmentation of the pool which is pretty optional thing. If
2496 * registration fails we still can use the pool normally and user can
2499 zs_register_shrinker(pool);
2501 return pool;
2504 zs_destroy_pool(pool);
2509 void zs_destroy_pool(struct zs_pool *pool)
2513 zs_unregister_shrinker(pool);
2514 zs_unregister_migration(pool);
2515 zs_pool_stat_destroy(pool);
2519 struct size_class *class = pool->size_class[i];
2536 destroy_cache(pool);
2537 kfree(pool->name);
2538 kfree(pool);