Lines matching defs:pool (mm/zsmalloc.c)
36 * pool->lock
256 struct zs_pool *pool;
286 static void kick_deferred_free(struct zs_pool *pool);
287 static void init_deferred_free(struct zs_pool *pool);
288 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
293 static void kick_deferred_free(struct zs_pool *pool) {}
294 static void init_deferred_free(struct zs_pool *pool) {}
295 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
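The declarations at 286-288 and the empty bodies at 293-295 are the usual compile-time stub pair; a minimal sketch of how they are likely guarded (the guard symbol is assumed to be CONFIG_COMPACTION):

#ifdef CONFIG_COMPACTION
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
/* With compaction disabled, the migration hooks compile away to no-ops. */
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif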
298 static int create_cache(struct zs_pool *pool)
300 pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
302 if (!pool->handle_cachep)
305 pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
307 if (!pool->zspage_cachep) {
308 kmem_cache_destroy(pool->handle_cachep);
309 pool->handle_cachep = NULL;
316 static void destroy_cache(struct zs_pool *pool)
318 kmem_cache_destroy(pool->handle_cachep);
319 kmem_cache_destroy(pool->zspage_cachep);
322 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
324 return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
328 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
330 kmem_cache_free(pool->handle_cachep, (void *)handle);
333 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
335 return kmem_cache_zalloc(pool->zspage_cachep,
339 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
341 kmem_cache_free(pool->zspage_cachep, zspage);
344 /* pool->lock (which owns the handle) synchronizes races */
364 static void zs_zpool_destroy(void *pool)
366 zs_destroy_pool(pool);
369 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
372 *handle = zs_malloc(pool, size, gfp);
378 static void zs_zpool_free(void *pool, unsigned long handle)
380 zs_free(pool, handle);
383 static void *zs_zpool_map(void *pool, unsigned long handle,
401 return zs_map_object(pool, handle, zs_mm);
403 static void zs_zpool_unmap(void *pool, unsigned long handle)
405 zs_unmap_object(pool, handle);
408 static u64 zs_zpool_total_size(void *pool)
410 return zs_get_total_pages(pool) << PAGE_SHIFT;
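Taken together, the zs_zpool_* wrappers (364-410) form zsmalloc's zpool backend; a sketch of the driver table they would plug into, with field names assumed from struct zpool_driver (the create callback takes no pool argument, so it is not among the matched lines and is omitted here):

static struct zpool_driver zs_zpool_driver = {
	.type		= "zsmalloc",
	.owner		= THIS_MODULE,
	.destroy	= zs_zpool_destroy,
	.malloc		= zs_zpool_malloc,
	.free		= zs_zpool_free,
	.map		= zs_zpool_map,
	.unmap		= zs_zpool_unmap,
	.total_size	= zs_zpool_total_size,
};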
439 /* Protected by pool->lock */
489 static struct size_class *zspage_class(struct zs_pool *pool,
492 return pool->size_class[zspage->class];
504 * zsmalloc divides the pool into various size classes where each
560 struct zs_pool *pool = s->private;
576 class = pool->size_class[i];
581 spin_lock(&pool->lock);
592 spin_unlock(&pool->lock);
622 static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
629 pool->stat_dentry = debugfs_create_dir(name, zs_stat_root);
631 debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool,
635 static void zs_pool_stat_destroy(struct zs_pool *pool)
637 debugfs_remove_recursive(pool->stat_dentry);
649 static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
653 static inline void zs_pool_stat_destroy(struct zs_pool *pool)
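The s->private read at 560 and the debugfs registration at 629-631 suggest the standard seq_file wiring; a simplified sketch (the show-routine name and body here are assumptions, the real routine walks every size class under pool->lock as lines 576-592 show):

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	struct zs_pool *pool = s->private;	/* passed as data to debugfs_create_file() */

	/* The real code prints a per-class table here under pool->lock. */
	seq_printf(s, "pages_allocated: %ld\n",
		   atomic_long_read(&pool->pages_allocated));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(zs_stats_size);	/* generates zs_stats_size_fops for the "classes" file */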
848 static void __free_zspage(struct zs_pool *pool, struct size_class *class,
857 assert_spin_locked(&pool->lock);
873 cache_free_zspage(pool, zspage);
876 atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
879 static void free_zspage(struct zs_pool *pool, struct size_class *class,
891 kick_deferred_free(pool);
896 __free_zspage(pool, class, zspage);
980 static struct zspage *alloc_zspage(struct zs_pool *pool,
986 struct zspage *zspage = cache_alloc_zspage(pool, gfp);
1003 cache_free_zspage(pool, zspage);
1013 zspage->pool = pool;
1153 * @pool: zsmalloc pool to use
1161 unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
1165 class = pool->size_class[get_size_class_index(size)];
1171 unsigned long zs_get_total_pages(struct zs_pool *pool)
1173 return atomic_long_read(&pool->pages_allocated);
1179 * @pool: pool from which the object was allocated
1192 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1213 spin_lock(&pool->lock);
1219 * migration cannot move any zpages in this zspage. Here, pool->lock
1225 spin_unlock(&pool->lock);
1227 class = zspage_class(pool, zspage);
1254 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1267 class = zspage_class(pool, zspage);
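Typical caller pattern for zs_map_object()/zs_unmap_object() as a minimal sketch; read_object() is a hypothetical helper, and ZS_MM_RO is the read-only mode from enum zs_mapmode:

static void read_object(struct zs_pool *pool, unsigned long handle,
			void *dst, size_t len)
{
	void *src;

	/* The object may straddle two pages; the mapping hides that. */
	src = zs_map_object(pool, handle, ZS_MM_RO);
	memcpy(dst, src, len);
	/* The mapping pins the zspage against migration (pool->lock is taken
	 * at 1213 and the zspage pinned before it is dropped at 1225), so
	 * keep the map/unmap window short and do not sleep inside it. */
	zs_unmap_object(pool, handle);
}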
1291 * @pool: zsmalloc pool to use
1301 size_t zs_huge_class_size(struct zs_pool *pool)
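zs_huge_class_size() reports the threshold above which zsmalloc treats an object as "huge"; a sketch of how a compressing caller might use it (zram-style logic, comp_len is a hypothetical variable holding the compressed length):

size_t huge_class_size = zs_huge_class_size(pool);

/* If the compressed page already falls into a huge class, storing it
 * uncompressed costs no extra zsmalloc memory and saves a later
 * decompression. */
if (comp_len >= huge_class_size)
	comp_len = PAGE_SIZE;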
1307 static unsigned long obj_malloc(struct zs_pool *pool,
1319 class = pool->size_class[zspage->class];
1351 * zs_malloc - Allocate block of given size from pool.
1352 * @pool: pool to allocate from
1360 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
1370 handle = cache_alloc_handle(pool, gfp);
1376 class = pool->size_class[get_size_class_index(size)];
1378 /* pool->lock effectively protects the zpage migration */
1379 spin_lock(&pool->lock);
1382 obj = obj_malloc(pool, zspage, handle);
1391 spin_unlock(&pool->lock);
1393 zspage = alloc_zspage(pool, class, gfp);
1395 cache_free_handle(pool, handle);
1399 spin_lock(&pool->lock);
1400 obj = obj_malloc(pool, zspage, handle);
1405 atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
1410 SetZsPageMovable(pool, zspage);
1412 spin_unlock(&pool->lock);
1445 void zs_free(struct zs_pool *pool, unsigned long handle)
1457 * The pool->lock protects the race with zpage's migration
1460 spin_lock(&pool->lock);
1464 class = zspage_class(pool, zspage);
1471 free_zspage(pool, class, zspage);
1473 spin_unlock(&pool->lock);
1474 cache_free_handle(pool, handle);
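Putting zs_malloc() (1360), zs_map_object() (1192) and zs_free() (1445) together, a minimal store-side sketch; store_object() is hypothetical, the GFP flags are a caller choice, and the failure check is an assumption (recent kernels encode errors in the handle, older ones return 0):

static unsigned long store_object(struct zs_pool *pool,
				  const void *src, size_t len)
{
	unsigned long handle;
	void *dst;

	handle = zs_malloc(pool, len, GFP_NOIO | __GFP_NOWARN);
	if (IS_ERR_VALUE(handle))
		return 0;

	dst = zs_map_object(pool, handle, ZS_MM_WO);	/* write-only mapping */
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);

	return handle;		/* caller later releases it with zs_free(pool, handle) */
}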
1578 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
1585 struct size_class *class = pool->size_class[src_zspage->class];
1598 free_obj = obj_malloc(pool, dst_zspage, handle);
1780 struct zs_pool *pool;
1790 pool = zspage->pool;
1791 spin_lock(&pool->lock);
1793 spin_unlock(&pool->lock);
1801 struct zs_pool *pool;
1823 pool = zspage->pool;
1826 * The pool's lock protects the race between zpage migration
1829 spin_lock(&pool->lock);
1830 class = zspage_class(pool, zspage);
1862 * it's okay to release the pool's lock.
1864 spin_unlock(&pool->lock);
1881 struct zs_pool *pool;
1887 pool = zspage->pool;
1888 spin_lock(&pool->lock);
1890 spin_unlock(&pool->lock);
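The three blocks at 1780, 1801 and 1881 are the page-migration callbacks; a sketch of the ops table they would be wired into (the callback names are assumed, since only their pool-related lines are matched here):

static const struct movable_operations zsmalloc_mops = {
	.isolate_page	= zs_page_isolate,	/* 1790-1793: brief pool->lock critical section */
	.migrate_page	= zs_page_migrate,	/* 1823-1864: remaps objects under pool->lock */
	.putback_page	= zs_page_putback,	/* 1887-1890: brief pool->lock critical section */
};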
1911 struct zs_pool *pool = container_of(work, struct zs_pool,
1915 class = pool->size_class[i];
1919 spin_lock(&pool->lock);
1922 spin_unlock(&pool->lock);
1931 class = pool->size_class[class_idx];
1932 spin_lock(&pool->lock);
1933 __free_zspage(pool, class, zspage);
1934 spin_unlock(&pool->lock);
1938 static void kick_deferred_free(struct zs_pool *pool)
1940 schedule_work(&pool->free_work);
1943 static void zs_flush_migration(struct zs_pool *pool)
1945 flush_work(&pool->free_work);
1948 static void init_deferred_free(struct zs_pool *pool)
1950 INIT_WORK(&pool->free_work, async_free_zspage);
1953 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
1964 static inline void zs_flush_migration(struct zs_pool *pool) { }
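The deferred-free pieces at 1911-1950 follow the standard workqueue pattern; condensed from the matched lines, the flow is:

/* pool creation (1948-1950): attach the worker */
INIT_WORK(&pool->free_work, async_free_zspage);

/* free path, when the zspage cannot be freed in place (1938-1940) */
schedule_work(&pool->free_work);

/* pool teardown (1943-1945): wait for any deferred frees to finish */
flush_work(&pool->free_work);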
1987 static unsigned long __zs_compact(struct zs_pool *pool,
1998 spin_lock(&pool->lock);
2015 migrate_zspage(pool, src_zspage, dst_zspage);
2020 free_zspage(pool, class, src_zspage);
2026 || spin_is_contended(&pool->lock)) {
2031 spin_unlock(&pool->lock);
2033 spin_lock(&pool->lock);
2046 spin_unlock(&pool->lock);
2051 unsigned long zs_compact(struct zs_pool *pool)
2058 * Pool compaction is performed under pool->lock so it is basically
2060 * will increase pool->lock contention, which will impact other
2061 * zsmalloc operations that need pool->lock.
2063 if (atomic_xchg(&pool->compaction_in_progress, 1))
2067 class = pool->size_class[i];
2070 pages_freed += __zs_compact(pool, class);
2072 atomic_long_add(pages_freed, &pool->stats.pages_compacted);
2073 atomic_set(&pool->compaction_in_progress, 0);
2079 void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
2081 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
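A sketch of how a caller might trigger compaction and read the cumulative counter back through zs_pool_stats() (2079); report_compaction() is a hypothetical helper, and pages_compacted is the field updated at 2072:

static void report_compaction(struct zs_pool *pool)
{
	struct zs_pool_stats stats;
	unsigned long freed;

	freed = zs_compact(pool);	/* pages freed by this invocation */
	zs_pool_stats(pool, &stats);	/* copies pool->stats (2081) */

	pr_info("zsmalloc: freed %lu pages, %ld compacted since pool creation\n",
		freed, atomic_long_read(&stats.pages_compacted));
}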
2089 struct zs_pool *pool = container_of(shrinker, struct zs_pool,
2097 pages_freed = zs_compact(pool);
2108 struct zs_pool *pool = container_of(shrinker, struct zs_pool,
2112 class = pool->size_class[i];
2122 static void zs_unregister_shrinker(struct zs_pool *pool)
2124 unregister_shrinker(&pool->shrinker);
2127 static int zs_register_shrinker(struct zs_pool *pool)
2129 pool->shrinker.scan_objects = zs_shrinker_scan;
2130 pool->shrinker.count_objects = zs_shrinker_count;
2131 pool->shrinker.batch = 0;
2132 pool->shrinker.seeks = DEFAULT_SEEKS;
2134 return register_shrinker(&pool->shrinker, "mm-zspool:%s",
2135 pool->name);
2160 * zs_create_pool - Creates an allocation pool to work from.
2161 * @name: pool name to be created
2166 * On success, a pointer to the newly created pool is returned,
2172 struct zs_pool *pool;
2175 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
2176 if (!pool)
2179 init_deferred_free(pool);
2180 spin_lock_init(&pool->lock);
2181 atomic_set(&pool->compaction_in_progress, 0);
2183 pool->name = kstrdup(name, GFP_KERNEL);
2184 if (!pool->name)
2187 if (create_cache(pool))
2239 pool->size_class[i] = prev_class;
2252 pool->size_class[i] = class;
2264 zs_pool_stat_create(pool, name);
2268 * defragmentation of the pool which is pretty optional thing. If
2269 * registration fails we still can use the pool normally and user can
2272 zs_register_shrinker(pool);
2274 return pool;
2277 zs_destroy_pool(pool);
2282 void zs_destroy_pool(struct zs_pool *pool)
2286 zs_unregister_shrinker(pool);
2287 zs_flush_migration(pool);
2288 zs_pool_stat_destroy(pool);
2292 struct size_class *class = pool->size_class[i];
2310 destroy_cache(pool);
2311 kfree(pool->name);
2312 kfree(pool);
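Finally, the pool lifecycle implemented by 2160-2312 looks like this from the caller's side; example_init() and the pool name are hypothetical:

static int example_init(void)
{
	struct zs_pool *pool;

	pool = zs_create_pool("example");	/* returns NULL on failure */
	if (!pool)
		return -ENOMEM;

	/* ... zs_malloc()/zs_map_object()/zs_free() against the pool ... */

	zs_destroy_pool(pool);	/* unregisters the shrinker, flushes deferred
				 * work, tears down the caches (2282-2312) */
	return 0;
}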