Lines matching defs:pool in mm/zswap.c
7 * RAM-based memory pool. This can result in a significant I/O reduction on
56 /* Pages written back when pool limit was reached */
58 /* Store failed due to a reclaim failure after pool limit was reached */
112 /* The maximum percentage of memory that the compressed pool can occupy */
117 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
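The two tunables above form a hysteresis pair: zswap refuses new stores once the
compressed pool grows past zswap_max_pool_percent of RAM, and accepts them again
only after writeback has shrunk the pool below zswap_accept_thr_percent of that
limit. A minimal userspace sketch of the arithmetic, assuming the kernel's
documented default of 20 for max_pool_percent (can_accept is a hypothetical name,
not the kernel function):

	#include <stdbool.h>
	#include <stdio.h>

	/* Defaults mirroring zswap's documented tunables; not read
	 * from a live kernel. */
	static unsigned int zswap_max_pool_percent = 20;
	static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */

	/* Hypothetical helper sketching the acceptance hysteresis:
	 * after the pool fills, stores resume only below the threshold. */
	static bool can_accept(unsigned long long total_ram_bytes,
			       unsigned long long pool_bytes)
	{
		unsigned long long limit =
			total_ram_bytes / 100 * zswap_max_pool_percent;
		unsigned long long thr = limit / 100 * zswap_accept_thr_percent;

		return pool_bytes < thr;
	}

	int main(void)
	{
		unsigned long long ram = 16ULL << 30; /* 16 GiB of RAM */

		/* limit ~ 3277 MiB; accept again below ~2949 MiB */
		printf("accept at 2900 MiB used: %d\n",
		       can_accept(ram, 2900ULL << 20)); /* 1 */
		printf("accept at 3000 MiB used: %d\n",
		       can_accept(ram, 3000ULL << 20)); /* 0 */
		return 0;
	}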
157 * pool - the zswap_pool the entry's data is in
166 struct zswap_pool *pool;
193 /* pool counter to provide unique names to zpool */
202 /* init completed, but couldn't create the initial pool */
210 pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
213 static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
214 static int zswap_pool_get(struct zswap_pool *pool);
215 static void zswap_pool_put(struct zswap_pool *pool);
236 struct zswap_pool *pool;
241 list_for_each_entry_rcu(pool, &zswap_pools, list)
242 total += zpool_get_total_size(pool->zpool);
345 zpool_free(entry->pool->zpool, entry->handle);
346 zswap_pool_put(entry->pool);
417 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
420 if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
423 tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
426 pool->tfm_name, PTR_ERR(tfm));
429 *per_cpu_ptr(pool->tfm, cpu) = tfm;
435 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
438 tfm = *per_cpu_ptr(pool->tfm, cpu);
441 *per_cpu_ptr(pool->tfm, cpu) = NULL;
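The two CPU-hotplug callbacks above give every CPU its own crypto_comp transform,
so the compression hot path never takes a lock. A loose, compilable userspace
analogue, with C11 thread-local storage in place of per-CPU data and a made-up
comp_ctx type standing in for crypto_comp:

	#include <stdlib.h>

	/* Hypothetical stand-in for struct crypto_comp. */
	struct comp_ctx { unsigned char workspace[4096]; };

	/* One context per thread: the analogue of zswap's per-CPU tfm.
	 * The kernel allocates in the hotplug "prepare" callback and
	 * frees in "dead"; here we allocate lazily on first use and
	 * would free at thread exit. */
	static _Thread_local struct comp_ctx *tfm;

	static struct comp_ctx *get_tfm(void)
	{
		if (!tfm)
			tfm = calloc(1, sizeof(*tfm));
		return tfm; /* NULL on allocation failure, as in the kernel */
	}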
446 * pool functions
451 struct zswap_pool *pool;
453 pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
454 WARN_ONCE(!pool && zswap_has_pool,
455 "%s: no page storage pool!\n", __func__);
457 return pool;
469 struct zswap_pool *pool;
473 pool = __zswap_pool_current();
474 if (!zswap_pool_get(pool))
475 pool = NULL;
479 return pool;
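zswap_pool_current() pairs an RCU-protected lookup with zswap_pool_get(), which
succeeds only if the pool's refcount has not already dropped to zero; a reader
racing with teardown must get NULL rather than revive a dying pool. A compilable
sketch of that get-unless-zero step using C11 atomics (ref_get_unless_zero is a
hypothetical name; the kernel uses kref_get_unless_zero()):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Sketch of kref_get_unless_zero(): acquire a reference only
	 * if the object is not already being torn down. */
	static bool ref_get_unless_zero(atomic_uint *refcount)
	{
		unsigned int old = atomic_load(refcount);

		while (old != 0) {
			/* On failure, old is reloaded with the current value. */
			if (atomic_compare_exchange_weak(refcount, &old, old + 1))
				return true;
		}
		return false; /* hit zero: caller treats the pool as gone */
	}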
484 struct zswap_pool *pool, *last = NULL;
488 list_for_each_entry_rcu(pool, &zswap_pools, list)
489 last = pool;
491 "%s: no page storage pool!\n", __func__);
503 struct zswap_pool *pool;
507 list_for_each_entry_rcu(pool, &zswap_pools, list) {
508 if (strcmp(pool->tfm_name, compressor))
510 if (strcmp(zpool_get_type(pool->zpool), type))
513 if (!zswap_pool_get(pool))
515 return pool;
523 struct zswap_pool *pool = container_of(w, typeof(*pool),
526 if (zpool_shrink(pool->zpool, 1, NULL))
528 zswap_pool_put(pool);
533 struct zswap_pool *pool;
539 /* if either are unset, pool initialization failed, and we
541 * create a pool.
549 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
550 if (!pool)
553 /* unique name for each pool specifically required by zsmalloc */
556 pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
557 if (!pool->zpool) {
561 pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
563 strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
564 pool->tfm = alloc_percpu(struct crypto_comp *);
565 if (!pool->tfm) {
571 &pool->node);
574 pr_debug("using %s compressor\n", pool->tfm_name);
576 /* being the current pool takes 1 ref; this func expects the
577 * caller to always add the new pool as the current pool
579 kref_init(&pool->kref);
580 INIT_LIST_HEAD(&pool->list);
581 INIT_WORK(&pool->shrink_work, shrink_worker);
583 zswap_pool_debug("created", pool);
585 return pool;
588 free_percpu(pool->tfm);
589 if (pool->zpool)
590 zpool_destroy_pool(pool->zpool);
591 kfree(pool);
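The tail of zswap_pool_create() is the classic goto-unwind idiom: each failure
label releases, in reverse order, exactly what was acquired before the failure.
A self-contained illustration, with plain malloc/free standing in for
zpool_create_pool() and alloc_percpu():

	#include <stdlib.h>

	struct pool { void *zpool; void *tfm; };

	/* Error-unwinding sketch in the style of zswap_pool_create(). */
	static struct pool *pool_create(void)
	{
		struct pool *pool = calloc(1, sizeof(*pool));

		if (!pool)
			return NULL;

		pool->zpool = malloc(64);  /* stands in for zpool_create_pool() */
		if (!pool->zpool)
			goto free_pool;

		pool->tfm = malloc(64);    /* stands in for alloc_percpu() */
		if (!pool->tfm)
			goto destroy_zpool;

		return pool;

	destroy_zpool:
		free(pool->zpool);
	free_pool:
		free(pool);
		return NULL;
	}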
637 static void zswap_pool_destroy(struct zswap_pool *pool)
639 zswap_pool_debug("destroying", pool);
641 cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
642 free_percpu(pool->tfm);
643 zpool_destroy_pool(pool->zpool);
644 kfree(pool);
647 static int __must_check zswap_pool_get(struct zswap_pool *pool)
649 if (!pool)
652 return kref_get_unless_zero(&pool->kref);
657 struct zswap_pool *pool = container_of(work, typeof(*pool),
663 WARN_ON(kref_get_unless_zero(&pool->kref));
665 /* pool is now off zswap_pools list and has no references. */
666 zswap_pool_destroy(pool);
671 struct zswap_pool *pool;
673 pool = container_of(kref, typeof(*pool), kref);
677 WARN_ON(pool == zswap_pool_current());
679 list_del_rcu(&pool->list);
681 INIT_WORK(&pool->release_work, __zswap_pool_release);
682 schedule_work(&pool->release_work);
687 static void zswap_pool_put(struct zswap_pool *pool)
689 kref_put(&pool->kref, __zswap_pool_empty);
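zswap_pool_put() drops a kref; when the count reaches zero, __zswap_pool_empty()
only unlinks the pool and hands the real teardown to a workqueue, because the
final put may run in a context that cannot sleep while zswap_pool_destroy() can.
The refcounting half of that, as a compilable sketch (ref_put and struct obj are
invented names):

	#include <stdatomic.h>

	struct obj {
		atomic_uint refcount;
		void (*release)(struct obj *obj);
	};

	/* Sketch of kref_put(): whoever drops the last reference runs
	 * the release callback. zswap's callback merely unlinks the
	 * pool and schedules the sleep-capable destruction on a
	 * workqueue. */
	static void ref_put(struct obj *obj)
	{
		if (atomic_fetch_sub(&obj->refcount, 1) == 1)
			obj->release(obj);
	}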
700 struct zswap_pool *pool, *put_pool = NULL;
714 * don't create a pool; that's done during init.
738 pool = zswap_pool_find_get(type, compressor);
739 if (pool) {
740 zswap_pool_debug("using existing", pool);
741 WARN_ON(pool == zswap_pool_current());
742 list_del_rcu(&pool->list);
747 if (!pool)
748 pool = zswap_pool_create(type, compressor);
750 if (pool)
759 list_add_rcu(&pool->list, &zswap_pools);
761 } else if (pool) {
762 /* add the possibly pre-existing pool to the end of the pools
766 list_add_tail_rcu(&pool->list, &zswap_pools);
767 put_pool = pool;
772 if (!zswap_has_pool && !pool) {
773 /* if initial pool creation failed, and this pool creation also
775 * Allow changing this param, so pool creation will succeed
783 /* drop the ref from either the old current pool,
784 * or the new pool we failed to add
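Throughout __zswap_param_set(), "current" simply means "head of the zswap_pools
list": a new pool is added at the head, while an old pool that must stay alive
for its existing entries is moved to the tail. A loose userspace analogue of
that discipline, with a mutex serializing writers where the kernel uses
zswap_pools_lock plus RCU (all names here are invented):

	#include <pthread.h>
	#include <stddef.h>

	struct pool_node {
		struct pool_node *next;
		/* pool data would live here */
	};

	static struct pool_node *pools_head;  /* head is the current pool */
	static pthread_mutex_t pools_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Make a new pool current by pushing it onto the list head;
	 * the old head stays on the list, draining as its entries are
	 * faulted back in. */
	static void make_current(struct pool_node *pool)
	{
		pthread_mutex_lock(&pools_lock);
		pool->next = pools_head;
		pools_head = pool;
		pthread_mutex_unlock(&pools_lock);
	}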
812 pr_err("can't enable, no pool configured\n");
871 static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
887 if (!zpool_can_sleep_mapped(pool)) {
894 zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
905 zpool_unmap_handle(pool, handle);
913 if (!zpool_can_sleep_mapped(pool)) {
916 zpool_unmap_handle(pool, handle);
935 tfm = *get_cpu_ptr(entry->pool->tfm);
938 put_cpu_ptr(entry->pool->tfm);
985 if (zpool_can_sleep_mapped(pool))
986 zpool_unmap_handle(pool, handle);
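zswap_writeback_entry() has to cope with zpool backends whose mappings cannot be
held across a sleeping call: it copies the compressed data into a temporary
buffer and unmaps before doing anything that might sleep. The shape of that
pattern as a sketch, with stand-ins for the zpool map/unmap ops (consume
represents the decompression step):

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	/* If the mapping cannot be held across a sleep, copy out and
	 * drop it before calling anything that may block. */
	static int read_compressed(bool can_sleep_mapped, const void *mapped,
				   size_t len,
				   void (*consume)(const void *src, size_t len))
	{
		const void *src = mapped;
		void *tmp = NULL;

		if (!can_sleep_mapped) {
			tmp = malloc(len);
			if (!tmp)
				return -1;
			memcpy(tmp, mapped, len);
			/* the real code unmaps the zpool handle here */
			src = tmp;
		}

		consume(src, len); /* may sleep; safe either way now */
		free(tmp);
		return 0;
	}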
1046 struct zswap_pool *pool;
1050 pool = zswap_pool_last_get();
1051 if (pool)
1052 queue_work(shrink_wq, &pool->shrink_work);
1087 entry->pool = zswap_pool_current_get();
1088 if (!entry->pool) {
1095 tfm = *get_cpu_ptr(entry->pool->tfm);
1099 put_cpu_ptr(entry->pool->tfm);
1106 hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
1108 if (zpool_malloc_support_movable(entry->pool->zpool))
1110 ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
1119 buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
1122 zpool_unmap_handle(entry->pool->zpool, handle);
1152 zswap_pool_put(entry->pool);
1191 if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
1201 src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
1202 if (zpool_evictable(entry->pool->zpool))
1205 if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
1208 zpool_unmap_handle(entry->pool->zpool, entry->handle);
1212 tfm = *get_cpu_ptr(entry->pool->tfm);
1214 put_cpu_ptr(entry->pool->tfm);
1217 if (zpool_can_sleep_mapped(entry->pool->zpool))
1218 zpool_unmap_handle(entry->pool->zpool, entry->handle);
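The load path above also shows the header dance: when the backend is evictable,
every stored object is prefixed with a small zswap_header carrying the swap
entry, so zswap_writeback_entry() can identify the owner, and readers must skip
it to reach the compressed payload. A sketch of the offset arithmetic (struct
zswap_header_sketch only approximates the real struct zswap_header):

	#include <stdbool.h>

	/* Approximation of struct zswap_header: the swap entry stored
	 * in front of the payload so writeback can find the owner. */
	struct zswap_header_sketch {
		unsigned long swpentry;
	};

	/* Evictable backends prefix the payload with the header;
	 * others store the compressed data directly at the handle. */
	static const void *payload(const void *obj, bool evictable)
	{
		const char *p = obj;

		return evictable ? p + sizeof(struct zswap_header_sketch) : p;
	}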
1355 struct zswap_pool *pool;
1379 pool = __zswap_pool_create_fallback();
1380 if (pool) {
1381 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1382 zpool_get_type(pool->zpool));
1383 list_add(&pool->list, &zswap_pools);
1386 pr_err("pool creation failed\n");
1400 if (pool)
1401 zswap_pool_destroy(pool);