Lines matching defs:pool (mm/zswap.c)
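These are the pool-related lines of mm/zswap.c in file order: sizing limits and reject counters, the zswap_entry back-pointer into its pool, per-CPU compression contexts, pool lifetime (a kref plus an RCU-protected list), LRU reclaim, the store/load paths, and module init. Short sketches are interleaved below to connect the fragments; they paraphrase the surrounding code and are not verbatim excerpts.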

7 * RAM-based memory pool.  This can result in a significant I/O reduction on
60 /* Pages written back when pool limit was reached */
62 /* Store failed due to a reclaim failure after pool limit was reached */
118 /* The maximum percentage of memory that the compressed pool can occupy */
123 static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */
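Lines 118 and 123 are the two sizing knobs. A minimal sketch of how they combine, assuming the zswap_max_pool_percent variable that line 118 documents and the global zswap_pool_total_size byte counter:

/* Hard cap: the pool may not exceed zswap_max_pool_percent of RAM. */
static bool zswap_is_full(void)
{
        return totalram_pages() * zswap_max_pool_percent / 100 <
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}

/*
 * Hysteresis: once the cap is hit, stores stay rejected until usage
 * falls below zswap_accept_thr_percent (90% by default) of the cap.
 */
static bool zswap_can_accept(void)
{
        return zswap_accept_thr_percent * zswap_max_pool_percent *
                        totalram_pages() / 10000 >
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
}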
194 * pool and lru are invalid and must be ignored.
195 * pool - the zswap_pool the entry's data is in
199 * lru - handle to the pool's lru used to evict pages.
206 struct zswap_pool *pool;
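For orientation, the entry these comment fragments (lines 194-206) describe looks roughly like this in kernels of this vintage; take the exact field set as an assumption rather than a verbatim definition:

struct zswap_entry {
        struct rb_node rbnode;          /* node in the per-swap-type rbtree */
        swp_entry_t swpentry;           /* which swap slot this entry backs */
        int refcount;
        unsigned int length;            /* compressed size; 0 if same-filled */
        struct zswap_pool *pool;        /* pool the data is in (line 195) */
        union {
                unsigned long handle;   /* zpool allocation handle */
                unsigned long value;    /* the value itself, when same-filled */
        };
        struct obj_cgroup *objcg;
        struct list_head lru;           /* position on pool->lru (line 199) */
};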
231 /* pool counter to provide unique names to zpool */
245 /* init completed, but couldn't create the initial pool */
253 pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
258 static int zswap_pool_get(struct zswap_pool *pool);
259 static void zswap_pool_put(struct zswap_pool *pool);
276 struct zswap_pool *pool;
282 list_for_each_entry_rcu(pool, &zswap_pools, list)
284 total += zpool_get_total_size(pool->zpools[i]);
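Lines 276-284 are the body of the total-size accounting walk: every pool on the RCU-protected zswap_pools list is visited and each of its backing zpools is summed. A sketch, assuming the ZSWAP_NR_ZPOOLS constant that sizes the zpools[] array:

static void zswap_update_total_size(void)
{
        struct zswap_pool *pool;
        u64 total = 0;
        int i;

        rcu_read_lock();
        list_for_each_entry_rcu(pool, &zswap_pools, list)
                for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
                        total += zpool_get_total_size(pool->zpools[i]);
        rcu_read_unlock();

        zswap_pool_total_size = total;
}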
380 return entry->pool->zpools[i];
396 spin_lock(&entry->pool->lru_lock);
398 spin_unlock(&entry->pool->lru_lock);
400 zswap_pool_put(entry->pool);
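Line 380 belongs to the helper that picks one of the pool's zpools for a given entry, and lines 396-400 to the entry teardown. A condensed sketch of both (the pointer hash spreads entries across zpools to reduce contention; objcg accounting, the same-filled case, and refcounting are elided):

static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
{
        int i = 0;

        if (ZSWAP_NR_ZPOOLS > 1)
                i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));

        return entry->pool->zpools[i];
}

/* Teardown once the entry's refcount drops to zero. */
static void zswap_free_entry(struct zswap_entry *entry)
{
        spin_lock(&entry->pool->lru_lock);
        list_del(&entry->lru);
        spin_unlock(&entry->pool->lru_lock);
        zpool_free(zswap_find_zpool(entry), entry->handle);
        zswap_pool_put(entry->pool);    /* entry no longer pins the pool */
}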
491 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
492 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
496 acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
499 pool->tfm_name, PTR_ERR(acomp));
507 pool->tfm_name);
530 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
531 struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
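Lines 491-531 are the CPU-hotplug callbacks that keep one compression context per CPU per pool. The prepare side, roughly (the matching dead callback starting at line 530 frees the request and transform again):

static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
        struct crypto_acomp *acomp;
        struct acomp_req *req;

        /* Allocate the transform on the CPU's local NUMA node. */
        acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
        if (IS_ERR(acomp)) {
                pr_err("could not alloc crypto acomp %s : %ld\n",
                       pool->tfm_name, PTR_ERR(acomp));
                return PTR_ERR(acomp);
        }
        acomp_ctx->acomp = acomp;

        req = acomp_request_alloc(acomp_ctx->acomp);
        if (!req) {
                pr_err("could not alloc crypto acomp_request %s\n",
                       pool->tfm_name);
                crypto_free_acomp(acomp_ctx->acomp);
                return -ENOMEM;
        }
        acomp_ctx->req = req;

        crypto_init_wait(&acomp_ctx->wait);
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &acomp_ctx->wait);
        /* (wiring of the per-CPU destination buffer and mutex elided) */
        return 0;
}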
544 * pool functions
549 struct zswap_pool *pool;
551 pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
552 WARN_ONCE(!pool && zswap_has_pool,
553 "%s: no page storage pool!\n", __func__);
555 return pool;
567 struct zswap_pool *pool;
571 pool = __zswap_pool_current();
572 if (!zswap_pool_get(pool))
573 pool = NULL;
577 return pool;
582 struct zswap_pool *pool, *last = NULL;
586 list_for_each_entry_rcu(pool, &zswap_pools, list)
587 last = pool;
589 "%s: no page storage pool!\n", __func__);
601 struct zswap_pool *pool;
605 list_for_each_entry_rcu(pool, &zswap_pools, list) {
606 if (strcmp(pool->tfm_name, compressor))
609 if (strcmp(zpool_get_type(pool->zpools[0]), type))
612 if (!zswap_pool_get(pool))
614 return pool;
632 static int zswap_reclaim_entry(struct zswap_pool *pool)
640 spin_lock(&pool->lru_lock);
641 if (list_empty(&pool->lru)) {
642 spin_unlock(&pool->lru_lock);
645 entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
654 spin_unlock(&pool->lru_lock);
671 spin_lock(&pool->lru_lock);
672 list_move(&entry->lru, &pool->lru);
673 spin_unlock(&pool->lru_lock);
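Lines 632-673 show the locking shape of LRU reclaim: pop the tail under lru_lock, re-find and pin the entry under the tree lock, write back unlocked, and on failure rotate the entry back onto the LRU so it gets another chance. Reconstructed from the fragments above; details may differ slightly:

static int zswap_reclaim_entry(struct zswap_pool *pool)
{
        struct zswap_entry *entry;
        struct zswap_tree *tree;
        pgoff_t swpoffset;
        int ret;

        /* Get an entry off the LRU */
        spin_lock(&pool->lru_lock);
        if (list_empty(&pool->lru)) {
                spin_unlock(&pool->lru_lock);
                return -EINVAL;
        }
        entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
        list_del_init(&entry->lru);
        /* Copy the swap offset out: once lru_lock is dropped, the
         * entry may be freed and must not be dereferenced until it
         * is verified to still be in the tree. */
        swpoffset = swp_offset(entry->swpentry);
        tree = zswap_trees[swp_type(entry->swpentry)];
        spin_unlock(&pool->lru_lock);

        /* Check for an invalidate() race before pinning the entry. */
        spin_lock(&tree->lock);
        if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) {
                spin_unlock(&tree->lock);
                return -EAGAIN;
        }
        zswap_entry_get(entry);         /* prevent free during writeback */
        spin_unlock(&tree->lock);

        ret = zswap_writeback_entry(entry, tree);

        spin_lock(&tree->lock);
        if (ret) {
                /* Writeback failed: put the entry back on the LRU. */
                spin_lock(&pool->lru_lock);
                list_move(&entry->lru, &pool->lru);
                spin_unlock(&pool->lru_lock);
        }
        zswap_entry_put(tree, entry);
        spin_unlock(&tree->lock);

        return ret ? -EAGAIN : 0;
}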
694 struct zswap_pool *pool = container_of(w, typeof(*pool),
699 ret = zswap_reclaim_entry(pool);
709 zswap_pool_put(pool);
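Lines 694-709 are the shrink worker those reclaim calls run under: it keeps evicting until the acceptance threshold is met, gives up after repeated failures, and finally drops the reference taken by whoever queued it. Roughly, with MAX_RECLAIM_RETRIES as the file's retry cap:

static void shrink_worker(struct work_struct *w)
{
        struct zswap_pool *pool = container_of(w, typeof(*pool),
                                               shrink_work);
        int ret, failures = 0;

        do {
                ret = zswap_reclaim_entry(pool);
                if (ret) {
                        zswap_reject_reclaim_fail++;
                        if (ret != -EAGAIN)
                                break;
                        if (++failures == MAX_RECLAIM_RETRIES)
                                break;
                }
                cond_resched();
        } while (!zswap_can_accept());
        zswap_pool_put(pool);   /* ref taken when the work was queued */
}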
715 struct zswap_pool *pool;
721 /* if either are unset, pool initialization failed, and we
723 * create a pool.
731 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
732 if (!pool)
736 /* unique name for each pool specifically required by zsmalloc */
740 pool->zpools[i] = zpool_create_pool(type, name, gfp);
741 if (!pool->zpools[i]) {
746 pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
748 strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
750 pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
751 if (!pool->acomp_ctx) {
757 &pool->node);
760 pr_debug("using %s compressor\n", pool->tfm_name);
762 /* being the current pool takes 1 ref; this func expects the
763 * caller to always add the new pool as the current pool
765 kref_init(&pool->kref);
766 INIT_LIST_HEAD(&pool->list);
767 INIT_LIST_HEAD(&pool->lru);
768 spin_lock_init(&pool->lru_lock);
769 INIT_WORK(&pool->shrink_work, shrink_worker);
771 zswap_pool_debug("created", pool);
773 return pool;
776 if (pool->acomp_ctx)
777 free_percpu(pool->acomp_ctx);
779 zpool_destroy_pool(pool->zpools[i]);
780 kfree(pool);
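Lines 776-780 are the tail of the creation error path. The unwind order matters: i indexes the zpool whose creation failed, so a decrementing loop destroys exactly the zpools that were successfully created before the pool itself is freed. In full, roughly:

error:
        if (pool->acomp_ctx)
                free_percpu(pool->acomp_ctx);
        while (i--)
                zpool_destroy_pool(pool->zpools[i]);
        kfree(pool);
        return NULL;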
826 static void zswap_pool_destroy(struct zswap_pool *pool)
830 zswap_pool_debug("destroying", pool);
832 cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
833 free_percpu(pool->acomp_ctx);
835 zpool_destroy_pool(pool->zpools[i]);
836 kfree(pool);
839 static int __must_check zswap_pool_get(struct zswap_pool *pool)
841 if (!pool)
844 return kref_get_unless_zero(&pool->kref);
849 struct zswap_pool *pool = container_of(work, typeof(*pool),
855 WARN_ON(kref_get_unless_zero(&pool->kref));
857 /* pool is now off zswap_pools list and has no references. */
858 zswap_pool_destroy(pool);
863 struct zswap_pool *pool;
865 pool = container_of(kref, typeof(*pool), kref);
869 WARN_ON(pool == zswap_pool_current());
871 list_del_rcu(&pool->list);
873 INIT_WORK(&pool->release_work, __zswap_pool_release);
874 schedule_work(&pool->release_work);
879 static void zswap_pool_put(struct zswap_pool *pool)
881 kref_put(&pool->kref, __zswap_pool_empty);
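Lines 839-881 implement the pool's reference lifecycle. zswap_pool_get() uses kref_get_unless_zero() so a lookup racing with the final put fails cleanly, and the final put runs __zswap_pool_empty(), which only unlinks the pool and defers destruction to a workqueue: kref_put() may run in atomic context, while the release side must call synchronize_rcu() (which sleeps) before freeing, since readers may still hold the pool via the RCU list. The deferred half, roughly:

static void __zswap_pool_release(struct work_struct *work)
{
        struct zswap_pool *pool = container_of(work, typeof(*pool),
                                               release_work);

        /* Wait out RCU readers that found the pool before it was
         * removed from zswap_pools. */
        synchronize_rcu();

        /* Nobody should have been able to take a new reference. */
        WARN_ON(kref_get_unless_zero(&pool->kref));

        /* pool is now off zswap_pools list and has no references. */
        zswap_pool_destroy(pool);
}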
900 struct zswap_pool *pool, *put_pool = NULL;
909 * don't create a pool; that's done during init.
922 /* no need to create a new pool, return directly */
945 pool = zswap_pool_find_get(type, compressor);
946 if (pool) {
947 zswap_pool_debug("using existing", pool);
948 WARN_ON(pool == zswap_pool_current());
949 list_del_rcu(&pool->list);
954 if (!pool)
955 pool = zswap_pool_create(type, compressor);
957 if (pool)
966 list_add_rcu(&pool->list, &zswap_pools);
968 } else if (pool) {
969 /* add the possibly pre-existing pool to the end of the pools
973 list_add_tail_rcu(&pool->list, &zswap_pools);
974 put_pool = pool;
979 if (!zswap_has_pool && !pool) {
980 /* if initial pool creation failed, and this pool creation also
982 * Allow changing this param, so pool creation will succeed
990 /* drop the ref from either the old current pool,
991 * or the new pool we failed to add
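Lines 900-991 are the runtime parameter switch (e.g. writing a new compressor name through /sys/module/zswap/parameters/). The core move is a rotation of the RCU list under zswap_pools_lock: the new, or revived pre-existing, pool goes to the head and becomes current, while on failure it goes back to the tail and the extra reference is dropped outside the lock. A condensed sketch of the tail of __zswap_param_set(), assuming pool and ret were set by the create/lookup code above it:

        spin_lock(&zswap_pools_lock);
        if (!ret) {
                put_pool = zswap_pool_current();        /* old current */
                list_add_rcu(&pool->list, &zswap_pools); /* head == current */
                zswap_has_pool = true;
        } else if (pool) {
                /* Put the possibly pre-existing pool back at the tail:
                 * still usable for lookups, but no longer current. */
                list_add_tail_rcu(&pool->list, &zswap_pools);
                put_pool = pool;
        }
        spin_unlock(&zswap_pools_lock);

        /* drop the ref from either the old current pool,
         * or the new pool we failed to add */
        if (put_pool)
                zswap_pool_put(put_pool);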
1028 pr_err("can't enable, no pool configured\n");
1062 struct zpool *pool = zswap_find_zpool(entry);
1071 if (!zpool_can_sleep_mapped(pool)) {
1111 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1114 src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
1115 if (!zpool_can_sleep_mapped(pool)) {
1118 zpool_unmap_handle(pool, entry->handle);
1130 if (!zpool_can_sleep_mapped(pool))
1133 zpool_unmap_handle(pool, entry->handle);
1152 if (!zpool_can_sleep_mapped(pool))
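Lines 1062-1152 deal with zpool backends whose mappings are atomic (zpool_can_sleep_mapped() is false, e.g. zsmalloc): since decompression may sleep, the compressed data is first copied to a preallocated bounce buffer and the handle unmapped. The shape of it, with tmp standing for the buffer allocated earlier in the function:

        src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
        if (!zpool_can_sleep_mapped(pool)) {
                /* Atomic mapping: copy out and unmap before sleeping. */
                memcpy(tmp, src, entry->length);
                src = tmp;
                zpool_unmap_handle(pool, entry->handle);
        }

        /* ... set up sg lists and run crypto_acomp_decompress() via
         * the per-CPU acomp_ctx (line 1111) ... */

        if (zpool_can_sleep_mapped(pool))
                zpool_unmap_handle(pool, entry->handle);
        else
                kfree(tmp);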
1204 struct zswap_pool *pool;
1286 entry->pool = zswap_pool_current_get();
1287 if (!entry->pool)
1291 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1366 spin_lock(&entry->pool->lru_lock);
1367 list_add(&entry->lru, &entry->pool->lru);
1368 spin_unlock(&entry->pool->lru_lock);
1381 zswap_pool_put(entry->pool);
1390 pool = zswap_pool_last_get();
1391 if (pool && !queue_work(shrink_wq, &pool->shrink_work))
1392 zswap_pool_put(pool);
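Lines 1366-1392 close out the store path: a successful store puts the new entry at the head of its pool's LRU, while hitting the pool limit with usage still above the acceptance threshold rejects the store and queues shrink work against the oldest pool. Two fragments, roughly, with zswap_is_full()/zswap_can_accept() as sketched near the top:

        /* in zswap_store(), before allocating space: */
        if (zswap_is_full()) {
                zswap_pool_limit_hit++;
                zswap_pool_reached_full = true;
                if (!zswap_can_accept())
                        goto shrink;
        }

        /* ...and the shrink target at the function's tail: */
shrink:
        pool = zswap_pool_last_get();
        if (pool && !queue_work(shrink_wq, &pool->shrink_work))
                zswap_pool_put(pool);   /* already queued: drop our ref */
        goto reject;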
1449 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
1475 spin_lock(&entry->pool->lru_lock);
1476 list_move(&entry->lru, &entry->pool->lru);
1477 spin_unlock(&entry->pool->lru_lock);
1585 struct zswap_pool *pool;
1608 pool = __zswap_pool_create_fallback();
1609 if (pool) {
1610 pr_info("loaded using pool %s/%s\n", pool->tfm_name,
1611 zpool_get_type(pool->zpools[0]));
1612 list_add(&pool->list, &zswap_pools);
1615 pr_err("pool creation failed\n");
1629 if (pool)
1630 zswap_pool_destroy(pool);
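Lines 1585-1630 are module init: __zswap_pool_create_fallback() tries the configured compressor and zpool, falling back to the compile-time defaults if either is unavailable, and on success the pool is added to zswap_pools (becoming current). A condensed sketch of the fallback logic, assuming the usual CONFIG_ZSWAP_COMPRESSOR_DEFAULT / CONFIG_ZSWAP_ZPOOL_DEFAULT strings and eliding the module-param string bookkeeping:

static struct zswap_pool *__zswap_pool_create_fallback(void)
{
        if (!crypto_has_acomp(zswap_compressor, 0, 0) &&
            strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
                pr_err("compressor %s not available, using default %s\n",
                       zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
                zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
        }
        if (!zpool_has_pool(zswap_zpool_type) &&
            strcmp(zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
                pr_err("zpool %s not available, using default %s\n",
                       zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
                zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
        }
        if (!crypto_has_acomp(zswap_compressor, 0, 0) ||
            !zpool_has_pool(zswap_zpool_type))
                return NULL;

        return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}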