Lines Matching refs:zram
15 #define KMSG_COMPONENT "zram"
60 static void zram_free_page(struct zram *zram, size_t index);
61 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
64 static inline bool init_done(struct zram *zram)
66 return zram->disksize;
69 static inline struct zram *dev_to_zram(struct device *dev)
71 return (struct zram *)dev_to_disk(dev)->private_data;
74 static inline void zram_set_element(struct zram *zram, u32 index,
77 zram->table[index].element = element;
80 static unsigned long zram_get_element(struct zram *zram, u32 index)
82 return zram->table[index].element;
85 static inline bool zram_allocated(struct zram *zram, u32 index)
87 return zram_get_obj_size(zram, index) ||
88 zram_test_flag(zram, index, ZRAM_SAME) ||
89 zram_test_flag(zram, index, ZRAM_WB);
105 static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
112 zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
114 zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
117 static inline u32 zram_get_priority(struct zram *zram, u32 index)
119 u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
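
The fragments at lines 105-119 pack the compression priority into each slot's flags word as a small bit-field. A minimal user-space model of the idea (the bit position and mask width are illustrative stand-ins for ZRAM_COMP_PRIORITY_BIT1 and its mask, not the kernel's values):

    #define PRIO_SHIFT 16
    #define PRIO_MASK  0x3UL            /* two bits: up to four algorithms */

    static void set_priority(unsigned long *flags, unsigned long prio)
    {
            *flags &= ~(PRIO_MASK << PRIO_SHIFT);        /* clear old value */
            *flags |= (prio & PRIO_MASK) << PRIO_SHIFT;  /* store new one */
    }

    static unsigned long get_priority(unsigned long flags)
    {
            return (flags >> PRIO_SHIFT) & PRIO_MASK;
    }
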
124 static inline void update_used_max(struct zram *zram,
127 unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
132 } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
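
update_used_max() (lines 124-132) maintains a lock-free running maximum: it retries the compare-exchange until it either installs the new value or observes that someone else already recorded a larger one. A C11 sketch of the same pattern (names illustrative):

    #include <stdatomic.h>

    static _Atomic long max_used_pages;

    static void update_used_max_model(long pages)
    {
            long cur_max = atomic_load(&max_used_pages);

            do {
                    if (cur_max >= pages)
                            return;     /* a larger max is already recorded */
                    /* on failure, cur_max is reloaded with the current value */
            } while (!atomic_compare_exchange_weak(&max_used_pages,
                                                   &cur_max, pages));
    }
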
169 struct zram *zram = dev_to_zram(dev);
171 down_read(&zram->init_lock);
172 val = init_done(zram);
173 up_read(&zram->init_lock);
181 struct zram *zram = dev_to_zram(dev);
183 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
191 struct zram *zram = dev_to_zram(dev);
197 down_write(&zram->init_lock);
198 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
199 up_write(&zram->init_lock);
209 struct zram *zram = dev_to_zram(dev);
215 down_read(&zram->init_lock);
216 if (init_done(zram)) {
217 atomic_long_set(&zram->stats.max_used_pages,
218 zs_get_total_pages(zram->mem_pool));
220 up_read(&zram->init_lock);
227 * Callers should hold the zram init lock in read mode
229 static void mark_idle(struct zram *zram, ktime_t cutoff)
232 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
240 zram_slot_lock(zram, index);
241 if (zram_allocated(zram, index) &&
242 !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
244 is_idle = !cutoff || ktime_after(cutoff, zram->table[index].ac_time);
247 zram_set_flag(zram, index, ZRAM_IDLE);
249 zram_slot_unlock(zram, index);
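
mark_idle() (lines 229-249) walks every slot under the slot lock and sets ZRAM_IDLE on entries that are allocated, not mid-writeback, and last touched before the cutoff; a zero cutoff marks everything. A simplified single-threaded model with illustrative types:

    #include <stdbool.h>
    #include <stddef.h>
    #include <time.h>

    struct slot_model {
            bool allocated, under_wb, idle;
            time_t last_access;
    };

    static void mark_idle_model(struct slot_model *tab, size_t nr, time_t cutoff)
    {
            for (size_t i = 0; i < nr; i++) {
                    if (!tab[i].allocated || tab[i].under_wb)
                            continue;
                    /* cutoff == 0 means "mark every slot idle" */
                    if (!cutoff || tab[i].last_access < cutoff)
                            tab[i].idle = true;
            }
    }
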
256 struct zram *zram = dev_to_zram(dev);
274 down_read(&zram->init_lock);
275 if (!init_done(zram))
282 mark_idle(zram, cutoff_time);
286 up_read(&zram->init_lock);
295 struct zram *zram = dev_to_zram(dev);
302 down_read(&zram->init_lock);
303 spin_lock(&zram->wb_limit_lock);
304 zram->wb_limit_enable = val;
305 spin_unlock(&zram->wb_limit_lock);
306 up_read(&zram->init_lock);
316 struct zram *zram = dev_to_zram(dev);
318 down_read(&zram->init_lock);
319 spin_lock(&zram->wb_limit_lock);
320 val = zram->wb_limit_enable;
321 spin_unlock(&zram->wb_limit_lock);
322 up_read(&zram->init_lock);
330 struct zram *zram = dev_to_zram(dev);
337 down_read(&zram->init_lock);
338 spin_lock(&zram->wb_limit_lock);
339 zram->bd_wb_limit = val;
340 spin_unlock(&zram->wb_limit_lock);
341 up_read(&zram->init_lock);
351 struct zram *zram = dev_to_zram(dev);
353 down_read(&zram->init_lock);
354 spin_lock(&zram->wb_limit_lock);
355 val = zram->bd_wb_limit;
356 spin_unlock(&zram->wb_limit_lock);
357 up_read(&zram->init_lock);
362 static void reset_bdev(struct zram *zram)
366 if (!zram->backing_dev)
369 bdev = zram->bdev;
370 blkdev_put(bdev, zram);
372 filp_close(zram->backing_dev, NULL);
373 zram->backing_dev = NULL;
374 zram->bdev = NULL;
375 zram->disk->fops = &zram_devops;
376 kvfree(zram->bitmap);
377 zram->bitmap = NULL;
384 struct zram *zram = dev_to_zram(dev);
388 down_read(&zram->init_lock);
389 file = zram->backing_dev;
392 up_read(&zram->init_lock);
406 up_read(&zram->init_lock);
422 struct zram *zram = dev_to_zram(dev);
428 down_write(&zram->init_lock);
429 if (init_done(zram)) {
458 zram, NULL);
473 reset_bdev(zram);
475 zram->bdev = bdev;
476 zram->backing_dev = backing_dev;
477 zram->bitmap = bitmap;
478 zram->nr_pages = nr_pages;
479 up_write(&zram->init_lock);
489 blkdev_put(bdev, zram);
494 up_write(&zram->init_lock);
501 static unsigned long alloc_block_bdev(struct zram *zram)
505 /* skip bit 0 so a block index of 0 is never confused with zram.handle == 0 */
506 blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
507 if (blk_idx == zram->nr_pages)
510 if (test_and_set_bit(blk_idx, zram->bitmap))
513 atomic64_inc(&zram->stats.bd_count);
517 static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
521 was_set = test_and_clear_bit(blk_idx, zram->bitmap);
523 atomic64_dec(&zram->stats.bd_count);
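
alloc_block_bdev() and free_block_bdev() (lines 501-523) hand out backing-device blocks from a bitmap, deliberately never returning block 0 so a stored value of 0 can double as "no block", and using test-and-set so concurrent callers cannot grab the same bit. A user-space model (one byte per block rather than one bit, for brevity):

    #include <stdatomic.h>
    #include <stddef.h>

    #define NR_BLOCKS 1024

    static atomic_char bitmap[NR_BLOCKS];

    static size_t alloc_block_model(void)   /* 0 means allocation failed */
    {
            for (size_t i = 1; i < NR_BLOCKS; i++) {
                    /* atomic_exchange plays the role of test_and_set_bit() */
                    if (!atomic_exchange(&bitmap[i], 1))
                            return i;
            }
            return 0;
    }

    static void free_block_model(size_t i)
    {
            atomic_store(&bitmap[i], 0);
    }
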
526 static void read_from_bdev_async(struct zram *zram, struct page *page,
531 bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
545 struct zram *zram = dev_to_zram(dev);
546 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
566 down_read(&zram->init_lock);
567 if (!init_done(zram)) {
572 if (!zram->backing_dev) {
584 spin_lock(&zram->wb_limit_lock);
585 if (zram->wb_limit_enable && !zram->bd_wb_limit) {
586 spin_unlock(&zram->wb_limit_lock);
590 spin_unlock(&zram->wb_limit_lock);
593 blk_idx = alloc_block_bdev(zram);
600 zram_slot_lock(zram, index);
601 if (!zram_allocated(zram, index))
604 if (zram_test_flag(zram, index, ZRAM_WB) ||
605 zram_test_flag(zram, index, ZRAM_SAME) ||
606 zram_test_flag(zram, index, ZRAM_UNDER_WB))
610 !zram_test_flag(zram, index, ZRAM_IDLE))
613 !zram_test_flag(zram, index, ZRAM_HUGE))
616 !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
623 zram_set_flag(zram, index, ZRAM_UNDER_WB);
625 zram_set_flag(zram, index, ZRAM_IDLE);
626 zram_slot_unlock(zram, index);
627 if (zram_read_page(zram, page, index, NULL)) {
628 zram_slot_lock(zram, index);
629 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
630 zram_clear_flag(zram, index, ZRAM_IDLE);
631 zram_slot_unlock(zram, index);
635 bio_init(&bio, zram->bdev, &bio_vec, 1,
646 zram_slot_lock(zram, index);
647 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
648 zram_clear_flag(zram, index, ZRAM_IDLE);
649 zram_slot_unlock(zram, index);
662 atomic64_inc(&zram->stats.bd_writes);
672 zram_slot_lock(zram, index);
673 if (!zram_allocated(zram, index) ||
674 !zram_test_flag(zram, index, ZRAM_IDLE)) {
675 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
676 zram_clear_flag(zram, index, ZRAM_IDLE);
680 zram_free_page(zram, index);
681 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
682 zram_set_flag(zram, index, ZRAM_WB);
683 zram_set_element(zram, index, blk_idx);
685 atomic64_inc(&zram->stats.pages_stored);
686 spin_lock(&zram->wb_limit_lock);
687 if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
688 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
689 spin_unlock(&zram->wb_limit_lock);
691 zram_slot_unlock(zram, index);
695 free_block_bdev(zram, blk_idx);
698 up_read(&zram->init_lock);
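
Note the units on line 688: bd_wb_limit is accounted in 4 KiB chunks, so writing back one page costs 1UL << (PAGE_SHIFT - 12) units, i.e. 1 on a 4 KiB-page kernel and 16 on a 64 KiB-page kernel. A one-line illustration (PAGE_SHIFT here is a stand-in for the kernel constant):

    #define PAGE_SHIFT 12   /* 4 KiB pages */

    static unsigned long wb_units_per_page(void)
    {
            return 1UL << (PAGE_SHIFT - 12);   /* 1 here; 16 for 64 KiB pages */
    }
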
705 struct zram *zram;
717 bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
728 static int read_from_bdev_sync(struct zram *zram, struct page *page,
734 work.zram = zram;
745 static int read_from_bdev(struct zram *zram, struct page *page,
748 atomic64_inc(&zram->stats.bd_reads);
752 return read_from_bdev_sync(zram, page, entry);
754 read_from_bdev_async(zram, page, entry, parent);
758 static inline void reset_bdev(struct zram *zram) {};
759 static int read_from_bdev(struct zram *zram, struct page *page,
765 static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
774 zram_debugfs_root = debugfs_create_dir("zram", NULL);
782 static void zram_accessed(struct zram *zram, u32 index)
784 zram_clear_flag(zram, index, ZRAM_IDLE);
785 zram->table[index].ac_time = ktime_get_boottime();
793 struct zram *zram = file->private_data;
794 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
801 down_read(&zram->init_lock);
802 if (!init_done(zram)) {
803 up_read(&zram->init_lock);
811 zram_slot_lock(zram, index);
812 if (!zram_allocated(zram, index))
815 ts = ktime_to_timespec64(zram->table[index].ac_time);
820 zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
821 zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
822 zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
823 zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
824 zram_get_priority(zram, index) ? 'r' : '.',
825 zram_test_flag(zram, index,
829 zram_slot_unlock(zram, index);
835 zram_slot_unlock(zram, index);
839 up_read(&zram->init_lock);
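
The ternaries at lines 820-825 build the block_state flag string, one character per flag with '.' for "clear". A sketch of the resulting per-slot line (field widths, the timestamp split, and the 'n' character for ZRAM_INCOMPRESSIBLE are assumptions inferred from the fragment above):

    #include <stdio.h>

    static void print_slot_state(unsigned long index, long sec, long usec,
                                 int same, int wb, int huge, int idle,
                                 int recomp, int incompressible)
    {
            printf("%12lu %12ld.%06ld %c%c%c%c%c%c\n", index, sec, usec,
                   same ? 's' : '.', wb ? 'w' : '.', huge ? 'h' : '.',
                   idle ? 'i' : '.', recomp ? 'r' : '.',
                   incompressible ? 'n' : '.');
    }
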
853 static void zram_debugfs_register(struct zram *zram)
858 zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
860 debugfs_create_file("block_state", 0400, zram->debugfs_dir,
861 zram, &proc_zram_block_state_op);
864 static void zram_debugfs_unregister(struct zram *zram)
866 debugfs_remove_recursive(zram->debugfs_dir);
871 static void zram_accessed(struct zram *zram, u32 index)
873 zram_clear_flag(zram, index, ZRAM_IDLE);
875 static void zram_debugfs_register(struct zram *zram) {};
876 static void zram_debugfs_unregister(struct zram *zram) {};
900 static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
903 if (zram->comp_algs[prio] != default_compressor)
904 kfree(zram->comp_algs[prio]);
906 zram->comp_algs[prio] = alg;
909 static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
913 down_read(&zram->init_lock);
914 sz = zcomp_available_show(zram->comp_algs[prio], buf);
915 up_read(&zram->init_lock);
920 static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
942 down_write(&zram->init_lock);
943 if (init_done(zram)) {
944 up_write(&zram->init_lock);
950 comp_algorithm_set(zram, prio, compressor);
951 up_write(&zram->init_lock);
959 struct zram *zram = dev_to_zram(dev);
961 return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
969 struct zram *zram = dev_to_zram(dev);
972 ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf);
981 struct zram *zram = dev_to_zram(dev);
986 if (!zram->comp_algs[prio])
990 sz += __comp_algorithm_show(zram, prio, buf + sz);
1001 struct zram *zram = dev_to_zram(dev);
1033 ret = __comp_algorithm_store(zram, prio, alg);
1041 struct zram *zram = dev_to_zram(dev);
1043 down_read(&zram->init_lock);
1044 if (!init_done(zram)) {
1045 up_read(&zram->init_lock);
1049 zs_compact(zram->mem_pool);
1050 up_read(&zram->init_lock);
1058 struct zram *zram = dev_to_zram(dev);
1061 down_read(&zram->init_lock);
1064 (u64)atomic64_read(&zram->stats.failed_reads),
1065 (u64)atomic64_read(&zram->stats.failed_writes),
1066 (u64)atomic64_read(&zram->stats.notify_free));
1067 up_read(&zram->init_lock);
1075 struct zram *zram = dev_to_zram(dev);
1083 down_read(&zram->init_lock);
1084 if (init_done(zram)) {
1085 mem_used = zs_get_total_pages(zram->mem_pool);
1086 zs_pool_stats(zram->mem_pool, &pool_stats);
1089 orig_size = atomic64_read(&zram->stats.pages_stored);
1090 max_used = atomic_long_read(&zram->stats.max_used_pages);
1095 (u64)atomic64_read(&zram->stats.compr_data_size),
1097 zram->limit_pages << PAGE_SHIFT,
1099 (u64)atomic64_read(&zram->stats.same_pages),
1101 (u64)atomic64_read(&zram->stats.huge_pages),
1102 (u64)atomic64_read(&zram->stats.huge_pages_since));
1103 up_read(&zram->init_lock);
1113 struct zram *zram = dev_to_zram(dev);
1116 down_read(&zram->init_lock);
1119 FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
1120 FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
1121 FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
1122 up_read(&zram->init_lock);
1132 struct zram *zram = dev_to_zram(dev);
1135 down_read(&zram->init_lock);
1139 (u64)atomic64_read(&zram->stats.writestall),
1140 (u64)atomic64_read(&zram->stats.miss_free));
1141 up_read(&zram->init_lock);
1156 struct zram *zram = dev_to_zram(dev);
1159 down_read(&zram->init_lock);
1160 if (zram->zgrp_ctrl == ZGRP_NONE)
1162 else if (zram->zgrp_ctrl == ZGRP_TRACK)
1165 else if (zram->zgrp_ctrl == ZGRP_WRITE)
1168 up_read(&zram->init_lock);
1176 struct zram *zram = dev_to_zram(dev);
1184 group_debug(zram, op, index, gid);
1190 down_write(&zram->init_lock);
1191 if (init_done(zram)) {
1197 zram->zgrp_ctrl = ZGRP_NONE;
1199 zram->zgrp_ctrl = ZGRP_TRACK;
1202 zram->zgrp_ctrl = ZGRP_WRITE;
1207 up_write(&zram->init_lock);
1213 static void zram_meta_free(struct zram *zram, u64 disksize)
1218 /* Free all pages that are still in this zram device */
1220 zram_free_page(zram, index);
1222 zs_destroy_pool(zram->mem_pool);
1223 vfree(zram->table);
1225 zram_group_deinit(zram);
1229 static bool zram_meta_alloc(struct zram *zram, u64 disksize)
1234 zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
1235 if (!zram->table)
1238 zram->mem_pool = zs_create_pool(zram->disk->disk_name);
1239 if (!zram->mem_pool) {
1240 vfree(zram->table);
1245 huge_class_size = zs_huge_class_size(zram->mem_pool);
1247 zram_group_init(zram, num_pages);
1258 static void zram_free_page(struct zram *zram, size_t index)
1263 zram_group_untrack_obj(zram, index);
1267 zram->table[index].ac_time = 0;
1269 if (zram_test_flag(zram, index, ZRAM_IDLE))
1270 zram_clear_flag(zram, index, ZRAM_IDLE);
1272 if (zram_test_flag(zram, index, ZRAM_HUGE)) {
1273 zram_clear_flag(zram, index, ZRAM_HUGE);
1274 atomic64_dec(&zram->stats.huge_pages);
1277 if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1278 zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1280 zram_set_priority(zram, index, 0);
1282 if (zram_test_flag(zram, index, ZRAM_WB)) {
1283 zram_clear_flag(zram, index, ZRAM_WB);
1284 free_block_bdev(zram, zram_get_element(zram, index));
1292 if (zram_test_flag(zram, index, ZRAM_SAME)) {
1293 zram_clear_flag(zram, index, ZRAM_SAME);
1294 atomic64_dec(&zram->stats.same_pages);
1298 handle = zram_get_handle(zram, index);
1302 zs_free(zram->mem_pool, handle);
1304 atomic64_sub(zram_get_obj_size(zram, index),
1305 &zram->stats.compr_data_size);
1307 atomic64_dec(&zram->stats.pages_stored);
1308 zram_set_handle(zram, index, 0);
1309 zram_set_obj_size(zram, index, 0);
1310 WARN_ON_ONCE(zram->table[index].flags &
1318 static int zram_read_from_zspool(struct zram *zram, struct page *page,
1328 handle = zram_get_handle(zram, index);
1329 if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
1333 value = handle ? zram_get_element(zram, index) : 0;
1340 size = zram_get_obj_size(zram, index);
1343 prio = zram_get_priority(zram, index);
1344 zstrm = zcomp_stream_get(zram->comps[prio]);
1347 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
1357 zcomp_stream_put(zram->comps[prio]);
1359 zs_unmap_object(zram->mem_pool, handle);
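
zram_read_from_zspool() (lines 1318-1359) short-circuits same-filled pages: when ZRAM_SAME is set there is no compressed object at all, only the repeated machine word (the "element"), and a read regenerates the page by filling it with that word. A user-space model of the fill:

    #include <stddef.h>

    static void fill_same_page(unsigned long *dst, size_t nr_words,
                               unsigned long element)
    {
            for (size_t i = 0; i < nr_words; i++)
                    dst[i] = element;       /* memset_l()-style fill */
    }
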
1363 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
1368 zram_slot_lock(zram, index);
1371 ret = zram_group_fault_obj(zram, index);
1373 zram_slot_unlock(zram, index);
1378 if (zram_test_flag(zram, index, ZRAM_GWB)) {
1379 zram_slot_unlock(zram, index);
1383 if (!zram_test_flag(zram, index, ZRAM_WB)) {
1385 ret = zram_read_from_zspool(zram, page, index);
1386 zram_slot_unlock(zram, index);
1392 zram_slot_unlock(zram, index);
1394 ret = read_from_bdev(zram, page, zram_get_element(zram, index),
1409 static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
1417 ret = zram_read_page(zram, page, index, NULL);
1424 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
1428 return zram_bvec_read_partial(zram, bvec, index, offset);
1429 return zram_read_page(zram, bvec->bv_page, index, bio);
1432 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
1448 atomic64_inc(&zram->stats.same_pages);
1454 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1460 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1462 zs_free(zram->mem_pool, handle);
1482 handle = zs_malloc(zram->mem_pool, comp_len,
1488 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1489 atomic64_inc(&zram->stats.writestall);
1490 handle = zs_malloc(zram->mem_pool, comp_len,
1505 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1508 alloced_pages = zs_get_total_pages(zram->mem_pool);
1509 update_used_max(zram, alloced_pages);
1511 if (zram->limit_pages && alloced_pages > zram->limit_pages) {
1512 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1513 zs_free(zram->mem_pool, handle);
1517 dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
1526 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1527 zs_unmap_object(zram->mem_pool, handle);
1528 atomic64_add(comp_len, &zram->stats.compr_data_size);
1534 zram_slot_lock(zram, index);
1535 zram_free_page(zram, index);
1538 zram_set_flag(zram, index, ZRAM_HUGE);
1539 atomic64_inc(&zram->stats.huge_pages);
1540 atomic64_inc(&zram->stats.huge_pages_since);
1544 zram_set_flag(zram, index, flags);
1545 zram_set_element(zram, index, element);
1547 zram_set_handle(zram, index, handle);
1548 zram_set_obj_size(zram, index, comp_len);
1551 zram_group_track_obj(zram, index, page_memcg(page));
1553 zram_slot_unlock(zram, index);
1556 atomic64_inc(&zram->stats.pages_stored);
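
The write side (zram_write_page(), lines 1432-1556) is the producer of those same-filled slots: a page whose words are all identical is recorded as (ZRAM_SAME, element) and never reaches the compressor, which is what the same_pages counter on line 1448 tracks. A model of the detection, in the spirit of the kernel's page_same_filled():

    #include <stdbool.h>
    #include <stddef.h>

    static bool page_same_filled_model(const unsigned long *page,
                                       size_t nr_words, unsigned long *element)
    {
            for (size_t i = 1; i < nr_words; i++) {
                    if (page[i] != page[0])
                            return false;
            }
            *element = page[0];
            return true;
    }
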
1563 static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
1572 ret = zram_read_page(zram, page, index, bio);
1575 ret = zram_write_page(zram, page, index);
1581 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
1585 return zram_bvec_write_partial(zram, bvec, index, offset, bio);
1586 return zram_write_page(zram, bvec->bv_page, index);
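
Sub-page writes (lines 1563-1586) are read-modify-write: zram stores whole pages only, so a partial write first reads the current page into a bounce buffer, patches the affected range, then stores the full page again. A sketch, with read_page()/write_page() as hypothetical stand-ins for the driver's helpers:

    #include <string.h>

    extern int read_page(unsigned char *page, unsigned int index);
    extern int write_page(const unsigned char *page, unsigned int index);

    static int partial_write_model(unsigned char *bounce, unsigned int index,
                                   const unsigned char *src,
                                   size_t off, size_t len)
    {
            if (read_page(bounce, index))     /* fetch the stored page */
                    return -1;
            memcpy(bounce + off, src, len);   /* patch only the written bytes */
            return write_page(bounce, index); /* store the whole page again */
    }
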
1597 static int zram_recompress(struct zram *zram, u32 index, struct page *page,
1611 handle_old = zram_get_handle(zram, index);
1615 comp_len_old = zram_get_obj_size(zram, index);
1622 ret = zram_read_from_zspool(zram, page, index);
1626 class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
1632 if (!zram->comps[prio])
1639 if (prio <= zram_get_priority(zram, index))
1643 zstrm = zcomp_stream_get(zram->comps[prio]);
1649 zcomp_stream_put(zram->comps[prio]);
1653 class_index_new = zs_lookup_class_index(zram->mem_pool,
1659 zcomp_stream_put(zram->comps[prio]);
1686 if (num_recomps == zram->num_active_comps - 1)
1687 zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1702 handle_new = zs_malloc(zram->mem_pool, comp_len_new,
1708 zcomp_stream_put(zram->comps[prio]);
1712 dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
1714 zcomp_stream_put(zram->comps[prio]);
1716 zs_unmap_object(zram->mem_pool, handle_new);
1718 zram_free_page(zram, index);
1719 zram_set_handle(zram, index, handle_new);
1720 zram_set_obj_size(zram, index, comp_len_new);
1721 zram_set_priority(zram, index, prio);
1723 atomic64_add(comp_len_new, &zram->stats.compr_data_size);
1724 atomic64_inc(&zram->stats.pages_stored);
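
zram_recompress() (lines 1597-1724) tries secondary algorithms in priority order and keeps the first result that actually shrinks the object; once every registered secondary algorithm has been tried without success (line 1686), the slot is flagged ZRAM_INCOMPRESSIBLE so later passes skip it. A control-flow model, with have_comp()/compress_with() as hypothetical stand-ins:

    #include <stdbool.h>

    #define MAX_COMPS 4                          /* illustrative limit */

    extern bool have_comp(int prio);             /* algorithm registered? */
    extern unsigned int compress_with(int prio); /* returns new length */

    static bool recompress_model(int num_active, unsigned int len_old,
                                 bool *incompressible)
    {
            int tried = 0;

            for (int prio = 1; prio < MAX_COMPS; prio++) {  /* 0 is the primary */
                    if (!have_comp(prio))
                            continue;
                    tried++;
                    if (compress_with(prio) < len_old)
                            return true;         /* smaller object: keep it */
            }
            /* every secondary algorithm tried, none helped */
            *incompressible = (tried == num_active - 1);
            return false;
    }
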
1737 struct zram *zram = dev_to_zram(dev);
1738 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
1782 down_read(&zram->init_lock);
1783 if (!init_done(zram)) {
1792 if (!zram->comp_algs[prio])
1795 if (!strcmp(zram->comp_algs[prio], algo)) {
1818 zram_slot_lock(zram, index);
1820 if (!zram_allocated(zram, index))
1824 !zram_test_flag(zram, index, ZRAM_IDLE))
1828 !zram_test_flag(zram, index, ZRAM_HUGE))
1831 if (zram_test_flag(zram, index, ZRAM_WB) ||
1832 zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
1833 zram_test_flag(zram, index, ZRAM_SAME) ||
1834 zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1837 err = zram_recompress(zram, index, page, threshold,
1840 zram_slot_unlock(zram, index);
1852 up_read(&zram->init_lock);
1857 static void zram_bio_discard(struct zram *zram, struct bio *bio)
1865 * zram manages data in physical block size units. Because logical block
1883 zram_slot_lock(zram, index);
1884 zram_free_page(zram, index);
1885 zram_slot_unlock(zram, index);
1886 atomic64_inc(&zram->stats.notify_free);
1894 static void zram_bio_read(struct zram *zram, struct bio *bio)
1907 if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
1908 atomic64_inc(&zram->stats.failed_reads);
1914 zram_slot_lock(zram, index);
1915 zram_accessed(zram, index);
1916 zram_slot_unlock(zram, index);
1925 static void zram_bio_write(struct zram *zram, struct bio *bio)
1938 if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
1939 atomic64_inc(&zram->stats.failed_writes);
1944 zram_slot_lock(zram, index);
1945 zram_accessed(zram, index);
1946 zram_slot_unlock(zram, index);
1956 * Handler function for all zram I/O requests.
1960 struct zram *zram = bio->bi_bdev->bd_disk->private_data;
1964 zram_bio_read(zram, bio);
1967 zram_bio_write(zram, bio);
1971 zram_bio_discard(zram, bio);
1982 struct zram *zram;
1984 zram = bdev->bd_disk->private_data;
1986 atomic64_inc(&zram->stats.notify_free);
1987 if (!zram_slot_trylock(zram, index)) {
1988 atomic64_inc(&zram->stats.miss_free);
1992 zram_free_page(zram, index);
1993 zram_slot_unlock(zram, index);
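
zram_slot_free_notify() (lines 1982-1993) runs in a context that must not sleep, so it only trylocks the slot: on contention it bumps miss_free and leaves the stale slot for later reclaim rather than blocking. A pthread model of the same pattern:

    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_long miss_free;

    static void slot_free_notify_model(void)
    {
            if (pthread_mutex_trylock(&slot_lock) != 0) {
                    atomic_fetch_add(&miss_free, 1);   /* count, don't block */
                    return;
            }
            /* ... free the slot ... */
            pthread_mutex_unlock(&slot_lock);
    }
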
1996 static void zram_destroy_comps(struct zram *zram)
2001 struct zcomp *comp = zram->comps[prio];
2003 zram->comps[prio] = NULL;
2007 zram->num_active_comps--;
2011 static void zram_reset_device(struct zram *zram)
2013 down_write(&zram->init_lock);
2015 zram->limit_pages = 0;
2017 if (!init_done(zram)) {
2018 up_write(&zram->init_lock);
2022 set_capacity_and_notify(zram->disk, 0);
2023 part_stat_set_all(zram->disk->part0, 0);
2026 zram_meta_free(zram, zram->disksize);
2027 zram->disksize = 0;
2028 zram_destroy_comps(zram);
2029 memset(&zram->stats, 0, sizeof(zram->stats));
2030 reset_bdev(zram);
2032 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2033 up_write(&zram->init_lock);
2041 struct zram *zram = dev_to_zram(dev);
2049 down_write(&zram->init_lock);
2050 if (init_done(zram)) {
2057 if (!zram_meta_alloc(zram, disksize)) {
2063 if (!zram->comp_algs[prio])
2066 comp = zcomp_create(zram->comp_algs[prio]);
2069 zram->comp_algs[prio]);
2074 zram->comps[prio] = comp;
2075 zram->num_active_comps++;
2077 zram->disksize = disksize;
2078 set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
2079 up_write(&zram->init_lock);
2084 zram_destroy_comps(zram);
2085 zram_meta_free(zram, disksize);
2087 up_write(&zram->init_lock);
2096 struct zram *zram;
2106 zram = dev_to_zram(dev);
2107 disk = zram->disk;
2111 if (disk_openers(disk) || zram->claim) {
2116 /* From now on, no one can open /dev/zram[0-9] */
2117 zram->claim = true;
2122 zram_reset_device(zram);
2125 zram->claim = false;
2133 struct zram *zram = disk->private_data;
2137 /* zram was claimed for reset, so the open request fails */
2138 if (zram->claim)
2208 * Allocate and initialize a new zram device. The function returns
2213 struct zram *zram;
2216 zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
2217 if (!zram)
2220 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
2225 init_rwsem(&zram->init_lock);
2227 spin_lock_init(&zram->wb_limit_lock);
2231 zram->disk = blk_alloc_disk(NUMA_NO_NODE);
2232 if (!zram->disk) {
2239 zram->disk->major = zram_major;
2240 zram->disk->first_minor = device_id;
2241 zram->disk->minors = 1;
2242 zram->disk->flags |= GENHD_FL_NO_PART;
2243 zram->disk->fops = &zram_devops;
2244 zram->disk->private_data = zram;
2245 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
2247 /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
2248 set_capacity(zram->disk, 0);
2249 /* zram devices sort of resemble non-rotational disks */
2250 blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
2251 blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
2257 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
2258 blk_queue_logical_block_size(zram->disk->queue,
2260 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
2261 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
2262 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
2263 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
2274 blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
2276 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
2277 ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
2281 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2283 zram_debugfs_register(zram);
2284 pr_info("Added device: %s\n", zram->disk->disk_name);
2288 put_disk(zram->disk);
2292 kfree(zram);
2296 static int zram_remove(struct zram *zram)
2300 mutex_lock(&zram->disk->open_mutex);
2301 if (disk_openers(zram->disk)) {
2302 mutex_unlock(&zram->disk->open_mutex);
2306 claimed = zram->claim;
2308 zram->claim = true;
2309 mutex_unlock(&zram->disk->open_mutex);
2311 zram_debugfs_unregister(zram);
2321 sync_blockdev(zram->disk->part0);
2322 zram_reset_device(zram);
2325 pr_info("Removed device: %s\n", zram->disk->disk_name);
2327 del_gendisk(zram->disk);
2330 WARN_ON_ONCE(claimed && zram->claim);
2337 zram_reset_device(zram);
2339 put_disk(zram->disk);
2340 kfree(zram);
2344 /* zram-control sysfs attributes */
2349 * creates a new un-initialized zram device and returns this device's
2375 struct zram *zram;
2387 zram = idr_find(&zram_index_idr, dev_id);
2388 if (zram) {
2389 ret = zram_remove(zram);
2409 .name = "zram-control",
2425 unregister_blkdev(zram_major, "zram");
2435 ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
2442 pr_err("Unable to register zram-control class\n");
2448 zram_major = register_blkdev(0, "zram");
2481 MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");