Lines Matching defs:index
44 /* idr index must be protected */
60 static void zram_free_page(struct zram *zram, size_t index);
61 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
74 static inline void zram_set_element(struct zram *zram, u32 index,
77 zram->table[index].element = element;
80 static unsigned long zram_get_element(struct zram *zram, u32 index)
82 return zram->table[index].element;
85 static inline bool zram_allocated(struct zram *zram, u32 index)
87 return zram_get_obj_size(zram, index) ||
88 zram_test_flag(zram, index, ZRAM_SAME) ||
89 zram_test_flag(zram, index, ZRAM_WB);
105 static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
112 zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
114 zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
117 static inline u32 zram_get_priority(struct zram *zram, u32 index)
119 u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
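
The two helpers above pack a small compression-priority field into the per-slot flags word. A minimal standalone sketch of the same mask/shift arithmetic; the bit position and 2-bit width here are illustrative assumptions, not the driver's actual constants:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative values only; the driver defines its own positions. */
    #define COMP_PRIORITY_BIT1 24
    #define COMP_PRIORITY_MASK 0x3u   /* two bits => priorities 0..3 */

    static void set_priority(uint32_t *flags, uint32_t prio)
    {
            *flags &= ~(COMP_PRIORITY_MASK << COMP_PRIORITY_BIT1); /* clear old */
            *flags |= (prio & COMP_PRIORITY_MASK) << COMP_PRIORITY_BIT1;
    }

    static uint32_t get_priority(uint32_t flags)
    {
            return (flags >> COMP_PRIORITY_BIT1) & COMP_PRIORITY_MASK;
    }

    int main(void)
    {
            uint32_t flags = 0;
            set_priority(&flags, 2);
            assert(get_priority(flags) == 2);
            return 0;
    }
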
233 int index;
235 for (index = 0; index < nr_pages; index++) {
240 zram_slot_lock(zram, index);
241 if (zram_allocated(zram, index) &&
242 !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
244 is_idle = !cutoff || ktime_after(cutoff, zram->table[index].ac_time);
247 zram_set_flag(zram, index, ZRAM_IDLE);
249 zram_slot_unlock(zram, index);
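
The loop above (lines 233-249) is the idle-marking pass: take the slot's bit_spinlock, check the slot state, set ZRAM_IDLE, release. A compact standalone model of that test, with plain bit flags standing in for the driver's table (the FLAG_* values are assumptions for illustration; is_idle stands in for the cutoff/ac_time check):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define FLAG_ALLOCATED (1u << 0)   /* stand-in for zram_allocated() */
    #define FLAG_UNDER_WB  (1u << 1)
    #define FLAG_IDLE      (1u << 2)

    struct slot { uint32_t flags; };

    /* Mark every allocated slot idle unless it is mid-writeback. */
    static void mark_idle(struct slot *table, size_t nr_pages, bool is_idle)
    {
            for (size_t i = 0; i < nr_pages; i++) {
                    /* the driver takes the per-slot bit_spinlock here */
                    if ((table[i].flags & FLAG_ALLOCATED) &&
                        !(table[i].flags & FLAG_UNDER_WB) && is_idle)
                            table[i].flags |= FLAG_IDLE;
                    /* ...and drops it here */
            }
    }
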
547 unsigned long index = 0;
583 for (index = 0; index < nr_pages; index++) {
600 zram_slot_lock(zram, index);
601 if (!zram_allocated(zram, index))
604 if (zram_test_flag(zram, index, ZRAM_WB) ||
605 zram_test_flag(zram, index, ZRAM_SAME) ||
606 zram_test_flag(zram, index, ZRAM_UNDER_WB))
610 !zram_test_flag(zram, index, ZRAM_IDLE))
613 !zram_test_flag(zram, index, ZRAM_HUGE))
616 !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
623 zram_set_flag(zram, index, ZRAM_UNDER_WB);
625 zram_set_flag(zram, index, ZRAM_IDLE);
626 zram_slot_unlock(zram, index);
627 if (zram_read_page(zram, page, index, NULL)) {
628 zram_slot_lock(zram, index);
629 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
630 zram_clear_flag(zram, index, ZRAM_IDLE);
631 zram_slot_unlock(zram, index);
646 zram_slot_lock(zram, index);
647 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
648 zram_clear_flag(zram, index, ZRAM_IDLE);
649 zram_slot_unlock(zram, index);
672 zram_slot_lock(zram, index);
673 if (!zram_allocated(zram, index) ||
674 !zram_test_flag(zram, index, ZRAM_IDLE)) {
675 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
676 zram_clear_flag(zram, index, ZRAM_IDLE);
680 zram_free_page(zram, index);
681 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
682 zram_set_flag(zram, index, ZRAM_WB);
683 zram_set_element(zram, index, blk_idx);
691 zram_slot_unlock(zram, index);
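
Taken together, lines 583-691 walk each candidate slot through a small writeback state machine: mark it ZRAM_UNDER_WB and ZRAM_IDLE, drop the lock for the block-device I/O, then either roll the flags back on failure or retake the lock, recheck that the slot is still allocated and idle, and commit ZRAM_WB with the backing-store block index. A hedged sketch of those transitions (state names are mine, not the driver's):

    #include <stdbool.h>

    enum wb_state {
            SLOT_IN_ZSPOOL,    /* object still lives in zsmalloc */
            SLOT_UNDER_WB,     /* ZRAM_UNDER_WB set, I/O in flight */
            SLOT_WRITTEN_BACK, /* ZRAM_WB set, element = blk_idx */
    };

    static enum wb_state writeback_step(enum wb_state s, bool io_ok)
    {
            switch (s) {
            case SLOT_IN_ZSPOOL:
                    return SLOT_UNDER_WB;   /* flags set under the slot lock */
            case SLOT_UNDER_WB:
                    /* on failure UNDER_WB and IDLE are cleared again */
                    return io_ok ? SLOT_WRITTEN_BACK : SLOT_IN_ZSPOOL;
            default:
                    return s;
            }
    }
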
782 static void zram_accessed(struct zram *zram, u32 index)
784 zram_clear_flag(zram, index, ZRAM_IDLE);
785 zram->table[index].ac_time = ktime_get_boottime();
792 ssize_t index, written = 0;
808 for (index = *ppos; index < nr_pages; index++) {
811 zram_slot_lock(zram, index);
812 if (!zram_allocated(zram, index))
815 ts = ktime_to_timespec64(zram->table[index].ac_time);
818 index, (s64)ts.tv_sec,
820 zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
821 zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
822 zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
823 zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
824 zram_get_priority(zram, index) ? 'r' : '.',
825 zram_test_flag(zram, index,
829 zram_slot_unlock(zram, index);
835 zram_slot_unlock(zram, index);
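
The debugfs dump above prints one line per allocated slot: index, access time, then one character per flag. A standalone sketch of that line format ('r' comes from a nonzero recompression priority; the 'n' for ZRAM_INCOMPRESSIBLE is my reading of the truncated match at line 825):

    #include <stdbool.h>
    #include <stdio.h>

    /* Render one block-state line: index, timestamp, then the flag
     * column (same, writeback, huge, idle, recompressed, incompressible). */
    static void print_state(long index, long long sec, unsigned long usec,
                            bool same, bool wb, bool huge, bool idle,
                            bool recomp, bool incomp)
    {
            printf("%12ld %12lld.%06lu %c%c%c%c%c%c\n", index, sec, usec,
                   same ? 's' : '.', wb ? 'w' : '.', huge ? 'h' : '.',
                   idle ? 'i' : '.', recomp ? 'r' : '.', incomp ? 'n' : '.');
    }
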
871 static void zram_accessed(struct zram *zram, u32 index)
873 zram_clear_flag(zram, index, ZRAM_IDLE);
1179 u32 op, gid, index;
1181 ret = sscanf(buf, "%u %u %u", &op, &index, &gid);
1183 pr_info("op[%u] index[%u] gid[%u].\n", op, index, gid);
1184 group_debug(zram, op, index, gid);
1216 size_t index;
1219 for (index = 0; index < num_pages; index++)
1220 zram_free_page(zram, index);
1254 * To protect concurrent access to the same index entry,
1255 * the caller should hold the entry's bit_spinlock to
1256 * indicate that the entry is being accessed.
1258 static void zram_free_page(struct zram *zram, size_t index)
1263 zram_group_untrack_obj(zram, index);
1267 zram->table[index].ac_time = 0;
1269 if (zram_test_flag(zram, index, ZRAM_IDLE))
1270 zram_clear_flag(zram, index, ZRAM_IDLE);
1272 if (zram_test_flag(zram, index, ZRAM_HUGE)) {
1273 zram_clear_flag(zram, index, ZRAM_HUGE);
1277 if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1278 zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1280 zram_set_priority(zram, index, 0);
1282 if (zram_test_flag(zram, index, ZRAM_WB)) {
1283 zram_clear_flag(zram, index, ZRAM_WB);
1284 free_block_bdev(zram, zram_get_element(zram, index));
1292 if (zram_test_flag(zram, index, ZRAM_SAME)) {
1293 zram_clear_flag(zram, index, ZRAM_SAME);
1298 handle = zram_get_handle(zram, index);
1304 atomic64_sub(zram_get_obj_size(zram, index),
1308 zram_set_handle(zram, index, 0);
1309 zram_set_obj_size(zram, index, 0);
1310 WARN_ON_ONCE(zram->table[index].flags &
1319 u32 index)
1328 handle = zram_get_handle(zram, index);
1329 if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
1333 value = handle ? zram_get_element(zram, index) : 0;
1340 size = zram_get_obj_size(zram, index);
1343 prio = zram_get_priority(zram, index);
1363 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
1368 zram_slot_lock(zram, index);
1371 ret = zram_group_fault_obj(zram, index);
1373 zram_slot_unlock(zram, index);
1378 if (zram_test_flag(zram, index, ZRAM_GWB)) {
1379 zram_slot_unlock(zram, index);
1383 if (!zram_test_flag(zram, index, ZRAM_WB)) {
1385 ret = zram_read_from_zspool(zram, page, index);
1386 zram_slot_unlock(zram, index);
1392 zram_slot_unlock(zram, index);
1394 ret = read_from_bdev(zram, page, zram_get_element(zram, index),
1400 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
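
zram_read_page (lines 1363-1400) resolves a read from one of three places, checked in order under the slot lock. A minimal dispatch sketch; ZRAM_GWB is this tree's group-writeback flag, and the function here only names the source rather than doing I/O:

    #include <stdbool.h>

    /* Where a read for a given slot is served from, per the checks above. */
    static const char *read_source(bool gwb, bool wb)
    {
            if (gwb)
                    return "group writeback store";   /* ZRAM_GWB path */
            if (!wb)
                    return "zspool (decompress)";     /* zram_read_from_zspool */
            return "backing bdev";                    /* read_from_bdev(element) */
    }
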
1410 u32 index, int offset)
1417 ret = zram_read_page(zram, page, index, NULL);
1425 u32 index, int offset, struct bio *bio)
1428 return zram_bvec_read_partial(zram, bvec, index, offset);
1429 return zram_read_page(zram, bvec->bv_page, index, bio);
1432 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
1534 zram_slot_lock(zram, index);
1535 zram_free_page(zram, index);
1538 zram_set_flag(zram, index, ZRAM_HUGE);
1544 zram_set_flag(zram, index, flags);
1545 zram_set_element(zram, index, element);
1547 zram_set_handle(zram, index, handle);
1548 zram_set_obj_size(zram, index, comp_len);
1551 zram_group_track_obj(zram, index, page_memcg(page));
1553 zram_slot_unlock(zram, index);
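
The tail of zram_write_page (lines 1534-1553) records the result in one of three forms after freeing the slot's previous contents: a same-filled pattern kept in the element field, an uncompressed page flagged ZRAM_HUGE, or a zsmalloc handle plus compressed size. A standalone classification sketch (the enum and the huge_class_size comparison are illustrative; the driver derives its threshold from zsmalloc):

    #include <stdbool.h>
    #include <stddef.h>

    enum store_kind { STORE_SAME_FILLED, STORE_HUGE, STORE_COMPRESSED };

    /* How a freshly written page gets recorded in the slot table. */
    static enum store_kind classify(bool same_filled, size_t comp_len,
                                    size_t huge_class_size)
    {
            if (same_filled)
                    return STORE_SAME_FILLED; /* element keeps the fill word */
            if (comp_len >= huge_class_size)
                    return STORE_HUGE;        /* stored raw, ZRAM_HUGE set */
            return STORE_COMPRESSED;          /* handle + obj_size recorded */
    }
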
1564 u32 index, int offset, struct bio *bio)
1572 ret = zram_read_page(zram, page, index, bio);
1575 ret = zram_write_page(zram, page, index);
1582 u32 index, int offset, struct bio *bio)
1585 return zram_bvec_write_partial(zram, bvec, index, offset, bio);
1586 return zram_write_page(zram, bvec->bv_page, index);
1597 static int zram_recompress(struct zram *zram, u32 index, struct page *page,
1611 handle_old = zram_get_handle(zram, index);
1615 comp_len_old = zram_get_obj_size(zram, index);
1622 ret = zram_read_from_zspool(zram, page, index);
1639 if (prio <= zram_get_priority(zram, index))
1687 zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1718 zram_free_page(zram, index);
1719 zram_set_handle(zram, index, handle_new);
1720 zram_set_obj_size(zram, index, comp_len_new);
1721 zram_set_priority(zram, index, prio);
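
zram_recompress (lines 1597-1721) only replaces an object when a higher-priority algorithm actually shrinks it; otherwise it skips the slot or may flag it ZRAM_INCOMPRESSIBLE. A hedged outcome sketch (the real function also handles size thresholds and walks multiple priorities; this collapses that to a single comparison):

    #include <stddef.h>

    enum recomp_result { RECOMP_SKIP, RECOMP_INCOMPRESSIBLE, RECOMP_STORED };

    static enum recomp_result recompress_outcome(unsigned cur_prio,
                                                 unsigned new_prio,
                                                 size_t old_len,
                                                 size_t new_len)
    {
            if (new_prio <= cur_prio)
                    return RECOMP_SKIP;           /* already tried this level */
            if (new_len >= old_len)
                    return RECOMP_INCOMPRESSIBLE; /* no gain; may set the flag */
            return RECOMP_STORED; /* free old, install new handle/size/prio */
    }
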
1741 unsigned long index;
1815 for (index = 0; index < nr_pages; index++) {
1818 zram_slot_lock(zram, index);
1820 if (!zram_allocated(zram, index))
1824 !zram_test_flag(zram, index, ZRAM_IDLE))
1828 !zram_test_flag(zram, index, ZRAM_HUGE))
1831 if (zram_test_flag(zram, index, ZRAM_WB) ||
1832 zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
1833 zram_test_flag(zram, index, ZRAM_SAME) ||
1834 zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1837 err = zram_recompress(zram, index, page, threshold,
1840 zram_slot_unlock(zram, index);
1860 u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1879 index++;
1883 zram_slot_lock(zram, index);
1884 zram_free_page(zram, index);
1885 zram_slot_unlock(zram, index);
1887 index++;
1900 u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1907 if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
1914 zram_slot_lock(zram, index);
1915 zram_accessed(zram, index);
1916 zram_slot_unlock(zram, index);
1931 u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
1938 if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
1944 zram_slot_lock(zram, index);
1945 zram_accessed(zram, index);
1946 zram_slot_unlock(zram, index);
1980 unsigned long index)
1987 if (!zram_slot_trylock(zram, index)) {
1992 zram_free_page(zram, index);
1993 zram_slot_unlock(zram, index);
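
Finally, the swap-slot free notifier (lines 1980-1993) must not wait on a contended slot lock, so it frees best-effort via trylock. A standalone sketch of the pattern with stub lock helpers (counting the failed trylock as a miss reflects my understanding of the driver's behavior):

    #include <stdbool.h>

    /* Stubs standing in for the driver's per-slot bit_spinlock. */
    static bool try_lock_slot(unsigned long index) { (void)index; return true; }
    static void unlock_slot(unsigned long index)   { (void)index; }
    static void free_slot(unsigned long index)     { (void)index; }

    /* Called from swap context: never wait here.  A failed trylock just
     * gives up; the slot is reclaimed later when overwritten or reset. */
    static void slot_free_notify(unsigned long index)
    {
            if (!try_lock_slot(index))
                    return; /* the driver counts this as a missed free */
            free_slot(index);
            unlock_slot(index);
    }
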