Home
last modified time | relevance | path

Searched refs:from_cblock (Results 1 - 8 of 8) sorted by relevance

/kernel/linux/linux-5.10/drivers/md/
H A Ddm-cache-metadata.c705 disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); in __commit_transaction()
895 r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value); in block_clean_combined_dirty()
921 (unsigned long long) from_cblock(begin)); in blocks_are_clean_combined_dirty()
925 begin = to_cblock(from_cblock(begin) + 1); in blocks_are_clean_combined_dirty()
939 if (from_cblock(cmd->cache_blocks) == 0) in blocks_are_clean_separate_dirty()
944 from_cblock(cmd->cache_blocks), &cmd->dirty_cursor); in blocks_are_clean_separate_dirty()
950 r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin)); in blocks_are_clean_separate_dirty()
965 (unsigned long long) from_cblock(begin)); in blocks_are_clean_separate_dirty()
971 begin = to_cblock(from_cblock(begin) + 1); in blocks_are_clean_separate_dirty()
1057 if (from_cblock(new_cache_size) in dm_cache_resize()
[all...]
H A Ddm-cache-target.c676 return test_bit(from_cblock(b), cache->dirty_bitset); in is_dirty()
681 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { in set_dirty()
693 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) in force_set_dirty()
700 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { in force_clear_dirty()
790 sector_t block = from_cblock(cblock); in remap_to_cache()
1170 c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block; in copy()
2414 dm_block_t nr_blocks = from_cblock(size); in set_cache_size()
2542 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); in cache_create()
2547 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); in cache_create()
2741 r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size) in write_dirty_bitset()
[all...]
H A Ddm-cache-policy-smq.c1005 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
1113 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in end_cache_period()
1133 return from_cblock(mq->cache_size) * p / 100u; in percent_to_target()
1159 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; in free_target_met()
1469 from_cblock(work->cblock)); in __complete_background_work()
1525 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __smq_set_clear_dirty()
1558 return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1); in random_level()
1568 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1586 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_invalidate_mapping()
1601 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)) in smq_get_hint()
[all...]
H A Ddm-cache-block-types.h39 static inline uint32_t from_cblock(dm_cblock_t b) in from_cblock() function
/kernel/linux/linux-6.6/drivers/md/
H A Ddm-cache-metadata.c709 disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); in __commit_transaction()
901 r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value); in block_clean_combined_dirty()
927 (unsigned long long) from_cblock(begin)); in blocks_are_clean_combined_dirty()
931 begin = to_cblock(from_cblock(begin) + 1); in blocks_are_clean_combined_dirty()
945 if (from_cblock(cmd->cache_blocks) == 0) in blocks_are_clean_separate_dirty()
950 from_cblock(cmd->cache_blocks), &cmd->dirty_cursor); in blocks_are_clean_separate_dirty()
956 r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin)); in blocks_are_clean_separate_dirty()
971 (unsigned long long) from_cblock(begin)); in blocks_are_clean_separate_dirty()
977 begin = to_cblock(from_cblock(begin) + 1); in blocks_are_clean_separate_dirty()
1063 if (from_cblock(new_cache_size) in dm_cache_resize()
[all...]
H A Ddm-cache-target.c610 return test_bit(from_cblock(b), cache->dirty_bitset); in is_dirty()
615 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { in set_dirty()
627 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) in force_set_dirty()
634 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { in force_clear_dirty()
728 sector_t block = from_cblock(cblock); in remap_to_cache()
1105 c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block; in copy()
2362 dm_block_t nr_blocks = from_cblock(size); in set_cache_size()
2491 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); in cache_create()
2496 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); in cache_create()
2690 r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size) in write_dirty_bitset()
[all...]
H A Ddm-cache-policy-smq.c1008 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
1118 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in end_cache_period()
1138 return from_cblock(mq->cache_size) * p / 100u; in percent_to_target()
1164 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated; in free_target_met()
1474 from_cblock(work->cblock)); in __complete_background_work()
1530 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __smq_set_clear_dirty()
1563 return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1); in random_level()
1573 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1591 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_invalidate_mapping()
1606 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)) in smq_get_hint()
[all...]
H A Ddm-cache-block-types.h40 static inline uint32_t from_cblock(dm_cblock_t b) in from_cblock() function

Completed in 19 milliseconds