Lines matching refs:cache (a cross-reference listing of the device-mapper cache target, drivers/md/dm-cache-target.c; each entry below is prefixed with its line number in that file)
10 #include "dm-cache-metadata.h"
22 #define DM_MSG_PREFIX "cache"
25 "A percentage of time allocated for copying to and/or from cache");
33 * cblock: index of a cache block
34 * promotion: movement of a block from origin to cache
35 * demotion: movement of a block from cache to origin
36 * migration: movement of a block between the origin and cache device,
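A tiny sketch of that vocabulary, using illustrative names of my own rather than the driver's types: an oblock indexes the slow origin device, a cblock indexes the fast cache device, and a migration copies one block between them in either direction.

/* Illustrative only; these are not the driver's own definitions. */
enum migration_direction {
    PROMOTION,  /* origin -> cache */
    DEMOTION    /* cache -> origin */
};

struct migration_sketch {
    unsigned long long oblock;      /* index of an origin block */
    unsigned long long cblock;      /* index of a cache block */
    enum migration_direction dir;
};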
310 * The block size of the device holding cache data must be
325 * dirty. If you lose the cache device you will lose data.
331 * Data is written to both cache and origin. Blocks are never
337 * A degraded mode useful for various cache coherency situations
339 * origin. If a write goes to a cached oblock, then the cache
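The three I/O modes sketched in the comments above decide where a write lands. A small user-space sketch of that routing, with the mode names and the route_write helper invented here for illustration (the real decision also involves the policy and the block's dirty state):

#include <stdbool.h>

enum io_mode_sketch { WRITEBACK, WRITETHROUGH, PASSTHROUGH };

/* Sketch: where does a write go?  Mirrors the comments above:
 * writeback    - cache only; the block stays dirty until written back later
 * writethrough - both cache and origin, so the cache never holds the only copy
 * passthrough  - origin only; a cached copy must be invalidated first
 */
static void route_write(enum io_mode_sketch mode, bool block_is_cached,
                        bool *to_cache, bool *to_origin, bool *invalidate)
{
    *to_cache = *to_origin = *invalidate = false;

    switch (mode) {
    case WRITEBACK:
        *to_cache = true;
        break;
    case WRITETHROUGH:
        *to_cache = true;
        *to_origin = true;
        break;
    case PASSTHROUGH:
        *to_origin = true;
        *invalidate = block_is_cached;  /* keep the cache coherent */
        break;
    }
}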
366 struct cache {
400 * Size of the cache device in blocks.
490 struct cache *cache;
502 static bool writethrough_mode(struct cache *cache)
504 return cache->features.io_mode == CM_IO_WRITETHROUGH;
507 static bool writeback_mode(struct cache *cache)
509 return cache->features.io_mode == CM_IO_WRITEBACK;
512 static inline bool passthrough_mode(struct cache *cache)
514 return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
519 static void wake_deferred_bio_worker(struct cache *cache)
521 queue_work(cache->wq, &cache->deferred_bio_worker);
524 static void wake_migration_worker(struct cache *cache)
526 if (passthrough_mode(cache))
529 queue_work(cache->wq, &cache->migration_worker);
534 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
536 return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
539 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
541 dm_bio_prison_free_cell_v2(cache->prison, cell);
544 static struct dm_cache_migration *alloc_migration(struct cache *cache)
548 mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
552 mg->cache = cache;
553 atomic_inc(&cache->nr_allocated_migrations);
560 struct cache *cache = mg->cache;
562 if (atomic_dec_and_test(&cache->nr_allocated_migrations))
563 wake_up(&cache->migration_wait);
565 mempool_free(mg, &cache->migration_pool);
622 static void defer_bio(struct cache *cache, struct bio *bio)
624 spin_lock_irq(&cache->lock);
625 bio_list_add(&cache->deferred_bios, bio);
626 spin_unlock_irq(&cache->lock);
628 wake_deferred_bio_worker(cache);
631 static void defer_bios(struct cache *cache, struct bio_list *bios)
633 spin_lock_irq(&cache->lock);
634 bio_list_merge(&cache->deferred_bios, bios);
636 spin_unlock_irq(&cache->lock);
638 wake_deferred_bio_worker(cache);
643 static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
651 cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
654 r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
659 free_prison_cell(cache, cell_prealloc);
664 free_prison_cell(cache, cell_prealloc);
674 static bool is_dirty(struct cache *cache, dm_cblock_t b)
676 return test_bit(from_cblock(b), cache->dirty_bitset);
679 static void set_dirty(struct cache *cache, dm_cblock_t cblock)
681 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
682 atomic_inc(&cache->nr_dirty);
683 policy_set_dirty(cache->policy, cblock);
691 static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
693 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset))
694 atomic_inc(&cache->nr_dirty);
695 policy_set_dirty(cache->policy, cblock);
698 static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
700 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
701 if (atomic_dec_return(&cache->nr_dirty) == 0)
702 dm_table_event(cache->ti->table);
705 policy_clear_dirty(cache->policy, cblock);
710 static bool block_size_is_power_of_two(struct cache *cache)
712 return cache->sectors_per_block_shift >= 0;
722 static dm_block_t oblocks_per_dblock(struct cache *cache)
724 dm_block_t oblocks = cache->discard_block_size;
726 if (block_size_is_power_of_two(cache))
727 oblocks >>= cache->sectors_per_block_shift;
729 oblocks = block_div(oblocks, cache->sectors_per_block);
734 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
737 oblocks_per_dblock(cache)));
740 static void set_discard(struct cache *cache, dm_dblock_t b)
742 BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
743 atomic_inc(&cache->stats.discard_count);
745 spin_lock_irq(&cache->lock);
746 set_bit(from_dblock(b), cache->discard_bitset);
747 spin_unlock_irq(&cache->lock);
750 static void clear_discard(struct cache *cache, dm_dblock_t b)
752 spin_lock_irq(&cache->lock);
753 clear_bit(from_dblock(b), cache->discard_bitset);
754 spin_unlock_irq(&cache->lock);
757 static bool is_discarded(struct cache *cache, dm_dblock_t b)
760 spin_lock_irq(&cache->lock);
761 r = test_bit(from_dblock(b), cache->discard_bitset);
762 spin_unlock_irq(&cache->lock);
767 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
770 spin_lock_irq(&cache->lock);
771 r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
772 cache->discard_bitset);
773 spin_unlock_irq(&cache->lock);
781 static void remap_to_origin(struct cache *cache, struct bio *bio)
783 bio_set_dev(bio, cache->origin_dev->bdev);
786 static void remap_to_cache(struct cache *cache, struct bio *bio,
792 bio_set_dev(bio, cache->cache_dev->bdev);
793 if (!block_size_is_power_of_two(cache))
795 (block * cache->sectors_per_block) +
796 sector_div(bi_sector, cache->sectors_per_block);
799 (block << cache->sectors_per_block_shift) |
800 (bi_sector & (cache->sectors_per_block - 1));
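remap_to_cache above computes the cache-device sector two ways: a shift and mask when the block size is a power of two, a divide otherwise (the kernel's sector_div returns the remainder and updates its argument in place). A stand-alone sketch of the same arithmetic, with remap_sector being my own name for it; get_bio_block further down does the inverse, turning an origin sector into an origin block number.

#include <stdint.h>

/* Sketch of remap_to_cache's sector math.  'block' is the cache block
 * (cblock) the bio was mapped to, 'bi_sector' its sector offset. */
static uint64_t remap_sector(uint64_t bi_sector, uint64_t block,
                             uint64_t sectors_per_block,
                             int shift /* -1 if block size is not a power of two */)
{
    if (shift < 0) {
        /* generic path: divide to find the offset within the block */
        uint64_t offset_in_block = bi_sector % sectors_per_block;
        return block * sectors_per_block + offset_in_block;
    }
    /* power-of-two path: shift and mask, no division */
    return (block << shift) | (bi_sector & (sectors_per_block - 1));
}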
803 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
807 spin_lock_irq(&cache->lock);
808 if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
812 cache->need_tick_bio = false;
814 spin_unlock_irq(&cache->lock);
817 static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
821 check_if_tick_bio_needed(cache, bio);
822 remap_to_origin(cache, bio);
824 clear_discard(cache, oblock_to_dblock(cache, oblock));
827 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
831 __remap_to_origin_clear_discard(cache, bio, oblock, true);
834 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
837 check_if_tick_bio_needed(cache, bio);
838 remap_to_cache(cache, bio, cblock);
840 set_dirty(cache, cblock);
841 clear_discard(cache, oblock_to_dblock(cache, oblock));
845 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
849 if (!block_size_is_power_of_two(cache))
850 (void) sector_div(block_nr, cache->sectors_per_block);
852 block_nr >>= cache->sectors_per_block_shift;
857 static bool accountable_bio(struct cache *cache, struct bio *bio)
862 static void accounted_begin(struct cache *cache, struct bio *bio)
866 if (accountable_bio(cache, bio)) {
869 iot_io_begin(&cache->tracker, pb->len);
873 static void accounted_complete(struct cache *cache, struct bio *bio)
877 iot_io_end(&cache->tracker, pb->len);
880 static void accounted_request(struct cache *cache, struct bio *bio)
882 accounted_begin(cache, bio);
888 struct cache *cache = context;
889 accounted_request(cache, bio);
894 * to both the cache and origin devices. Clone the bio and send them in parallel.
896 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
899 struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
908 __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
911 remap_to_cache(cache, bio, cblock);
917 static enum cache_metadata_mode get_cache_mode(struct cache *cache)
919 return cache->features.mode;
922 static const char *cache_device_name(struct cache *cache)
924 return dm_table_device_name(cache->ti->table);
927 static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
935 dm_table_event(cache->ti->table);
936 DMINFO("%s: switching cache to %s mode",
937 cache_device_name(cache), descs[(int)mode]);
940 static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
943 enum cache_metadata_mode old_mode = get_cache_mode(cache);
945 if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
947 cache_device_name(cache));
952 DMERR("%s: unable to switch cache to write mode until repaired.",
953 cache_device_name(cache));
967 dm_cache_metadata_set_read_only(cache->cmd);
971 dm_cache_metadata_set_read_write(cache->cmd);
975 cache->features.mode = new_mode;
978 notify_mode_switch(cache, new_mode);
981 static void abort_transaction(struct cache *cache)
983 const char *dev_name = cache_device_name(cache);
985 if (get_cache_mode(cache) >= CM_READ_ONLY)
989 if (dm_cache_metadata_abort(cache->cmd)) {
991 set_cache_mode(cache, CM_FAIL);
994 if (dm_cache_metadata_set_needs_check(cache->cmd)) {
996 set_cache_mode(cache, CM_FAIL);
1000 static void metadata_operation_failed(struct cache *cache, const char *op, int r)
1003 cache_device_name(cache), op, r);
1004 abort_transaction(cache);
1005 set_cache_mode(cache, CM_READ_ONLY);
1010 static void load_stats(struct cache *cache)
1014 dm_cache_metadata_get_stats(cache->cmd, &stats);
1015 atomic_set(&cache->stats.read_hit, stats.read_hits);
1016 atomic_set(&cache->stats.read_miss, stats.read_misses);
1017 atomic_set(&cache->stats.write_hit, stats.write_hits);
1018 atomic_set(&cache->stats.write_miss, stats.write_misses);
1021 static void save_stats(struct cache *cache)
1025 if (get_cache_mode(cache) >= CM_READ_ONLY)
1028 stats.read_hits = atomic_read(&cache->stats.read_hit);
1029 stats.read_misses = atomic_read(&cache->stats.read_miss);
1030 stats.write_hits = atomic_read(&cache->stats.write_hit);
1031 stats.write_misses = atomic_read(&cache->stats.write_miss);
1033 dm_cache_metadata_set_stats(cache->cmd, &stats);
1056 * Migration covers moving data from the origin device to the cache, or
1060 static void inc_io_migrations(struct cache *cache)
1062 atomic_inc(&cache->nr_io_migrations);
1065 static void dec_io_migrations(struct cache *cache)
1067 atomic_dec(&cache->nr_io_migrations);
1075 static void calc_discard_block_range(struct cache *cache, struct bio *bio,
1081 *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
1083 if (se - sb < cache->discard_block_size)
1086 *e = to_dblock(block_div(se, cache->discard_block_size));
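calc_discard_block_range narrows a discard bio to whole discard blocks: the start sector rounds up, the end rounds down, and a bio smaller than one discard block produces an empty range. A plain-C sketch of that rounding (dm_sector_div_up and block_div replaced by ordinary arithmetic):

#include <stdint.h>

/* Sketch: clamp a discard [sb, se) in sectors to whole discard blocks.
 * Only fully covered discard blocks may be marked as discarded. */
static void discard_block_range(uint64_t sb, uint64_t se,
                                uint64_t discard_block_size,
                                uint64_t *b, uint64_t *e)
{
    *b = (sb + discard_block_size - 1) / discard_block_size; /* round start up */

    if (se - sb < discard_block_size)
        *e = *b;                              /* too small: empty range */
    else
        *e = se / discard_block_size;         /* round end down */
}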
1091 static void prevent_background_work(struct cache *cache)
1094 down_write(&cache->background_work_lock);
1098 static void allow_background_work(struct cache *cache)
1101 up_write(&cache->background_work_lock);
1105 static bool background_work_begin(struct cache *cache)
1110 r = down_read_trylock(&cache->background_work_lock);
1116 static void background_work_end(struct cache *cache)
1119 up_read(&cache->background_work_lock);
1125 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1128 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1131 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
1133 return writeback_mode(cache) &&
1134 (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
1141 dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
1157 queue_continuation(mg->cache->wq, &mg->k);
1163 struct cache *cache = mg->cache;
1165 o_region.bdev = cache->origin_dev->bdev;
1166 o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
1167 o_region.count = cache->sectors_per_block;
1169 c_region.bdev = cache->cache_dev->bdev;
1170 c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
1171 c_region.count = cache->sectors_per_block;
1174 dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
1176 dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
1179 static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
1183 if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
1184 free_prison_cell(cache, pb->cell);
1191 struct cache *cache = mg->cache;
1199 queue_continuation(cache->wq, &mg->k);
1215 remap_to_cache(mg->cache, bio, mg->op->cblock);
1217 remap_to_origin(mg->cache, bio);
1220 accounted_request(mg->cache, bio);
1237 struct cache *cache = mg->cache;
1242 update_stats(&cache->stats, op->op);
1246 clear_discard(cache, oblock_to_dblock(cache, op->oblock));
1247 policy_complete_background_work(cache->policy, op, success);
1251 force_set_dirty(cache, cblock);
1259 force_clear_dirty(cache, cblock);
1260 dec_io_migrations(cache);
1269 force_clear_dirty(cache, cblock);
1270 policy_complete_background_work(cache->policy, op, success);
1271 dec_io_migrations(cache);
1276 force_clear_dirty(cache, cblock);
1277 policy_complete_background_work(cache->policy, op, success);
1278 dec_io_migrations(cache);
1284 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1285 free_prison_cell(cache, mg->cell);
1289 defer_bios(cache, &bios);
1290 wake_migration_worker(cache);
1292 background_work_end(cache);
1305 struct cache *cache = mg->cache;
1310 r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);
1313 cache_device_name(cache));
1314 metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
1323 r = dm_cache_remove_mapping(cache->cmd, op->cblock);
1326 cache_device_name(cache));
1327 metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1338 * - vblock x in a cache block
1340 * - cache block gets reallocated and over written
1343 * When we recover, because there was no commit the cache will
1344 * rollback to having the data for vblock x in the cache block.
1345 * But the cache block has since been overwritten, so it'll end
1353 continue_after_commit(&cache->committer, &mg->k);
1354 schedule_commit(&cache->committer);
1391 r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
1407 struct cache *cache = mg->cache;
1411 if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
1412 is_discarded_oblock(cache, op->oblock)) {
1431 if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
1435 bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
1438 inc_io_migrations(mg->cache);
1460 struct cache *cache = mg->cache;
1463 prealloc = alloc_prison_cell(cache);
1471 r = dm_cell_lock_v2(cache->prison, &key,
1475 free_prison_cell(cache, prealloc);
1481 free_prison_cell(cache, prealloc);
1491 static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
1495 if (!background_work_begin(cache)) {
1496 policy_complete_background_work(cache->policy, op, false);
1500 mg = alloc_migration(cache);
1506 inc_io_migrations(cache);
1518 struct cache *cache = mg->cache;
1521 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1522 free_prison_cell(cache, mg->cell);
1528 defer_bios(cache, &bios);
1530 background_work_end(cache);
1539 static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
1541 int r = policy_invalidate_mapping(cache->policy, cblock);
1543 r = dm_cache_remove_mapping(cache->cmd, cblock);
1546 cache_device_name(cache));
1547 metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
1557 DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache));
1566 struct cache *cache = mg->cache;
1568 r = invalidate_cblock(cache, mg->invalidate_cblock);
1575 continue_after_commit(&cache->committer, &mg->k);
1576 remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
1578 schedule_commit(&cache->committer);
1585 struct cache *cache = mg->cache;
1588 prealloc = alloc_prison_cell(cache);
1591 r = dm_cell_lock_v2(cache->prison, &key,
1594 free_prison_cell(cache, prealloc);
1600 free_prison_cell(cache, prealloc);
1611 queue_work(cache->wq, &mg->k.ws);
1617 static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
1622 if (!background_work_begin(cache))
1625 mg = alloc_migration(cache);
1643 static enum busy spare_migration_bandwidth(struct cache *cache)
1645 bool idle = iot_idle_for(&cache->tracker, HZ);
1646 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
1647 cache->sectors_per_block;
1649 if (idle && current_volume <= cache->migration_threshold)
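spare_migration_bandwidth offers the policy background work only if the device has been idle for roughly a second and the copy traffic already in flight, plus one more block, stays under migration_threshold. A sketch of that check, with the idle test reduced to a boolean parameter since iot_idle_for's internals are not part of this listing:

#include <stdbool.h>
#include <stdint.h>

enum busy_sketch { IDLE_SKETCH, BUSY_SKETCH };

/* Sketch: should the policy be offered more background work? */
static enum busy_sketch spare_bandwidth(bool device_idle,
                                        uint64_t nr_io_migrations,
                                        uint64_t sectors_per_block,
                                        uint64_t migration_threshold)
{
    /* volume of copy traffic if one more migration were started */
    uint64_t current_volume = (nr_io_migrations + 1) * sectors_per_block;

    if (device_idle && current_volume <= migration_threshold)
        return IDLE_SKETCH;
    return BUSY_SKETCH;
}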
1655 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1658 &cache->stats.read_hit : &cache->stats.write_hit);
1661 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1664 &cache->stats.read_miss : &cache->stats.write_miss);
1669 static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
1678 rb = bio_detain_shared(cache, block, bio);
1692 if (optimisable_bio(cache, bio, block)) {
1695 r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op);
1698 cache_device_name(cache), r);
1704 bio_drop_shared_lock(cache, bio);
1706 mg_start(cache, op, bio);
1710 r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued);
1713 cache_device_name(cache), r);
1719 wake_migration_worker(cache);
1728 inc_miss_counter(cache, bio);
1730 accounted_begin(cache, bio);
1731 remap_to_origin_clear_discard(cache, bio, block);
1744 inc_hit_counter(cache, bio);
1748 * cache blocks that are written to.
1750 if (passthrough_mode(cache)) {
1752 bio_drop_shared_lock(cache, bio);
1753 atomic_inc(&cache->stats.demotion);
1754 invalidate_start(cache, cblock, block, bio);
1756 remap_to_origin_clear_discard(cache, bio, block);
1758 if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
1759 !is_dirty(cache, cblock)) {
1760 remap_to_origin_and_cache(cache, bio, block, cblock);
1761 accounted_begin(cache, bio);
1763 remap_to_cache_dirty(cache, bio, block, cblock);
1775 accounted_complete(cache, bio);
1776 issue_after_commit(&cache->committer, bio);
1784 static bool process_bio(struct cache *cache, struct bio *bio)
1788 if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
1797 static int commit(struct cache *cache, bool clean_shutdown)
1801 if (get_cache_mode(cache) >= CM_READ_ONLY)
1804 atomic_inc(&cache->stats.commit_count);
1805 r = dm_cache_commit(cache->cmd, clean_shutdown);
1807 metadata_operation_failed(cache, "dm_cache_commit", r);
1817 struct cache *cache = context;
1819 if (dm_cache_changed_this_transaction(cache->cmd))
1820 return errno_to_blk_status(commit(cache, false));
1827 static bool process_flush_bio(struct cache *cache, struct bio *bio)
1832 remap_to_origin(cache, bio);
1834 remap_to_cache(cache, bio, 0);
1836 issue_after_commit(&cache->committer, bio);
1840 static bool process_discard_bio(struct cache *cache, struct bio *bio)
1847 calc_discard_block_range(cache, bio, &b, &e);
1849 set_discard(cache, b);
1853 if (cache->features.discard_passdown) {
1854 remap_to_origin(cache, bio);
1864 struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
1872 spin_lock_irq(&cache->lock);
1873 bio_list_merge(&bios, &cache->deferred_bios);
1874 bio_list_init(&cache->deferred_bios);
1875 spin_unlock_irq(&cache->lock);
1879 commit_needed = process_flush_bio(cache, bio) || commit_needed;
1882 commit_needed = process_discard_bio(cache, bio) || commit_needed;
1885 commit_needed = process_bio(cache, bio) || commit_needed;
1890 schedule_commit(&cache->committer);
1897 static void requeue_deferred_bios(struct cache *cache)
1903 bio_list_merge(&bios, &cache->deferred_bios);
1904 bio_list_init(&cache->deferred_bios);
1919 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1921 policy_tick(cache->policy, true);
1922 wake_migration_worker(cache);
1923 schedule_commit(&cache->committer);
1924 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1931 struct cache *cache = container_of(ws, struct cache, migration_worker);
1935 b = spare_migration_bandwidth(cache);
1937 r = policy_get_background_work(cache->policy, b == IDLE, &op);
1943 cache_device_name(cache));
1947 r = mg_start(cache, op, NULL);
1963 static void destroy(struct cache *cache)
1967 mempool_exit(&cache->migration_pool);
1969 if (cache->prison)
1970 dm_bio_prison_destroy_v2(cache->prison);
1972 cancel_delayed_work_sync(&cache->waker);
1973 if (cache->wq)
1974 destroy_workqueue(cache->wq);
1976 if (cache->dirty_bitset)
1977 free_bitset(cache->dirty_bitset);
1979 if (cache->discard_bitset)
1980 free_bitset(cache->discard_bitset);
1982 if (cache->copier)
1983 dm_kcopyd_client_destroy(cache->copier);
1985 if (cache->cmd)
1986 dm_cache_metadata_close(cache->cmd);
1988 if (cache->metadata_dev)
1989 dm_put_device(cache->ti, cache->metadata_dev);
1991 if (cache->origin_dev)
1992 dm_put_device(cache->ti, cache->origin_dev);
1994 if (cache->cache_dev)
1995 dm_put_device(cache->ti, cache->cache_dev);
1997 if (cache->policy)
1998 dm_cache_policy_destroy(cache->policy);
2000 for (i = 0; i < cache->nr_ctr_args ; i++)
2001 kfree(cache->ctr_args[i]);
2002 kfree(cache->ctr_args);
2004 bioset_exit(&cache->bs);
2006 kfree(cache);
2011 struct cache *cache = ti->private;
2013 destroy(cache);
2024 * Construct a cache device mapping.
2026 * cache <metadata dev> <cache dev> <origin dev> <block size>
2031 * cache dev : fast device holding cached data blocks
2033 * block size : cache unit size in sectors
2043 * See cache-policies.txt for details.
2046 * writethrough : write through caching that prohibits cache block
2049 * back cache block contents later for performance reasons,
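Assembled, a table line follows the format above; everything concrete in the example below (device names, sizes, the writeback feature, the default policy) is hypothetical and only shows the argument shape: start and length in sectors, then metadata dev, cache dev, origin dev, block size in sectors, the feature-argument count and args, and the policy with its argument count.

dmsetup create cached --table \
  '0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-blocks /dev/slow 512 1 writeback default 0'

Here 41943040 sectors is a 20 GiB origin, 512 sectors is a 256 KiB cache block, "1 writeback" selects writeback mode, and "default 0" picks the default policy with no policy arguments.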
2132 *error = "Error opening cache device";
2181 *error = "Data block size is larger than the cache device";
2202 {0, 3, "Invalid number of cache feature arguments"},
2241 *error = "Unrecognised cache feature requested";
2247 *error = "Duplicate cache io_mode features requested";
2320 static int process_config_option(struct cache *cache, const char *key, const char *value)
2328 cache->migration_threshold = tmp;
2335 static int set_config_value(struct cache *cache, const char *key, const char *value)
2337 int r = process_config_option(cache, key, value);
2340 r = policy_set_config_value(cache->policy, key, value);
2348 static int set_config_values(struct cache *cache, int argc, const char **argv)
2358 r = set_config_value(cache, argv[0], argv[1]);
2369 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2373 cache->cache_size,
2374 cache->origin_sectors,
2375 cache->sectors_per_block);
2377 *error = "Error creating cache's policy";
2380 cache->policy = p;
2381 BUG_ON(!cache->policy);
2387 * We want the discard block size to be at least the size of the cache
2412 static void set_cache_size(struct cache *cache, dm_cblock_t size)
2416 if (nr_blocks > (1 << 20) && cache->cache_size != size)
2417 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2419 "Please consider increasing the cache block size to reduce the overall cache block count.",
2422 cache->cache_size = size;
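For scale (my arithmetic, not the source's): the warning above fires once the cache holds more than 1 << 20 (about a million) blocks. A 1 TiB cache device split into 32 KiB (64-sector) blocks yields 2^40 / 2^15 = 2^25, roughly 33.5 million cache blocks, far past that threshold, whereas 2 MiB blocks bring the same device down to 2^19 = 524,288 blocks.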
2427 static int cache_create(struct cache_args *ca, struct cache **result)
2431 struct cache *cache;
2437 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2438 if (!cache)
2441 cache->ti = ca->ti;
2442 ti->private = cache;
2451 cache->features = ca->features;
2452 if (writethrough_mode(cache)) {
2454 r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
2459 cache->metadata_dev = ca->metadata_dev;
2460 cache->origin_dev = ca->origin_dev;
2461 cache->cache_dev = ca->cache_dev;
2465 origin_blocks = cache->origin_sectors = ca->origin_sectors;
2467 cache->origin_blocks = to_oblock(origin_blocks);
2469 cache->sectors_per_block = ca->block_size;
2470 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2478 cache->sectors_per_block_shift = -1;
2480 set_cache_size(cache, to_cblock(cache_size));
2482 cache->sectors_per_block_shift = __ffs(ca->block_size);
2483 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
2486 r = create_cache_policy(cache, ca, error);
2490 cache->policy_nr_args = ca->policy_argc;
2491 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2493 r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2495 *error = "Error setting cache policy's config values";
2499 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2501 dm_cache_policy_get_hint_size(cache->policy),
2508 cache->cmd = cmd;
2509 set_cache_mode(cache, CM_WRITE);
2510 if (get_cache_mode(cache) != CM_WRITE) {
2516 if (passthrough_mode(cache)) {
2519 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2531 policy_allow_migrations(cache->policy, false);
2534 spin_lock_init(&cache->lock);
2535 bio_list_init(&cache->deferred_bios);
2536 atomic_set(&cache->nr_allocated_migrations, 0);
2537 atomic_set(&cache->nr_io_migrations, 0);
2538 init_waitqueue_head(&cache->migration_wait);
2541 atomic_set(&cache->nr_dirty, 0);
2542 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2543 if (!cache->dirty_bitset) {
2547 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2549 cache->discard_block_size =
2550 calculate_discard_block_size(cache->sectors_per_block,
2551 cache->origin_sectors);
2552 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2553 cache->discard_block_size));
2554 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2555 if (!cache->discard_bitset) {
2559 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2561 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2562 if (IS_ERR(cache->copier)) {
2564 r = PTR_ERR(cache->copier);
2568 cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
2569 if (!cache->wq) {
2573 INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
2574 INIT_WORK(&cache->migration_worker, check_migrations);
2575 INIT_DELAYED_WORK(&cache->waker, do_waker);
2577 cache->prison = dm_bio_prison_create_v2(cache->wq);
2578 if (!cache->prison) {
2583 r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
2586 *error = "Error creating cache's migration mempool";
2590 cache->need_tick_bio = true;
2591 cache->sized = false;
2592 cache->invalidate = false;
2593 cache->commit_requested = false;
2594 cache->loaded_mappings = false;
2595 cache->loaded_discards = false;
2597 load_stats(cache);
2599 atomic_set(&cache->stats.demotion, 0);
2600 atomic_set(&cache->stats.promotion, 0);
2601 atomic_set(&cache->stats.copies_avoided, 0);
2602 atomic_set(&cache->stats.cache_cell_clash, 0);
2603 atomic_set(&cache->stats.commit_count, 0);
2604 atomic_set(&cache->stats.discard_count, 0);
2606 spin_lock_init(&cache->invalidation_lock);
2607 INIT_LIST_HEAD(&cache->invalidation_requests);
2609 batcher_init(&cache->committer, commit_op, cache,
2610 issue_op, cache, cache->wq);
2611 iot_init(&cache->tracker);
2613 init_rwsem(&cache->background_work_lock);
2614 prevent_background_work(cache);
2616 *result = cache;
2619 destroy(cache);
2623 static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2641 cache->nr_ctr_args = argc;
2642 cache->ctr_args = copy;
2651 struct cache *cache = NULL;
2655 ti->error = "Error allocating memory for cache";
2664 r = cache_create(ca, &cache);
2668 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2670 destroy(cache);
2674 ti->private = cache;
2684 struct cache *cache = ti->private;
2688 dm_oblock_t block = get_bio_block(cache, bio);
2691 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2694 * the end of the origin device. We don't cache these.
2697 remap_to_origin(cache, bio);
2698 accounted_begin(cache, bio);
2703 defer_bio(cache, bio);
2707 r = map_bio(cache, bio, block, &commit_needed);
2709 schedule_commit(&cache->committer);
2716 struct cache *cache = ti->private;
2721 policy_tick(cache->policy, false);
2723 spin_lock_irqsave(&cache->lock, flags);
2724 cache->need_tick_bio = true;
2725 spin_unlock_irqrestore(&cache->lock, flags);
2728 bio_drop_shared_lock(cache, bio);
2729 accounted_complete(cache, bio);
2734 static int write_dirty_bitset(struct cache *cache)
2738 if (get_cache_mode(cache) >= CM_READ_ONLY)
2741 r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
2743 metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
2748 static int write_discard_bitset(struct cache *cache)
2752 if (get_cache_mode(cache) >= CM_READ_ONLY)
2755 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2756 cache->discard_nr_blocks);
2758 DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
2759 metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
2763 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2764 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2765 is_discarded(cache, to_dblock(i)));
2767 metadata_operation_failed(cache, "dm_cache_set_discard", r);
2775 static int write_hints(struct cache *cache)
2779 if (get_cache_mode(cache) >= CM_READ_ONLY)
2782 r = dm_cache_write_hints(cache->cmd, cache->policy);
2784 metadata_operation_failed(cache, "dm_cache_write_hints", r);
2794 static bool sync_metadata(struct cache *cache)
2798 r1 = write_dirty_bitset(cache);
2800 DMERR("%s: could not write dirty bitset", cache_device_name(cache));
2802 r2 = write_discard_bitset(cache);
2804 DMERR("%s: could not write discard bitset", cache_device_name(cache));
2806 save_stats(cache);
2808 r3 = write_hints(cache);
2810 DMERR("%s: could not write hints", cache_device_name(cache));
2817 r4 = commit(cache, !r1 && !r2 && !r3);
2819 DMERR("%s: could not write cache metadata", cache_device_name(cache));
2826 struct cache *cache = ti->private;
2828 prevent_background_work(cache);
2829 BUG_ON(atomic_read(&cache->nr_io_migrations));
2831 cancel_delayed_work_sync(&cache->waker);
2832 drain_workqueue(cache->wq);
2833 WARN_ON(cache->tracker.in_flight);
2839 requeue_deferred_bios(cache);
2841 if (get_cache_mode(cache) == CM_WRITE)
2842 (void) sync_metadata(cache);
2849 struct cache *cache = context;
2852 set_bit(from_cblock(cblock), cache->dirty_bitset);
2853 atomic_inc(&cache->nr_dirty);
2855 clear_bit(from_cblock(cblock), cache->dirty_bitset);
2857 r = policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
2871 struct cache *cache;
2881 static void discard_load_info_init(struct cache *cache,
2884 li->cache = cache;
2904 b = dm_sector_div_up(b, li->cache->discard_block_size);
2905 sector_div(e, li->cache->discard_block_size);
2911 if (e > from_dblock(li->cache->discard_nr_blocks))
2912 e = from_dblock(li->cache->discard_nr_blocks);
2915 set_discard(li->cache, to_dblock(b));
2948 static dm_cblock_t get_cache_dev_size(struct cache *cache)
2950 sector_t size = get_dev_size(cache->cache_dev);
2951 (void) sector_div(size, cache->sectors_per_block);
2955 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2957 if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
2958 if (cache->sized) {
2959 DMERR("%s: unable to extend cache due to missing cache table reload",
2960 cache_device_name(cache));
2966 * We can't drop a dirty block when shrinking the cache.
2968 while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2970 if (is_dirty(cache, new_size)) {
2971 DMERR("%s: unable to shrink cache; cache block %llu is dirty",
2972 cache_device_name(cache),
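can_resize enforces two rules: the cache may only grow while it is not yet sized (after that a table reload is required), and it may not shrink past a block that is still dirty. A compact sketch over a plain dirty array instead of the kernel bitset, with can_resize_sketch being my own name:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: may the cache change from old_size to new_size cache blocks?
 * dirty[] holds one flag per existing cache block. */
static bool can_resize_sketch(uint64_t old_size, uint64_t new_size,
                              bool already_sized, const bool *dirty)
{
    uint64_t b;

    /* Growing is only allowed before the cache has been sized;
     * afterwards a table reload is required. */
    if (new_size > old_size)
        return !already_sized;

    /* Shrinking must not drop a block that still holds dirty data. */
    for (b = new_size; b < old_size; b++)
        if (dirty[b])
            return false;

    return true;
}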
2981 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2985 r = dm_cache_resize(cache->cmd, new_size);
2987 DMERR("%s: could not resize cache metadata", cache_device_name(cache));
2988 metadata_operation_failed(cache, "dm_cache_resize", r);
2992 set_cache_size(cache, new_size);
3000 struct cache *cache = ti->private;
3001 dm_cblock_t csize = get_cache_dev_size(cache);
3004 * Check to see if the cache has resized.
3006 if (!cache->sized) {
3007 r = resize_cache_dev(cache, csize);
3011 cache->sized = true;
3013 } else if (csize != cache->cache_size) {
3014 if (!can_resize(cache, csize))
3017 r = resize_cache_dev(cache, csize);
3022 if (!cache->loaded_mappings) {
3023 r = dm_cache_load_mappings(cache->cmd, cache->policy,
3024 load_mapping, cache);
3026 DMERR("%s: could not load cache mappings", cache_device_name(cache));
3027 metadata_operation_failed(cache, "dm_cache_load_mappings", r);
3031 cache->loaded_mappings = true;
3034 if (!cache->loaded_discards) {
3042 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
3044 discard_load_info_init(cache, &li);
3045 r = dm_cache_load_discards(cache->cmd, load_discard, &li);
3047 DMERR("%s: could not load origin discards", cache_device_name(cache));
3048 metadata_operation_failed(cache, "dm_cache_load_discards", r);
3053 cache->loaded_discards = true;
3061 struct cache *cache = ti->private;
3063 cache->need_tick_bio = true;
3064 allow_background_work(cache);
3065 do_waker(&cache->waker.work);
3068 static void emit_flags(struct cache *cache, char *result,
3072 struct cache_features *cf = &cache->features;
3080 if (writethrough_mode(cache))
3083 else if (passthrough_mode(cache))
3086 else if (writeback_mode(cache))
3092 cache_device_name(cache), (int) cf->io_mode);
3105 * <cache block size> <#used cache blocks>/<#total cache blocks>
3110 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
3121 struct cache *cache = ti->private;
3127 if (get_cache_mode(cache) == CM_FAIL) {
3134 (void) commit(cache, false);
3136 r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
3139 cache_device_name(cache), r);
3143 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
3146 cache_device_name(cache), r);
3150 residency = policy_residency(cache->policy);
3156 (unsigned long long)cache->sectors_per_block,
3158 (unsigned long long) from_cblock(cache->cache_size),
3159 (unsigned) atomic_read(&cache->stats.read_hit),
3160 (unsigned) atomic_read(&cache->stats.read_miss),
3161 (unsigned) atomic_read(&cache->stats.write_hit),
3162 (unsigned) atomic_read(&cache->stats.write_miss),
3163 (unsigned) atomic_read(&cache->stats.demotion),
3164 (unsigned) atomic_read(&cache->stats.promotion),
3165 (unsigned long) atomic_read(&cache->nr_dirty));
3167 emit_flags(cache, result, maxlen, &sz);
3169 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
3171 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
3173 r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
3176 cache_device_name(cache), r);
3179 if (get_cache_mode(cache) == CM_READ_ONLY)
3184 r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
3194 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3196 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3198 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3201 for (i = 0; i < cache->nr_ctr_args - 1; i++)
3202 DMEMIT(" %s", cache->ctr_args[i]);
3203 if (cache->nr_ctr_args)
3204 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
3223 * A cache block range can take two forms:
3228 static int parse_cblock_range(struct cache *cache, const char *str,
3261 DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
3265 static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
3269 uint64_t n = from_cblock(cache->cache_size);
3273 cache_device_name(cache), b, n);
3279 cache_device_name(cache), e, n);
3285 cache_device_name(cache), b, e);
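The two forms mentioned at the top of this block are a single cblock and a begin-end pair; the validation messages treat the pair as a half-open range checked against the cache size. A sketch of parse plus validate using sscanf in place of the kernel string helpers (parse_and_validate_range is my name, and unlike the real parser it does not reject trailing junk):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: parse "b-e" or "b" into [begin, end) and sanity-check it
 * against the number of cache blocks. */
static bool parse_and_validate_range(const char *str, uint64_t nr_cblocks,
                                     uint64_t *begin, uint64_t *end)
{
    unsigned long long b, e;

    if (sscanf(str, "%llu-%llu", &b, &e) == 2) {
        *begin = b;
        *end = e;
    } else if (sscanf(str, "%llu", &b) == 1) {
        *begin = b;
        *end = b + 1;          /* single block: half-open range of one */
    } else {
        return false;          /* unrecognised form */
    }

    return *begin < nr_cblocks && *end <= nr_cblocks && *begin < *end;
}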
3297 static int request_invalidation(struct cache *cache, struct cblock_range *range)
3308 r = invalidate_cblock(cache, range->begin);
3315 cache->commit_requested = true;
3319 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
3326 if (!passthrough_mode(cache)) {
3327 DMERR("%s: cache has to be in passthrough mode for invalidation",
3328 cache_device_name(cache));
3333 r = parse_cblock_range(cache, cblock_ranges[i], &range);
3337 r = validate_cblock_range(cache, &range);
3344 r = request_invalidation(cache, &range);
3358 * The key migration_threshold is supported by the cache target core.
3363 struct cache *cache = ti->private;
3368 if (get_cache_mode(cache) >= CM_READ_ONLY) {
3369 DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
3370 cache_device_name(cache));
3375 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3380 return set_config_value(cache, argv[0], argv[1]);
3387 struct cache *cache = ti->private;
3389 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3391 r = fn(ti, cache->origin_dev, 0, ti->len, data);
3407 static void disable_passdown_if_not_supported(struct cache *cache)
3409 struct block_device *origin_bdev = cache->origin_dev->bdev;
3414 if (!cache->features.discard_passdown)
3420 else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
3426 cache->features.discard_passdown = false;
3430 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3432 struct block_device *origin_bdev = cache->origin_dev->bdev;
3435 if (!cache->features.discard_passdown) {
3437 limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
3438 cache->origin_sectors);
3439 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
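With discard passdown disabled, the limits come from the cache's own discard geometry: the maximum discard is 1024 discard blocks capped at the origin size, and the granularity is one discard block converted from sectors to bytes (SECTOR_SHIFT is 9). A sketch of that derivation:

#include <stdint.h>

#define SECTOR_SHIFT_SKETCH 9   /* 512-byte sectors */

/* Sketch: discard limits when the cache handles discards itself. */
static void discard_limits_sketch(uint64_t discard_block_size_sectors,
                                  uint64_t origin_sectors,
                                  uint64_t *max_discard_sectors,
                                  uint64_t *granularity_bytes)
{
    uint64_t max = discard_block_size_sectors * 1024;

    /* never advertise a discard larger than the origin itself */
    *max_discard_sectors = max < origin_sectors ? max : origin_sectors;

    /* granularity is one discard block, expressed in bytes */
    *granularity_bytes = discard_block_size_sectors << SECTOR_SHIFT_SKETCH;
}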
3456 struct cache *cache = ti->private;
3461 * cache's blocksize (io_opt is a factor) do not override them.
3463 if (io_opt_sectors < cache->sectors_per_block ||
3464 do_div(io_opt_sectors, cache->sectors_per_block)) {
3465 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
3466 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
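cache_io_hints only overrides io_min and io_opt when the stacked optimal I/O size is smaller than, or not a multiple of, the cache block size; otherwise the existing hints stand, as the comment above says. A sketch of that condition, with override_io_hints invented here:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: should the cache impose its own io_min/io_opt (in bytes)? */
static bool override_io_hints(uint64_t io_opt_sectors, uint64_t sectors_per_block,
                              uint64_t *io_min_bytes, uint64_t *io_opt_bytes)
{
    if (io_opt_sectors < sectors_per_block ||
        io_opt_sectors % sectors_per_block) {
        *io_min_bytes = sectors_per_block << 9;  /* SECTOR_SHIFT */
        *io_opt_bytes = sectors_per_block << 9;
        return true;
    }
    return false;   /* existing hints already align with the block size */
}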
3469 disable_passdown_if_not_supported(cache);
3470 set_discard_limits(cache, limits);
3476 .name = "cache",
3502 DMERR("cache target registration failed: %d", r);
3519 MODULE_DESCRIPTION(DM_NAME " cache target");