Lines matching refs: tc — uses of the per-thin-device context pointer (struct thin_c *tc) in drivers/md/dm-thin.c, listed by source line number. A simplified sketch of the deferral pattern these lines implement follows the listing.
223 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
224 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
381 struct thin_c *tc;
387 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
391 op->tc = tc;
399 struct thin_c *tc = op->tc;
400 sector_t s = block_to_sectors(tc->pool, data_b);
401 sector_t len = block_to_sectors(tc->pool, data_e - data_b);
403 return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
586 struct thin_c *tc;
610 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
617 spin_lock_irq(&tc->lock);
619 spin_unlock_irq(&tc->lock);
624 static void requeue_deferred_cells(struct thin_c *tc)
626 struct pool *pool = tc->pool;
632 spin_lock_irq(&tc->lock);
633 list_splice_init(&tc->deferred_cells, &cells);
634 spin_unlock_irq(&tc->lock);
640 static void requeue_io(struct thin_c *tc)
646 spin_lock_irq(&tc->lock);
647 __merge_bio_list(&bios, &tc->deferred_bio_list);
648 __merge_bio_list(&bios, &tc->retry_on_resume_list);
649 spin_unlock_irq(&tc->lock);
652 requeue_deferred_cells(tc);
657 struct thin_c *tc;
660 list_for_each_entry_rcu(tc, &pool->active_thins, list)
661 error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
677 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
679 struct pool *pool = tc->pool;
693 static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
696 struct pool *pool = tc->pool;
718 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
720 struct pool *pool = tc->pool;
723 bio_set_dev(bio, tc->pool_dev->bdev);
733 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
735 bio_set_dev(bio, tc->origin_dev->bdev);
738 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
741 dm_thin_changed_this_transaction(tc->td);
755 static void issue(struct thin_c *tc, struct bio *bio)
757 struct pool *pool = tc->pool;
759 if (!bio_triggers_commit(tc, bio)) {
769 if (dm_thin_aborted_changes(tc->td)) {
783 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
785 remap_to_origin(tc, bio);
786 issue(tc, bio);
789 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
792 remap(tc, bio, block);
793 issue(tc, bio);
815 struct thin_c *tc;
832 struct pool *pool = m->tc->pool;
843 struct pool *pool = m->tc->pool;
883 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
885 struct pool *pool = tc->pool;
889 spin_lock_irqsave(&tc->lock, flags);
890 cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
891 has_work = !bio_list_empty(&tc->deferred_bio_list);
892 spin_unlock_irqrestore(&tc->lock, flags);
898 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
901 struct thin_c *tc;
916 inc_all_io_entry(info->tc->pool, bio);
928 static void inc_remap_and_issue_cell(struct thin_c *tc,
935 info.tc = tc;
944 cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
948 thin_defer_bio(tc, bio);
951 remap_and_issue(info.tc, bio, block);
956 cell_error(m->tc->pool, m->cell);
958 mempool_free(m, &m->tc->pool->mapping_pool);
961 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
963 struct pool *pool = tc->pool;
969 if (!bio_triggers_commit(tc, bio)) {
979 if (dm_thin_aborted_changes(tc->td)) {
995 struct thin_c *tc = m->tc;
996 struct pool *pool = tc->pool;
1010 r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
1024 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1025 complete_overwrite_bio(tc, bio);
1027 inc_all_io_entry(tc->pool, m->cell->holder);
1028 remap_and_issue(tc, m->cell->holder, m->data_block);
1029 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1041 struct thin_c *tc = m->tc;
1043 cell_defer_no_holder(tc, m->cell);
1044 mempool_free(m, &tc->pool->mapping_pool);
1062 struct thin_c *tc = m->tc;
1064 r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
1066 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
1071 cell_defer_no_holder(tc, m->cell);
1072 mempool_free(m, &tc->pool->mapping_pool);
1086 struct thin_c *tc = m->tc;
1087 struct pool *pool = tc->pool;
1091 begin_discard(&op, tc, discard_parent);
1129 struct pool *pool = m->tc->pool;
1150 struct thin_c *tc = m->tc;
1151 struct pool *pool = tc->pool;
1160 r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
1164 cell_defer_no_holder(tc, m->cell);
1177 cell_defer_no_holder(tc, m->cell);
1185 dm_device_name(tc->pool->pool_md));
1197 begin_discard(&op, tc, discard_parent);
1207 struct thin_c *tc = m->tc;
1208 struct pool *pool = tc->pool;
1222 cell_defer_no_holder(tc, m->cell);
1288 static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
1293 to.bdev = tc->pool_dev->bdev;
1297 dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
1300 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
1304 struct pool *pool = tc->pool;
1311 remap_and_issue(tc, bio, data_begin);
1317 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1323 struct pool *pool = tc->pool;
1326 m->tc = tc;
1349 remap_and_issue_overwrite(tc, bio, data_dest, m);
1357 to.bdev = tc->pool_dev->bdev;
1369 ll_zero(tc, m,
1378 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1382 schedule_copy(tc, virt_block, tc->pool_dev,
1384 tc->pool->sectors_per_block);
1387 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1391 struct pool *pool = tc->pool;
1395 m->tc = tc;
1408 remap_and_issue_overwrite(tc, bio, data_block, m);
1410 ll_zero(tc, m, data_block * pool->sectors_per_block,
1416 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1420 struct pool *pool = tc->pool;
1424 if (virt_block_end <= tc->origin_size)
1425 schedule_copy(tc, virt_block, tc->origin_dev,
1429 else if (virt_block_begin < tc->origin_size)
1430 schedule_copy(tc, virt_block, tc->origin_dev,
1432 tc->origin_size - virt_block_begin);
1435 schedule_zero(tc, virt_block, data_dest, cell, bio);
1522 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1526 struct pool *pool = tc->pool;
1592 struct thin_c *tc = h->tc;
1594 spin_lock_irq(&tc->lock);
1595 bio_list_add(&tc->retry_on_resume_list, bio);
1596 spin_unlock_irq(&tc->lock);
1653 static void process_discard_cell_no_passdown(struct thin_c *tc,
1656 struct pool *pool = tc->pool;
1663 m->tc = tc;
1673 static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
1676 struct pool *pool = tc->pool;
1691 r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
1700 build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
1701 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
1712 m->tc = tc;
1736 static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
1747 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
1757 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1763 get_bio_block_range(tc, bio, &begin, &end);
1772 build_key(tc->td, VIRTUAL, begin, end, &virt_key);
1773 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
1783 tc->pool->process_discard_cell(tc, virt_cell);
1786 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1793 struct pool *pool = tc->pool;
1795 r = alloc_data_block(tc, &data_block);
1798 schedule_internal_copy(tc, block, lookup_result->block,
1827 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1828 inc_all_io_entry(info->tc->pool, bio);
1834 static void remap_and_issue_shared_cell(struct thin_c *tc,
1841 info.tc = tc;
1845 cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1849 thin_defer_bio(tc, bio);
1852 remap_and_issue(tc, bio, block);
1855 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1861 struct pool *pool = tc->pool;
1868 build_data_key(tc->td, lookup_result->block, &key);
1870 cell_defer_no_holder(tc, virt_cell);
1875 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1876 cell_defer_no_holder(tc, virt_cell);
1882 remap_and_issue(tc, bio, lookup_result->block);
1884 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1885 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1889 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1894 struct pool *pool = tc->pool;
1901 cell_defer_no_holder(tc, cell);
1903 remap_and_issue(tc, bio, 0);
1912 cell_defer_no_holder(tc, cell);
1917 r = alloc_data_block(tc, &data_block);
1920 if (tc->origin_dev)
1921 schedule_external_copy(tc, block, data_block, cell, bio);
1923 schedule_zero(tc, block, data_block, cell, bio);
1938 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1941 struct pool *pool = tc->pool;
1943 dm_block_t block = get_bio_block(tc, bio);
1946 if (tc->requeue_mode) {
1951 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1955 process_shared_bio(tc, bio, block, &lookup_result, cell);
1958 remap_and_issue(tc, bio, lookup_result.block);
1959 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1964 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1966 cell_defer_no_holder(tc, cell);
1968 if (bio_end_sector(bio) <= tc->origin_size)
1969 remap_to_origin_and_issue(tc, bio);
1971 else if (bio->bi_iter.bi_sector < tc->origin_size) {
1973 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1974 remap_to_origin_and_issue(tc, bio);
1981 provision_block(tc, bio, block, cell);
1987 cell_defer_no_holder(tc, cell);
1993 static void process_bio(struct thin_c *tc, struct bio *bio)
1995 struct pool *pool = tc->pool;
1996 dm_block_t block = get_bio_block(tc, bio);
2004 build_virtual_key(tc->td, block, &key);
2008 process_cell(tc, cell);
2011 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
2016 dm_block_t block = get_bio_block(tc, bio);
2019 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
2023 handle_unserviceable_bio(tc->pool, bio);
2025 cell_defer_no_holder(tc, cell);
2027 inc_all_io_entry(tc->pool, bio);
2028 remap_and_issue(tc, bio, lookup_result.block);
2030 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
2036 cell_defer_no_holder(tc, cell);
2038 handle_unserviceable_bio(tc->pool, bio);
2042 if (tc->origin_dev) {
2043 inc_all_io_entry(tc->pool, bio);
2044 remap_to_origin_and_issue(tc, bio);
2056 cell_defer_no_holder(tc, cell);
2062 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
2064 __process_bio_read_only(tc, bio, NULL);
2067 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2069 __process_bio_read_only(tc, cell->holder, cell);
2072 static void process_bio_success(struct thin_c *tc, struct bio *bio)
2077 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
2082 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2084 cell_success(tc->pool, cell);
2087 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2089 cell_error(tc->pool, cell);
2105 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
2111 rbp = &tc->sort_bio_list.rb_node;
2125 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
2128 static void __extract_sorted_bios(struct thin_c *tc)
2134 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
2138 bio_list_add(&tc->deferred_bio_list, bio);
2139 rb_erase(&pbd->rb_node, &tc->sort_bio_list);
2142 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
2145 static void __sort_thin_deferred_bios(struct thin_c *tc)
2151 bio_list_merge(&bios, &tc->deferred_bio_list);
2152 bio_list_init(&tc->deferred_bio_list);
2156 __thin_bio_rb_add(tc, bio);
2163 __extract_sorted_bios(tc);
2166 static void process_thin_deferred_bios(struct thin_c *tc)
2168 struct pool *pool = tc->pool;
2174 if (tc->requeue_mode) {
2175 error_thin_bio_list(tc, &tc->deferred_bio_list,
2182 spin_lock_irq(&tc->lock);
2184 if (bio_list_empty(&tc->deferred_bio_list)) {
2185 spin_unlock_irq(&tc->lock);
2189 __sort_thin_deferred_bios(tc);
2191 bio_list_merge(&bios, &tc->deferred_bio_list);
2192 bio_list_init(&tc->deferred_bio_list);
2194 spin_unlock_irq(&tc->lock);
2204 spin_lock_irq(&tc->lock);
2205 bio_list_add(&tc->deferred_bio_list, bio);
2206 bio_list_merge(&tc->deferred_bio_list, &bios);
2207 spin_unlock_irq(&tc->lock);
2212 pool->process_discard(tc, bio);
2214 pool->process_bio(tc, bio);
2260 static void process_thin_deferred_cells(struct thin_c *tc)
2262 struct pool *pool = tc->pool;
2269 spin_lock_irq(&tc->lock);
2270 list_splice_init(&tc->deferred_cells, &cells);
2271 spin_unlock_irq(&tc->lock);
2277 count = sort_cells(tc->pool, &cells);
2292 spin_lock_irq(&tc->lock);
2293 list_splice(&cells, &tc->deferred_cells);
2294 spin_unlock_irq(&tc->lock);
2299 pool->process_discard_cell(tc, cell);
2301 pool->process_cell(tc, cell);
2307 static void thin_get(struct thin_c *tc);
2308 static void thin_put(struct thin_c *tc);
2317 struct thin_c *tc = NULL;
2321 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
2322 thin_get(tc);
2326 return tc;
2329 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
2331 struct thin_c *old_tc = tc;
2334 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
2335 thin_get(tc);
2338 return tc;
2350 struct thin_c *tc;
2352 tc = get_first_thin(pool);
2353 while (tc) {
2354 process_thin_deferred_cells(tc);
2355 process_thin_deferred_bios(tc);
2356 tc = get_next_thin(pool, tc);
2477 struct thin_c *tc;
2488 w->tc->requeue_mode = true;
2489 requeue_io(w->tc);
2496 w->tc->requeue_mode = false;
2500 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2504 w.tc = tc;
2505 pool_work_wait(&w.pw, tc->pool, fn);
2661 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2663 struct pool *pool = tc->pool;
2665 spin_lock_irq(&tc->lock);
2666 bio_list_add(&tc->deferred_bio_list, bio);
2667 spin_unlock_irq(&tc->lock);
2672 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2674 struct pool *pool = tc->pool;
2677 thin_defer_bio(tc, bio);
2681 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2683 struct pool *pool = tc->pool;
2686 spin_lock_irq(&tc->lock);
2687 list_add_tail(&cell->user_list, &tc->deferred_cells);
2688 spin_unlock_irq(&tc->lock);
2694 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2698 h->tc = tc;
2711 struct thin_c *tc = ti->private;
2712 dm_block_t block = get_bio_block(tc, bio);
2713 struct dm_thin_device *td = tc->td;
2718 thin_hook_bio(tc, bio);
2720 if (tc->requeue_mode) {
2726 if (get_pool_mode(tc->pool) == PM_FAIL) {
2732 thin_defer_bio_with_throttle(tc, bio);
2740 build_virtual_key(tc->td, block, &key);
2741 if (bio_detain(tc->pool, &key, bio, &virt_cell))
2766 thin_defer_cell(tc, virt_cell);
2770 build_data_key(tc->td, result.block, &key);
2771 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2772 cell_defer_no_holder(tc, virt_cell);
2776 inc_all_io_entry(tc->pool, bio);
2777 cell_defer_no_holder(tc, data_cell);
2778 cell_defer_no_holder(tc, virt_cell);
2780 remap(tc, bio, result.block);
2785 thin_defer_cell(tc, virt_cell);
2795 cell_defer_no_holder(tc, virt_cell);
2802 struct thin_c *tc;
2805 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2806 spin_lock_irq(&tc->lock);
2807 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2808 bio_list_init(&tc->retry_on_resume_list);
2809 spin_unlock_irq(&tc->lock);
3600 struct thin_c *tc;
3603 tc = get_first_thin(pool);
3604 while (tc) {
3605 dm_internal_suspend_noflush(tc->thin_md);
3606 tc = get_next_thin(pool, tc);
3612 struct thin_c *tc;
3615 tc = get_first_thin(pool);
3616 while (tc) {
3617 dm_internal_resume(tc->thin_md);
3618 tc = get_next_thin(pool, tc);
4128 static void thin_get(struct thin_c *tc)
4130 refcount_inc(&tc->refcount);
4133 static void thin_put(struct thin_c *tc)
4135 if (refcount_dec_and_test(&tc->refcount))
4136 complete(&tc->can_destroy);
4141 struct thin_c *tc = ti->private;
4143 spin_lock_irq(&tc->pool->lock);
4144 list_del_rcu(&tc->list);
4145 spin_unlock_irq(&tc->pool->lock);
4148 thin_put(tc);
4149 wait_for_completion(&tc->can_destroy);
4153 __pool_dec(tc->pool);
4154 dm_pool_close_thin_device(tc->td);
4155 dm_put_device(ti, tc->pool_dev);
4156 if (tc->origin_dev)
4157 dm_put_device(ti, tc->origin_dev);
4158 kfree(tc);
4178 struct thin_c *tc;
4190 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
4191 if (!tc) {
4196 tc->thin_md = dm_table_get_md(ti->table);
4197 spin_lock_init(&tc->lock);
4198 INIT_LIST_HEAD(&tc->deferred_cells);
4199 bio_list_init(&tc->deferred_bio_list);
4200 bio_list_init(&tc->retry_on_resume_list);
4201 tc->sort_bio_list = RB_ROOT;
4215 tc->origin_dev = origin_dev;
4223 tc->pool_dev = pool_dev;
4225 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
4231 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
4238 tc->pool = __pool_table_lookup(pool_md);
4239 if (!tc->pool) {
4244 __pool_inc(tc->pool);
4246 if (get_pool_mode(tc->pool) == PM_FAIL) {
4252 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
4258 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
4268 if (tc->pool->pf.discard_enabled) {
4275 spin_lock_irq(&tc->pool->lock);
4276 if (tc->pool->suspended) {
4277 spin_unlock_irq(&tc->pool->lock);
4283 refcount_set(&tc->refcount, 1);
4284 init_completion(&tc->can_destroy);
4285 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
4286 spin_unlock_irq(&tc->pool->lock);
4290 * added tc isn't yet visible). So this reduces latency since we
4300 dm_pool_close_thin_device(tc->td);
4302 __pool_dec(tc->pool);
4306 dm_put_device(ti, tc->pool_dev);
4308 if (tc->origin_dev)
4309 dm_put_device(ti, tc->origin_dev);
4311 kfree(tc);
4332 struct pool *pool = h->tc->pool;
4359 cell_defer_no_holder(h->tc, h->cell);
4366 struct thin_c *tc = ti->private;
4369 noflush_work(tc, do_noflush_start);
4374 struct thin_c *tc = ti->private;
4380 noflush_work(tc, do_noflush_stop);
4385 struct thin_c *tc = ti->private;
4387 if (tc->origin_dev)
4388 tc->origin_size = get_dev_size(tc->origin_dev->bdev);
4403 struct thin_c *tc = ti->private;
4405 if (get_pool_mode(tc->pool) == PM_FAIL) {
4410 if (!tc->td)
4415 r = dm_thin_get_mapped_count(tc->td, &mapped);
4421 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
4427 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
4430 tc->pool->sectors_per_block) - 1);
4437 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
4438 (unsigned long) tc->dev_id);
4439 if (tc->origin_dev)
4440 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
4455 struct thin_c *tc = ti->private;
4456 struct pool *pool = tc->pool;
4468 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
4475 struct thin_c *tc = ti->private;
4476 struct pool *pool = tc->pool;
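
The listing above repeatedly shows one deferral pattern: bios are queued on a per-thin list under tc->lock (thin_defer_bio, lines 2661-2667) and later spliced off and processed by the pool worker with the lock dropped (process_thin_deferred_bios, lines 2182-2214). Below is a minimal user-space sketch of that pattern only; the struct names, the pthread mutex standing in for the spinlock, and the function names are simplified assumptions for illustration, not the kernel code.

/*
 * Minimal user-space model of the deferral pattern in the listing.
 * All names here are hypothetical stand-ins: fake_thin models
 * struct thin_c, a pthread mutex models tc->lock, and a singly
 * linked list models tc->deferred_bio_list.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_bio {                  /* stand-in for struct bio */
	unsigned long long sector;
	struct fake_bio *next;
};

struct fake_thin {                 /* stand-in for struct thin_c */
	pthread_mutex_t lock;          /* models tc->lock */
	struct fake_bio *deferred;     /* models tc->deferred_bio_list */
};

/*
 * Fast path: queue the bio under the lock and return, as
 * thin_defer_bio() does.  Note the kernel's bio_list_add() appends
 * to the tail (preserving order); this sketch pushes to the head.
 */
static void defer_bio(struct fake_thin *tc, struct fake_bio *bio)
{
	pthread_mutex_lock(&tc->lock);
	bio->next = tc->deferred;
	tc->deferred = bio;
	pthread_mutex_unlock(&tc->lock);
	/* the real code would wake the pool's worker thread here */
}

/*
 * Worker path: detach the whole list under the lock, then process
 * each entry with the lock released, mirroring the splice-and-init
 * step in process_thin_deferred_bios().
 */
static void process_deferred(struct fake_thin *tc)
{
	struct fake_bio *list;

	pthread_mutex_lock(&tc->lock);
	list = tc->deferred;
	tc->deferred = NULL;
	pthread_mutex_unlock(&tc->lock);

	while (list) {
		struct fake_bio *bio = list;

		list = bio->next;
		printf("processing bio at sector %llu\n", bio->sector);
		free(bio);
	}
}

int main(void)
{
	struct fake_thin tc = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_bio *bio = malloc(sizeof(*bio));

	if (!bio)
		return 1;
	bio->sector = 2048;
	defer_bio(&tc, bio);
	process_deferred(&tc);
	return 0;
}

The point of the detach-then-process shape is that the lock is held only long enough to take ownership of the queue, so submitters calling defer_bio() are never blocked behind the (potentially slow) per-bio processing.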