Lines matching references to tc (struct thin_c)

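Every reference below goes through the per-thin-device context, struct thin_c. As a reading aid, here is a reconstruction of that structure inferred purely from the field accesses in the listing that follows; field order, exact type spellings and the comments are assumptions for illustration, not copied from the source file.

struct thin_c {
        struct list_head list;                  /* linked on pool->active_thins (RCU) */
        struct dm_dev *pool_dev;                /* the underlying pool device */
        struct dm_dev *origin_dev;              /* optional external origin */
        sector_t origin_size;                   /* size of the external origin, in sectors */
        dm_thin_id dev_id;                      /* thin device id within the pool */

        struct pool *pool;                      /* owning pool context */
        struct dm_thin_device *td;              /* metadata handle from dm_pool_open_thin_device() */
        struct mapped_device *thin_md;          /* this thin target's own mapped_device */

        bool requeue_mode;                      /* requeue deferred I/O instead of processing it */

        spinlock_t lock;                        /* protects the deferred lists below */
        struct list_head deferred_cells;
        struct bio_list deferred_bio_list;
        struct bio_list retry_on_resume_list;
        struct rb_root sort_bio_list;           /* deferred bios, sorted before processing */

        /* keeps the thin alive while the pool worker iterates active_thins */
        refcount_t refcount;
        struct completion can_destroy;
};

Everything in the listing is either a helper that takes a struct thin_c * argument or an access to one of these fields.
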
226 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
227 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
382 struct thin_c *tc;
388 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
392 op->tc = tc;
400 struct thin_c *tc = op->tc;
401 sector_t s = block_to_sectors(tc->pool, data_b);
402 sector_t len = block_to_sectors(tc->pool, data_e - data_b);
404 return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio);
586 struct thin_c *tc;
610 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
617 spin_lock_irq(&tc->lock);
619 spin_unlock_irq(&tc->lock);
624 static void requeue_deferred_cells(struct thin_c *tc)
626 struct pool *pool = tc->pool;
632 spin_lock_irq(&tc->lock);
633 list_splice_init(&tc->deferred_cells, &cells);
634 spin_unlock_irq(&tc->lock);
640 static void requeue_io(struct thin_c *tc)
646 spin_lock_irq(&tc->lock);
647 __merge_bio_list(&bios, &tc->deferred_bio_list);
648 __merge_bio_list(&bios, &tc->retry_on_resume_list);
649 spin_unlock_irq(&tc->lock);
652 requeue_deferred_cells(tc);
657 struct thin_c *tc;
660 list_for_each_entry_rcu(tc, &pool->active_thins, list)
661 error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
677 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
679 struct pool *pool = tc->pool;
693 static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
696 struct pool *pool = tc->pool;
718 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
720 struct pool *pool = tc->pool;
723 bio_set_dev(bio, tc->pool_dev->bdev);
733 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
735 bio_set_dev(bio, tc->origin_dev->bdev);
738 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
741 dm_thin_changed_this_transaction(tc->td);
755 static void issue(struct thin_c *tc, struct bio *bio)
757 struct pool *pool = tc->pool;
759 if (!bio_triggers_commit(tc, bio)) {
769 if (dm_thin_aborted_changes(tc->td)) {
783 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
785 remap_to_origin(tc, bio);
786 issue(tc, bio);
789 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
792 remap(tc, bio, block);
793 issue(tc, bio);
815 struct thin_c *tc;
832 struct pool *pool = m->tc->pool;
843 struct pool *pool = m->tc->pool;
883 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
885 struct pool *pool = tc->pool;
893 spin_lock_irqsave(&tc->lock, flags);
894 bio_list_merge(&tc->deferred_bio_list, &bios);
895 spin_unlock_irqrestore(&tc->lock, flags);
900 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
903 struct thin_c *tc;
918 inc_all_io_entry(info->tc->pool, bio);
930 static void inc_remap_and_issue_cell(struct thin_c *tc,
937 info.tc = tc;
946 cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
950 thin_defer_bio(tc, bio);
953 remap_and_issue(info.tc, bio, block);
958 cell_error(m->tc->pool, m->cell);
960 mempool_free(m, &m->tc->pool->mapping_pool);
963 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
965 struct pool *pool = tc->pool;
971 if (!bio_triggers_commit(tc, bio)) {
981 if (dm_thin_aborted_changes(tc->td)) {
997 struct thin_c *tc = m->tc;
998 struct pool *pool = tc->pool;
1012 r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
1026 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1027 complete_overwrite_bio(tc, bio);
1029 inc_all_io_entry(tc->pool, m->cell->holder);
1030 remap_and_issue(tc, m->cell->holder, m->data_block);
1031 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1043 struct thin_c *tc = m->tc;
1046 cell_defer_no_holder(tc, m->cell);
1047 mempool_free(m, &tc->pool->mapping_pool);
1065 struct thin_c *tc = m->tc;
1067 r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
1069 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
1074 cell_defer_no_holder(tc, m->cell);
1075 mempool_free(m, &tc->pool->mapping_pool);
1089 struct thin_c *tc = m->tc;
1090 struct pool *pool = tc->pool;
1094 begin_discard(&op, tc, discard_parent);
1132 struct pool *pool = m->tc->pool;
1153 struct thin_c *tc = m->tc;
1154 struct pool *pool = tc->pool;
1163 r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
1167 cell_defer_no_holder(tc, m->cell);
1180 cell_defer_no_holder(tc, m->cell);
1193 begin_discard(&op, tc, discard_parent);
1202 struct thin_c *tc = m->tc;
1203 struct pool *pool = tc->pool;
1217 cell_defer_no_holder(tc, m->cell);
1283 static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
1288 to.bdev = tc->pool_dev->bdev;
1292 dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
1295 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
1299 struct pool *pool = tc->pool;
1306 remap_and_issue(tc, bio, data_begin);
1312 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1318 struct pool *pool = tc->pool;
1321 m->tc = tc;
1344 remap_and_issue_overwrite(tc, bio, data_dest, m);
1352 to.bdev = tc->pool_dev->bdev;
1364 ll_zero(tc, m,
1373 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1377 schedule_copy(tc, virt_block, tc->pool_dev,
1379 tc->pool->sectors_per_block);
1382 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1386 struct pool *pool = tc->pool;
1390 m->tc = tc;
1403 remap_and_issue_overwrite(tc, bio, data_block, m);
1405 ll_zero(tc, m, data_block * pool->sectors_per_block,
1411 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1415 struct pool *pool = tc->pool;
1419 if (virt_block_end <= tc->origin_size)
1420 schedule_copy(tc, virt_block, tc->origin_dev,
1424 else if (virt_block_begin < tc->origin_size)
1425 schedule_copy(tc, virt_block, tc->origin_dev,
1427 tc->origin_size - virt_block_begin);
1430 schedule_zero(tc, virt_block, data_dest, cell, bio);
1517 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1521 struct pool *pool = tc->pool;
1587 struct thin_c *tc = h->tc;
1589 spin_lock_irq(&tc->lock);
1590 bio_list_add(&tc->retry_on_resume_list, bio);
1591 spin_unlock_irq(&tc->lock);
1648 static void process_discard_cell_no_passdown(struct thin_c *tc,
1651 struct pool *pool = tc->pool;
1658 m->tc = tc;
1668 static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
1671 struct pool *pool = tc->pool;
1682 r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,
1707 (void) build_key(tc->td, PHYSICAL, data_begin, data_begin + len, &data_key);
1708 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {
1719 m->tc = tc;
1747 static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell)
1758 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
1768 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1774 get_bio_block_range(tc, bio, &begin, &end);
1783 if (unlikely(!build_key(tc->td, VIRTUAL, begin, end, &virt_key))) {
1789 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) {
1800 tc->pool->process_discard_cell(tc, virt_cell);
1803 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1810 struct pool *pool = tc->pool;
1812 r = alloc_data_block(tc, &data_block);
1815 schedule_internal_copy(tc, block, lookup_result->block,
1844 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1845 inc_all_io_entry(info->tc->pool, bio);
1851 static void remap_and_issue_shared_cell(struct thin_c *tc,
1858 info.tc = tc;
1862 cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1866 thin_defer_bio(tc, bio);
1869 remap_and_issue(tc, bio, block);
1872 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1878 struct pool *pool = tc->pool;
1885 build_data_key(tc->td, lookup_result->block, &key);
1887 cell_defer_no_holder(tc, virt_cell);
1892 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1893 cell_defer_no_holder(tc, virt_cell);
1899 remap_and_issue(tc, bio, lookup_result->block);
1901 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1902 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1906 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1911 struct pool *pool = tc->pool;
1918 cell_defer_no_holder(tc, cell);
1920 remap_and_issue(tc, bio, 0);
1929 cell_defer_no_holder(tc, cell);
1934 r = alloc_data_block(tc, &data_block);
1937 if (tc->origin_dev)
1938 schedule_external_copy(tc, block, data_block, cell, bio);
1940 schedule_zero(tc, block, data_block, cell, bio);
1955 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1958 struct pool *pool = tc->pool;
1960 dm_block_t block = get_bio_block(tc, bio);
1963 if (tc->requeue_mode) {
1968 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1972 process_shared_bio(tc, bio, block, &lookup_result, cell);
1975 remap_and_issue(tc, bio, lookup_result.block);
1976 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1981 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1983 cell_defer_no_holder(tc, cell);
1985 if (bio_end_sector(bio) <= tc->origin_size)
1986 remap_to_origin_and_issue(tc, bio);
1988 else if (bio->bi_iter.bi_sector < tc->origin_size) {
1990 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1991 remap_to_origin_and_issue(tc, bio);
1998 provision_block(tc, bio, block, cell);
2004 cell_defer_no_holder(tc, cell);
2010 static void process_bio(struct thin_c *tc, struct bio *bio)
2012 struct pool *pool = tc->pool;
2013 dm_block_t block = get_bio_block(tc, bio);
2021 build_virtual_key(tc->td, block, &key);
2025 process_cell(tc, cell);
2028 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
2033 dm_block_t block = get_bio_block(tc, bio);
2036 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
2040 handle_unserviceable_bio(tc->pool, bio);
2042 cell_defer_no_holder(tc, cell);
2044 inc_all_io_entry(tc->pool, bio);
2045 remap_and_issue(tc, bio, lookup_result.block);
2047 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
2053 cell_defer_no_holder(tc, cell);
2055 handle_unserviceable_bio(tc->pool, bio);
2059 if (tc->origin_dev) {
2060 inc_all_io_entry(tc->pool, bio);
2061 remap_to_origin_and_issue(tc, bio);
2073 cell_defer_no_holder(tc, cell);
2079 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
2081 __process_bio_read_only(tc, bio, NULL);
2084 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2086 __process_bio_read_only(tc, cell->holder, cell);
2089 static void process_bio_success(struct thin_c *tc, struct bio *bio)
2094 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
2099 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2101 cell_success(tc->pool, cell);
2104 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2106 cell_error(tc->pool, cell);
2122 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
2128 rbp = &tc->sort_bio_list.rb_node;
2142 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
2145 static void __extract_sorted_bios(struct thin_c *tc)
2151 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
2155 bio_list_add(&tc->deferred_bio_list, bio);
2156 rb_erase(&pbd->rb_node, &tc->sort_bio_list);
2159 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
2162 static void __sort_thin_deferred_bios(struct thin_c *tc)
2168 bio_list_merge(&bios, &tc->deferred_bio_list);
2169 bio_list_init(&tc->deferred_bio_list);
2173 __thin_bio_rb_add(tc, bio);
2180 __extract_sorted_bios(tc);
2183 static void process_thin_deferred_bios(struct thin_c *tc)
2185 struct pool *pool = tc->pool;
2191 if (tc->requeue_mode) {
2192 error_thin_bio_list(tc, &tc->deferred_bio_list,
2199 spin_lock_irq(&tc->lock);
2201 if (bio_list_empty(&tc->deferred_bio_list)) {
2202 spin_unlock_irq(&tc->lock);
2206 __sort_thin_deferred_bios(tc);
2208 bio_list_merge(&bios, &tc->deferred_bio_list);
2209 bio_list_init(&tc->deferred_bio_list);
2211 spin_unlock_irq(&tc->lock);
2221 spin_lock_irq(&tc->lock);
2222 bio_list_add(&tc->deferred_bio_list, bio);
2223 bio_list_merge(&tc->deferred_bio_list, &bios);
2224 spin_unlock_irq(&tc->lock);
2229 pool->process_discard(tc, bio);
2231 pool->process_bio(tc, bio);
2277 static void process_thin_deferred_cells(struct thin_c *tc)
2279 struct pool *pool = tc->pool;
2286 spin_lock_irq(&tc->lock);
2287 list_splice_init(&tc->deferred_cells, &cells);
2288 spin_unlock_irq(&tc->lock);
2294 count = sort_cells(tc->pool, &cells);
2309 spin_lock_irq(&tc->lock);
2310 list_splice(&cells, &tc->deferred_cells);
2311 spin_unlock_irq(&tc->lock);
2316 pool->process_discard_cell(tc, cell);
2318 pool->process_cell(tc, cell);
2324 static void thin_get(struct thin_c *tc);
2325 static void thin_put(struct thin_c *tc);
2334 struct thin_c *tc = NULL;
2338 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
2339 thin_get(tc);
2343 return tc;
2346 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
2348 struct thin_c *old_tc = tc;
2351 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
2352 thin_get(tc);
2355 return tc;
2367 struct thin_c *tc;
2369 tc = get_first_thin(pool);
2370 while (tc) {
2371 process_thin_deferred_cells(tc);
2372 process_thin_deferred_bios(tc);
2373 tc = get_next_thin(pool, tc);
2495 struct thin_c *tc;
2507 w->tc->requeue_mode = true;
2508 requeue_io(w->tc);
2516 w->tc->requeue_mode = false;
2520 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2524 w.tc = tc;
2525 pool_work_wait(&w.pw, tc->pool, fn);
2676 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2678 struct pool *pool = tc->pool;
2680 spin_lock_irq(&tc->lock);
2681 bio_list_add(&tc->deferred_bio_list, bio);
2682 spin_unlock_irq(&tc->lock);
2687 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2689 struct pool *pool = tc->pool;
2692 thin_defer_bio(tc, bio);
2696 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2698 struct pool *pool = tc->pool;
2701 spin_lock_irq(&tc->lock);
2702 list_add_tail(&cell->user_list, &tc->deferred_cells);
2703 spin_unlock_irq(&tc->lock);
2709 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2713 h->tc = tc;
2726 struct thin_c *tc = ti->private;
2727 dm_block_t block = get_bio_block(tc, bio);
2728 struct dm_thin_device *td = tc->td;
2733 thin_hook_bio(tc, bio);
2735 if (tc->requeue_mode) {
2741 if (get_pool_mode(tc->pool) == PM_FAIL) {
2747 thin_defer_bio_with_throttle(tc, bio);
2755 build_virtual_key(tc->td, block, &key);
2756 if (bio_detain(tc->pool, &key, bio, &virt_cell))
2781 thin_defer_cell(tc, virt_cell);
2785 build_data_key(tc->td, result.block, &key);
2786 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2787 cell_defer_no_holder(tc, virt_cell);
2791 inc_all_io_entry(tc->pool, bio);
2792 cell_defer_no_holder(tc, data_cell);
2793 cell_defer_no_holder(tc, virt_cell);
2795 remap(tc, bio, result.block);
2800 thin_defer_cell(tc, virt_cell);
2810 cell_defer_no_holder(tc, virt_cell);
2817 struct thin_c *tc;
2820 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2821 spin_lock_irq(&tc->lock);
2822 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2823 bio_list_init(&tc->retry_on_resume_list);
2824 spin_unlock_irq(&tc->lock);
3605 struct thin_c *tc;
3608 tc = get_first_thin(pool);
3609 while (tc) {
3610 dm_internal_suspend_noflush(tc->thin_md);
3611 tc = get_next_thin(pool, tc);
3617 struct thin_c *tc;
3620 tc = get_first_thin(pool);
3621 while (tc) {
3622 dm_internal_resume(tc->thin_md);
3623 tc = get_next_thin(pool, tc);
4138 static void thin_get(struct thin_c *tc)
4140 refcount_inc(&tc->refcount);
4143 static void thin_put(struct thin_c *tc)
4145 if (refcount_dec_and_test(&tc->refcount))
4146 complete(&tc->can_destroy);
4151 struct thin_c *tc = ti->private;
4153 spin_lock_irq(&tc->pool->lock);
4154 list_del_rcu(&tc->list);
4155 spin_unlock_irq(&tc->pool->lock);
4158 thin_put(tc);
4159 wait_for_completion(&tc->can_destroy);
4163 __pool_dec(tc->pool);
4164 dm_pool_close_thin_device(tc->td);
4165 dm_put_device(ti, tc->pool_dev);
4166 if (tc->origin_dev)
4167 dm_put_device(ti, tc->origin_dev);
4168 kfree(tc);
4188 struct thin_c *tc;
4200 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
4201 if (!tc) {
4206 tc->thin_md = dm_table_get_md(ti->table);
4207 spin_lock_init(&tc->lock);
4208 INIT_LIST_HEAD(&tc->deferred_cells);
4209 bio_list_init(&tc->deferred_bio_list);
4210 bio_list_init(&tc->retry_on_resume_list);
4211 tc->sort_bio_list = RB_ROOT;
4225 tc->origin_dev = origin_dev;
4233 tc->pool_dev = pool_dev;
4235 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
4241 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
4248 tc->pool = __pool_table_lookup(pool_md);
4249 if (!tc->pool) {
4254 __pool_inc(tc->pool);
4256 if (get_pool_mode(tc->pool) == PM_FAIL) {
4262 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
4268 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
4279 if (tc->pool->pf.discard_enabled) {
4287 spin_lock_irq(&tc->pool->lock);
4288 if (tc->pool->suspended) {
4289 spin_unlock_irq(&tc->pool->lock);
4295 refcount_set(&tc->refcount, 1);
4296 init_completion(&tc->can_destroy);
4297 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
4298 spin_unlock_irq(&tc->pool->lock);
4302 * added tc isn't yet visible). So this reduces latency since we
4312 dm_pool_close_thin_device(tc->td);
4314 __pool_dec(tc->pool);
4318 dm_put_device(ti, tc->pool_dev);
4320 if (tc->origin_dev)
4321 dm_put_device(ti, tc->origin_dev);
4323 kfree(tc);
4344 struct pool *pool = h->tc->pool;
4371 cell_defer_no_holder(h->tc, h->cell);
4378 struct thin_c *tc = ti->private;
4381 noflush_work(tc, do_noflush_start);
4386 struct thin_c *tc = ti->private;
4392 noflush_work(tc, do_noflush_stop);
4397 struct thin_c *tc = ti->private;
4399 if (tc->origin_dev)
4400 tc->origin_size = get_dev_size(tc->origin_dev->bdev);
4415 struct thin_c *tc = ti->private;
4417 if (get_pool_mode(tc->pool) == PM_FAIL) {
4422 if (!tc->td)
4427 r = dm_thin_get_mapped_count(tc->td, &mapped);
4433 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
4439 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
4442 tc->pool->sectors_per_block) - 1);
4449 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
4450 (unsigned long) tc->dev_id);
4451 if (tc->origin_dev)
4452 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
4471 struct thin_c *tc = ti->private;
4472 struct pool *pool = tc->pool;
4484 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
4491 struct thin_c *tc = ti->private;
4492 struct pool *pool = tc->pool;
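
The deferred-I/O references above (thin_defer_bio, thin_defer_cell, process_thin_deferred_bios, process_thin_deferred_cells) all follow the same producer/consumer shape: the fast path queues work on the thin's lists under tc->lock, and the pool worker later drains those lists and hands each item to the pool's per-mode callbacks. A minimal sketch of that shape follows; sorting, throttling, requeue-mode handling and cell processing are omitted, the bodies are illustrative rather than the driver's actual code, and wake_worker() is assumed to be the pool's "kick the worker thread" helper.

static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
        spin_lock_irq(&tc->lock);
        bio_list_add(&tc->deferred_bio_list, bio);      /* hand the bio to the worker */
        spin_unlock_irq(&tc->lock);

        wake_worker(tc->pool);                          /* assumed helper, not in the listing */
}

static void process_thin_deferred_bios(struct thin_c *tc)
{
        struct pool *pool = tc->pool;
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);

        /* grab the whole deferred list in one go, then drop the lock */
        spin_lock_irq(&tc->lock);
        bio_list_merge(&bios, &tc->deferred_bio_list);
        bio_list_init(&tc->deferred_bio_list);
        spin_unlock_irq(&tc->lock);

        while ((bio = bio_list_pop(&bios))) {
                if (bio_op(bio) == REQ_OP_DISCARD)
                        pool->process_discard(tc, bio); /* per-pool-mode callback */
                else
                        pool->process_bio(tc, bio);
        }
}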