Lines Matching defs:cell
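The matches below are from drivers/md/dm-thin.c, the Linux device-mapper thin-provisioning target; the leading number on each line is the source line of the match. In dm-thin, a dm_bio_prison_cell collects every bio aimed at a virtual or data block while that block is being provisioned, copied, or discarded: the first bio to arrive becomes the cell's holder, and the rest wait in the cell until it is released, errored, or requeued.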
224 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
450 * Allocate a cell from the prison's mempool.
458 * We reused an old cell; we can get rid of
467 struct dm_bio_prison_cell *cell,
470 dm_cell_release(pool->prison, cell, bios);
471 dm_bio_prison_free_cell(pool->prison, cell);
477 struct dm_bio_prison_cell *cell)
479 dm_cell_visit_release(pool->prison, fn, context, cell);
480 dm_bio_prison_free_cell(pool->prison, cell);
484 struct dm_bio_prison_cell *cell,
487 dm_cell_release_no_holder(pool->prison, cell, bios);
488 dm_bio_prison_free_cell(pool->prison, cell);
492 struct dm_bio_prison_cell *cell, blk_status_t error_code)
494 dm_cell_error(pool->prison, cell, error_code);
495 dm_bio_prison_free_cell(pool->prison, cell);
503 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
505 cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
508 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
510 cell_error_with_code(pool, cell, 0);
513 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
515 cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
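The helpers at lines 467-515 all follow one shape: perform a single dm-bio-prison operation on the cell, then return the cell to the prison's mempool. A minimal reconstruction of that pattern from the matches above (dm_cell_error() and dm_bio_prison_free_cell() are the dm-bio-prison calls shown at lines 494-495):

    static void cell_error_with_code(struct pool *pool,
                                     struct dm_bio_prison_cell *cell,
                                     blk_status_t error_code)
    {
            dm_cell_error(pool->prison, cell, error_code); /* complete all held bios with error_code */
            dm_bio_prison_free_cell(pool->prison, cell);   /* return the cell to the mempool */
    }

The three named variants at lines 503-515 only pick the status: cell_error() uses the pool's configured I/O error code, cell_success() passes 0, and cell_requeue() passes BLK_STS_DM_REQUEUE so the held bios are resubmitted later.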
591 struct dm_bio_prison_cell *cell;
628 struct dm_bio_prison_cell *cell, *tmp;
636 list_for_each_entry_safe(cell, tmp, &cells, user_list)
637 cell_requeue(pool, cell);
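Lines 628-637 drain a local list of deferred cells, requeueing every bio each cell holds. A sketch of the surrounding loop, assuming (as elsewhere in dm-thin.c) that the cells were first spliced off tc->deferred_cells under the thin device's lock:

    struct dm_bio_prison_cell *cell, *tmp;
    struct list_head cells;

    INIT_LIST_HEAD(&cells);

    spin_lock_irq(&tc->lock);                      /* lock name assumed */
    list_splice_init(&tc->deferred_cells, &cells); /* tc->deferred_cells appears at line 2687 */
    spin_unlock_irq(&tc->lock);

    list_for_each_entry_safe(cell, tmp, &cells, user_list)
            cell_requeue(pool, cell);              /* BLK_STS_DM_REQUEUE for every held bio */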
818 struct dm_bio_prison_cell *cell;
823 * still be in the cell, so care has to be taken to avoid issuing
880 * This sends the bios in the cell, except the original holder, back
883 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
890 cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
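The comment at lines 880-883 describes cell_defer_no_holder(): the holder has already been dealt with, so the remaining bios go back on the thin device's deferred list and the worker is kicked. A sketch, assuming dm-thin's usual tc->lock and wake_worker() helper:

    static void cell_defer_no_holder(struct thin_c *tc,
                                     struct dm_bio_prison_cell *cell)
    {
            struct pool *pool = tc->pool;
            unsigned long flags;

            spin_lock_irqsave(&tc->lock, flags);   /* lock discipline assumed */
            cell_release_no_holder(pool, cell, &tc->deferred_bio_list); /* line 890 */
            spin_unlock_irqrestore(&tc->lock, flags);

            wake_worker(pool); /* assumed: schedules the pool worker to service the list */
    }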
907 struct dm_bio_prison_cell *cell)
912 while ((bio = bio_list_pop(&cell->bios))) {
929 struct dm_bio_prison_cell *cell,
941 * before the cell is released, and avoid a race with new bios
942 * being added to the cell.
945 &info, cell);
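Lines 907-945 show the visit-release pattern: dm_cell_visit_release() runs a visitor on the cell while it is still locked, so the held bios are classified before any new bio can join the cell (the comment at lines 941-942), and the cell is then released and freed in one step via the wrapper at lines 477-480. A sketch of the visitor; the remap_info fields are assumptions inferred from the loop at line 912:

    struct remap_info {
            struct thin_c *tc;
            struct bio_list defer_bios;  /* flushes/discards: back to the worker */
            struct bio_list issue_bios;  /* plain reads/writes: remap and submit */
    };

    static void __inc_remap_and_issue_cell(void *context,
                                           struct dm_bio_prison_cell *cell)
    {
            struct remap_info *info = context;
            struct bio *bio;

            while ((bio = bio_list_pop(&cell->bios))) {
                    if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
                            bio_list_add(&info->defer_bios, bio);
                    else {
                            inc_all_io_entry(info->tc->pool, bio);
                            bio_list_add(&info->issue_bios, bio);
                    }
            }
    }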
956 cell_error(m->tc->pool, m->cell);
1001 cell_error(pool, m->cell);
1013 cell_error(pool, m->cell);
1021 * the bios in the cell.
1024 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1027 inc_all_io_entry(tc->pool, m->cell->holder);
1028 remap_and_issue(tc, m->cell->holder, m->data_block);
1029 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
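Lines 1021-1029 are the success path of a prepared mapping: once the new block's mapping is committed, the bios held in m->cell can finally run. Which branch is taken depends on whether an overwriting bio already carried the data to the block (the m->bio field name is an assumption):

    if (bio) {
            /* A write covering the whole block was used to populate it,
             * so only the waiting bios in the cell need remapping. */
            inc_remap_and_issue_cell(tc, m->cell, m->data_block);
            bio_endio(bio);
    } else {
            /* No overwrite: remap the holder and the waiters alike. */
            inc_all_io_entry(tc->pool, m->cell->holder);
            remap_and_issue(tc, m->cell->holder, m->data_block);
            inc_remap_and_issue_cell(tc, m->cell, m->data_block);
    }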
1042 if (m->cell)
1043 cell_defer_no_holder(tc, m->cell);
1064 r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
1071 cell_defer_no_holder(tc, m->cell);
1164 cell_defer_no_holder(tc, m->cell);
1177 cell_defer_no_holder(tc, m->cell);
1222 cell_defer_no_holder(tc, m->cell);
1320 struct dm_bio_prison_cell *cell, struct bio *bio,
1330 m->cell = cell;
1380 struct dm_bio_prison_cell *cell, struct bio *bio)
1383 data_origin, data_dest, cell, bio,
1388 dm_block_t data_block, struct dm_bio_prison_cell *cell,
1399 m->cell = cell;
1418 struct dm_bio_prison_cell *cell, struct bio *bio)
1426 virt_block, data_dest, cell, bio,
1431 virt_block, data_dest, cell, bio,
1435 schedule_zero(tc, virt_block, data_dest, cell, bio);
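The schedule_* helpers at lines 1320-1435 do not release the cell; they stash it in the deferred mapping (m->cell = cell at lines 1330 and 1399) so the block stays locked for the whole copy or zero, and the completion paths above (e.g. cell_defer_no_holder(tc, m->cell) at line 1043) release it. A sketch of that handoff; the mapping field names vary across kernel versions and are assumptions here:

    struct dm_thin_new_mapping *m = get_next_mapping(pool);

    m->tc = tc;
    m->virt_begin = virt_block;   /* assumed field names */
    m->virt_end = virt_block + 1;
    m->data_block = data_dest;
    m->cell = cell;               /* held until the mapping completes */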
1634 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
1642 cell_error_with_code(pool, cell, error);
1647 cell_release(pool, cell, &bios);
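retry_bios_on_resume() (lines 1634-1647) handles the pool running out of space: either the cell is errored immediately, or every held bio is released and parked until the pool is resumed. A reconstruction, assuming dm-thin's should_error_unserviceable_bio() and retry_on_resume() helpers:

    static void retry_bios_on_resume(struct pool *pool,
                                     struct dm_bio_prison_cell *cell)
    {
            struct bio *bio;
            struct bio_list bios;
            blk_status_t error = should_error_unserviceable_bio(pool);

            if (error) {
                    cell_error_with_code(pool, cell, error); /* line 1642 */
                    return;
            }

            bio_list_init(&bios);
            cell_release(pool, cell, &bios);                 /* line 1647 */

            while ((bio = bio_list_pop(&bios)))
                    retry_on_resume(bio);                    /* retried after resume */
    }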
1666 m->cell = virt_cell;
1717 m->cell = data_cell;
1746 h->cell = virt_cell;
1779 * cell will never be granted.
1789 struct dm_bio_prison_cell *cell)
1799 data_block, cell, bio);
1803 retry_bios_on_resume(pool, cell);
1809 cell_error(pool, cell);
1815 struct dm_bio_prison_cell *cell)
1820 while ((bio = bio_list_pop(&cell->bios))) {
1835 struct dm_bio_prison_cell *cell,
1846 &info, cell);
1865 * If cell is already occupied, then sharing is already in the process
1890 struct dm_bio_prison_cell *cell)
1901 cell_defer_no_holder(tc, cell);
1912 cell_defer_no_holder(tc, cell);
1921 schedule_external_copy(tc, block, data_block, cell, bio);
1923 schedule_zero(tc, block, data_block, cell, bio);
1927 retry_bios_on_resume(pool, cell);
1933 cell_error(pool, cell);
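provision_block() (lines 1890-1933) ends in an allocation switch; the break-sharing path at lines 1789-1809 has the same shape. Sketch:

    r = alloc_data_block(tc, &data_block);
    switch (r) {
    case 0:
            if (tc->origin_dev)   /* external origin: copy old data across */
                    schedule_external_copy(tc, block, data_block, cell, bio);
            else                  /* fresh block: zero it first */
                    schedule_zero(tc, block, data_block, cell, bio);
            break;

    case -ENOSPC:
            retry_bios_on_resume(pool, cell); /* park the cell's bios until resume */
            break;

    default:
            cell_error(pool, cell);           /* fail everything the cell holds */
            break;
    }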
1938 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1942 struct bio *bio = cell->holder;
1947 cell_requeue(pool, cell);
1955 process_shared_bio(tc, bio, block, &lookup_result, cell);
1959 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1966 cell_defer_no_holder(tc, cell);
1981 provision_block(tc, bio, block, cell);
1987 cell_defer_no_holder(tc, cell);
1997 struct dm_bio_prison_cell *cell;
2001 * If cell is already occupied, then the block is already
2005 if (bio_detain(pool, &key, bio, &cell))
2008 process_cell(tc, cell);
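process_bio() (lines 1997-2008) is where a deferred bio first meets the prison: build a key for its virtual block, try to detain it, and proceed only if this bio became the cell's holder. bio_detain() is the pool-local wrapper sketched at lines 450-458: it preallocates a cell from the prison's mempool and frees it again if the bio landed in an existing cell. The get_bio_block() helper name is taken from dm-thin.c:

    static void process_bio(struct thin_c *tc, struct bio *bio)
    {
            struct pool *pool = tc->pool;
            dm_block_t block = get_bio_block(tc, bio);
            struct dm_bio_prison_cell *cell;
            struct dm_cell_key key;

            /*
             * If cell is already occupied, then the block is already
             * being provisioned, so there is nothing further to do here.
             */
            build_virtual_key(tc->td, block, &key);
            if (bio_detain(pool, &key, bio, &cell))
                    return;

            process_cell(tc, cell);
    }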
2012 struct dm_bio_prison_cell *cell)
2024 if (cell)
2025 cell_defer_no_holder(tc, cell);
2029 if (cell)
2030 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
2035 if (cell)
2036 cell_defer_no_holder(tc, cell);
2055 if (cell)
2056 cell_defer_no_holder(tc, cell);
2067 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2069 __process_bio_read_only(tc, cell->holder, cell);
2082 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2084 cell_success(tc->pool, cell);
2087 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2089 cell_error(tc->pool, cell);
2245 struct dm_bio_prison_cell *cell, *tmp;
2247 list_for_each_entry_safe(cell, tmp, cells, user_list) {
2251 pool->cell_sort_array[count++] = cell;
2252 list_del(&cell->user_list);
2255 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
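sort_cells() (lines 2245-2255) batches deferred cells into pool->cell_sort_array and sorts them, presumably by block key, so the worker issues them in roughly ascending order. Note that sizeof(cell) is the size of a pointer, which is correct because the array holds cell pointers. A reconstruction, with the CELL_SORT_ARRAY_SIZE bound assumed from dm-thin.c:

    static unsigned sort_cells(struct pool *pool, struct list_head *cells)
    {
            unsigned count = 0;
            struct dm_bio_prison_cell *cell, *tmp;

            list_for_each_entry_safe(cell, tmp, cells, user_list) {
                    if (count >= CELL_SORT_ARRAY_SIZE)
                            break;  /* process the rest in the next batch */

                    pool->cell_sort_array[count++] = cell;
                    list_del(&cell->user_list);
            }

            sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);

            return count;
    }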
2264 struct dm_bio_prison_cell *cell;
2280 cell = pool->cell_sort_array[i];
2281 BUG_ON(!cell->holder);
2298 if (bio_op(cell->holder) == REQ_OP_DISCARD)
2299 pool->process_discard_cell(tc, cell);
2301 pool->process_cell(tc, cell);
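Lines 2264-2301 drain the sorted batch. Every cell is dispatched through the pool's function pointers, which is what the process_cell_fn typedef at line 224 exists for: the pointers are retargeted when the pool changes mode, so the same loop serves the normal handler (process_cell, line 1938) and the degraded ones (process_cell_read_only, process_cell_success, process_cell_fail, lines 2067-2089). Sketch of the dispatch:

    for (i = 0; i < count; i++) {
            cell = pool->cell_sort_array[i];
            BUG_ON(!cell->holder); /* a detained cell always has a holder bio */

            if (bio_op(cell->holder) == REQ_OP_DISCARD)
                    pool->process_discard_cell(tc, cell);
            else
                    pool->process_cell(tc, cell);
    }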
2681 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2687 list_add_tail(&cell->user_list, &tc->deferred_cells);
2702 h->cell = NULL;
2737 * We must hold the virtual cell before doing the lookup, otherwise
3030 *error = "Error allocating cell sort array";
4358 if (h->cell)
4359 cell_defer_no_holder(h->tc, h->cell);
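Lines 2702 and 4358-4359 bracket the per-bio hook's cell pointer: h->cell starts out NULL when the bio is hooked, is set to the virtual cell the bio holds (line 1746), and is released when the bio completes. A fragment of the endio side, assuming dm-thin's per-bio hook is fetched with dm_per_bio_data():

    struct dm_thin_endio_hook *h =
            dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

    if (h->cell)
            cell_defer_no_holder(h->tc, h->cell); /* lets the waiting bios proceed */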