Lines matching defs:cell

227 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
450 * Allocate a cell from the prison's mempool.
458 * We reused an old cell; we can get rid of
467 struct dm_bio_prison_cell *cell,
470 dm_cell_release(pool->prison, cell, bios);
471 dm_bio_prison_free_cell(pool->prison, cell);
477 struct dm_bio_prison_cell *cell)
479 dm_cell_visit_release(pool->prison, fn, context, cell);
480 dm_bio_prison_free_cell(pool->prison, cell);
484 struct dm_bio_prison_cell *cell,
487 dm_cell_release_no_holder(pool->prison, cell, bios);
488 dm_bio_prison_free_cell(pool->prison, cell);
492 struct dm_bio_prison_cell *cell, blk_status_t error_code)
494 dm_cell_error(pool->prison, cell, error_code);
495 dm_bio_prison_free_cell(pool->prison, cell);
503 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
505 cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
508 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
510 cell_error_with_code(pool, cell, 0);
513 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
515 cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
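These matches come from the dm-thin target's bio-prison handling: a cell detains all bios aimed at one block while an operation is in flight. The wrappers at 467-515 share one pattern: perform the prison operation, then immediately return the cell with dm_bio_prison_free_cell(), so a resolved cell can never leak; cell_success() and cell_requeue() are just cell_error_with_code() with status 0 and BLK_STS_DM_REQUEUE. A minimal user-space model of that pairing (toy types, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for struct bio and struct dm_bio_prison_cell. */
struct bio { int id; struct bio *next; };
struct cell { struct bio *bios; };

/* Complete every bio held by the cell with the given status, then
 * free the cell -- the operation-plus-free pairing the wrappers enforce. */
static void cell_error_with_code(struct cell *cell, int error_code)
{
	struct bio *bio, *next;

	for (bio = cell->bios; bio; bio = next) {
		next = bio->next;
		printf("bio %d -> status %d\n", bio->id, error_code);
	}
	free(cell);
}

static void cell_success(struct cell *cell)
{
	cell_error_with_code(cell, 0);	/* status 0 == success */
}

int main(void)
{
	struct cell *cell = malloc(sizeof(*cell));
	struct bio a = { 1, NULL };

	cell->bios = &a;
	cell_success(cell);
	return 0;
}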
591 struct dm_bio_prison_cell *cell;
628 struct dm_bio_prison_cell *cell, *tmp;
636 list_for_each_entry_safe(cell, tmp, &cells, user_list)
637 cell_requeue(pool, cell);
818 struct dm_bio_prison_cell *cell;
823 * still be in the cell, so care has to be taken to avoid issuing
880 * This sends the bios in the cell, except the original holder, back
883 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
890 cell_release_no_holder(pool, cell, &bios);
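Per the comment at 880, cell_defer_no_holder() sends every bio in the cell except the original holder back for another pass: the holder has already been remapped or completed by the time this runs. A toy version, assuming the waiting bios hang off the cell as a simple list (the real cell layout differs):

#include <stdio.h>

struct bio { int id; struct bio *next; };
struct cell { struct bio *holder; struct bio *waiting; };

/* Move everything except the holder onto the deferred list; the
 * kernel would then free the cell via dm_bio_prison_free_cell(). */
static void cell_defer_no_holder(struct cell *cell, struct bio **deferred)
{
	struct bio *bio;

	while ((bio = cell->waiting)) {
		cell->waiting = bio->next;
		bio->next = *deferred;
		*deferred = bio;
	}
}

int main(void)
{
	struct bio b2 = { 2, NULL };
	struct bio b1 = { 1, &b2 };
	struct cell cell = { NULL, &b1 };	/* holder already handled */
	struct bio *deferred = NULL;

	cell_defer_no_holder(&cell, &deferred);
	printf("first deferred bio: %d\n", deferred->id);	/* 2 */
	return 0;
}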
909 struct dm_bio_prison_cell *cell)
914 while ((bio = bio_list_pop(&cell->bios))) {
931 struct dm_bio_prison_cell *cell,
943 * before the cell is released, and avoid a race with new bios
944 * being added to the cell.
947 &info, cell);
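The dm_cell_visit_release() call at 947 exists for the reason the comment at 943-944 gives: the visitor callback runs before the cell is released, so no new bio can join the cell between the visit and the release. A sketch of that contract with a plain mutex standing in for the prison lock (hypothetical types, not the real bio-prison internals):

#include <pthread.h>

struct cell { int nr_bios; };

struct prison {
	pthread_mutex_t lock;
	/* cell lookup structures elided */
};

typedef void (*visit_fn)(void *context, struct cell *cell);

/* Visit, then release, without dropping the lock in between. */
static void cell_visit_release(struct prison *prison, visit_fn fn,
			       void *context, struct cell *cell)
{
	pthread_mutex_lock(&prison->lock);
	fn(context, cell);	/* e.g. bump io counts for every bio */
	/* ... unlink the cell from the prison here ... */
	pthread_mutex_unlock(&prison->lock);
}

static void count_bios(void *context, struct cell *cell)
{
	*(int *)context += cell->nr_bios;
}

int main(void)
{
	struct prison prison = { PTHREAD_MUTEX_INITIALIZER };
	struct cell cell = { 3 };
	int total = 0;

	cell_visit_release(&prison, count_bios, &total, &cell);
	return total == 3 ? 0 : 1;
}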
958 cell_error(m->tc->pool, m->cell);
1003 cell_error(pool, m->cell);
1015 cell_error(pool, m->cell);
1023 * the bios in the cell.
1026 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1029 inc_all_io_entry(tc->pool, m->cell->holder);
1030 remap_and_issue(tc, m->cell->holder, m->data_block);
1031 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1045 if (m->cell)
1046 cell_defer_no_holder(tc, m->cell);
1067 r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
1074 cell_defer_no_holder(tc, m->cell);
1167 cell_defer_no_holder(tc, m->cell);
1180 cell_defer_no_holder(tc, m->cell);
1217 cell_defer_no_holder(tc, m->cell);
1315 struct dm_bio_prison_cell *cell, struct bio *bio,
1325 m->cell = cell;
1375 struct dm_bio_prison_cell *cell, struct bio *bio)
1378 data_origin, data_dest, cell, bio,
1383 dm_block_t data_block, struct dm_bio_prison_cell *cell,
1394 m->cell = cell;
1413 struct dm_bio_prison_cell *cell, struct bio *bio)
1421 virt_block, data_dest, cell, bio,
1426 virt_block, data_dest, cell, bio,
1430 schedule_zero(tc, virt_block, data_dest, cell, bio);
1629 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
1637 cell_error_with_code(pool, cell, error);
1642 cell_release(pool, cell, &bios);
1661 m->cell = virt_cell;
1724 m->cell = data_cell;
1757 h->cell = virt_cell;
1795 * cell will never be granted.
1806 struct dm_bio_prison_cell *cell)
1816 data_block, cell, bio);
1820 retry_bios_on_resume(pool, cell);
1826 cell_error(pool, cell);
1832 struct dm_bio_prison_cell *cell)
1837 while ((bio = bio_list_pop(&cell->bios))) {
1852 struct dm_bio_prison_cell *cell,
1863 &info, cell);
1882 * If cell is already occupied, then sharing is already in the process
1907 struct dm_bio_prison_cell *cell)
1918 cell_defer_no_holder(tc, cell);
1929 cell_defer_no_holder(tc, cell);
1938 schedule_external_copy(tc, block, data_block, cell, bio);
1940 schedule_zero(tc, block, data_block, cell, bio);
1944 retry_bios_on_resume(pool, cell);
1950 cell_error(pool, cell);
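Both the sharing-break path (1806-1826) and provision_block() (1907-1950) resolve their cell the same way after asking the pool for a new data block: success schedules the copy or zero, -ENOSPC parks the detained bios until the pool is resized, and any other error fails the whole cell. The control flow, condensed into a user-space sketch with stub helpers (hypothetical bodies):

#include <errno.h>
#include <stdio.h>

struct cell;

/* Stubs standing in for the helpers named in the listing. */
static int alloc_data_block(unsigned long *block) { *block = 42; return 0; }
static void schedule_zero(unsigned long block, struct cell *cell)
{
	printf("zeroing data block %lu\n", block);
}
static void retry_bios_on_resume(struct cell *cell) { puts("parked until resume"); }
static void cell_error(struct cell *cell) { puts("cell errored"); }

static void provision_block(struct cell *cell)
{
	unsigned long data_block;
	int r = alloc_data_block(&data_block);

	switch (r) {
	case 0:		/* got space: kick off the zero/copy */
		schedule_zero(data_block, cell);
		break;
	case -ENOSPC:	/* pool full: retry the bios after a resize */
		retry_bios_on_resume(cell);
		break;
	default:	/* hard failure: fail every bio in the cell */
		cell_error(cell);
		break;
	}
}

int main(void)
{
	provision_block(NULL);
	return 0;
}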
1955 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1959 struct bio *bio = cell->holder;
1964 cell_requeue(pool, cell);
1972 process_shared_bio(tc, bio, block, &lookup_result, cell);
1976 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1983 cell_defer_no_holder(tc, cell);
1998 provision_block(tc, bio, block, cell);
2004 cell_defer_no_holder(tc, cell);
2014 struct dm_bio_prison_cell *cell;
2018 * If cell is already occupied, then the block is already
2022 if (bio_detain(pool, &key, bio, &cell))
2025 process_cell(tc, cell);
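The comment fragment at 2018 describes the detain idiom: if the cell for this virtual block is already occupied, the bio simply joins it and returns, and the current holder will finish the work; otherwise the bio becomes the holder and process_cell() runs. A toy bio_detain() over a small fixed table (no locking, illustrative only):

#include <stdio.h>

struct bio { int id; };
struct cell {
	int in_use;
	long key;		/* virtual block number */
	struct bio *holder;
	struct bio *waiting[8];
	int nr_waiting;
};

static struct cell cells[16];

/* Returns 1 if the block was already detained (bio queued behind the
 * current holder), 0 if this bio became the holder. */
static int bio_detain(long key, struct bio *bio, struct cell **out)
{
	int i, free_slot = -1;

	for (i = 0; i < 16; i++) {
		if (cells[i].in_use && cells[i].key == key) {
			cells[i].waiting[cells[i].nr_waiting++] = bio;
			return 1;
		}
		if (!cells[i].in_use && free_slot < 0)
			free_slot = i;
	}
	/* toy code: assumes a free slot was found */
	cells[free_slot].in_use = 1;
	cells[free_slot].key = key;
	cells[free_slot].holder = bio;
	*out = &cells[free_slot];
	return 0;
}

int main(void)
{
	struct bio a = { 1 }, b = { 2 };
	struct cell *cell;

	if (!bio_detain(7, &a, &cell))
		printf("bio %d holds block 7\n", cell->holder->id);
	if (bio_detain(7, &b, &cell))
		printf("bio %d queued behind it\n", b.id);
	return 0;
}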
2029 struct dm_bio_prison_cell *cell)
2041 if (cell)
2042 cell_defer_no_holder(tc, cell);
2046 if (cell)
2047 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
2052 if (cell)
2053 cell_defer_no_holder(tc, cell);
2072 if (cell)
2073 cell_defer_no_holder(tc, cell);
2084 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2086 __process_bio_read_only(tc, cell->holder, cell);
2099 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2101 cell_success(tc->pool, cell);
2104 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2106 cell_error(tc->pool, cell);
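process_cell() (1955), process_cell_read_only() (2084), process_cell_success() (2099) and process_cell_fail() (2104) are all instances of the process_cell_fn typedef from line 227: the pool changes behaviour across write, read-only and fail modes by swapping these pointers rather than branching on the mode at every call site. The shape of that dispatch, reduced to a standalone sketch:

#include <stdio.h>

struct cell;
typedef void (*process_cell_fn)(struct cell *cell);

static void process_cell(struct cell *cell)           { puts("normal write path"); }
static void process_cell_read_only(struct cell *cell) { puts("read-only: no provisioning"); }
static void process_cell_fail(struct cell *cell)      { puts("fail: error the cell"); }

struct pool { process_cell_fn process_cell; };

/* dm-thin's set_pool_mode() swaps the pointers in the same spirit. */
static void set_mode(struct pool *pool, int failed, int read_only)
{
	if (failed)
		pool->process_cell = process_cell_fail;
	else if (read_only)
		pool->process_cell = process_cell_read_only;
	else
		pool->process_cell = process_cell;
}

int main(void)
{
	struct pool pool;

	set_mode(&pool, 0, 1);
	pool.process_cell(NULL);	/* -> read-only handler */
	return 0;
}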
2262 struct dm_bio_prison_cell *cell, *tmp;
2264 list_for_each_entry_safe(cell, tmp, cells, user_list) {
2268 pool->cell_sort_array[count++] = cell;
2269 list_del(&cell->user_list);
2272 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
2281 struct dm_bio_prison_cell *cell;
2297 cell = pool->cell_sort_array[i];
2298 BUG_ON(!cell->holder);
2315 if (bio_op(cell->holder) == REQ_OP_DISCARD)
2316 pool->process_discard_cell(tc, cell);
2318 pool->process_cell(tc, cell);
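Lines 2262-2318 show why pool->cell_sort_array exists (and why its allocation failure is reported at 3039): deferred cells are pulled off the list into the array, sorted by each holder bio's starting sector via cmp_cells(), and only then processed, so I/O reaches the data device in roughly ascending order. The same idea with libc's qsort() (user-space toy):

#include <stdio.h>
#include <stdlib.h>

struct bio { long sector; };
struct cell { struct bio *holder; };

/* Same ordering rule as cmp_cells(): compare the holders' sectors. */
static int cmp_cells(const void *lhs, const void *rhs)
{
	const struct cell *l = *(const struct cell * const *)lhs;
	const struct cell *r = *(const struct cell * const *)rhs;

	if (l->holder->sector < r->holder->sector)
		return -1;
	if (l->holder->sector > r->holder->sector)
		return 1;
	return 0;
}

int main(void)
{
	struct bio b1 = { 900 }, b2 = { 100 }, b3 = { 500 };
	struct cell c1 = { &b1 }, c2 = { &b2 }, c3 = { &b3 };
	struct cell *sort_array[] = { &c1, &c2, &c3 };
	size_t i;

	qsort(sort_array, 3, sizeof(sort_array[0]), cmp_cells);
	for (i = 0; i < 3; i++)
		printf("%ld\n", sort_array[i]->holder->sector);	/* 100 500 900 */
	return 0;
}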
2696 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2702 list_add_tail(&cell->user_list, &tc->deferred_cells);
2717 h->cell = NULL;
2752 * We must hold the virtual cell before doing the lookup, otherwise
3039 *error = "Error allocating cell sort array";
4370 if (h->cell)
4371 cell_defer_no_holder(h->tc, h->cell);