Lines matching refs: dio (drivers/md/dm-integrity.c)
372 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
1570 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1576 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1602 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1604 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1606 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1607 submit_flush_bio(ic, dio);
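
The submit_flush_bio()/do_endio_flush() pair above leans on device-mapper's per-bio data: struct dm_integrity_io lives in a private area allocated in front of each bio, and dm_per_bio_data()/dm_bio_from_per_bio_data() convert between the two pointers with constant offset arithmetic. A minimal user-space sketch of that round trip (the real helpers also add a fixed struct dm_target_io offset, omitted here; all names below are illustrative stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins: the real code keeps struct dm_integrity_io in the per-bio
     * area that device-mapper allocates in front of each bio. */
    struct bio { int dummy; };
    struct per_bio { long state; };

    /* Mirror of the round trip: the private data sits data_size bytes before
     * the bio, so either pointer is recovered from the other by offset math. */
    static void *per_bio_data(struct bio *bio, size_t data_size)
    {
        return (char *)bio - data_size;
    }

    static struct bio *bio_from_per_bio_data(void *data, size_t data_size)
    {
        return (struct bio *)((char *)data + data_size);
    }

    int main(void)
    {
        /* One allocation holding both objects, payload first. */
        char *buf = malloc(sizeof(struct per_bio) + sizeof(struct bio));
        struct bio *bio = (struct bio *)(buf + sizeof(struct per_bio));
        struct per_bio *p = per_bio_data(bio, sizeof(struct per_bio));

        printf("round trip ok: %d\n",
               bio_from_per_bio_data(p, sizeof(struct per_bio)) == bio);
        free(buf);
        return 0;
    }
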
1612 static void dec_in_flight(struct dm_integrity_io *dio)
1614 if (atomic_dec_and_test(&dio->in_flight)) {
1615 struct dm_integrity_c *ic = dio->ic;
1618 remove_range(ic, &dio->range);
1620 if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
1623 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1624 if (unlikely(dio->bi_status) && !bio->bi_status)
1625 bio->bi_status = dio->bi_status;
1626 if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1627 dio->range.logical_sector += dio->range.n_sectors;
1628 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1629 INIT_WORK(&dio->work, integrity_bio_wait);
1630 queue_work(ic->offload_wq, &dio->work);
1633 do_endio_flush(ic, dio);
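
dec_in_flight() is a plain reference-count completion: each in-flight piece of work holds one count, and whoever drops it to zero propagates the status and, if the bio was only partially processed (line 1626), advances it and requeues the remainder. A sketch of the dec-and-test idiom using C11 atomics rather than the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Whoever drops the count to zero runs the completion. */
    struct io {
        atomic_int in_flight;
        int status;
    };

    static void complete_io(struct io *io)
    {
        printf("complete, status=%d\n", io->status);
    }

    static void dec_in_flight(struct io *io)
    {
        /* atomic_fetch_sub returns the old value; old == 1 means we were
         * last, mirroring atomic_dec_and_test() returning true. */
        if (atomic_fetch_sub(&io->in_flight, 1) == 1)
            complete_io(io);
    }

    int main(void)
    {
        struct io io = { .in_flight = 2, .status = 0 };
        dec_in_flight(&io);   /* e.g. the data I/O finishing */
        dec_in_flight(&io);   /* e.g. the metadata worker finishing */
        return 0;
    }
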
1639 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1641 dm_bio_restore(&dio->bio_details, bio);
1645 if (dio->completion)
1646 complete(dio->completion);
1648 dec_in_flight(dio);
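
integrity_end_io() restores the iterator state saved with dm_bio_record() before submission (line 2356), so the verification pass can re-walk the same data segments after the block layer has advanced the bio. The idea in miniature (types are illustrative only):

    #include <stdio.h>

    /* Snapshot the iterator before a destructive walk, restore it on
     * completion so the checksum pass can revisit the same segments. */
    struct iter { unsigned idx, done; };
    struct details { struct iter saved; };

    static void record(struct details *d, const struct iter *it) { d->saved = *it; }
    static void restore(const struct details *d, struct iter *it) { *it = d->saved; }

    int main(void)
    {
        struct iter it = { .idx = 0, .done = 0 };
        struct details d;

        record(&d, &it);
        it.idx = 8; it.done = 4096;   /* completion advanced the iterator */
        restore(&d, &it);             /* rewind for the verification pass */
        printf("idx=%u done=%u\n", it.idx, it.done);
        return 0;
    }
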
1704 static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
1706 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1707 struct dm_integrity_c *ic = dio->ic;
1713 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1714 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
1715 &dio->metadata_offset);
1717 logical_sector = dio->range.logical_sector;
1721 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1741 alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
1750 dio->bi_status = errno_to_blk_status(r);
1755 r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
1756 &dio->metadata_offset, ic->tag_size, TAG_CMP);
1766 dio->bi_status = errno_to_blk_status(r);
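
integrity_recheck() re-reads the data in small synchronous chunks and compares a freshly computed checksum against the stored tag (TAG_CMP). The alignment word built at line 1741 ORs together the start sector, the bio size, and the page size in sectors; taking its lowest set bit yields the largest power-of-two step that satisfies all three alignments at once. A sketch of that bit trick:

    #include <stdio.h>

    /* Isolating the lowest set bit (x & -x) of the ORed constraints gives
     * the largest power-of-two chunk aligned for all of them. */
    static unsigned long max_aligned_step(unsigned long sector,
                                          unsigned long sectors,
                                          unsigned long page_sectors)
    {
        unsigned long a = sector | sectors | page_sectors;
        return a & -a;  /* lowest set bit */
    }

    int main(void)
    {
        /* e.g. sector 24, 16 sectors in the bio, 8 sectors per page -> 8 */
        printf("%lu\n", max_aligned_step(24, 16, 8));
        return 0;
    }
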
1785 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1786 struct dm_integrity_c *ic = dio->ic;
1794 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1804 if (likely(dio->op != REQ_OP_DISCARD))
1818 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1819 unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
1829 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1845 sector = dio->range.logical_sector;
1846 sectors_to_process = dio->range.n_sectors;
1848 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1866 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1867 checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1872 integrity_recheck(dio, checksums_onstack);
1891 struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1896 unsigned int data_to_process = dio->range.n_sectors;
1908 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1909 this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1919 dec_in_flight(dio);
1922 dio->bi_status = errno_to_blk_status(r);
1923 dec_in_flight(dio);
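
integrity_metadata() is the checksum worker: it walks the recorded bio segments, batches one checksum per block, and hands the batch to dm_integrity_rw_tag() with TAG_CMP on reads (falling back to integrity_recheck() on mismatch, line 1872) or TAG_WRITE on writes. A toy version of that batch-then-compare-or-store loop (the XOR checksum stands in for the configured internal hash):

    #include <stdio.h>
    #include <string.h>

    enum tag_op { TAG_CMP, TAG_WRITE };

    #define BLOCKS 4
    static unsigned char stored_tags[BLOCKS];

    /* Toy per-block checksum; the real code uses the internal hash. */
    static unsigned char toy_csum(const unsigned char *blk, size_t len)
    {
        unsigned char c = 0;
        while (len--)
            c ^= *blk++;
        return c;
    }

    static int rw_tags(unsigned char *tags, unsigned n, enum tag_op op)
    {
        if (op == TAG_WRITE) {
            memcpy(stored_tags, tags, n);
            return 0;
        }
        return memcmp(stored_tags, tags, n) ? -1 : 0; /* -EILSEQ in-kernel */
    }

    int main(void)
    {
        unsigned char data[BLOCKS][512] = { { 1 }, { 2 }, { 3 }, { 4 } };
        unsigned char tags[BLOCKS];

        for (unsigned i = 0; i < BLOCKS; i++)
            tags[i] = toy_csum(data[i], sizeof(data[i]));

        rw_tags(tags, BLOCKS, TAG_WRITE);  /* write path stores the tags  */
        printf("verify: %d\n", rw_tags(tags, BLOCKS, TAG_CMP)); /* read path */
        return 0;
    }
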
1929 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1934 dio->ic = ic;
1935 dio->bi_status = 0;
1936 dio->op = bio_op(bio);
1938 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1954 submit_flush_bio(ic, dio);
1958 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1959 dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1960 if (unlikely(dio->fua)) {
1967 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1969 dio->range.logical_sector, bio_sectors(bio),
1973 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
1976 dio->range.logical_sector, bio_sectors(bio));
1980 if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
2015 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
2018 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2019 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2022 dm_integrity_map_continue(dio, true);
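
dm_integrity_map() validates the bio before any work is queued. The test at line 1973 ORs the start sector with the sector count and masks with sectors_per_block - 1: since sectors_per_block is a power of two, one mask rejects a bio whose start or length is misaligned. For example:

    #include <stdbool.h>
    #include <stdio.h>

    /* One mask catches misalignment of either the start or the length,
     * provided sectors_per_block is a power of two. */
    static bool misaligned(unsigned long sector, unsigned long sectors,
                           unsigned long sectors_per_block)
    {
        return (sector | sectors) & (sectors_per_block - 1);
    }

    int main(void)
    {
        printf("%d %d\n",
               misaligned(8, 8, 8),   /* 0: both aligned      */
               misaligned(8, 9, 8));  /* 1: length unaligned  */
        return 0;
    }
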
2026 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
2029 struct dm_integrity_c *ic = dio->ic;
2033 logical_sector = dio->range.logical_sector;
2034 n_sectors = dio->range.n_sectors;
2045 if (likely(dio->op == REQ_OP_WRITE))
2051 if (unlikely(dio->op == REQ_OP_READ)) {
2102 if (likely(dio->op == REQ_OP_WRITE))
2110 } else if (likely(dio->op == REQ_OP_WRITE))
2114 if (likely(dio->op == REQ_OP_WRITE)) {
2152 if (unlikely(dio->op == REQ_OP_READ))
2157 if (likely(dio->op == REQ_OP_WRITE)) {
2166 remove_range(ic, &dio->range);
2171 dio->range.logical_sector = logical_sector;
2172 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2173 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
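
__journal_read_write() finishes by re-deriving the metadata location for the untouched tail of the bio (lines 2171-2173). get_area_and_offset() works because dm-integrity interleaves fixed-size data areas with their metadata, so a logical sector splits into an area index and an in-area offset by shift and mask. A sketch under an assumed 2^15-sector area size (the real size comes from ic->sb):

    #include <stdio.h>

    #define LOG2_AREA_SECTORS 15  /* assumed interleave size for the sketch */

    /* Decompose a logical sector into (area, offset-within-area). */
    static void area_and_offset(unsigned long long sector,
                                unsigned long long *area,
                                unsigned long long *offset)
    {
        *area = sector >> LOG2_AREA_SECTORS;
        *offset = sector & ((1ULL << LOG2_AREA_SECTORS) - 1);
    }

    int main(void)
    {
        unsigned long long area, offset;

        area_and_offset(100000, &area, &offset);
        printf("area=%llu offset=%llu\n", area, offset);
        return 0;
    }
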
2180 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
2182 struct dm_integrity_c *ic = dio->ic;
2183 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2188 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
2190 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2194 INIT_WORK(&dio->work, integrity_bio_wait);
2195 queue_work(ic->offload_wq, &dio->work);
2207 dio->range.n_sectors = bio_sectors(bio);
2209 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2210 if (dio->op == REQ_OP_WRITE) {
2214 dio->range.n_sectors = min(dio->range.n_sectors,
2216 if (unlikely(!dio->range.n_sectors)) {
2222 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2240 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2254 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2261 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2263 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2264 dio->range.n_sectors = next_sector - dio->range.logical_sector;
2269 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2270 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2273 dio->range.n_sectors = i;
2277 if (unlikely(!add_new_range(ic, &dio->range, true))) {
2286 INIT_WORK(&dio->work, integrity_bio_wait);
2287 queue_work(ic->wait_wq, &dio->work);
2291 dio->range.n_sectors = ic->sectors_per_block;
2292 wait_and_add_new_range(ic, &dio->range);
2302 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2304 remove_range_unlocked(ic, &dio->range);
2309 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2313 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2315 unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2316 remove_range_unlocked(ic, &dio->range);
2334 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2335 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2336 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2339 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2348 dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2352 dio->completion = &read_comp;
2354 dio->completion = NULL;
2356 dm_bio_record(&dio->bio_details, bio);
2361 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2363 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2364 integrity_metadata(&dio->work);
2367 dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2368 dio->completion = NULL;
2380 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2383 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2384 dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2389 integrity_metadata(&dio->work);
2392 dec_in_flight(dio);
2394 INIT_WORK(&dio->work, integrity_metadata);
2395 queue_work(ic->metadata_wq, &dio->work);
2401 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2404 do_endio_flush(ic, dio);
2410 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2412 dm_integrity_map_continue(dio, false);
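
In dm_integrity_map_continue(), a read in journal mode must not cross a sector that has a newer copy in the journal, so lines 2261-2264 clamp dio->range.n_sectors at the next journaled sector returned by find_journal_node(). The clamp in isolation:

    #include <stdio.h>

    /* A read starting outside the journal stops at the next sector that
     * *is* journaled; that sector is then served from the journal copy. */
    static unsigned long clamp_to_next_journaled(unsigned long start,
                                                 unsigned long n_sectors,
                                                 unsigned long next_journaled)
    {
        if (n_sectors > next_journaled - start)
            n_sectors = next_journaled - start;
        return n_sectors;
    }

    int main(void)
    {
        /* request covers sectors 100..131, but sector 120 is journaled */
        printf("%lu\n", clamp_to_next_journaled(100, 32, 120)); /* -> 20 */
        return 0;
    }
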
2897 struct dm_integrity_io *dio;
2899 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2901 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2902 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2903 remove_range(ic, &dio->range);
2904 INIT_WORK(&dio->work, integrity_bio_wait);
2905 queue_work(ic->offload_wq, &dio->work);
2907 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2908 dio->range.n_sectors, BITMAP_OP_SET);
2921 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2923 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2924 dio->range.n_sectors, BITMAP_OP_SET);
2926 remove_range(ic, &dio->range);
2927 INIT_WORK(&dio->work, integrity_bio_wait);
2928 queue_work(ic->offload_wq, &dio->work);
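
Both bitmap-mode fragments above follow the same protocol: test may_write_bitmap with BITMAP_OP_TEST_ALL_SET, let the write through immediately if the region is already marked, otherwise set the bits (and flush) before requeueing the work. A toy bitmap with just those two operations:

    #include <stdbool.h>
    #include <stdio.h>

    /* One bit per block, with TEST_ALL_SET and SET, mirroring how the
     * bitmap-mode write path decides whether it must first mark the
     * region dirty and flush before letting the write proceed. */
    #define BITS 64
    static unsigned char bitmap[BITS / 8];

    static bool test_all_set(unsigned from, unsigned n)
    {
        for (unsigned i = from; i < from + n; i++)
            if (!(bitmap[i / 8] & (1u << (i % 8))))
                return false;
        return true;
    }

    static void set_bits(unsigned from, unsigned n)
    {
        for (unsigned i = from; i < from + n; i++)
            bitmap[i / 8] |= 1u << (i % 8);
    }

    int main(void)
    {
        if (!test_all_set(3, 4))  /* region not yet marked dirty */
            set_bits(3, 4);       /* mark it, then flush the bitmap */
        printf("now all set: %d\n", test_all_set(3, 4));
        return 0;
    }
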