Lines Matching defs:bio (drivers/md/dm-writecache.c)
229 struct bio bio;
1247 static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
1251 int rw = bio_data_dir(bio);
1255 struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
1266 flush_dcache_page(bio_page(bio));
1269 bio->bi_status = BLK_STS_IOERR;
1272 flush_dcache_page(bio_page(bio));
1280 bio_advance(bio, size);
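bio_copy_block() (lines 1247-1280 above) moves data one cache block at a time between the bio's current page and the cache's backing memory, flushing the page's data cache around the copy and then advancing the bio iterator by the amount copied. A minimal user-space sketch of the same chunk-and-advance pattern, with invented buffer and cursor names rather than the kernel bio API:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define BLOCK_SIZE 4096u

/* Hypothetical stand-in for the bio iterator: a payload plus a cursor. */
struct io_cursor {
	unsigned char *data;       /* payload being read or written */
	size_t         remaining;  /* bytes left, like bi_iter.bi_size */
};

/* Copy at most one block per iteration and advance the cursor afterwards,
 * the same shape as bio_copy_block()'s loop over bio_iter_iovec(). */
static void copy_block_at_a_time(struct io_cursor *cur, unsigned char *cache,
				 int is_read)
{
	while (cur->remaining) {
		size_t size = cur->remaining < BLOCK_SIZE ? cur->remaining : BLOCK_SIZE;

		if (is_read)
			memcpy(cur->data, cache, size);  /* cache -> caller, read hit */
		else
			memcpy(cache, cur->data, size);  /* caller -> cache, write */

		cur->data      += size;                  /* bio_advance() analogue */
		cur->remaining -= size;
		cache          += size;
	}
}

int main(void)
{
	unsigned char payload[2 * BLOCK_SIZE] = { [0] = 0xab };
	unsigned char cache[2 * BLOCK_SIZE]   = { 0 };
	struct io_cursor cur = { payload, sizeof(payload) };

	copy_block_at_a_time(&cur, cache, 0 /* write */);
	printf("cache[0] = 0x%02x\n", cache[0]);
	return 0;
}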
1289 struct bio *bio;
1292 bio = bio_list_pop(&wc->flush_list);
1293 if (!bio) {
1306 if (bio_op(bio) == REQ_OP_DISCARD) {
1307 writecache_discard(wc, bio->bi_iter.bi_sector,
1308 bio_end_sector(bio));
1310 bio_set_dev(bio, wc->dev->bdev);
1311 submit_bio_noacct(bio);
1316 bio->bi_status = BLK_STS_IOERR;
1317 bio_endio(bio);
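The flush thread (lines 1289-1317) pops bios off wc->flush_list and, under the lock, either performs the discard itself or remaps the bio to the origin device and resubmits it with submit_bio_noacct(). The list is a plain singly linked FIFO threaded through the queued items. A small user-space sketch of that FIFO shape, with made-up types standing in for struct bio and struct bio_list:

#include <stdio.h>

/* Stand-in for struct bio: the list link lives inside the item itself. */
struct fake_bio {
	int id;
	struct fake_bio *next;
};

/* Stand-in for struct bio_list: head/tail pointers, O(1) add and pop. */
struct fake_bio_list {
	struct fake_bio *head, *tail;
};

static void list_add_tail(struct fake_bio_list *l, struct fake_bio *b)
{
	b->next = NULL;
	if (l->tail)
		l->tail->next = b;
	else
		l->head = b;
	l->tail = b;
}

static struct fake_bio *list_pop(struct fake_bio_list *l)
{
	struct fake_bio *b = l->head;

	if (b) {
		l->head = b->next;
		if (!l->head)
			l->tail = NULL;
	}
	return b;
}

int main(void)
{
	struct fake_bio_list fl = { NULL, NULL };
	struct fake_bio a = { 1 }, b = { 2 };
	struct fake_bio *popped;

	list_add_tail(&fl, &a);                 /* writecache_offload_bio() analogue */
	list_add_tail(&fl, &b);
	while ((popped = list_pop(&fl)))        /* flush thread draining the list */
		printf("popped bio %d\n", popped->id);
	return 0;
}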
1324 static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
1328 bio_list_add(&wc->flush_list, bio);
1339 static void writecache_map_remap_origin(struct dm_writecache *wc, struct bio *bio,
1344 read_original_sector(wc, e) - bio->bi_iter.bi_sector;
1345 if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT)
1346 dm_accept_partial_bio(bio, next_boundary);
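writecache_map_remap_origin() (lines 1339-1346) sends a bio to the origin device, but if a cached entry starts inside the bio's range it first calls dm_accept_partial_bio() so that only the sectors before that entry are handled now and the remainder is resubmitted later. The trim point is just a distance in sectors; a hedged sketch with invented names:

#include <stdio.h>

typedef unsigned long long sector_t;

/* If a cached entry begins at entry_sector, only the sectors before it may go
 * straight to the origin device; the rest must be re-examined on the next map
 * call.  This mirrors the next_boundary value fed to dm_accept_partial_bio(). */
static sector_t sectors_accepted(sector_t bio_sector, sector_t bio_sectors,
				 sector_t entry_sector)
{
	sector_t next_boundary = entry_sector - bio_sector;  /* entry follows the bio start */

	return next_boundary < bio_sectors ? next_boundary : bio_sectors;
}

int main(void)
{
	/* 64-sector bio starting at sector 1000; a cached block starts at 1024. */
	printf("accept %llu of 64 sectors\n", sectors_accepted(1000, 64, 1024));
	return 0;
}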
1350 static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio)
1357 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1358 if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
1361 bio_copy_block(wc, bio, memory_data(wc, e));
1362 if (bio->bi_iter.bi_size)
1366 dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
1367 bio_set_dev(bio, wc->ssd_dev->bdev);
1368 bio->bi_iter.bi_sector = cache_sector(wc, e);
1374 writecache_map_remap_origin(wc, bio, e);
1375 wc->stats.reads += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
1382 static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
1389 while (bio_size < bio->bi_iter.bi_size) {
1395 write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
1421 bio_set_dev(bio, wc->ssd_dev->bdev);
1422 bio->bi_iter.bi_sector = start_cache_sec;
1423 dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
1425 wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
1426 wc->stats.writes_allocate += (bio->bi_iter.bi_size - wc->block_size) >> wc->block_size_bits;
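Several matches above and below (1375, 1425, 1426, 1445, 1473, 1474, 1527) account statistics in whole cache blocks by shifting bi_size, a byte count, right by block_size_bits (log2 of the block size). A standalone illustration of that conversion, assuming a 4 KiB block:

#include <stdio.h>

int main(void)
{
	unsigned block_size      = 4096;            /* wc->block_size analogue */
	unsigned block_size_bits = 12;              /* log2(block_size) */
	unsigned bi_size         = 6 * block_size;  /* bio payload in bytes */

	/* Whole blocks touched by this bio, as in the wc->stats.writes updates. */
	printf("blocks           = %u\n", bi_size >> block_size_bits);

	/* Blocks beyond the first, as in the reads / writes_allocate adjustments. */
	printf("blocks after 1st = %u\n", (bi_size - block_size) >> block_size_bits);
	return 0;
}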
1436 static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio)
1445 wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
1448 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
1464 (wc->metadata_only && !(bio->bi_opf & REQ_META)))
1471 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1472 writecache_map_remap_origin(wc, bio, e);
1473 wc->stats.writes_around += bio->bi_iter.bi_size >> wc->block_size_bits;
1474 wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
1481 write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
1487 bio_copy_block(wc, bio, memory_data(wc, e));
1490 writecache_bio_copy_ssd(wc, bio, e, search_used);
1493 } while (bio->bi_iter.bi_size);
1495 if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks))
1503 static enum wc_map_op writecache_map_flush(struct dm_writecache *wc, struct bio *bio)
1518 if (dm_bio_get_target_bio_nr(bio))
1521 writecache_offload_bio(wc, bio);
1525 static enum wc_map_op writecache_map_discard(struct dm_writecache *wc, struct bio *bio)
1527 wc->stats.discards += bio->bi_iter.bi_size >> wc->block_size_bits;
1533 writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
1537 writecache_offload_bio(wc, bio);
1541 static int writecache_map(struct dm_target *ti, struct bio *bio)
1546 bio->bi_private = NULL;
1550 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1551 map_op = writecache_map_flush(wc, bio);
1555 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1557 if (unlikely((((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
1560 (unsigned long long)bio->bi_iter.bi_sector,
1561 bio->bi_iter.bi_size, wc->block_size);
1566 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
1567 map_op = writecache_map_discard(wc, bio);
1571 if (bio_data_dir(bio) == READ)
1572 map_op = writecache_map_read(wc, bio);
1574 map_op = writecache_map_write(wc, bio);
1579 if (bio_op(bio) == REQ_OP_WRITE) {
1581 bio->bi_private = (void *)2;
1584 bio_set_dev(bio, wc->dev->bdev);
1590 bio->bi_private = (void *)1;
1591 atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
1597 bio_endio(bio);
1606 bio_io_error(bio);
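The check at lines 1557-1561 rejects bios that are not aligned to the cache block size: the starting sector and the sector count are ORed together and tested against the block-size-in-sectors mask, so a single branch catches both a misaligned offset and a misaligned length. A self-contained demonstration of the same trick (SECTOR_SHIFT is 9, i.e. 512-byte sectors):

#include <stdio.h>
#include <stdbool.h>

#define SECTOR_SHIFT 9u   /* 512-byte sectors, as in the block layer */

/* Nonzero low bits in either the offset or the length mean the bio does not
 * cover whole cache blocks; OR-ing them lets one mask test cover both. */
static bool io_is_block_aligned(unsigned long long sector, unsigned sectors,
				unsigned block_size)
{
	unsigned mask = (block_size >> SECTOR_SHIFT) - 1;

	return (((unsigned)sector | sectors) & mask) == 0;
}

int main(void)
{
	unsigned block_size = 4096;  /* 8 sectors per block */

	printf("%d\n", io_is_block_aligned(1024, 8, block_size));  /* 1: aligned    */
	printf("%d\n", io_is_block_aligned(1027, 8, block_size));  /* 0: bad offset */
	printf("%d\n", io_is_block_aligned(1024, 5, block_size));  /* 0: bad length */
	return 0;
}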
1616 static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
1620 if (bio->bi_private == (void *)1) {
1621 int dir = bio_data_dir(bio);
1626 } else if (bio->bi_private == (void *)2) {
1655 static void writecache_writeback_endio(struct bio *bio)
1657 struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
1693 if (unlikely(wb->bio.bi_status != BLK_STS_OK))
1694 writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
1695 "write error %d", wb->bio.bi_status);
1717 bio_put(&wb->bio);
1805 if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
1808 return bio_add_page(&wb->bio, persistent_memory_page(address),
1833 struct bio *bio;
1844 bio = bio_alloc_bioset(wc->dev->bdev, max_pages, REQ_OP_WRITE,
1846 wb = container_of(bio, struct writeback_struct, bio);
1848 bio->bi_end_io = writecache_writeback_endio;
1849 bio->bi_iter.bi_sector = read_original_sector(wc, e);
1879 bio->bi_opf |= REQ_FUA;
1881 bio->bi_status = BLK_STS_IOERR;
1882 bio_endio(bio);
1883 } else if (unlikely(!bio_sectors(bio))) {
1884 bio->bi_status = BLK_STS_OK;
1885 bio_endio(bio);
1887 submit_bio(bio);
2329 offsetof(struct writeback_struct, bio),
2332 ti->error = "Could not allocate bio set";
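Lines 1655-1657, 1846, and 2329 show the writeback bookkeeping pattern: the bio set is initialized with front_pad = offsetof(struct writeback_struct, bio), so each bio allocated from it sits at the end of a larger writeback_struct, and container_of() recovers that wrapper in the completion handler. A user-space illustration of the container_of idiom with invented struct names:

#include <stdio.h>
#include <stddef.h>

/* Minimal container_of: map a pointer to an embedded member back to the
 * structure containing it, which is what writecache_writeback_endio() does
 * with the completed bio. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_bio {
	int status;
};

struct writeback_wrapper {          /* stands in for struct writeback_struct */
	int page_count;
	struct fake_bio bio;        /* embedded bio, placed last */
};

static void endio(struct fake_bio *bio)
{
	struct writeback_wrapper *wb =
		container_of(bio, struct writeback_wrapper, bio);

	printf("completion for wrapper with %d pages, status %d\n",
	       wb->page_count, bio->status);
}

int main(void)
{
	struct writeback_wrapper wb = { .page_count = 4, .bio = { .status = 0 } };

	endio(&wb.bio);   /* the block layer would invoke this on completion */
	return 0;
}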