Lines Matching defs:bio
72 struct bio *bio;
113 struct bio *bio;
117 * bio_offset is optional, can be used if the pages in the bio
118 * can't tell us where in the file the bio should go
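The two struct hits above (lines 72 and 113-118) are fields inside the private wrappers disk-io.c builds around a bio. As a reading aid, here is an approximate reconstruction of the async-submit wrapper implied by the matches in this listing; the exact field set and order are assumptions inferred from lines 113-118, 697, 720-736 and 760, not verbatim kernel source.

/*
 * Hedged sketch: rough shape of struct async_submit_bio as suggested by
 * this listing.  extent_submit_bio_start_t (fs/btrfs/extent_io.h) and
 * struct btrfs_work (fs/btrfs/async-thread.h) are btrfs-internal types.
 */
struct async_submit_bio {
	void *private_data;		/* handed back to submit_bio_start (line 697) */
	struct bio *bio;		/* the deferred bio (lines 113, 760) */
	extent_submit_bio_start_t *submit_bio_start;
	int mirror_num;			/* passed to btrfs_map_bio (line 733) */
	/*
	 * bio_offset is optional, can be used if the pages in the bio
	 * can't tell us where in the file the bio should go
	 */
	u64 bio_offset;
	struct btrfs_work work;		/* queued onto the fs worker threads */
	blk_status_t status;		/* error from the start hook (lines 720-723) */
};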
639 static void end_workqueue_bio(struct bio *bio)
641 struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
646 end_io_wq->status = bio->bi_status;
648 if (bio_op(bio) == REQ_OP_WRITE) {
670 blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
679 end_io_wq->private = bio->bi_private;
680 end_io_wq->end_io = bio->bi_end_io;
683 end_io_wq->bio = bio;
686 bio->bi_private = end_io_wq;
687 bio->bi_end_io = end_workqueue_bio;
697 ret = async->submit_bio_start(async->private_data, async->bio,
705 * until bio submission time. All the pages in the bio are checksummed and
720 /* If an error occurred we just want to clean up the bio and move on */
722 async->bio->bi_status = async->status;
723 bio_endio(async->bio);
732 async->bio->bi_opf |= REQ_CGROUP_PUNT;
733 ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio, async->mirror_num);
735 async->bio->bi_status = ret;
736 bio_endio(async->bio);
748 blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
760 async->bio = bio;
771 if (op_is_sync(bio->bi_opf))
778 static blk_status_t btree_csum_one_bio(struct bio *bio)
785 ASSERT(!bio_flagged(bio, BIO_CLONED));
786 bio_for_each_segment_all(bvec, bio, iter_all) {
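The checksum helper at lines 778-786 walks every page segment of a bio that owns its own bvec array. Below is a minimal sketch of that iteration pattern; csum_one_page() is a hypothetical stand-in for the real extent-buffer checksumming, not a btrfs function.

#include <linux/kernel.h>
#include <linux/bio.h>

/* Hypothetical stand-in for the real per-page checksum work. */
static blk_status_t csum_one_page(struct page *page, unsigned int off,
				  unsigned int len)
{
	return BLK_STS_OK;
}

static blk_status_t csum_each_segment(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;
	blk_status_t ret = BLK_STS_OK;

	/*
	 * bio_for_each_segment_all() is only valid on a bio whose bvec
	 * array was not cloned, hence the ASSERT at line 785.
	 */
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return BLK_STS_IOERR;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ret = csum_one_page(bvec->bv_page, bvec->bv_offset,
				    bvec->bv_len);
		if (ret)
			break;
	}
	return ret;
}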
796 static blk_status_t btree_submit_bio_start(void *private_data, struct bio *bio,
803 return btree_csum_one_bio(bio);
816 blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
823 if (bio_op(bio) != REQ_OP_WRITE) {
828 ret = btrfs_bio_wq_end_io(fs_info, bio,
832 ret = btrfs_map_bio(fs_info, bio, mirror_num);
834 ret = btree_csum_one_bio(bio);
837 ret = btrfs_map_bio(fs_info, bio, mirror_num);
843 ret = btrfs_wq_submit_bio(fs_info, bio, mirror_num, 0,
852 bio->bi_status = ret;
853 bio_endio(bio);
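One detail worth noting in the metadata submit path (lines 816-853): when a helper fails after the bio has been handed to this layer, the error is not only returned, the bio is also completed in place so the original bi_end_io still runs and sees the failure. A tiny sketch of that convention:

#include <linux/bio.h>

/* Hedged sketch of the failure convention at lines 852-853. */
static void fail_bio(struct bio *bio, blk_status_t ret)
{
	bio->bi_status = ret;
	bio_endio(bio);		/* caller's end_io still fires, sees the error */
}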
1702 * called by the kthread helper functions to finally call the bio end_io
1707 struct bio *bio;
1711 bio = end_io_wq->bio;
1713 bio->bi_status = end_io_wq->status;
1714 bio->bi_private = end_io_wq->private;
1715 bio->bi_end_io = end_io_wq->end_io;
1716 bio_endio(bio);
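Taken together, lines 670-687 and 1702-1716 implement a save/redirect/restore pattern: the submitter's bi_end_io and bi_private are stashed in a wrapper (the struct whose bio field shows up at line 72), the bio is pointed at a small interrupt-context handler that only records the status and queues work (lines 639-648), and the real completion runs later from a worker. A minimal, generic sketch of the same pattern, assuming plain kmalloc and the system workqueue where btrfs uses a kmem cache and dedicated workqueues:

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative wrapper, not btrfs's struct btrfs_end_io_wq. */
struct endio_deferral {
	struct bio *bio;
	bio_end_io_t *end_io;		/* caller's original handler */
	void *private;			/* caller's original bi_private */
	blk_status_t status;
	struct work_struct work;
};

/* Worker: restore the caller's completion context, then finish the bio
 * (compare end_workqueue_fn, lines 1711-1716). */
static void deferred_endio_fn(struct work_struct *work)
{
	struct endio_deferral *d = container_of(work, struct endio_deferral, work);
	struct bio *bio = d->bio;

	bio->bi_status = d->status;
	bio->bi_private = d->private;
	bio->bi_end_io = d->end_io;
	kfree(d);
	bio_endio(bio);
}

/* Interrupt-context end_io: record the status and punt to a worker
 * (compare end_workqueue_bio, lines 639-648). */
static void deferring_endio(struct bio *bio)
{
	struct endio_deferral *d = bio->bi_private;

	d->status = bio->bi_status;
	schedule_work(&d->work);
}

/* Hook a bio before submission so its completion is deferred
 * (compare btrfs_bio_wq_end_io, lines 670-687). */
static int defer_bio_completion(struct bio *bio)
{
	struct endio_deferral *d = kmalloc(sizeof(*d), GFP_NOFS);

	if (!d)
		return -ENOMEM;
	d->bio = bio;
	d->end_io = bio->bi_end_io;
	d->private = bio->bi_private;
	INIT_WORK(&d->work, deferred_endio_fn);
	bio->bi_private = d;
	bio->bi_end_io = deferring_endio;
	return 0;
}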
3498 static void btrfs_end_super_write(struct bio *bio)
3500 struct btrfs_device *device = bio->bi_private;
3505 bio_for_each_segment_all(bvec, bio, iter_all) {
3508 if (bio->bi_status) {
3512 blk_status_to_errno(bio->bi_status));
3525 bio_put(bio);
3614 struct bio *bio;
3649 bio = bio_alloc(GFP_NOFS, 1);
3650 bio_set_dev(bio, device->bdev);
3651 bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3652 bio->bi_private = device;
3653 bio->bi_end_io = btrfs_end_super_write;
3654 __bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
3662 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO;
3664 bio->bi_opf |= REQ_FUA;
3666 btrfsic_submit_bio(bio);
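Lines 3614-3666 build a one-page write for each on-disk copy of the superblock. A condensed sketch of that construction, assuming a plain submit_bio() in place of btrfsic_submit_bio() and with the btrfs device/page lookup and copy-index logic stripped out:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>

/*
 * Hedged sketch of the per-copy superblock write at lines 3649-3666.
 * 'bdev', 'page', 'bytenr', 'len' and the end_io/private pair stand in
 * for the btrfs-specific plumbing (device->bdev, BTRFS_SUPER_INFO_SIZE,
 * btrfs_end_super_write, the btrfs_device pointer).
 */
static void submit_super_copy(struct block_device *bdev, struct page *page,
			      u64 bytenr, unsigned int len, bool use_fua,
			      bio_end_io_t *end_io, void *private)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOFS, 1);			/* one segment is enough */
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
	bio->bi_private = private;
	bio->bi_end_io = end_io;
	__bio_add_page(bio, page, len, offset_in_page(bytenr));

	/* Synchronous, high-priority metadata write ... */
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO;
	/* ... forced through the disk write cache when barriers are in use. */
	if (use_fua)
		bio->bi_opf |= REQ_FUA;

	submit_bio(bio);
}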
3733 static void btrfs_end_empty_barrier(struct bio *bio)
3735 complete(bio->bi_private);
3744 struct bio *bio = device->flush_bio;
3748 * When a disk has write caching disabled, we skip submission of a bio
3752 * superblock that were not properly flushed. So don't skip the bio
3762 bio_reset(bio);
3763 bio->bi_end_io = btrfs_end_empty_barrier;
3764 bio_set_dev(bio, device->bdev);
3765 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3767 bio->bi_private = &device->flush_wait;
3769 btrfsic_submit_bio(bio);
3774 * If the flush bio has been submitted by write_dev_flush, wait for it.
3778 struct bio *bio = device->flush_bio;
3786 return bio->bi_status;
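The last three groups (3733-3786) show how btrfs forces a device's volatile write cache out before committing a superblock: an empty bio flagged REQ_PREFLUSH is submitted with a completion as its private data, and the waiter later harvests bi_status. Below is a hedged sketch that collapses write_dev_flush() and wait_dev_flush() into one synchronous helper; btrfs keeps the two halves separate (and reuses device->flush_bio via bio_reset) so all devices can flush in parallel.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/gfp.h>

static void flush_done(struct bio *bio)
{
	complete(bio->bi_private);		/* compare line 3735 */
}

static blk_status_t flush_device_cache(struct block_device *bdev)
{
	struct bio *bio;
	blk_status_t status;
	DECLARE_COMPLETION_ONSTACK(done);

	bio = bio_alloc(GFP_NOFS, 0);		/* empty barrier: no data pages */
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio->bi_end_io = flush_done;
	bio->bi_private = &done;

	submit_bio(bio);			/* btrfs uses btrfsic_submit_bio() */
	wait_for_completion(&done);		/* wait_dev_flush() does this half */

	status = bio->bi_status;
	bio_put(bio);
	return status;
}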