Lines Matching defs:bio (fs/f2fs/data.c)

16 #include <linux/bio.h>
111 struct bio *bio;
118 * bio that belong to a compressed cluster yet.
125 * Update and unlock a bio's pages, and free the bio.
127 * This marks pages up-to-date only if there was no error in the bio (I/O error,
128 * decryption error, or verity error), as indicated by bio->bi_status.
132 * cluster basis rather than a per-bio basis. Instead, we only must do two
136 * release the bio's reference to the decompress_io_ctx of the page's cluster.
138 static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
142 struct bio_post_read_ctx *ctx = bio->bi_private;
144 bio_for_each_segment_all(bv, bio, iter_all) {
155 if (bio->bi_status)
165 bio_put(bio);
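
For orientation, a minimal sketch of the pattern behind f2fs_finish_read_bio() (lines 138-165): walk every segment, fold bio->bi_status into the page state, unlock each page, then drop the bio. The real function additionally releases the decompress_io_ctx references for compressed-cluster pages and distinguishes task vs. interrupt context; the function name below is illustrative.

static void finish_read_bio_sketch(struct bio *bio)
{
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                struct page *page = bv->bv_page;

                /* any I/O, decryption, or verity error is folded into bi_status */
                if (bio->bi_status)
                        ClearPageUptodate(page);
                else
                        SetPageUptodate(page);
                unlock_page(page);
        }
        bio_put(bio);
}
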
172 struct bio *bio = ctx->bio;
183 bio->bi_private = NULL;
186 * Verify the bio's pages with fs-verity. Exclude compressed pages,
193 bio_for_each_segment_all(bv, bio, iter_all) {
198 bio->bi_status = BLK_STS_IOERR;
203 fsverity_verify_bio(bio);
206 f2fs_finish_read_bio(bio, true);
210 * If the bio's data needs to be verified with fs-verity, then enqueue the
211 * verity work for the bio. Otherwise finish the bio now.
218 static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
220 struct bio_post_read_ctx *ctx = bio->bi_private;
226 f2fs_finish_read_bio(bio, in_task);
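
The decision at lines 218-226 reduces to: if fs-verity still has to run on this bio, punt to the fsverity workqueue; otherwise finish it right away. A sketch, assuming the STEP_VERITY flag and the f2fs_verify_bio() work function defined earlier in this file:

static void verify_and_finish_sketch(struct bio *bio, bool in_task)
{
        struct bio_post_read_ctx *ctx = bio->bi_private;

        if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
                /* verity runs from a workqueue, never in bio completion context */
                INIT_WORK(&ctx->work, f2fs_verify_bio);
                fsverity_enqueue_verify_work(&ctx->work);
        } else {
                f2fs_finish_read_bio(bio, in_task);
        }
}
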
232 * remaining page was read by @ctx->bio.
234 * Note that a bio may span clusters (even a mix of compressed and uncompressed
236 * that the bio includes at least one compressed page. The actual decompression
237 * is done on a per-cluster basis, not a per-bio basis.
247 bio_for_each_segment_all(bv, ctx->bio, iter_all) {
262 * Optimization: if all the bio's pages are compressed, then scheduling
263 * the per-bio verity work is unnecessary, as verity will be fully
274 struct bio *bio = ctx->bio;
276 if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
277 f2fs_finish_read_bio(bio, true);
284 f2fs_verify_and_finish_bio(bio, true);
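
Lines 274-284 are the tail of the post-read worker; the overall ordering is decrypt, then decompress, then verity. A condensed sketch of that pipeline (f2fs_handle_step_decompress() is the per-cluster helper described by lines 232-247; treat the exact name and arguments as assumptions):

static void post_read_work_sketch(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;

        /* 1) decrypt in place; a failure poisons the whole bio */
        if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
                f2fs_finish_read_bio(bio, true);
                return;
        }

        /* 2) kick per-cluster decompression for any compressed pages */
        if (ctx->enabled_steps & STEP_DECOMPRESS)
                f2fs_handle_step_decompress(ctx, true);

        /* 3) verity if needed, then unlock the pages and free the bio */
        f2fs_verify_and_finish_bio(bio, true);
}
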
287 static void f2fs_read_end_io(struct bio *bio)
289 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
293 iostat_update_and_unbind_ctx(bio);
294 ctx = bio->bi_private;
297 bio->bi_status = BLK_STS_IOERR;
299 if (bio->bi_status) {
300 f2fs_finish_read_bio(bio, intask);
322 f2fs_verify_and_finish_bio(bio, intask);
325 static void f2fs_write_end_io(struct bio *bio)
331 iostat_update_and_unbind_ctx(bio);
332 sbi = bio->bi_private;
335 bio->bi_status = BLK_STS_IOERR;
337 bio_for_each_segment_all(bvec, bio, iter_all) {
346 if (unlikely(bio->bi_status))
356 f2fs_compress_write_end_io(bio, page);
361 if (unlikely(bio->bi_status)) {
381 bio_put(bio);
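
The write-side completion at lines 325-381 is the mirror image: each page's writeback bit is cleared and errors are propagated through the mapping rather than the page Uptodate flag. Stripped to the basic pattern (the real handler also deals with compressed writes, quota/meta pages, and f2fs-internal accounting):

static void write_end_io_sketch(struct bio *bio)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;

                /* record the error against the mapping so fsync can see it */
                if (unlikely(bio->bi_status))
                        mapping_set_error(page->mapping, -EIO);
                end_page_writeback(page);
        }
        bio_put(bio);
}
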
385 static void f2fs_zone_write_end_io(struct bio *bio)
387 struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;
389 bio->bi_private = io->bi_private;
391 f2fs_write_end_io(bio);
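
Lines 385-391 show the classic bi_end_io chaining pattern used for zoned devices: the submit path stashes the original ->bi_private and installs a wrapper completion (the submit-side half appears later at lines 1086-1091), and the wrapper undoes that before delegating to the normal write completion. The field names below (bi_private, zone_wait) follow struct f2fs_bio_info but are quoted from memory, so treat them as assumptions:

static void zone_write_end_io_sketch(struct bio *bio)
{
        struct f2fs_bio_info *io = bio->bi_private;

        bio->bi_private = io->bi_private;  /* restore what submit time saved */
        complete(&io->zone_wait);          /* let the submitter cross the zone boundary */
        f2fs_write_end_io(bio);            /* then run the normal completion */
}
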
461 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
466 struct bio *bio;
469 bio = bio_alloc_bioset(bdev, npages,
472 bio->bi_iter.bi_sector = sector;
474 bio->bi_end_io = f2fs_read_end_io;
475 bio->bi_private = NULL;
477 bio->bi_end_io = f2fs_write_end_io;
478 bio->bi_private = sbi;
480 iostat_alloc_and_bind_ctx(sbi, bio, NULL);
483 wbc_init_bio(fio->io_wbc, bio);
485 return bio;
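
__bio_alloc() (lines 461-485) is the one place where data bios are created; reads and writes get different completion handlers and different ->bi_private owners. A reduced sketch using plain bio_alloc() in place of the f2fs_bioset-backed bio_alloc_bioset(), and skipping the wbc and iostat wiring:

static struct bio *bio_alloc_sketch(struct block_device *bdev, sector_t sector,
                                    int npages, bool is_read,
                                    struct f2fs_sb_info *sbi)
{
        struct bio *bio;

        bio = bio_alloc(bdev, npages,
                        is_read ? REQ_OP_READ : REQ_OP_WRITE, GFP_NOIO);
        bio->bi_iter.bi_sector = sector;
        if (is_read) {
                bio->bi_end_io = f2fs_read_end_io;
                bio->bi_private = NULL;         /* post-read ctx attached later */
        } else {
                bio->bi_end_io = f2fs_write_end_io;
                bio->bi_private = sbi;
        }
        return bio;
}
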
488 static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
498 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
501 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
510 return !bio_has_crypt_ctx(bio);
512 return fscrypt_mergeable_bio(bio, inode, next_idx);
515 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
518 WARN_ON_ONCE(!is_read_io(bio_op(bio)));
519 trace_f2fs_submit_read_bio(sbi->sb, type, bio);
521 iostat_update_submit_ctx(bio, type);
522 submit_bio(bio);
525 static void f2fs_align_write_bio(struct f2fs_sb_info *sbi, struct bio *bio)
528 (bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS) % F2FS_IO_SIZE(sbi);
545 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
550 static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
553 WARN_ON_ONCE(is_read_io(bio_op(bio)));
560 f2fs_align_write_bio(sbi, bio);
570 trace_f2fs_submit_write_bio(sbi->sb, type, bio);
571 iostat_update_submit_ctx(bio, type);
572 submit_bio(bio);
579 if (!io->bio)
583 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
584 f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
586 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
587 f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
589 io->bio = NULL;
592 static bool __has_merged_page(struct bio *bio, struct inode *inode,
598 if (!bio)
604 bio_for_each_segment_all(bvec, bio, iter_all) {
646 sbi->write_io[i][j].bio = NULL;
670 if (!io->bio)
676 io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
678 io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
698 ret = __has_merged_page(io->bio, inode, page, ino);
735 struct bio *bio;
748 /* Allocate a new bio */
749 bio = __bio_alloc(fio, 1);
751 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
754 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
755 bio_put(bio);
765 if (is_read_io(bio_op(bio)))
766 f2fs_submit_read_bio(fio->sbi, bio, fio->type);
768 f2fs_submit_write_bio(fio->sbi, bio, fio->type);
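
Lines 735-768 (f2fs_submit_page_bio) show the non-merged path: one page, one bio, submitted immediately in whichever direction the f2fs_io_info asks for. Roughly, with block-address validation and encrypted-page selection omitted:

static int submit_page_bio_sketch(struct f2fs_io_info *fio, struct page *page)
{
        struct bio *bio = __bio_alloc(fio, 1);

        f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                               fio->page->index, fio, GFP_NOIO);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;
        }

        if (is_read_io(bio_op(bio)))
                f2fs_submit_read_bio(fio->sbi, bio, fio->type);
        else
                f2fs_submit_write_bio(fio->sbi, bio, fio->type);
        return 0;
}
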
772 static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
776 bio->bi_iter.bi_size >= sbi->max_io_bytes))
780 return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
791 static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
799 F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
801 unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
803 /* IOs in bio is aligned and left space of vectors is not enough */
807 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
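
page_is_mergeable()/io_is_mergeable() (lines 772-807) decide whether the next block can ride in the currently cached bio. The essential test is physical adjacency on the same block device, with an optional size cap; a sketch:

static bool blocks_mergeable_sketch(struct f2fs_sb_info *sbi, struct bio *bio,
                                    block_t last_blkaddr, block_t cur_blkaddr)
{
        /* optional cap: stop growing the bio beyond sbi->max_io_bytes */
        if (unlikely(sbi->max_io_bytes &&
                     bio->bi_iter.bi_size >= sbi->max_io_bytes))
                return false;
        /* must be the immediately following block ... */
        if (last_blkaddr + 1 != cur_blkaddr)
                return false;
        /* ... and must map to the same device (multi-device f2fs) */
        return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}
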
812 static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
819 be->bio = bio;
820 bio_get(bio);
822 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
836 static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
851 if (be->bio != *bio)
856 f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
859 if (f2fs_crypt_mergeable_bio(*bio,
862 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
868 /* page can't be merged into bio; submit the bio */
870 f2fs_submit_write_bio(sbi, *bio, DATA);
877 bio_put(*bio);
878 *bio = NULL;
885 struct bio **bio, struct page *page)
889 struct bio *target = bio ? *bio : NULL;
904 found = (target == be->bio);
906 found = __has_merged_page(be->bio, NULL,
921 found = (target == be->bio);
923 found = __has_merged_page(be->bio, NULL,
926 target = be->bio;
936 if (bio && *bio) {
937 bio_put(*bio);
938 *bio = NULL;
944 struct bio *bio = *fio->bio;
956 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
958 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
960 if (!bio) {
961 bio = __bio_alloc(fio, BIO_MAX_VECS);
962 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
965 add_bio_entry(fio->sbi, bio, page, fio->temp);
967 if (add_ipu_page(fio, &bio, page))
977 *fio->bio = bio;
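
f2fs_merge_page_bio() (lines 944-977) and add_ipu_page() (lines 836-878) implement "merge if you can, otherwise flush and start over" for in-place-update writes; the cached bio is finally flushed at the end of writeback (lines 3283-3285). The control flow, with the bio_entry list, its locking, and the crypt-context check stripped out:

static int merge_page_bio_sketch(struct f2fs_io_info *fio, struct page *page)
{
        struct bio *bio = *fio->bio;

        /* flush the cached bio when the new block isn't contiguous with it */
        if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
                                      fio->new_blkaddr)) {
                f2fs_submit_write_bio(fio->sbi, bio, fio->type);
                bio = NULL;
        }
alloc_new:
        if (!bio) {
                bio = __bio_alloc(fio, BIO_MAX_VECS);
                f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                                       fio->page->index, fio, GFP_NOIO);
        }
        /* a full cached bio gets flushed and the add is retried */
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                f2fs_submit_write_bio(fio->sbi, bio, fio->type);
                bio = NULL;
                goto alloc_new;
        }

        *fio->last_block = fio->new_blkaddr;
        *fio->bio = bio;
        return 0;
}
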
1050 if (io->bio &&
1051 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
1053 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
1057 if (io->bio == NULL) {
1066 io->bio = __bio_alloc(fio, BIO_MAX_VECS);
1067 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
1072 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
1086 bio_get(io->bio);
1088 io->bi_private = io->bio->bi_private;
1089 io->bio->bi_private = io;
1090 io->bio->bi_end_io = f2fs_zone_write_end_io;
1091 io->zone_pending_bio = io->bio;
1105 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
1110 struct bio *bio;
1116 bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
1119 if (!bio)
1121 bio->bi_iter.bi_sector = sector;
1122 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
1123 bio->bi_end_io = f2fs_read_end_io;
1141 ctx->bio = bio;
1146 bio->bi_private = ctx;
1148 iostat_alloc_and_bind_ctx(sbi, bio, ctx);
1150 return bio;
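
Rough shape of f2fs_grab_read_bio() (lines 1105-1150): allocate the read bio, attach the fscrypt context, and hang a bio_post_read_ctx off ->bi_private so the end_io path knows which post-read steps (decrypt/decompress/verity) to run. Step selection, the remaining ctx fields, and iostat binding are elided; parameter names are illustrative.

static struct bio *grab_read_bio_sketch(struct inode *inode,
                                        struct block_device *bdev,
                                        sector_t sector, unsigned int nr_pages,
                                        pgoff_t first_idx,
                                        unsigned int post_read_steps)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio_post_read_ctx *ctx = NULL;
        struct bio *bio;

        bio = bio_alloc(bdev, bio_max_segs(nr_pages), REQ_OP_READ, GFP_NOFS);
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = f2fs_read_end_io;
        f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

        if (post_read_steps) {
                ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
                if (!ctx) {
                        bio_put(bio);
                        return ERR_PTR(-ENOMEM);
                }
                ctx->bio = bio;
                ctx->sbi = sbi;
                ctx->enabled_steps = post_read_steps;
        }
        bio->bi_private = ctx;

        return bio;
}
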
1159 struct bio *bio;
1161 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1163 if (IS_ERR(bio))
1164 return PTR_ERR(bio);
1169 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1170 iostat_update_and_unbind_ctx(bio);
1171 if (bio->bi_private)
1172 mempool_free(bio->bi_private, bio_post_read_ctx_pool);
1173 bio_put(bio);
1178 f2fs_submit_read_bio(sbi, bio, DATA);
2123 struct bio **bio_ret,
2127 struct bio *bio = *bio_ret;
2193 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2195 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2197 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2198 bio = NULL;
2200 if (bio == NULL) {
2201 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2204 if (IS_ERR(bio)) {
2205 ret = PTR_ERR(bio);
2206 bio = NULL;
2217 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2225 *bio_ret = bio;
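
The readahead paths (lines 2123-2225 for plain pages and 2230-2394 for compressed clusters) carry one bio across many pages via *bio_ret: the caller keeps submitting pages, and the bio is only flushed when the next block can't be merged or the bio fills up. A sketch of that carry-a-bio pattern, mirroring the kernel's submit_and_realloc idiom; the trailing f2fs_grab_read_bio() arguments are simplified from memory:

static int read_one_page_sketch(struct inode *inode, struct page *page,
                                block_t block_nr, unsigned int nr_pages,
                                struct bio **bio_ret,
                                sector_t *last_block_in_bio)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct bio *bio = *bio_ret;

        /* flush the carried bio when the new block can't be merged
         * (the real code also checks fscrypt context compatibility) */
        if (bio && !page_is_mergeable(sbi, bio, *last_block_in_bio, block_nr)) {
submit_and_realloc:
                f2fs_submit_read_bio(sbi, bio, DATA);
                bio = NULL;
        }
        if (!bio) {
                bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, 0,
                                         page->index, false);
                if (IS_ERR(bio)) {
                        *bio_ret = NULL;
                        return PTR_ERR(bio);
                }
        }
        /* a full bio gets submitted and a fresh one allocated */
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
                goto submit_and_realloc;

        *last_block_in_bio = block_nr;
        *bio_ret = bio;
        return 0;
}
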
2230 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2237 struct bio *bio = *bio_ret;
2345 if (bio && (!page_is_mergeable(sbi, bio,
2347 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2349 f2fs_submit_read_bio(sbi, bio, DATA);
2350 bio = NULL;
2353 if (!bio) {
2354 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2357 if (IS_ERR(bio)) {
2358 ret = PTR_ERR(bio);
2366 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2369 ctx = get_post_read_ctx(bio);
2381 *bio_ret = bio;
2394 *bio_ret = bio;
2406 struct bio *bio = NULL;
2445 ret = f2fs_read_multi_pages(&cc, &bio,
2482 &bio, &last_block_in_bio, rac);
2500 ret = f2fs_read_multi_pages(&cc, &bio,
2509 if (bio)
2510 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2811 struct bio **bio,
2843 .bio = bio,
2959 if (bio && *bio)
2960 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
3020 struct bio *bio = NULL;
3212 &submitted, &bio, &last_block,
3283 /* submit cached bio of IPU write */
3284 if (bio)
3285 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);