Lines Matching refs:fio
430 static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
436 if (fio->op != REQ_OP_WRITE)
438 if (fio->type == DATA)
439 io_flag = fio->sbi->data_io_flag;
440 else if (fio->type == NODE)
441 io_flag = fio->sbi->node_io_flag;
454 if (BIT(fio->temp) & meta_flag)
456 if (BIT(fio->temp) & fua_flag)
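
The matches above show f2fs_io_flags() translating the per-temperature bits of
sbi->data_io_flag / sbi->node_io_flag into REQ_META / REQ_FUA for writes. A
minimal user-space sketch of the mask arithmetic, assuming mainline's layout
(the low NR_TEMP_TYPE bits select REQ_FUA per temperature, the next
NR_TEMP_TYPE bits select REQ_META); illustrative only, not kernel code:

        /* cc -o io_flags_demo io_flags_demo.c && ./io_flags_demo */
        #include <stdio.h>

        #define NR_TEMP_TYPE    3               /* HOT, WARM, COLD */
        #define BIT(n)          (1U << (n))

        enum temp_type { HOT, WARM, COLD };

        int main(void)
        {
                /*
                 * data/node io flag bit layout (per f2fs documentation):
                 *      REQ_META     |      REQ_FUA      |
                 *    5 |    4 |   3 |    2 |    1 |   0 |
                 * Cold | Warm | Hot | Cold | Warm | Hot |
                 */
                unsigned int io_flag = 0x24;    /* META + FUA for Cold */
                unsigned int temp_mask = BIT(NR_TEMP_TYPE) - 1;
                unsigned int fua_flag = io_flag & temp_mask;
                unsigned int meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

                for (int temp = HOT; temp <= COLD; temp++)
                        printf("temp %d: META=%d FUA=%d\n", temp,
                               !!(BIT(temp) & meta_flag),
                               !!(BIT(temp) & fua_flag));
                return 0;
        }
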
461 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
463 struct f2fs_sb_info *sbi = fio->sbi;
468 bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
470 fio->op | fio->op_flags | f2fs_io_flags(fio),
473 if (is_read_io(fio->op)) {
482 if (fio->io_wbc)
483 wbc_init_bio(fio->io_wbc, bio);
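
Context for the __bio_alloc() matches: the helper resolves fio->new_blkaddr to
a target block device and sector (multi-device f2fs can span several bdevs),
allocates from f2fs's private bioset with the op flags computed above, and
wires the completion handler by direction. A hedged reconstruction of the
elided lines; the bi_end_io/bi_private wiring is an assumption from mainline,
not shown in the matches:

        static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
        {
                struct f2fs_sb_info *sbi = fio->sbi;
                struct block_device *bdev;
                sector_t sector;
                struct bio *bio;

                bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
                bio = bio_alloc_bioset(bdev, npages,
                                fio->op | fio->op_flags | f2fs_io_flags(fio),
                                GFP_NOIO, &f2fs_bioset);
                bio->bi_iter.bi_sector = sector;
                if (is_read_io(fio->op)) {
                        bio->bi_end_io = f2fs_read_end_io;    /* assumption */
                        bio->bi_private = NULL;
                } else {
                        bio->bi_end_io = f2fs_write_end_io;   /* assumption */
                        bio->bi_private = sbi;
                }
                if (fio->io_wbc)
                        wbc_init_bio(fio->io_wbc, bio);

                return bio;
        }
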
490 const struct f2fs_io_info *fio,
497 if (!fio || !fio->encrypted_page)
503 const struct f2fs_io_info *fio)
509 if (fio && fio->encrypted_page)
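
Both fscrypt helpers special-case fio->encrypted_page: garbage collection
moves already-encrypted blocks as raw data, so such bios must not get (or
merge with) an fscrypt context. A hedged sketch of f2fs_crypt_mergeable_bio()
consistent with the matches at 503-509; the bio_has_crypt_ctx() branch is the
part the matches elide:

        static bool f2fs_crypt_mergeable_bio(struct bio *bio,
                        const struct inode *inode, pgoff_t next_idx,
                        const struct f2fs_io_info *fio)
        {
                /* GC writes raw ciphertext; the bio must carry no crypt ctx */
                if (fio && fio->encrypted_page)
                        return !bio_has_crypt_ctx(bio);

                return fscrypt_mergeable_bio(bio, inode, next_idx);
        }
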
577 struct f2fs_io_info *fio = &io->fio;
582 if (is_read_io(fio->op)) {
583 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
584 f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
586 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
587 f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
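
__submit_merged_bio() is where a staged per-(type, temp) bio finally goes
down, with separate tracepoints and submit paths for reads and writes. A
hedged reconstruction around the matched lines; the NULL check and the
io->bio reset are assumptions from mainline:

        static void __submit_merged_bio(struct f2fs_bio_info *io)
        {
                struct f2fs_io_info *fio = &io->fio;

                if (!io->bio)           /* nothing staged (assumption) */
                        return;

                if (is_read_io(fio->op)) {
                        trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
                        f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
                } else {
                        trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
                        f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
                }
                io->bio = NULL;         /* slot is empty again (assumption) */
        }
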
675 io->fio.type = META_FLUSH;
733 int f2fs_submit_page_bio(struct f2fs_io_info *fio)
736 struct page *page = fio->encrypted_page ?
737 fio->encrypted_page : fio->page;
739 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
740 fio->is_por ? META_POR : (__is_meta_io(fio) ?
742 f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
746 trace_f2fs_submit_page_bio(page, fio);
749 bio = __bio_alloc(fio, 1);
751 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
752 fio->page->index, fio, GFP_NOIO);
759 if (fio->io_wbc && !is_read_io(fio->op))
760 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
762 inc_page_count(fio->sbi, is_read_io(fio->op) ?
763 __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
766 f2fs_submit_read_bio(fio->sbi, bio, fio->type);
768 f2fs_submit_write_bio(fio->sbi, bio, fio->type);
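
f2fs_submit_page_bio() is the unmerged path: validate the destination block
address, build a one-page bio, attach the fscrypt context, account the page,
and submit immediately. A hedged fill-in of the gaps between the matched
lines; the error codes and the bio_add_page() failure path are assumptions
from mainline:

        int f2fs_submit_page_bio(struct f2fs_io_info *fio)
        {
                struct bio *bio;
                struct page *page = fio->encrypted_page ?
                                        fio->encrypted_page : fio->page;

                if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
                                fio->is_por ? META_POR : (__is_meta_io(fio) ?
                                META_GENERIC : DATA_GENERIC_ENHANCE))) {
                        f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
                        return -EFSCORRUPTED;           /* assumption */
                }

                trace_f2fs_submit_page_bio(page, fio);

                bio = __bio_alloc(fio, 1);
                f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
                                fio->page->index, fio, GFP_NOIO);

                if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                        bio_put(bio);                   /* assumption */
                        return -EFAULT;                 /* assumption */
                }

                if (fio->io_wbc && !is_read_io(fio->op))
                        wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

                inc_page_count(fio->sbi, is_read_io(fio->op) ?
                                __read_io_type(page) : WB_DATA_TYPE(fio->page, false));

                if (is_read_io(fio->op))
                        f2fs_submit_read_bio(fio->sbi, bio, fio->type);
                else
                        f2fs_submit_write_bio(fio->sbi, bio, fio->type);
                return 0;
        }
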
784 struct f2fs_io_info *fio)
786 if (io->fio.op != fio->op)
788 return io->fio.op_flags == fio->op_flags;
793 struct f2fs_io_info *fio,
797 if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
809 return io_type_is_mergeable(io, fio);
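
The two helpers split the merge decision: io_type_is_mergeable() compares the
op/op_flags of the staged bio with the incoming fio, and io_is_mergeable()
additionally requires physically consecutive block addresses (plus, under
F2FS_IO_ALIGNED, enough free vectors to keep I/O-size alignment). A tiny
standalone demo of that rule with simplified stand-in types; merge_ok() is
hypothetical, for illustration only:

        #include <stdbool.h>
        #include <stdio.h>

        typedef unsigned int block_t;

        struct staged_io {                      /* stand-in for f2fs_bio_info */
                unsigned int op, op_flags;
                block_t last_block_in_bio;
        };

        /* condensed merge rule: same op + op_flags, and the new block
         * extends the staged bio contiguously */
        static bool merge_ok(const struct staged_io *io, unsigned int op,
                             unsigned int op_flags, block_t blkaddr)
        {
                if (io->op != op || io->op_flags != op_flags)
                        return false;
                return io->last_block_in_bio + 1 == blkaddr;
        }

        int main(void)
        {
                struct staged_io io = { .op = 1, .op_flags = 0,
                                        .last_block_in_bio = 1000 };

                printf("%d\n", merge_ok(&io, 1, 0, 1001)); /* 1: contiguous */
                printf("%d\n", merge_ok(&io, 1, 0, 1003)); /* 0: gap */
                printf("%d\n", merge_ok(&io, 1, 8, 1001)); /* 0: flags differ */
                return 0;
        }
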
836 static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
839 struct f2fs_sb_info *sbi = fio->sbi;
857 *fio->last_block,
858 fio->new_blkaddr));
860 fio->page->mapping->host,
861 fio->page->index, fio) &&
942 int f2fs_merge_page_bio(struct f2fs_io_info *fio)
944 struct bio *bio = *fio->bio;
945 struct page *page = fio->encrypted_page ?
946 fio->encrypted_page : fio->page;
948 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
949 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) {
950 f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
954 trace_f2fs_submit_page_bio(page, fio);
956 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
957 fio->new_blkaddr))
958 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
961 bio = __bio_alloc(fio, BIO_MAX_VECS);
962 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
963 fio->page->index, fio, GFP_NOIO);
965 add_bio_entry(fio->sbi, bio, page, fio->temp);
967 if (add_ipu_page(fio, &bio, page))
971 if (fio->io_wbc)
972 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
974 inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
976 *fio->last_block = fio->new_blkaddr;
977 *fio->bio = bio;
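
Unlike the per-(type, temp) staging in f2fs_submit_page_write() below, this
in-place-update path parks the open bio with the caller through fio->bio and
fio->last_block, so consecutive IPU writes keep appending across calls. A
hedged sketch of the caller-side pattern; local names here are illustrative,
and the flush call matches the one at line 958:

        struct bio *bio = NULL;         /* carried across pages by the caller */
        sector_t last_block = 0;

        fio.bio = &bio;
        fio.last_block = &last_block;

        /* one call per dirty page: merges into 'bio' while block addresses
         * stay contiguous, otherwise submits and opens a fresh bio */
        err = f2fs_merge_page_bio(&fio);

        /* at the end of the batch, flush whatever is still staged */
        f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
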
1002 void f2fs_submit_page_write(struct f2fs_io_info *fio)
1004 struct f2fs_sb_info *sbi = fio->sbi;
1005 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
1006 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
1010 f2fs_bug_on(sbi, is_read_io(fio->op));
1023 if (fio->in_list) {
1029 fio = list_first_entry(&io->io_list,
1031 list_del(&fio->list);
1035 verify_fio_blkaddr(fio);
1037 if (fio->encrypted_page)
1038 bio_page = fio->encrypted_page;
1039 else if (fio->compressed_page)
1040 bio_page = fio->compressed_page;
1042 bio_page = fio->page;
1045 fio->submitted = 1;
1047 type = WB_DATA_TYPE(bio_page, fio->compressed_page);
1051 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
1052 fio->new_blkaddr) ||
1053 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
1054 bio_page->index, fio)))
1059 (fio->type == DATA || fio->type == NODE) &&
1060 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
1062 fio->compressed_page));
1063 fio->retry = 1;
1066 io->bio = __bio_alloc(fio, BIO_MAX_VECS);
1067 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
1068 bio_page->index, fio, GFP_NOIO);
1069 io->fio = *fio;
1077 if (fio->io_wbc)
1078 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
1080 io->last_block_in_bio = fio->new_blkaddr;
1082 trace_f2fs_submit_page_write(fio->page, fio);
1085 is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
1096 if (fio->in_list)
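
f2fs_submit_page_write() is the merged write path: pick the sbi->write_io[]
slot by page type and temperature, optionally dequeue the next fio from
io->io_list (the in_list case), then append to the staged bio or flush it and
open a new one. Condensed control flow, hedged; locking on io->io_rwsem, the
I/O-alignment bailout at 1059-1063, and the end-of-zone handling at 1085 are
elided:

        if (io->bio &&
            (!io_is_mergeable(sbi, io->bio, io, fio,
                              io->last_block_in_bio, fio->new_blkaddr) ||
             !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
                                       bio_page->index, fio)))
                __submit_merged_bio(io);        /* cannot merge: flush first */
alloc_new:
        if (io->bio == NULL) {
                io->bio = __bio_alloc(fio, BIO_MAX_VECS);
                f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
                                bio_page->index, fio, GFP_NOIO);
                io->fio = *fio;         /* remembered for later merge checks */
        }

        if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
                __submit_merged_bio(io);
                goto alloc_new;         /* retry with a fresh bio */
        }

        io->last_block_in_bio = fio->new_blkaddr;
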
2551 int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2553 struct inode *inode = fio->page->mapping->host;
2560 page = fio->compressed_page ? fio->compressed_page : fio->page;
2566 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2568 if (IS_ERR(fio->encrypted_page)) {
2570 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2571 f2fs_flush_merged_writes(fio->sbi);
2576 return PTR_ERR(fio->encrypted_page);
2579 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2583 page_address(fio->encrypted_page), PAGE_SIZE);
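
Two details worth noting in the f2fs_encrypt_one_page() matches: on -ENOMEM
the function flushes merged writes and retries the bounce-page allocation,
and after encrypting it copies the ciphertext into any cached meta page for
old_blkaddr so GC reads stay coherent. A hedged fragment of the retry loop;
the __GFP_NOFAIL escalation is an assumption from mainline:

        retry_encrypt:
                fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
                                                PAGE_SIZE, 0, gfp_flags);
                if (IS_ERR(fio->encrypted_page)) {
                        /* flush pending IOs, wait, then try harder */
                        if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
                                f2fs_flush_merged_writes(fio->sbi);
                                memalloc_retry_wait(GFP_NOFS);
                                gfp_flags |= __GFP_NOFAIL;  /* assumption */
                                goto retry_encrypt;
                        }
                        return PTR_ERR(fio->encrypted_page);
                }

                /* keep the cached meta copy of the old block in sync */
                mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
                if (mpage) {
                        if (PageUptodate(mpage))
                                memcpy(page_address(mpage),
                                       page_address(fio->encrypted_page),
                                       PAGE_SIZE);
                        f2fs_put_page(mpage, 1);
                }
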
2590 struct f2fs_io_info *fio)
2610 if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
2611 !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
2618 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2619 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
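
check_inplace_update_policy() is a chain of mount-option gates; the matched
conditions are the async-rewrite case (2610-2611) and the checkpoint-disabled
case (2618-2619). A hedged sketch of the surrounding chain; the exact set of
IS_F2FS_IPU_* gates is an assumption from mainline:

        if (IS_F2FS_IPU_FORCE(sbi))
                return true;
        if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
                return true;
        if (IS_F2FS_IPU_UTIL(sbi) &&
            utilization(sbi) > SM_I(sbi)->min_ipu_util)
                return true;

        /* IPU when rewriting async (non-sync) pages of plaintext files */
        if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
            !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
                return true;

        /* with checkpointing disabled, rewrite non-checkpointed data in
         * place to bound space consumption */
        if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
                     !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
                return true;

        return false;
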
2625 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2638 return check_inplace_update_policy(inode, fio);
2641 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2648 if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2666 if (fio) {
2667 if (page_private_gcing(fio->page))
2669 if (page_private_dummy(fio->page))
2672 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2678 static inline bool need_inplace_update(struct f2fs_io_info *fio)
2680 struct inode *inode = fio->page->mapping->host;
2682 if (f2fs_should_update_outplace(inode, fio))
2685 return f2fs_should_update_inplace(inode, fio);
2688 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2690 struct page *page = fio->page;
2703 if (need_inplace_update(fio) &&
2705 &fio->old_blkaddr)) {
2706 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2708 f2fs_handle_error(fio->sbi,
2714 fio->need_lock = LOCK_DONE;
2719 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2726 fio->old_blkaddr = dn.data_blkaddr;
2729 if (fio->old_blkaddr == NULL_ADDR) {
2735 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2736 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2739 f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
2744 if (fio->post_read)
2745 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2752 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2753 need_inplace_update(fio))) {
2754 err = f2fs_encrypt_one_page(fio);
2760 if (fio->need_lock == LOCK_REQ)
2761 f2fs_unlock_op(fio->sbi);
2762 err = f2fs_inplace_write_data(fio);
2765 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2771 trace_f2fs_do_write_data_page(fio->page, IPU);
2775 if (fio->need_lock == LOCK_RETRY) {
2776 if (!f2fs_trylock_op(fio->sbi)) {
2780 fio->need_lock = LOCK_REQ;
2783 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
2787 fio->version = ni.version;
2789 err = f2fs_encrypt_one_page(fio);
2795 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2796 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2799 f2fs_outplace_write_data(&dn, fio);
2805 if (fio->need_lock == LOCK_REQ)
2806 f2fs_unlock_op(fio->sbi);
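
f2fs_do_write_data_page() picks IPU vs OPU per page, and the matches trace
both arms. Condensed decision flow, hedged; the extent-cache fast path at
2703-2705 is reconstructed with the helper name used by recent mainline,
which may differ by version:

        if (need_inplace_update(fio) &&
            f2fs_lookup_read_extent_cache_block(inode, page->index,
                                                &fio->old_blkaddr)) {
                /* fast IPU: old block found in the extent cache, so no
                 * dnode lookup and no fs-op lock are needed */
                fio->need_lock = LOCK_DONE;             /* line 2714 */
                ...
        }

        /* slow path: look up the dnode to learn the on-disk address */
        if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
                return -EAGAIN;                         /* caller retries */
        err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
        ...
        fio->old_blkaddr = dn.data_blkaddr;             /* line 2726 */

        if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
            need_inplace_update(fio)) {
                err = f2fs_encrypt_one_page(fio);
                ...
                err = f2fs_inplace_write_data(fio);     /* rewrite in place */
        } else {
                err = f2fs_encrypt_one_page(fio);
                ...
                f2fs_outplace_write_data(&dn, fio);     /* LFS-style new block */
        }
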
2828 struct f2fs_io_info fio = {
2894 fio.need_lock = LOCK_DONE;
2895 err = f2fs_do_write_data_page(&fio);
2918 err = f2fs_do_write_data_page(&fio);
2921 fio.need_lock = LOCK_REQ;
2922 err = f2fs_do_write_data_page(&fio);
2965 *submitted = fio.submitted;
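
The fio at 2828 is built on the stack per page in f2fs_write_single_data_page().
The matches at 2894-2922 show the lock escalation: in mainline, directory and
quota writes run with LOCK_DONE (2894), while regular data starts at
LOCK_RETRY and escalates to LOCK_REQ when f2fs_do_write_data_page() returns
-EAGAIN (2918-2922). A hedged sketch of the initializer; the exact field set
is an assumption from mainline, and only the fields visible elsewhere in this
listing are load-bearing:

        struct f2fs_io_info fio = {
                .sbi = sbi,
                .ino = inode->i_ino,
                .type = DATA,
                .op = REQ_OP_WRITE,
                .op_flags = wbc_to_write_flags(wbc),
                .old_blkaddr = NULL_ADDR,
                .page = page,
                .encrypted_page = NULL,
                .compr_blocks = compr_blocks,
                .need_lock = LOCK_RETRY,
                .post_read = f2fs_post_read_required(inode),
                .io_type = io_type,
                .io_wbc = wbc,
                .bio = bio,                     /* for the IPU merge path */
                .last_block = last_block,
        };
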