18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0 28c2ecf20Sopenharmony_ci/* 38c2ecf20Sopenharmony_ci * fs/mpage.c 48c2ecf20Sopenharmony_ci * 58c2ecf20Sopenharmony_ci * Copyright (C) 2002, Linus Torvalds. 68c2ecf20Sopenharmony_ci * 78c2ecf20Sopenharmony_ci * Contains functions related to preparing and submitting BIOs which contain 88c2ecf20Sopenharmony_ci * multiple pagecache pages. 98c2ecf20Sopenharmony_ci * 108c2ecf20Sopenharmony_ci * 15May2002 Andrew Morton 118c2ecf20Sopenharmony_ci * Initial version 128c2ecf20Sopenharmony_ci * 27Jun2002 axboe@suse.de 138c2ecf20Sopenharmony_ci * use bio_add_page() to build bio's just the right size 148c2ecf20Sopenharmony_ci */ 158c2ecf20Sopenharmony_ci 168c2ecf20Sopenharmony_ci#include <linux/kernel.h> 178c2ecf20Sopenharmony_ci#include <linux/export.h> 188c2ecf20Sopenharmony_ci#include <linux/mm.h> 198c2ecf20Sopenharmony_ci#include <linux/kdev_t.h> 208c2ecf20Sopenharmony_ci#include <linux/gfp.h> 218c2ecf20Sopenharmony_ci#include <linux/bio.h> 228c2ecf20Sopenharmony_ci#include <linux/fs.h> 238c2ecf20Sopenharmony_ci#include <linux/buffer_head.h> 248c2ecf20Sopenharmony_ci#include <linux/blkdev.h> 258c2ecf20Sopenharmony_ci#include <linux/highmem.h> 268c2ecf20Sopenharmony_ci#include <linux/prefetch.h> 278c2ecf20Sopenharmony_ci#include <linux/mpage.h> 288c2ecf20Sopenharmony_ci#include <linux/mm_inline.h> 298c2ecf20Sopenharmony_ci#include <linux/writeback.h> 308c2ecf20Sopenharmony_ci#include <linux/backing-dev.h> 318c2ecf20Sopenharmony_ci#include <linux/pagevec.h> 328c2ecf20Sopenharmony_ci#include <linux/cleancache.h> 338c2ecf20Sopenharmony_ci#include "internal.h" 348c2ecf20Sopenharmony_ci 358c2ecf20Sopenharmony_ci/* 368c2ecf20Sopenharmony_ci * I/O completion handler for multipage BIOs. 378c2ecf20Sopenharmony_ci * 388c2ecf20Sopenharmony_ci * The mpage code never puts partial pages into a BIO (except for end-of-file). 
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Propagate the bio's completion status to each page it carries:
	 * page_endio() unlocks the page (reads) or ends writeback (writes)
	 * and records any error.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		page_endio(page, bio_op(bio),
			   blk_status_to_errno(bio->bi_status));
	}

	bio_put(bio);
}

/*
 * Attach the shared completion handler, set the operation/flags, clamp the
 * bio so it does not run past end-of-device, and submit it.  Always returns
 * NULL so callers can reset their cached bio pointer in one statement.
 */
static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	bio_set_op_attrs(bio, op, op_flags);
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

/*
 * Allocate a bio targeting @first_sector on @bdev with room for @nr_vecs
 * pages.  When the caller is itself a memory allocator (PF_MEMALLOC), retry
 * with progressively smaller vector counts instead of failing outright.
 * Returns NULL if no bio could be allocated.
 */
static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	/* Restrict the given (page cache) mask for slab allocations */
	gfp_flags &= GFP_KERNEL;
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = first_sector;
	}
	return bio;
}

/*
 * support function for mpage_readahead.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, i_blocksize(inode), 0);
	}
	head = page_buffers(page);
	page_bh = head;
	/* Walk the page's circular buffer list to slot @page_block. */
	do {
		if (block == page_block) {
			/* Copy the mapping state from the fs-supplied bh. */
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/*
 * State carried across successive do_mpage_readpage() calls so that
 * consecutive pages can share one bio and reuse the most recent
 * get_block() result.
 */
struct mpage_readpage_args {
	struct bio *bio;		/* bio under assembly, or NULL */
	struct page *page;		/* page currently being read */
	unsigned int nr_pages;		/* pages remaining in this batch */
	bool is_readahead;		/* use REQ_RAHEAD + readahead gfp */
	sector_t last_block_in_bio;	/* last block added to *bio */
	struct buffer_head map_bh;	/* cached last get_block() result */
	unsigned long first_logical_block; /* file block that map_bh maps */
	get_block_t *get_block;		/* filesystem's block mapper */
};

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructs largest possible bios, submits them for IO if the
 * blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct page *page = args->page;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	int op_flags;
	unsigned nblocks;
	unsigned relative_block;
	gfp_t gfp;

	if (args->is_readahead) {
		op_flags = REQ_RAHEAD;
		gfp = readahead_gfp_mask(page->mapping);
	} else {
		op_flags = 0;
		gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
	}

	/* Pages that already carry buffers take the buffer_head slow path. */
	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + args->nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
			block_in_file > args->first_logical_block &&
			block_in_file < (args->first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - args->first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				/* Cached extent exhausted; force a new get_block. */
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
				relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (args->get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			args->first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			/* A hole: remember where the holes begin in the page. */
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		/* Zero the tail of the page that has no backing blocks. */
		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			/* Whole page is a hole: it is all zeroes, done. */
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		/* cleancache satisfied the read without touching the disk. */
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);

alloc_new:
	if (args->bio == NULL) {
		if (first_hole == blocks_per_page) {
			/* Fully-mapped page: try the driver's rw_page path. */
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
					min_t(int, args->nr_pages,
					      BIO_MAX_PAGES),
					gfp);
		if (args->bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(args->bio, page, length, 0) < length) {
		/* bio is full: submit it and retry with a fresh one. */
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
		goto alloc_new;
	}

	relative_block = block_in_file - args->first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
	else
		args->last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return args->bio;

confused:
	/* Fall back to the buffer_head-based single-page read. */
	if (args->bio)
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
	if (!PageUptodate(page))
		block_read_full_page(page, args->get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readahead - start reads against pages
 * @rac: Describes which pages to read.
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do it to allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
{
	struct page *page;
	struct mpage_readpage_args args = {
		.get_block = get_block,
		.is_readahead = true,
	};

	/* Feed every page in the readahead window to the shared worker. */
	while ((page = readahead_page(rac))) {
		prefetchw(&page->flags);
		args.page = page;
		args.nr_pages = readahead_count(rac);
		args.bio = do_mpage_readpage(&args);
		put_page(page);
	}
	/* Flush any bio still under assembly after the last page. */
	if (args.bio)
		mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
}
EXPORT_SYMBOL(mpage_readahead);

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	/* Single-page read: one-shot args, no readahead batching. */
	struct mpage_readpage_args args = {
		.page = page,
		.nr_pages = 1,
		.get_block = get_block,
	};

	args.bio = do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit(REQ_OP_READ, 0, args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

/*
 * Per-call state for mpage writeback, threaded through __mpage_writepage()
 * via write_cache_pages()'s @data argument.
 */
struct mpage_data {
	struct bio *bio;		/* bio under assembly, or NULL */
	sector_t last_block_in_bio;	/* last block added to *bio */
	get_block_t *get_block;		/* filesystem's block mapper */
	unsigned use_writepage;		/* fall back to a_ops->writepage? */
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	/* Clear the dirty bit on the first @first_unmapped buffers only. */
	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * we cannot drop the bh if the page is not uptodate or a concurrent
	 * readpage would fail to serialize with the bh and it would read from
	 * disk before we reach the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page);
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	/* ~0U exceeds any possible buffer count, so all buffers are cleaned. */
	clean_buffers(page, ~0U);
}

/*
 * Write one page to disk as a single bio when its blocks are contiguous,
 * falling back to the filesystem's ->writepage() (or -EAGAIN) otherwise.
 * Called via write_cache_pages(); @data is the struct mpage_data.
 */
static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	int op_flags = wbc_to_write_flags(wbc);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				/* Blocks must be contiguous to share a bio. */
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readahead then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			/* Non-contiguous block: cannot build one bio. */
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

alloc_new:
	if (bio == NULL) {
		if (first_unmapped == blocks_per_page) {
			/* Fully-mapped page: try the driver's rw_page path. */
			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
								page, wbc))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;

		wbc_init_bio(wbc, bio);
		bio->bi_write_hint = inode->i_write_hint;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		/* bio is full: submit it and retry with a fresh one. */
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		goto alloc_new;
	}

	clean_buffers(page, first_unmapped);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		/*
		 * Boundary block mapped, or page has a hole at the end:
		 * push out the accumulated I/O now.
		 */
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
6738c2ecf20Sopenharmony_ci return ret; 6748c2ecf20Sopenharmony_ci} 6758c2ecf20Sopenharmony_ci 6768c2ecf20Sopenharmony_ci/** 6778c2ecf20Sopenharmony_ci * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them 6788c2ecf20Sopenharmony_ci * @mapping: address space structure to write 6798c2ecf20Sopenharmony_ci * @wbc: subtract the number of written pages from *@wbc->nr_to_write 6808c2ecf20Sopenharmony_ci * @get_block: the filesystem's block mapper function. 6818c2ecf20Sopenharmony_ci * If this is NULL then use a_ops->writepage. Otherwise, go 6828c2ecf20Sopenharmony_ci * direct-to-BIO. 6838c2ecf20Sopenharmony_ci * 6848c2ecf20Sopenharmony_ci * This is a library function, which implements the writepages() 6858c2ecf20Sopenharmony_ci * address_space_operation. 6868c2ecf20Sopenharmony_ci * 6878c2ecf20Sopenharmony_ci * If a page is already under I/O, generic_writepages() skips it, even 6888c2ecf20Sopenharmony_ci * if it's dirty. This is desirable behaviour for memory-cleaning writeback, 6898c2ecf20Sopenharmony_ci * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() 6908c2ecf20Sopenharmony_ci * and msync() need to guarantee that all the data which was dirty at the time 6918c2ecf20Sopenharmony_ci * the call was made get new I/O started against them. If wbc->sync_mode is 6928c2ecf20Sopenharmony_ci * WB_SYNC_ALL then we were called for data integrity and we must wait for 6938c2ecf20Sopenharmony_ci * existing IO to complete. 
6948c2ecf20Sopenharmony_ci */ 6958c2ecf20Sopenharmony_ciint 6968c2ecf20Sopenharmony_cimpage_writepages(struct address_space *mapping, 6978c2ecf20Sopenharmony_ci struct writeback_control *wbc, get_block_t get_block) 6988c2ecf20Sopenharmony_ci{ 6998c2ecf20Sopenharmony_ci struct blk_plug plug; 7008c2ecf20Sopenharmony_ci int ret; 7018c2ecf20Sopenharmony_ci 7028c2ecf20Sopenharmony_ci blk_start_plug(&plug); 7038c2ecf20Sopenharmony_ci 7048c2ecf20Sopenharmony_ci if (!get_block) 7058c2ecf20Sopenharmony_ci ret = generic_writepages(mapping, wbc); 7068c2ecf20Sopenharmony_ci else { 7078c2ecf20Sopenharmony_ci struct mpage_data mpd = { 7088c2ecf20Sopenharmony_ci .bio = NULL, 7098c2ecf20Sopenharmony_ci .last_block_in_bio = 0, 7108c2ecf20Sopenharmony_ci .get_block = get_block, 7118c2ecf20Sopenharmony_ci .use_writepage = 1, 7128c2ecf20Sopenharmony_ci }; 7138c2ecf20Sopenharmony_ci 7148c2ecf20Sopenharmony_ci ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); 7158c2ecf20Sopenharmony_ci if (mpd.bio) { 7168c2ecf20Sopenharmony_ci int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? 
7178c2ecf20Sopenharmony_ci REQ_SYNC : 0); 7188c2ecf20Sopenharmony_ci mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); 7198c2ecf20Sopenharmony_ci } 7208c2ecf20Sopenharmony_ci } 7218c2ecf20Sopenharmony_ci blk_finish_plug(&plug); 7228c2ecf20Sopenharmony_ci return ret; 7238c2ecf20Sopenharmony_ci} 7248c2ecf20Sopenharmony_ciEXPORT_SYMBOL(mpage_writepages); 7258c2ecf20Sopenharmony_ci 7268c2ecf20Sopenharmony_ciint mpage_writepage(struct page *page, get_block_t get_block, 7278c2ecf20Sopenharmony_ci struct writeback_control *wbc) 7288c2ecf20Sopenharmony_ci{ 7298c2ecf20Sopenharmony_ci struct mpage_data mpd = { 7308c2ecf20Sopenharmony_ci .bio = NULL, 7318c2ecf20Sopenharmony_ci .last_block_in_bio = 0, 7328c2ecf20Sopenharmony_ci .get_block = get_block, 7338c2ecf20Sopenharmony_ci .use_writepage = 0, 7348c2ecf20Sopenharmony_ci }; 7358c2ecf20Sopenharmony_ci int ret = __mpage_writepage(page, wbc, &mpd); 7368c2ecf20Sopenharmony_ci if (mpd.bio) { 7378c2ecf20Sopenharmony_ci int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? 7388c2ecf20Sopenharmony_ci REQ_SYNC : 0); 7398c2ecf20Sopenharmony_ci mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); 7408c2ecf20Sopenharmony_ci } 7418c2ecf20Sopenharmony_ci return ret; 7428c2ecf20Sopenharmony_ci} 7438c2ecf20Sopenharmony_ciEXPORT_SYMBOL(mpage_writepage); 744