Lines matching refs:page
51 bool f2fs_is_cp_guaranteed(struct page *page)
53 struct address_space *mapping = page->mapping;
69 page_private_gcing(page))
74 static enum count_type __read_io_type(struct page *page)
76 struct address_space *mapping = page_file_mapping(page);
133 * things for each compressed page here: call f2fs_end_read_compressed_page()
136 * release the bio's reference to the decompress_io_ctx of the page's cluster.
145 struct page *page = bv->bv_page;
147 if (f2fs_is_compressed_page(page)) {
149 f2fs_end_read_compressed_page(page, true, 0,
151 f2fs_put_page_dic(page, in_task);
156 ClearPageUptodate(page);
158 SetPageUptodate(page);
159 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
160 unlock_page(page);
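The matched lines 133-160 above all belong to the read-completion path. Stitched together, with the lines that did not match the search filled in by guesswork, that per-page loop reads roughly as below. This is a sketch reconstructed from the fragments, not the verbatim source; the function name, the guard around the compressed-page call, and the bi_status test are assumptions, and it would only build inside fs/f2fs against f2fs.h.

/*
 * Reconstruction sketch: walk every page of a completed read bio, hand
 * compressed pages back to their decompression context, and mark ordinary
 * pages up to date (or not) before unlocking them.
 */
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			/* exact guard condition is not visible in the listing */
			f2fs_end_read_compressed_page(page, true, 0, in_task);
			f2fs_put_page_dic(page, in_task);
			continue;
		}

		if (bio->bi_status)	/* assumed error test */
			ClearPageUptodate(page);
		else
			SetPageUptodate(page);
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}
}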
194 struct page *page = bv->bv_page;
196 if (!f2fs_is_compressed_page(page) &&
197 !fsverity_verify_page(page)) {
232 * remaining page was read by @ctx->bio.
236 * that the bio includes at least one compressed page. The actual decompression
248 struct page *page = bv->bv_page;
250 if (f2fs_is_compressed_page(page))
251 f2fs_end_read_compressed_page(page, false, blkaddr,
338 struct page *page = bvec->bv_page;
339 enum count_type type = WB_DATA_TYPE(page, false);
341 if (page_private_dummy(page)) {
342 clear_page_private_dummy(page);
343 unlock_page(page);
344 mempool_free(page, sbi->write_io_dummy);
352 fscrypt_finalize_bounce_page(&page);
355 if (f2fs_is_compressed_page(page)) {
356 f2fs_compress_write_end_io(bio, page);
362 mapping_set_error(page->mapping, -EIO);
368 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
369 page->index != nid_of_node(page));
372 if (f2fs_in_warm_node_list(sbi, page))
373 f2fs_del_fsync_node_entry(sbi, page);
374 clear_page_private_gcing(page);
375 end_page_writeback(page);
535 struct page *page =
538 f2fs_bug_on(sbi, !page);
540 lock_page(page);
542 zero_user_segment(page, 0, PAGE_SIZE);
543 set_page_private_dummy(page);
545 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
593 struct page *page, nid_t ino)
601 if (!inode && !page && !ino)
605 struct page *target = bvec->bv_page;
620 if (page && page == target)
686 struct inode *inode, struct page *page,
698 ret = __has_merged_page(io->bio, inode, page, ino);
716 struct inode *inode, struct page *page,
719 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
730 * Fill the locked page with data located in the block address.
731 * A caller needs to unlock the page on failure.
736 struct page *page = fio->encrypted_page ?
737 fio->encrypted_page : fio->page;
746 trace_f2fs_submit_page_bio(page, fio);
751 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
752 fio->page->index, fio, GFP_NOIO);
754 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
760 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
763 __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
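Lines 730-731 state the contract of the function whose fragments follow: fill the locked page from the given block address, and the caller unlocks the page only on failure. A minimal, hypothetical caller sketch of that contract; the wrapper name is invented, and f2fs_submit_page_bio() is assumed to return 0 or a negative errno:

/*
 * Hypothetical caller: fio->page arrives locked; per the documented
 * contract, the caller unlocks it itself only when submission fails.
 */
static int submit_one_locked_page(struct f2fs_io_info *fio)
{
	int err;

	err = f2fs_submit_page_bio(fio);
	if (err)
		unlock_page(fio->page);
	return err;
}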
813 struct page *page, enum temp_type temp)
822 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
837 struct page *page)
860 fio->page->mapping->host,
861 fio->page->index, fio) &&
862 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
868 /* page can't be merged into bio; submit the bio */
885 struct bio **bio, struct page *page)
891 f2fs_bug_on(sbi, !target && !page);
907 page, 0);
924 page, 0);
945 struct page *page = fio->encrypted_page ?
946 fio->encrypted_page : fio->page;
954 trace_f2fs_submit_page_bio(page, fio);
962 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
963 fio->page->index, fio, GFP_NOIO);
965 add_bio_entry(fio->sbi, bio, page, fio->temp);
967 if (add_ipu_page(fio, &bio, page))
972 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
974 inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
1007 struct page *bio_page;
1042 bio_page = fio->page;
1053 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
1067 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
1078 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
1082 trace_f2fs_submit_page_write(fio->page, fio);
1154 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1162 page->index, for_write);
1166 /* wait for GCed page writeback via META_MAPPING */
1169 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1194 * update block addresses in the node page
1271 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1277 struct page *page;
1280 page = f2fs_grab_cache_page(mapping, index, for_write);
1281 if (!page)
1321 if (PageUptodate(page)) {
1322 unlock_page(page);
1323 return page;
1327 * A new dentry page is allocated but not able to be written, since its
1328 * new inode page couldn't be allocated due to -ENOSPC.
1334 zero_user_segment(page, 0, PAGE_SIZE);
1335 if (!PageUptodate(page))
1336 SetPageUptodate(page);
1337 unlock_page(page);
1338 return page;
1341 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1345 return page;
1348 f2fs_put_page(page, 1);
1352 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
1356 struct page *page;
1358 page = find_get_page(mapping, index);
1359 if (page && PageUptodate(page))
1360 return page;
1361 f2fs_put_page(page, 0);
1363 page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
1364 if (IS_ERR(page))
1365 return page;
1367 if (PageUptodate(page))
1368 return page;
1370 wait_on_page_locked(page);
1371 if (unlikely(!PageUptodate(page))) {
1372 f2fs_put_page(page, 0);
1375 return page;
1381 * whether this page exists or not.
1383 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1387 struct page *page;
1389 page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
1390 if (IS_ERR(page))
1391 return page;
1394 lock_page(page);
1395 if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
1396 f2fs_put_page(page, 1);
1399 return page;
1403 * Caller ensures that this data page is never allocated.
1404 * A new zero-filled data page is allocated in the page cache.
1411 struct page *f2fs_get_new_data_page(struct inode *inode,
1412 struct page *ipage, pgoff_t index, bool new_i_size)
1415 struct page *page;
1419 page = f2fs_grab_cache_page(mapping, index, true);
1420 if (!page) {
1432 f2fs_put_page(page, 1);
1438 if (PageUptodate(page))
1442 zero_user_segment(page, 0, PAGE_SIZE);
1443 if (!PageUptodate(page))
1444 SetPageUptodate(page);
1446 f2fs_put_page(page, 1);
1450 page = f2fs_get_lock_data_page(inode, index, true);
1451 if (IS_ERR(page))
1452 return page;
1458 return page;
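Lines 1403-1412 give the contract and signature of f2fs_get_new_data_page(): the caller guarantees the block is not yet allocated and receives a zero-filled page from the page cache (the page appears to come back locked, hence the f2fs_put_page(page, 1) below). A hypothetical caller under that contract; the wrapper name is invented and passing a NULL ipage is an assumption:

/*
 * Hypothetical caller: obtain a zero-filled data page at @index, use it,
 * then unlock and release it with f2fs_put_page().
 */
static int use_new_data_page(struct inode *inode, pgoff_t index)
{
	struct page *page;

	page = f2fs_get_new_data_page(inode, NULL, index, false);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* ... write into the freshly zeroed, locked page here ... */

	f2fs_put_page(page, 1);	/* unlock and drop the reference */
	return 0;
}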
1615 /* it only supports block size == page size */
1623 /* When reading holes, we need its node page */
1747 /* preallocate blocks in batch for one dnode page */
1876 struct page *page;
1886 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1888 if (!page)
1893 f2fs_put_page(page, 1);
1905 f2fs_put_page(page, 1);
1919 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1920 if (!page)
1925 f2fs_put_page(page, 1);
1932 f2fs_put_page(page, 1);
2120 static int f2fs_read_single_page(struct inode *inode, struct page *page,
2135 block_in_file = (sector_t)page_index(page);
2142 /* just zeroing out page which is beyond EOF */
2155 * done with this page.
2166 SetPageMappedToDisk(page);
2177 zero_user_segment(page, 0, PAGE_SIZE);
2178 if (f2fs_need_verity(inode, page->index) &&
2179 !fsverity_verify_page(page)) {
2183 if (!PageUptodate(page))
2184 SetPageUptodate(page);
2185 unlock_page(page);
2190 * This page will go to BIO. Do we need to send this
2195 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2202 is_readahead ? REQ_RAHEAD : 0, page->index,
2212 * If the page is under writeback, we need to wait for
2217 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2254 struct page *page = cc->rpages[i];
2256 if (!page)
2258 if ((sector_t)page->index >= last_block_in_file) {
2259 zero_user_segment(page, 0, PAGE_SIZE);
2260 if (!PageUptodate(page))
2261 SetPageUptodate(page);
2262 } else if (!PageUptodate(page)) {
2265 unlock_page(page);
2267 put_page(page);
2327 struct page *page = dic->cpages[i];
2337 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2347 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2356 page->index, for_write);
2366 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2404 struct readahead_control *rac, struct page *page)
2437 page = readahead_page(rac);
2438 prefetchw(&page->flags);
2444 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2455 page->index >> cc.log_cluster_size) {
2459 ret = f2fs_is_compressed_cluster(inode, page->index);
2464 page->index >> cc.log_cluster_size;
2474 f2fs_compress_ctx_add_page(&cc, page);
2481 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2487 zero_user_segment(page, 0, PAGE_SIZE);
2488 unlock_page(page);
2494 put_page(page);
2498 /* last page */
2516 struct page *page = &folio->page;
2517 struct inode *inode = page_file_mapping(page)->host;
2520 trace_f2fs_readpage(page, DATA);
2523 unlock_page(page);
2529 ret = f2fs_read_inline_data(inode, page);
2531 ret = f2fs_mpage_readpages(inode, NULL, page);
2553 struct inode *inode = fio->page->mapping->host;
2554 struct page *mpage, *page;
2560 page = fio->compressed_page ? fio->compressed_page : fio->page;
2566 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2667 if (page_private_gcing(fio->page))
2669 if (page_private_dummy(fio->page))
2680 struct inode *inode = fio->page->mapping->host;
2690 struct page *page = fio->page;
2691 struct inode *inode = page->mapping->host;
2704 f2fs_lookup_read_extent_cache_block(inode, page->index,
2718 /* Deadlock due to between page->lock and f2fs_lock_op */
2722 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2728 /* This page is already truncated */
2730 ClearPageUptodate(page);
2731 clear_page_private_gcing(page);
2743 /* wait for GCed page writeback via META_MAPPING */
2758 set_page_writeback(page);
2766 if (PageWriteback(page))
2767 end_page_writeback(page);
2771 trace_f2fs_do_write_data_page(fio->page, IPU);
2793 set_page_writeback(page);
2800 trace_f2fs_do_write_data_page(page, OPU);
2810 int f2fs_write_single_data_page(struct page *page, int *submitted,
2818 struct inode *inode = page->mapping->host;
2823 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2835 .page = page,
2847 trace_f2fs_writepage(page, DATA);
2851 mapping_set_error(page->mapping, -EIO);
2869 if (page->index < end_index ||
2876 * this page does not have to be written to disk.
2879 if ((page->index >= end_index + 1) || !offset)
2882 zero_user_segment(page, offset, PAGE_SIZE);
2912 err = f2fs_write_inline_data(inode, page);
2942 ClearPageUptodate(page);
2943 clear_page_private_gcing(page);
2947 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2952 unlock_page(page);
2970 redirty_page_for_writepage(wbc, page);
2979 unlock_page(page);
2983 static int f2fs_write_data_page(struct page *page,
2987 struct inode *inode = page->mapping->host;
2993 if (f2fs_is_compressed_cluster(inode, page->index)) {
2994 redirty_page_for_writepage(wbc, page);
3001 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
3006 * This function was copied from write_cache_pages from mm/page-writeback.c.
3007 * The major change is making write step of cold data page separately from
3008 * warm/hot data page.
3016 struct page *pages_local[F2FS_ONSTACK_PAGES];
3017 struct page **pages = pages_local;
3054 pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
3117 struct page *page = pages[i];
3118 struct folio *folio = page_folio(page);
3125 struct page *pagep;
3198 f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
3207 f2fs_compress_ctx_add_page(&cc, &folio->page);
3211 ret = f2fs_write_single_data_page(&folio->page,
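The comment at 3006-3008 notes that this writeback walk was copied from write_cache_pages() in mm/page-writeback.c, with cold data written separately from warm/hot data. A heavily simplified, hypothetical sketch of such a per-page loop follows; the helper name is invented, the cold-data/compression handling and cgroup accounting are omitted, error handling is elided, and the trailing arguments of f2fs_write_single_data_page() are assumed from its use at 3001:

/*
 * Hypothetical, simplified batch walk: lock each page, confirm it still
 * belongs to the mapping and is still dirty, then hand it to
 * f2fs_write_single_data_page(), which takes care of unlocking it.
 */
static void write_one_batch(struct address_space *mapping,
			    struct page **pages, int nr_pages,
			    struct writeback_control *wbc)
{
	int i, submitted;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];

		lock_page(page);
		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			continue;
		}
		if (!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			continue;
		}
		f2fs_write_single_data_page(page, &submitted, NULL, NULL,
					    wbc, FS_DATA_IO, 0, true);
	}
}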
3330 /* skip writing if there is no dirty page in this inode */
3419 struct page *page, loff_t pos, unsigned len,
3422 struct inode *inode = page->mapping->host;
3423 pgoff_t index = page->index;
3425 struct page *ipage;
3431 * If a whole page is being written and we already preallocated all the
3460 f2fs_do_read_inline_data(page, ipage);
3466 err = f2fs_convert_inline_page(&dn, page);
3505 struct page *ipage;
3533 struct page *ipage;
3559 struct page *page, loff_t pos, unsigned int len,
3562 struct inode *inode = page->mapping->host;
3564 pgoff_t index = page->index;
3602 loff_t pos, unsigned len, struct page **pagep, void **fsdata)
3606 struct page *page = NULL;
3621 * We should check this at this moment to avoid deadlock on inode page
3622 * and #0 page. The locking rule for inline_data conversion should be:
3623 * lock_page(page #0) -> lock_page(inode_page)
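The comment at 3621-3623 spells out the lock ordering needed to avoid deadlocking on the inode page during inline_data conversion: lock data page #0 first, then the inode (node) page. A schematic illustration of taking the locks in that order; the function name is invented, and both f2fs_grab_cache_page() and f2fs_get_node_page() are assumed to return their page locked:

/*
 * Schematic only: acquire data page #0 before the inode page, never the
 * reverse, and release in the opposite order.
 */
static int lock_in_documented_order(struct f2fs_sb_info *sbi,
				    struct inode *inode)
{
	struct page *page, *ipage;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, true);	/* page #0 */
	if (!page)
		return -ENOMEM;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);		/* inode page */
	if (IS_ERR(ipage)) {
		f2fs_put_page(page, 1);
		return PTR_ERR(ipage);
	}

	/* ... inline_data conversion would run with both pages locked ... */

	f2fs_put_page(ipage, 1);
	f2fs_put_page(page, 1);
	return 0;
}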
3656 page = f2fs_pagecache_get_page(mapping, index,
3658 if (!page) {
3665 *pagep = page;
3668 err = prepare_atomic_write_begin(sbi, page, pos, len,
3671 err = prepare_write_begin(sbi, page, pos, len,
3678 unlock_page(page);
3680 lock_page(page);
3681 if (page->mapping != mapping) {
3682 /* The page got truncated from under us */
3683 f2fs_put_page(page, 1);
3688 f2fs_wait_on_page_writeback(page, DATA, false, true);
3690 if (len == PAGE_SIZE || PageUptodate(page))
3695 zero_user_segment(page, len, PAGE_SIZE);
3700 zero_user_segment(page, 0, PAGE_SIZE);
3701 SetPageUptodate(page);
3710 F2FS_I(inode)->cow_inode : inode, page,
3715 lock_page(page);
3716 if (unlikely(page->mapping != mapping)) {
3717 f2fs_put_page(page, 1);
3720 if (unlikely(!PageUptodate(page))) {
3728 f2fs_put_page(page, 1);
3736 struct page *page, void *fsdata)
3738 struct inode *inode = page->mapping->host;
3747 if (!PageUptodate(page)) {
3751 SetPageUptodate(page);
3757 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3770 set_page_dirty(page);
3780 f2fs_put_page(page, 1);
3804 clear_page_private_all(&folio->page);
3813 clear_page_private_all(&folio->page);
3822 trace_f2fs_set_page_dirty(&folio->page, DATA);
3927 struct page *page;
3930 page = f2fs_get_lock_data_page(inode, blkidx, true);
3931 if (IS_ERR(page)) {
3933 ret = PTR_ERR(page);
3937 set_page_dirty(page);
3938 f2fs_put_page(page, 1);
4039 if (cur_lblock) { /* exclude the header page */
4141 void f2fs_clear_page_cache_dirty_tag(struct page *page)
4143 struct address_space *mapping = page_mapping(page);
4147 __xa_clear_mark(&mapping->i_pages, page_index(page),