Lines Matching defs:page
74 static bool __is_cp_guaranteed(struct page *page)
76 struct address_space *mapping = page->mapping;
83 if (f2fs_is_compressed_page(page))
94 is_cold_data(page))
99 static enum count_type __read_io_type(struct page *page)
101 struct address_space *mapping = page_file_mapping(page);
133 struct page *page;
138 page = bv->bv_page;
141 if (compr && f2fs_is_compressed_page(page)) {
142 f2fs_decompress_pages(bio, page, verity);
150 if (bio->bi_status || PageError(page)) {
151 ClearPageUptodate(page);
153 ClearPageError(page);
155 SetPageUptodate(page);
157 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
158 unlock_page(page);
188 static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
199 struct page *page = bv->bv_page;
202 dic = (struct decompress_io_ctx *)page_private(page);
213 if (bio->bi_status || PageError(page))
216 if (fsverity_verify_page(page)) {
217 SetPageUptodate(page);
221 ClearPageUptodate(page);
222 ClearPageError(page);
224 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
225 unlock_page(page);
349 struct page *page = bvec->bv_page;
350 enum count_type type = WB_DATA_TYPE(page);
352 if (IS_DUMMY_WRITTEN_PAGE(page)) {
353 set_page_private(page, (unsigned long)NULL);
354 ClearPagePrivate(page);
355 unlock_page(page);
356 mempool_free(page, sbi->write_io_dummy);
363 fscrypt_finalize_bounce_page(&page);
366 if (f2fs_is_compressed_page(page)) {
367 f2fs_compress_write_end_io(bio, page);
373 mapping_set_error(page->mapping, -EIO);
378 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
379 page->index != nid_of_node(page));
382 if (f2fs_in_warm_node_list(sbi, page))
383 f2fs_del_fsync_node_entry(sbi, page);
384 clear_cold_data(page);
385 end_page_writeback(page);
513 struct page *page =
516 f2fs_bug_on(sbi, !page);
518 zero_user_segment(page, 0, PAGE_SIZE);
519 SetPagePrivate(page);
520 set_page_private(page, DUMMY_WRITTEN_PAGE);
521 lock_page(page);
522 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
594 struct page *page, nid_t ino)
602 if (!inode && !page && !ino)
606 struct page *target = bvec->bv_page;
621 if (page && page == target)
651 struct inode *inode, struct page *page,
663 ret = __has_merged_page(io->bio, inode, page, ino);
681 struct inode *inode, struct page *page,
684 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
695 * Fill the locked page with data located in the block address.
696 * A caller needs to unlock the page on failure.
701 struct page *page = fio->encrypted_page ?
702 fio->encrypted_page : fio->page;
709 trace_f2fs_submit_page_bio(page, fio);
715 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
716 fio->page->index, fio, GFP_NOIO);
718 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
724 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
730 __read_io_type(page): WB_DATA_TYPE(fio->page));
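
Per the comment at 695-696, f2fs_submit_page_bio() expects an already-locked page and leaves unlocking to the caller when submission fails. A minimal, hypothetical caller sketch (the helper name and the surrounding error handling are illustrative, not taken from the file):

#include "f2fs.h"

static int submit_one_locked_page(struct f2fs_io_info *fio)
{
	int err;

	/* fio->page was locked by the calling path, as the comment requires */
	err = f2fs_submit_page_bio(fio);
	if (err)
		/* submission failed: the bio never took the page, so unlock it here */
		unlock_page(fio->page);
	return err;
}
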
774 struct page *page, enum temp_type temp)
783 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
798 struct page *page)
821 fio->page->mapping->host,
822 fio->page->index, fio) &&
823 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
829 /* page can't be merged into bio; submit the bio */
846 struct bio **bio, struct page *page)
852 f2fs_bug_on(sbi, !target && !page);
868 page, 0);
885 page, 0);
906 struct page *page = fio->encrypted_page ?
907 fio->encrypted_page : fio->page;
913 trace_f2fs_submit_page_bio(page, fio);
923 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
924 fio->page->index, fio, GFP_NOIO);
927 add_bio_entry(fio->sbi, bio, page, fio->temp);
929 if (add_ipu_page(fio, &bio, page))
934 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
936 inc_page_count(fio->sbi, WB_DATA_TYPE(page));
949 struct page *bio_page;
974 bio_page = fio->page;
984 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
997 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
1008 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
1013 trace_f2fs_submit_page_write(fio->page, fio);
1078 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1085 page->index, for_write, true);
1089 /* wait for GCed page writeback via META_MAPPING */
1092 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1096 ClearPageError(page);
1121 * update block addresses in the node page
1211 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1216 struct page *page;
1220 page = f2fs_grab_cache_page(mapping, index, for_write);
1221 if (!page)
1252 if (PageUptodate(page)) {
1253 unlock_page(page);
1254 return page;
1258 * A new dentry page is allocated but not able to be written, since its
1259 * new inode page couldn't be allocated due to -ENOSPC.
1265 zero_user_segment(page, 0, PAGE_SIZE);
1266 if (!PageUptodate(page))
1267 SetPageUptodate(page);
1268 unlock_page(page);
1269 return page;
1272 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1276 return page;
1279 f2fs_put_page(page, 1);
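
The comment at 1258-1259 together with the fragments at 1265-1279 describes the hole case in f2fs_get_read_data_page(): when the looked-up block was reserved but never written (the -ENOSPC dentry-page scenario), the page is zero-filled and marked uptodate instead of being read. A hedged reconstruction of that branch; the NEW_ADDR check, the dn/op_flags/for_write names and the put_err label are assumptions about the elided lines:

	if (dn.data_blkaddr == NEW_ADDR) {	/* assumed condition */
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
						op_flags, for_write);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);		/* drop the lock and the reference */
	return ERR_PTR(err);
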
1283 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
1286 struct page *page;
1288 page = find_get_page(mapping, index);
1289 if (page && PageUptodate(page))
1290 return page;
1291 f2fs_put_page(page, 0);
1293 page = f2fs_get_read_data_page(inode, index, 0, false);
1294 if (IS_ERR(page))
1295 return page;
1297 if (PageUptodate(page))
1298 return page;
1300 wait_on_page_locked(page);
1301 if (unlikely(!PageUptodate(page))) {
1302 f2fs_put_page(page, 0);
1305 return page;
1311 * whether this page exists or not.
1313 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1317 struct page *page;
1319 page = f2fs_get_read_data_page(inode, index, 0, for_write);
1320 if (IS_ERR(page))
1321 return page;
1324 lock_page(page);
1325 if (unlikely(page->mapping != mapping)) {
1326 f2fs_put_page(page, 1);
1329 if (unlikely(!PageUptodate(page))) {
1330 f2fs_put_page(page, 1);
1333 return page;
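
Only the lines containing "page" appear above, so the retry control flow of f2fs_get_lock_data_page() is fragmentary. A hedged reconstruction of the lock-and-revalidate pattern it implements; the repeat label and the ERR_PTR(-EIO) return are assumptions about the elided lines:

struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = f2fs_get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion, then make sure the page is still ours */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		/* truncated or migrated while we slept: retry from scratch */
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);	/* assumed error value */
	}
	return page;
}
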
1337 * Caller ensures that this data page is never allocated.
1338 * A new zero-filled data page is allocated in the page cache.
1345 struct page *f2fs_get_new_data_page(struct inode *inode,
1346 struct page *ipage, pgoff_t index, bool new_i_size)
1349 struct page *page;
1353 page = f2fs_grab_cache_page(mapping, index, true);
1354 if (!page) {
1366 f2fs_put_page(page, 1);
1372 if (PageUptodate(page))
1376 zero_user_segment(page, 0, PAGE_SIZE);
1377 if (!PageUptodate(page))
1378 SetPageUptodate(page);
1380 f2fs_put_page(page, 1);
1384 page = f2fs_get_lock_data_page(inode, index, true);
1385 if (IS_ERR(page))
1386 return page;
1392 return page;
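
Given the contract in the comment at 1337-1338 (the caller guarantees the block was never allocated and receives a locked, zero-filled, uptodate page), a hypothetical usage sketch; the helper name and the dirtying policy are illustrative only:

static int init_fresh_data_block(struct inode *inode, struct page *ipage,
					pgoff_t index)
{
	struct page *page;

	page = f2fs_get_new_data_page(inode, ipage, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* page is locked, zero-filled and uptodate at this point */
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
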
1527 /* it only supports block size == page size */
1553 /* When reading holes, we need its node page */
1679 /* preallocate blocks in batch for one dnode page */
1846 struct page *page;
1856 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1858 if (!page)
1863 f2fs_put_page(page, 1);
1875 f2fs_put_page(page, 1);
1889 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1890 if (!page)
1895 f2fs_put_page(page, 1);
1902 f2fs_put_page(page, 1);
2069 static int f2fs_read_single_page(struct inode *inode, struct page *page,
2085 block_in_file = (sector_t)page_index(page);
2092 /* just zeroing out page which is beyond EOF */
2105 * done with this page.
2116 SetPageMappedToDisk(page);
2118 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2119 !cleancache_get_page(page))) {
2120 SetPageUptodate(page);
2131 zero_user_segment(page, 0, PAGE_SIZE);
2132 if (f2fs_need_verity(inode, page->index) &&
2133 !fsverity_verify_page(page)) {
2137 if (!PageUptodate(page))
2138 SetPageUptodate(page);
2139 unlock_page(page);
2144 * This page will go to BIO. Do we need to send this
2149 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2156 is_readahead ? REQ_RAHEAD : 0, page->index,
2166 * If the page is under writeback, we need to wait for
2171 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2176 ClearPageError(page);
2184 unlock_page(page);
2216 struct page *page = cc->rpages[i];
2218 if (!page)
2220 if ((sector_t)page->index >= last_block_in_file) {
2221 zero_user_segment(page, 0, PAGE_SIZE);
2222 if (!PageUptodate(page))
2223 SetPageUptodate(page);
2224 } else if (!PageUptodate(page)) {
2227 unlock_page(page);
2229 put_page(page);
2294 struct page *page = dic->cpages[i];
2302 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2311 page->index, for_write, for_verity);
2344 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2355 ClearPageError(page);
2383 struct readahead_control *rac, struct page *page)
2428 page = readahead_page(rac);
2429 prefetchw(&page->flags);
2431 f2fs_put_page(page, 1);
2439 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2448 ret = f2fs_is_compressed_cluster(inode, page->index);
2458 f2fs_compress_ctx_add_page(&cc, page);
2465 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2471 SetPageError(page);
2472 zero_user_segment(page, 0, PAGE_SIZE);
2473 unlock_page(page);
2479 put_page(page);
2483 /* last page */
2502 static int f2fs_read_data_page(struct file *file, struct page *page)
2504 struct inode *inode = page_file_mapping(page)->host;
2507 trace_f2fs_readpage(page, DATA);
2510 unlock_page(page);
2516 ret = f2fs_read_inline_data(inode, page);
2518 ret = f2fs_mpage_readpages(inode, NULL, page);
2540 struct inode *inode = fio->page->mapping->host;
2541 struct page *mpage, *page;
2547 page = fio->compressed_page ? fio->compressed_page : fio->page;
2549 /* wait for GCed page writeback via META_MAPPING */
2556 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2642 if (is_cold_data(fio->page))
2644 if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
2655 struct inode *inode = fio->page->mapping->host;
2665 struct page *page = fio->page;
2666 struct inode *inode = page->mapping->host;
2675 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2676 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2687	/* Deadlock between page->lock and f2fs_lock_op */
2691 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2697 /* This page is already truncated */
2699 ClearPageUptodate(page);
2700 clear_cold_data(page);
2721 set_page_writeback(page);
2722 ClearPageError(page);
2730 if (PageWriteback(page))
2731 end_page_writeback(page);
2735 trace_f2fs_do_write_data_page(fio->page, IPU);
2757 set_page_writeback(page);
2758 ClearPageError(page);
2765 trace_f2fs_do_write_data_page(page, OPU);
2767 if (page->index == 0)
2777 int f2fs_write_single_data_page(struct page *page, int *submitted,
2785 struct inode *inode = page->mapping->host;
2790 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2801 .page = page,
2812 trace_f2fs_writepage(page, DATA);
2816 mapping_set_error(page->mapping, -EIO);
2830 if (page->index < end_index ||
2837 * this page does not have to be written to disk.
2840 if ((page->index >= end_index + 1) || !offset)
2843 zero_user_segment(page, offset, PAGE_SIZE);
2847 /* we should not write 0'th page having journal header */
2848 if (f2fs_is_volatile_file(inode) && (!page->index ||
2881 err = f2fs_write_inline_data(inode, page);
2910 ClearPageUptodate(page);
2911 clear_cold_data(page);
2915 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2920 unlock_page(page);
2938 redirty_page_for_writepage(wbc, page);
2947 unlock_page(page);
2951 static int f2fs_write_data_page(struct page *page,
2955 struct inode *inode = page->mapping->host;
2961 if (f2fs_is_compressed_cluster(inode, page->index)) {
2962 redirty_page_for_writepage(wbc, page);
2969 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2974	 * This function was copied from write_cache_pages from mm/page-writeback.c.
2975 * The major change is making write step of cold data page separately from
2976 * warm/hot data page.
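
Since the function is described above as an adaptation of write_cache_pages(), a simplified skeleton of that loop may help orient the fragments below. This is a hypothetical sketch: the helper name is invented, and cgroup accounting, compression, retry handling and the cold-data separation noted at 2975-2976 are all omitted.

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/sched.h>

static int write_dirty_range_sketch(struct address_space *mapping,
				    struct writeback_control *wbc,
				    pgoff_t index, pgoff_t end)
{
	struct pagevec pvec;
	int ret = 0;

	pagevec_init(&pvec);
	while (index <= end) {
		int i, nr = pagevec_lookup_range_tag(&pvec, mapping, &index,
						     end, PAGECACHE_TAG_DIRTY);
		if (!nr)
			break;
		for (i = 0; i < nr; i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->mapping != mapping || !PageDirty(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}
			/* f2fs hands the locked page to
			 * f2fs_write_single_data_page() at this point and,
			 * per the comment above, treats cold data pages
			 * separately; ->writepage() stands in for that here
			 * and unlocks the page itself. */
			ret = mapping->a_ops->writepage(page, wbc);
			if (ret)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
		if (ret)
			break;
	}
	return ret;
}
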
3047 struct page *page = pvec.pages[i];
3060 page->index)) {
3073 struct page *pagep;
3078 page->index, &fsdata);
3085 fsdata, page->index,
3104 done_index = page->index;
3106 lock_page(page);
3108 if (unlikely(page->mapping != mapping)) {
3110 unlock_page(page);
3114 if (!PageDirty(page)) {
3119 if (PageWriteback(page)) {
3121 f2fs_wait_on_page_writeback(page,
3127 if (!clear_page_dirty_for_io(page))
3132 get_page(page);
3133 f2fs_compress_ctx_add_page(&cc, page);
3137 ret = f2fs_write_single_data_page(page, &submitted,
3141 unlock_page(page);
3166 done_index = page->index + 1;
3252 /* skip writing if there is no dirty page in this inode */
3342 struct page *page, loff_t pos, unsigned len,
3345 struct inode *inode = page->mapping->host;
3346 pgoff_t index = page->index;
3348 struct page *ipage;
3356 * the block addresses when there is no need to fill the page.
3387 f2fs_do_read_inline_data(page, ipage);
3392 err = f2fs_convert_inline_page(&dn, page);
3430 struct page **pagep, void **fsdata)
3434 struct page *page = NULL;
3456 * We should check this at this moment to avoid deadlock on inode page
3457 * and #0 page. The locking rule for inline_data conversion should be:
3458 * lock_page(page #0) -> lock_page(inode_page)
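
The ordering rule stated above can be made concrete with a small sketch (hypothetical helper, not from the file): data page #0 must be locked before the inode page, which is what prevents the deadlock the comment warns about.

static int lock_zero_page_then_inode_page(struct f2fs_sb_info *sbi,
					  struct inode *inode,
					  struct page *page /* page #0 */)
{
	struct page *ipage;

	lock_page(page);				/* page #0 first */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);	/* inode page second, returned locked */
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}
	/* ... inline_data conversion work would happen here ... */
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}
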
3491 page = f2fs_pagecache_get_page(mapping, index,
3493 if (!page) {
3500 *pagep = page;
3502 err = prepare_write_begin(sbi, page, pos, len,
3509 unlock_page(page);
3511 lock_page(page);
3512 if (page->mapping != mapping) {
3513 /* The page got truncated from under us */
3514 f2fs_put_page(page, 1);
3519 f2fs_wait_on_page_writeback(page, DATA, false, true);
3521 if (len == PAGE_SIZE || PageUptodate(page))
3526 zero_user_segment(page, len, PAGE_SIZE);
3531 zero_user_segment(page, 0, PAGE_SIZE);
3532 SetPageUptodate(page);
3539 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
3543 lock_page(page);
3544 if (unlikely(page->mapping != mapping)) {
3545 f2fs_put_page(page, 1);
3548 if (unlikely(!PageUptodate(page))) {
3556 f2fs_put_page(page, 1);
3566 struct page *page, void *fsdata)
3568 struct inode *inode = page->mapping->host;
3577 if (!PageUptodate(page)) {
3581 SetPageUptodate(page);
3587 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3600 set_page_dirty(page);
3606 f2fs_put_page(page, 1);
3763 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3766 struct inode *inode = page->mapping->host;
3773 if (PageDirty(page)) {
3784 clear_cold_data(page);
3786 if (IS_ATOMIC_WRITTEN_PAGE(page))
3787 return f2fs_drop_inmem_page(inode, page);
3789 f2fs_clear_page_private(page);
3792 int f2fs_release_page(struct page *page, gfp_t wait)
3794	/* If this is a dirty page, keep PagePrivate */
3795 if (PageDirty(page))
3798 /* This is atomic written page, keep Private */
3799 if (IS_ATOMIC_WRITTEN_PAGE(page))
3802 clear_cold_data(page);
3803 f2fs_clear_page_private(page);
3807 static int f2fs_set_data_page_dirty(struct page *page)
3809 struct inode *inode = page_file_mapping(page)->host;
3811 trace_f2fs_set_page_dirty(page, DATA);
3813 if (!PageUptodate(page))
3814 SetPageUptodate(page);
3815 if (PageSwapCache(page))
3816 return __set_page_dirty_nobuffers(page);
3819 if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
3820 f2fs_register_inmem_page(inode, page);
3824 * Previously, this page has been registered, we just
3830 if (!PageDirty(page)) {
3831 __set_page_dirty_nobuffers(page);
3832 f2fs_update_dirty_page(inode, page);
3902 struct page *newpage, struct page *page, enum migrate_mode mode)
3906 bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
3908 BUG_ON(PageWriteback(page));
3910	/* migrating an atomic written page is safe with the inmem_lock held */
3918 /* one extra reference was held for atomic_write page */
3921 page, extra_count);
3931 if (cur->page == page) {
3932 cur->page = newpage;
3936 put_page(page);
3940 if (PagePrivate(page)) {
3941 f2fs_set_page_private(newpage, page_private(page));
3942 f2fs_clear_page_private(page);
3946 migrate_page_copy(newpage, page);
3948 migrate_page_states(newpage, page);
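
The two comments at 3910 and 3918 describe how atomic-write pages are handled during migration. A hedged reconstruction of the surrounding, elided control flow; the inmem_lock trylock, the use of mapping->host and the exact migrate_page_move_mapping() arguments are assumptions:

	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
	int rc, extra_count;

	BUG_ON(PageWriteback(page));

	/* taking inmem_lock makes migrating an atomic written page safe */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&F2FS_I(mapping->host)->inmem_lock))
			return -EAGAIN;
	}

	/* account for the extra reference held for the atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage, page, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&F2FS_I(mapping->host)->inmem_lock);
		return rc;
	}
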
4002 if (cur_lblock) { /* exclude the header page */
4101 if (page_no) { /* exclude the header page */
4206 void f2fs_clear_page_cache_dirty_tag(struct page *page)
4208 struct address_space *mapping = page_mapping(page);
4212 __xa_clear_mark(&mapping->i_pages, page_index(page),