Lines Matching defs:page

138 static void ext4_invalidatepage(struct page *page, unsigned int offset,
140 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
201 * don't use page cache.
995 * whole page. So we won't block on the journal in that case, which is good,
1023 * the bit before releasing a page lock and thus writeback cannot
1036 static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1041 struct inode *inode = page->mapping->host;
1051 BUG_ON(!PageLocked(page));
1056 if (!page_has_buffers(page))
1057 create_empty_buffers(page, blocksize, 0);
1058 head = page_buffers(page);
1060 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1066 if (PageUptodate(page)) {
1080 if (PageUptodate(page)) {
1087 zero_user_segments(page, to, block_end,
1092 if (PageUptodate(page)) {
1113 page_zero_new_buffers(page, from, to);
1118 err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
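A note on the block above: ext4_block_write_begin (lines 1036-1118) attaches buffers to the page if it has none, then walks them to decide which blocks overlapping [pos, pos+len) need mapping or zeroing. A minimal sketch of that walk, using only helpers visible in the matches (get_block, new-buffer zeroing, and the fscrypt decryption step are elided; the function name is made up for the sketch):

    static void walk_page_blocks(struct page *page, loff_t pos, unsigned len)
    {
            struct inode *inode = page->mapping->host;
            unsigned blocksize = inode->i_sb->s_blocksize;
            unsigned from = pos & (PAGE_SIZE - 1);
            unsigned to = from + len;
            unsigned block_start = 0;
            struct buffer_head *bh, *head;

            BUG_ON(!PageLocked(page));
            if (!page_has_buffers(page))
                    create_empty_buffers(page, blocksize, 0);
            bh = head = page_buffers(page);
            do {
                    unsigned block_end = block_start + blocksize;

                    /* Only blocks overlapping [from, to) need any work. */
                    if (block_end > from && block_start < to &&
                        !buffer_mapped(bh)) {
                            /* get_block() and new-buffer zeroing go here */
                    }
                    block_start = block_end;
                    bh = bh->b_this_page;
            } while (bh != head);
    }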
1133 struct page **pagep, void **fsdata)
1139 struct page *page;
1167 * system is thrashing due to memory pressure, or if the page
1170 * the page (if needed) without using GFP_NOFS.
1173 page = grab_cache_page_write_begin(mapping, index, flags);
1174 if (!page)
1177 * The same as page allocation, we prealloc buffer heads before
1180 if (!page_has_buffers(page))
1181 create_empty_buffers(page, inode->i_sb->s_blocksize, 0);
1183 unlock_page(page);
1188 put_page(page);
1192 lock_page(page);
1193 if (page->mapping != mapping) {
1194 /* The page got truncated from under us */
1195 unlock_page(page);
1196 put_page(page);
1200 /* In case writeback began while the page was unlocked */
1201 wait_for_stable_page(page);
1205 ret = ext4_block_write_begin(page, pos, len,
1208 ret = ext4_block_write_begin(page, pos, len,
1212 ret = __block_write_begin(page, pos, len,
1215 ret = __block_write_begin(page, pos, len, ext4_get_block);
1218 ret = ext4_walk_page_buffers(handle, page_buffers(page),
1227 unlock_page(page);
1255 put_page(page);
1258 *pagep = page;
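The write_begin matches above (lines 1133-1258) show why the mapping is rechecked: the page lock must be dropped to start a transaction (transaction start ranks above the page lock), and the page can be truncated in that window. Condensed, with the retry label and journal cleanup as they appear in ext4_write_begin:

    lock_page(page);
    if (page->mapping != mapping) {
            /* The page got truncated from under us */
            unlock_page(page);
            put_page(page);
            ext4_journal_stop(handle);
            goto retry_grab;
    }
    /* In case writeback began while the page was unlocked */
    wait_for_stable_page(page);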
1285 struct page *page, void *fsdata)
1298 return ext4_write_inline_data_end(inode, pos, len, copied, page);
1300 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1302 * it's important to update i_size while still holding page lock:
1303 * page writeout could otherwise come in and zero beyond i_size.
1310 unlock_page(page);
1311 put_page(page);
1316 * Don't mark the inode dirty under page lock. First, it unnecessarily
1317 * makes the holding time of page lock longer. Second, it forces lock
1318 * ordering of page lock and transaction start for journaling
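The write_end matches (lines 1285-1318) encode an ordering rule: i_size is updated before the page lock is dropped, so writeout cannot come in and zero beyond it, and the inode is marked dirty only after the lock is released, keeping lock ordering with transaction start. A condensed sketch, assuming the ext4_update_inode_size() helper from ext4.h:

    copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
    /* Update i_size while still holding the page lock. */
    i_size_changed = ext4_update_inode_size(inode, pos + copied);
    unlock_page(page);
    put_page(page);
    /* Don't mark the inode dirty under page lock (lock ordering). */
    if (i_size_changed)
            ret = ext4_mark_inode_dirty(handle, inode);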
1355 struct page *page,
1361 bh = head = page_buffers(page);
1366 if (!PageUptodate(page)) {
1372 zero_user(page, start, size);
1386 struct page *page, void *fsdata)
1404 return ext4_write_inline_data_end(inode, pos, len, copied, page);
1406 if (unlikely(copied < len) && !PageUptodate(page)) {
1408 ext4_journalled_zero_new_buffers(handle, page, from, to);
1411 ext4_journalled_zero_new_buffers(handle, page,
1413 ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1417 SetPageUptodate(page);
1423 unlock_page(page);
1424 put_page(page);
1505 * function is called from invalidatepage, it's
1533 pgoff_t first_page; /* The first page to write */
1534 pgoff_t next_page; /* Current page to examine */
1535 pgoff_t last_page; /* Last page to examine */
1583 struct page *page = pvec.pages[i];
1585 BUG_ON(!PageLocked(page));
1586 BUG_ON(PageWriteback(page));
1588 if (page_mapped(page))
1589 clear_page_dirty_for_io(page);
1590 block_invalidatepage(page, 0, PAGE_SIZE);
1591 ClearPageUptodate(page);
1593 unlock_page(page);
1870 static int __ext4_journalled_writepage(struct page *page,
1873 struct address_space *mapping = page->mapping;
1881 ClearPageChecked(page);
1884 BUG_ON(page->index != 0);
1886 inode_bh = ext4_journalled_write_inline_data(inode, len, page);
1890 page_bufs = page_buffers(page);
1899 * We need to release the page lock before we start the
1900 * journal, so grab a reference so the page won't disappear
1903 get_page(page);
1904 unlock_page(page);
1910 put_page(page);
1915 lock_page(page);
1916 put_page(page);
1917 if (page->mapping != mapping) {
1918 /* The page got truncated from under us */
1935 err = ext4_jbd2_inode_add_write(handle, inode, page_offset(page), len);
1945 unlock_page(page);
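__ext4_journalled_writepage (lines 1870-1945) must start a journal handle, which cannot be done under the page lock, so it pins the page, drops the lock, and revalidates after retaking it. A condensed sketch (nr_credits and the out label are placeholders for this sketch):

    /*
     * Release the page lock before starting the journal; hold a
     * reference so the page can't disappear out from under us.
     */
    get_page(page);
    unlock_page(page);

    handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, nr_credits);
    if (IS_ERR(handle)) {
            put_page(page);
            return PTR_ERR(handle);
    }

    lock_page(page);
    put_page(page);
    if (page->mapping != mapping) {
            /* The page got truncated from under us */
            ext4_journal_stop(handle);
            goto out;
    }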
1954 static void cancel_page_dirty_status(struct page *page)
1956 struct address_space *mapping = page_mapping(page);
1959 cancel_dirty_page(page);
1961 __xa_clear_mark(&mapping->i_pages, page_index(page),
1963 __xa_clear_mark(&mapping->i_pages, page_index(page),
1975 * cannot start a transaction directly because transaction start ranks above the page
1979 * - ext4_writepages after taking page lock (have journal handle)
1984 * We don't do any block allocation in this function. If we have page with
1991 * we have the first buffer_head in the page mapped via the page_mkwrite callback
1998 * We redirty the page if it has any buffer_heads that are either delayed
1999 * or unwritten.
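The comment block above (lines 1975-1999) is paraphrasing a concrete scenario that the full source comment spells out with truncate/mmap pseudo-code. A runnable userspace version of that scenario, assuming a filesystem with 1K blocks as in the original comment:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("f", O_CREAT | O_RDWR, 0644);
            if (fd < 0) { perror("open"); return 1; }
            ftruncate(fd, 1024);    /* file covers one 1K block */
            char *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
            if (a == MAP_FAILED) { perror("mmap"); return 1; }
            a[0] = 'a';             /* page_mkwrite maps the first buffer_head */
            ftruncate(fd, 4096);    /* remaining buffer_heads: dirty, unmapped */
            munmap(a, 4096);
            close(fd);
            return 0;
    }

After the second truncate, writepage sees one mapped buffer_head and three dirty-but-unmapped ones in the same page, which is exactly the case the comment says must be written out without doing block allocation.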
2009 static int ext4_writepage(struct page *page,
2016 struct inode *inode = page->mapping->host;
2021 inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
2022 unlock_page(page);
2026 if (WARN_ON(!page_has_buffers(page))) {
2027 cancel_page_dirty_status(page);
2028 unlock_page(page);
2032 trace_ext4_writepage(page);
2034 if (page->index == size >> PAGE_SHIFT &&
2041 if (!page_has_buffers(page)) {
2043 "page %lu does not have buffers attached", page->index);
2044 ClearPageDirty(page);
2045 unlock_page(page);
2049 page_bufs = page_buffers(page);
2053 * the page. But we may reach here when we do a journal commit via
2057 * Also, if there is only one buffer per page (the fs block
2058 * size == the page size), if one buffer needs block
2060 * unwritten flag, we know that the page can't be written at
2062 * Unfortunately if the block size != page size, we can't as
2069 redirty_page_for_writepage(wbc, page);
2079 unlock_page(page);
2085 if (PageChecked(page) && ext4_should_journal_data(inode))
2088 * doesn't seem much point in redirtying the page here.
2090 return __ext4_journalled_writepage(page, len);
2095 redirty_page_for_writepage(wbc, page);
2096 unlock_page(page);
2099 ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
2106 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2112 BUG_ON(page->index != mpd->first_page);
2113 clear_page_dirty_for_io(page);
2116 * against i_size changes and the page can be writeably mapped into
2117 * page tables. So an application can be growing i_size and writing
2119 * write-protects our page in page tables and the page cannot get
2120 * written to again until we release page lock. So only after
2122 * ext4_bio_write_page() to zero-out tail of the written page. We rely
2125 * after page tables are updated.
2128 if (page->index == size >> PAGE_SHIFT &&
2133 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
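mpage_submit_page (lines 2106-2133) clamps the IO length for the page containing i_size; as the comment explains, the page lock fences off racing i_size growth through mmap. The clamp, condensed from the matches:

    size = i_size_read(mpd->inode);
    if (page->index == size >> PAGE_SHIFT)
            len = size & ~PAGE_MASK;        /* partial last page */
    else
            len = PAGE_SIZE;
    err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);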
2203 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2206 * @head - the first buffer in the page
2210 * Walk through page buffers from @bh up to @head (exclusive) and either submit
2211 * the page for IO if all buffers in this page were mapped and there's no
2212 * accumulated extent of buffers to map or add buffers in the page to the
2214 * by processing the next page, 0 if it should stop adding buffers to the
2245 /* So far everything mapped? Submit the page for IO. */
2259 * mpage_process_page - update page buffers corresponding to changed extent and
2260 * may submit fully mapped page for IO
2265 * @map_bh - determines on return whether this page requires any further
2267 * Scan given page buffers corresponding to changed extent and update buffer
2270 * If the given page is not fully mapped, we update @map to the next extent in
2271 * the given page that needs mapping & return @map_bh as true.
2273 static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
2286 bh = head = page_buffers(page);
2293 * Find next buffer in the page to map.
2341 * and do extent conversion after IO is finished. If the last page is not fully
2342 * mapped, we update @map to the next extent in the last page that needs
2343 * mapping. Otherwise we submit the page for IO.
2369 struct page *page = pvec.pages[i];
2371 err = mpage_process_page(mpd, page, &lblk, &pblock,
2374 * If map_bh is true, it means the page may require further bh
2375 * mapping, or the page may have been submitted for IO.
2381 err = mpage_submit_page(mpd, page);
2387 /* Extent fully mapped and matches with page boundary. We are done. */
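The mpage_process_page*() matches (lines 2203-2387) boil down to a per-buffer decision: extend the extent being accumulated for mapping, or submit a fully mapped page directly. A simplified sketch of that walk (mpd bookkeeping elided; start_lblk is a placeholder for the page's first logical block):

    struct buffer_head *bh, *head;
    ext4_lblk_t lblk = start_lblk;

    bh = head = page_buffers(page);
    do {
            if (buffer_delay(bh) || buffer_unwritten(bh)) {
                    /* Needs mapping: add to the accumulated extent. */
            } else {
                    /* Already mapped: the page may be submitted for IO. */
            }
            lblk++;
    } while ((bh = bh->b_this_page) != head);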
2417 * the data was copied into the page cache.
2460 * the other hand we always make sure that the last touched page is fully
2558 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2576 * IO immediately. When we find a page which isn't mapped we start accumulating
2585 * case as we need to track IO to all buffers underlying a page in one io_end.
2616 struct page *page = pvec.pages[i];
2629 /* If we can't merge this page, we are done. */
2630 if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2633 lock_page(page);
2635 * If the page is no longer dirty, or its mapping no
2638 * page is already under writeback and we are not doing
2639 * a data integrity writeback, skip the page
2641 if (!PageDirty(page) ||
2642 (PageWriteback(page) &&
2644 unlikely(page->mapping != mapping)) {
2645 unlock_page(page);
2649 if (WARN_ON(!page_has_buffers(page))) {
2650 cancel_page_dirty_status(page);
2651 unlock_page(page);
2655 wait_on_page_writeback(page);
2656 BUG_ON(PageWriteback(page));
2667 if (!page_has_buffers(page)) {
2668 ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
2669 ClearPageDirty(page);
2670 unlock_page(page);
2675 mpd->first_page = page->index;
2676 mpd->next_page = page->index + 1;
2678 lblk = ((ext4_lblk_t)page->index) <<
2680 head = page_buffers(page);
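The collection loop in mpage_prepare_extent_to_map (lines 2616-2680) filters each candidate page after locking it. The skip conditions, condensed from the matches above:

    lock_page(page);
    /*
     * Skip the page if it went clean, if it is under writeback and this
     * is not a data-integrity sync, or if its mapping changed (truncate).
     */
    if (!PageDirty(page) ||
        (PageWriteback(page) &&
         (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
        unlikely(page->mapping != mapping)) {
            unlock_page(page);
            continue;
    }
    wait_on_page_writeback(page);
    BUG_ON(PageWriteback(page));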
2749 * we will soon create the block for the 1st page, so
2768 * the page and we may dirty the inode.
2829 * must always write out the whole page (makes a difference when
2831 * try to write out the rest of the page. Journalled mode is
2861 * complete or on page lock to be released. In that
2863 * submitted all the IO, released page locks we hold,
2986 struct page **pagep, void **fsdata)
2989 struct page *page;
3018 page = grab_cache_page_write_begin(mapping, index, flags);
3019 if (!page)
3022 /* In case writeback began while the page was unlocked */
3023 wait_for_stable_page(page);
3026 ret = ext4_block_write_begin(page, pos, len,
3029 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
3032 unlock_page(page);
3033 put_page(page);
3048 *pagep = page;
3056 static int ext4_da_should_update_i_disksize(struct page *page,
3060 struct inode *inode = page->mapping->host;
3064 bh = page_buffers(page);
3078 struct page *page, void *fsdata)
3087 len, copied, page, fsdata);
3094 return ext4_write_inline_data_end(inode, pos, len, copied, page);
3116 ext4_da_should_update_i_disksize(page, end))
3119 return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
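ext4_da_write_end (lines 3056-3119) bumps i_disksize immediately only when the block under the end of the write is already allocated; for a delayed or unwritten block, writeback will update i_disksize once the block gets mapped. The core of ext4_da_should_update_i_disksize, condensed (advancing bh to the buffer covering the write end is elided):

    struct buffer_head *bh = page_buffers(page);
    /* ... walk bh to the buffer covering the end of the copied range ... */
    if (!buffer_mapped(bh) || buffer_delay(bh) || buffer_unwritten(bh))
            return 0;       /* let the writeback path update i_disksize */
    return 1;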
3146 * mm/page-writeback.c, marks pages clean in preparation for
3241 static int ext4_readpage(struct file *file, struct page *page)
3244 struct inode *inode = page->mapping->host;
3246 trace_ext4_readpage(page);
3249 ret = ext4_readpage_inline(inode, page);
3252 return ext4_mpage_readpages(inode, NULL, page);
3268 static void ext4_invalidatepage(struct page *page, unsigned int offset,
3271 trace_ext4_invalidatepage(page, offset, length);
3274 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
3276 block_invalidatepage(page, offset, length);
3279 static int __ext4_journalled_invalidatepage(struct page *page,
3283 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3285 trace_ext4_journalled_invalidatepage(page, offset, length);
3291 ClearPageChecked(page);
3293 return jbd2_journal_invalidatepage(journal, page, offset, length);
3297 static void ext4_journalled_invalidatepage(struct page *page,
3301 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
3304 static int ext4_releasepage(struct page *page, gfp_t wait)
3306 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3308 trace_ext4_releasepage(page);
3311 if (PageChecked(page))
3314 return jbd2_journal_try_to_free_buffers(journal, page);
3316 return try_to_free_buffers(page);
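Lines 3304-3316 give ext4_releasepage almost in full: a Checked page carries journal-pending data and must keep its buffers, and when a journal exists jbd2 gets the final say. Reassembled, with the unmatched journal test filled in as it plausibly reads:

    static int ext4_releasepage(struct page *page, gfp_t wait)
    {
            journal_t *journal = EXT4_JOURNAL(page->mapping->host);

            trace_ext4_releasepage(page);

            /* Page has journal-pending data: refuse to free its buffers. */
            if (PageChecked(page))
                    return 0;
            if (journal)
                    return jbd2_journal_try_to_free_buffers(journal, page);
            return try_to_free_buffers(page);
    }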
3623 * much here because ->set_page_dirty is called under VFS locks. The page is
3626 * We cannot just dirty the page and leave attached buffers clean, because the
3630 * So what we do is to mark the page "pending dirty" and next time writepage
3633 static int ext4_journalled_set_page_dirty(struct page *page)
3635 SetPageChecked(page);
3636 return __set_page_dirty_nobuffers(page);
3639 static int ext4_set_page_dirty(struct page *page)
3641 WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
3642 WARN_ON_ONCE(!page_has_buffers(page));
3643 return __set_page_dirty_buffers(page);
3744 struct page *page;
3747 page = find_or_create_page(mapping, from >> PAGE_SHIFT,
3749 if (!page)
3756 if (!page_has_buffers(page))
3757 create_empty_buffers(page, blocksize, 0);
3760 bh = page_buffers(page);
3782 if (PageUptodate(page))
3792 err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
3806 zero_user(page, offset, length);
3820 unlock_page(page);
3821 put_page(page);
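The matches at lines 3744-3821 are from the block-zeroing helper used by truncate and hole punching: pin the page, make sure the target block has an uptodate buffer, then zero the byte range in place. Condensed (reading the block in and dirtying or journalling the buffer are elided):

    page = find_or_create_page(mapping, from >> PAGE_SHIFT,
                               mapping_gfp_constraint(mapping, ~__GFP_FS));
    if (!page)
            return -ENOMEM;
    if (!page_has_buffers(page))
            create_empty_buffers(page, blocksize, 0);
    /* ... locate the buffer covering @from; read it in if needed ... */
    zero_user(page, offset, length);
    /* ... mark the buffer dirty (or journal it), then release: */
    unlock_page(page);
    put_page(page);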
3929 * page cache due to hole punching or zero range. Otherwise i_disksize update
3931 * that will never happen after we truncate page cache.
3968 struct page *page;
3975 page = dax_layout_busy_page(inode->i_mapping);
3976 if (!page)
3979 error = ___wait_var_event(&page->_refcount,
3980 atomic_read(&page->_refcount) == 1,
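Lines 3968-3980 are from the DAX layout-break path: before truncating, wait for any page still pinned (e.g. by get_user_pages/DMA) to drop to a refcount of one. A sketch of the loop, assuming ext4's ext4_wait_dax_page() helper that drops the mmap lock while sleeping:

    struct ext4_inode_info *ei = EXT4_I(inode);
    struct page *page;
    int error;

    do {
            page = dax_layout_busy_page(inode->i_mapping);
            if (!page)
                    return 0;

            error = ___wait_var_event(&page->_refcount,
                            atomic_read(&page->_refcount) == 1,
                            TASK_INTERRUPTIBLE, 0, 0,
                            ext4_wait_dax_page(ei));
    } while (error == 0);
    return error;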
4032 * to end after the page that contains i_size
4068 * Prevent page faults from reinstantiating pages we have released from
4069 * page cache.
4234 /* If we zero out the tail of the page, we have to create a jinode for jbd2 */
5328 * buffers that are attached to a page straddling i_size and are undergoing
5333 struct page *page;
5341 * If the page is fully truncated, we don't need to wait for any commit
5343 * strip all buffers from the page but keep the page dirty which can then
5344 * confuse e.g. concurrent ext4_writepage() seeing dirty page without
5346 * the page remain valid. This is most beneficial for the common case of
5352 page = find_lock_page(inode->i_mapping,
5354 if (!page)
5356 ret = __ext4_journalled_invalidatepage(page, offset,
5358 unlock_page(page);
5359 put_page(page);
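Lines 5328-5359 handle data=journal truncation: a page straddling i_size can have buffers owned by the running transaction, and a commit could otherwise strip them while leaving the page dirty. Invalidating the tail past i_size avoids that; the step, condensed:

    offset = inode->i_size & (PAGE_SIZE - 1);
    page = find_lock_page(inode->i_mapping,
                          inode->i_size >> PAGE_SHIFT);
    if (!page)
            return;
    ret = __ext4_journalled_invalidatepage(page, offset,
                                           PAGE_SIZE - offset);
    unlock_page(page);
    put_page(page);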
6146 struct page *page = vmf->page;
6172 * there's no delalloc; page truncation will be checked later; the
6190 lock_page(page);
6193 if (page->mapping != mapping || page_offset(page) > size) {
6194 unlock_page(page);
6199 if (page->index == size >> PAGE_SHIFT)
6210 if (page_has_buffers(page)) {
6211 if (!ext4_walk_page_buffers(NULL, page_buffers(page),
6214 /* Wait so that we don't change the page under IO */
6215 wait_for_stable_page(page);
6220 unlock_page(page);
6241 lock_page(page);
6244 if (page->mapping != mapping || page_offset(page) > size) {
6249 if (page->index == size >> PAGE_SHIFT)
6254 err = __block_write_begin(page, 0, len, ext4_get_block);
6257 if (ext4_walk_page_buffers(handle, page_buffers(page),
6260 if (ext4_walk_page_buffers(handle, page_buffers(page),
6264 page_offset(page), len))
6268 unlock_page(page);
6281 unlock_page(page);
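ext4_page_mkwrite (lines 6146-6281) revalidates the faulting page after each lock acquisition, since the mapping can change and the fault can land beyond i_size. The revalidation, condensed from the matches (return value and label as in the source):

    lock_page(page);
    size = i_size_read(inode);
    /* Page got truncated from under us? */
    if (page->mapping != mapping || page_offset(page) > size) {
            unlock_page(page);
            ret = VM_FAULT_NOPAGE;
            goto out;
    }
    if (page->index == size >> PAGE_SHIFT)
            len = size & ~PAGE_MASK;        /* partial last page */
    else
            len = PAGE_SIZE;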