Lines Matching refs:page
82 * Returns whether the page has dirty or writeback buffers. If all the buffers
86 void buffer_check_dirty_writeback(struct page *page,
93 BUG_ON(!PageLocked(page));
95 if (!page_has_buffers(page))
98 if (PageWriteback(page))
101 head = page_buffers(page);
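The lines above belong to a routine that reports whether any buffer attached to a locked page is dirty or under I/O. A minimal sketch of that pattern, assuming kernel context with the pre-folio struct page API (the sketch_ name is illustrative, not the kernel's function):

static void sketch_check_dirty_writeback(struct page *page,
                                         bool *dirty, bool *writeback)
{
        struct buffer_head *head, *bh;

        *dirty = false;
        *writeback = false;

        BUG_ON(!PageLocked(page));      /* caller must hold the page lock */
        if (!page_has_buffers(page))
                return;

        if (PageWriteback(page))
                *writeback = true;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_locked(bh))  /* a locked buffer is assumed to be under I/O */
                        *writeback = true;
                if (buffer_dirty(bh))
                        *dirty = true;
                bh = bh->b_this_page;   /* b_this_page forms a circular list */
        } while (bh != head);
}

If every buffer turns out clean and unlocked, the page's own dirty bit may simply be stale.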
169 buffer_io_error(bh, ", lost sync page write");
180 * But it's the page lock which protects the buffers. To get around this,
185 * may be quite high. This code could TryLock the page, and if that
197 struct page *page;
202 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
203 if (!page)
207 if (!page_has_buffers(page))
209 head = page_buffers(page);
222 /* we might be here because some of the buffers on this page are
239 put_page(page);
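These matches come from the slow-path lookup that maps a block number to a buffer_head already cached in the block device's page cache. A hedged sketch of that lookup, assuming the caller has computed the page index from the block number and blocksize (sketch_find_block is an illustrative name):

static struct buffer_head *
sketch_find_block(struct address_space *bd_mapping, pgoff_t index,
                  sector_t block)
{
        struct page *page;
        struct buffer_head *head, *bh, *ret = NULL;

        /* FGP_ACCESSED marks the page referenced so reclaim favours keeping it */
        page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
        if (!page)
                return NULL;

        /* private_lock keeps the buffer ring stable while it is scanned */
        spin_lock(&bd_mapping->private_lock);
        if (!page_has_buffers(page))
                goto out;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_mapped(bh) && bh->b_blocknr == block) {
                        get_bh(bh);     /* caller releases with brelse() */
                        ret = bh;
                        break;
                }
                bh = bh->b_this_page;
        } while (bh != head);
out:
        spin_unlock(&bd_mapping->private_lock);
        put_page(page);                 /* drop the page-cache reference */
        return ret;
}

The comment around line 180 notes that it is really the page lock that protects the buffers; this slow path settles for private_lock (or a TryLock of the page), and a lookup can still miss here, for example when some buffers on the page are not yet mapped (line 222).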
249 struct page *page;
254 page = bh->b_page;
259 buffer_io_error(bh, ", async page read");
260 SetPageError(page);
266 * decide that the page is now completely done.
268 first = page_buffers(page);
286 * uptodate then we can set the page uptodate.
288 if (page_uptodate && !PageError(page))
289 SetPageUptodate(page);
290 unlock_page(page);
347 struct page *page;
351 page = bh->b_page;
355 buffer_io_error(bh, ", lost async page write");
358 SetPageError(page);
361 first = page_buffers(page);
375 end_page_writeback(page);
385 * If a page's buffers are under async read (end_buffer_async_read
390 * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
393 * The page comes unlocked when it has no locked buffer_async buffers
400 * page.
402 * PageLocked prevents anyone from starting writeback of a page which is
403 * under read I/O (PageWriteback is only ever set against a locked page).
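The comment block above describes the completion protocol for async buffer reads: each completing buffer checks whether any other buffer on the page is still marked BH_Async_Read, and only the last one to finish may mark the page uptodate and unlock it. A simplified sketch of that rule, with the per-completion serialization the real code uses deliberately elided (sketch_async_read_done is an illustrative name):

static void sketch_async_read_done(struct buffer_head *bh, int uptodate)
{
        struct page *page = bh->b_page;
        struct buffer_head *tmp;
        int page_uptodate = 1;

        if (uptodate)
                set_buffer_uptodate(bh);
        else {
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }
        /* the real code also clears BH_Async_Read on @bh and unlocks it here,
         * under a lock that serializes concurrent completions */

        tmp = bh->b_this_page;
        while (tmp != bh) {
                if (!buffer_uptodate(tmp))
                        page_uptodate = 0;
                if (buffer_async_read(tmp) && buffer_locked(tmp))
                        return;         /* another read is still in flight */
                tmp = tmp->b_this_page;
        }

        /* last completion on this page: lines 288-290 above */
        if (page_uptodate && !PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
}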
592 * Mark the page dirty, and set it dirty in the page cache, and mark the inode
595 * If warn is true, then emit a warning if the page is not uptodate and has
600 void __set_page_dirty(struct page *page, struct address_space *mapping,
606 if (page->mapping) { /* Race with truncate? */
607 WARN_ON_ONCE(warn && !PageUptodate(page));
608 account_page_dirtied(page, mapping);
609 __xa_set_mark(&mapping->i_pages, page_index(page),
617 * Add a page to the dirty page list.
622 * If the page has buffers, the uptodate buffers are set dirty, to preserve
623 * dirty-state coherency between the page and the buffers. If the page does
627 * The buffers are dirtied before the page is dirtied. There's a small race
628 * window in which a writepage caller may see the page cleanness but not the
629 * buffer dirtiness. That's fine. If this code were to set the page dirty
630 * before the buffers, a concurrent writepage caller could clear the page dirty
632 * page on the dirty page list.
635 * page's buffer list. Also use this to protect against clean buffers being
636 * added to the page after it was set dirty.
641 int __set_page_dirty_buffers(struct page *page)
644 struct address_space *mapping = page_mapping(page);
647 return !TestSetPageDirty(page);
650 if (page_has_buffers(page)) {
651 struct buffer_head *head = page_buffers(page);
660 * Lock out page->mem_cgroup migration to keep PageDirty
661 * synchronized with per-memcg dirty page counters.
663 lock_page_memcg(page);
664 newly_dirty = !TestSetPageDirty(page);
668 __set_page_dirty(page, mapping, 1);
670 unlock_page_memcg(page);
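Taken together, the matches from this function show the ordering rule spelled out in the comment: dirty the uptodate buffers under private_lock first, then set the page dirty, so a concurrent writepage caller cannot end up with dirty buffers sitting behind a page it has already marked clean. A hedged reconstruction of that flow (close to, but not claimed to be, the kernel function):

static int sketch_set_page_dirty_buffers(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int newly_dirty;

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        if (buffer_uptodate(bh))
                                set_buffer_dirty(bh);   /* buffers first... */
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        /* keep PageDirty in sync with per-memcg dirty counters */
        lock_page_memcg(page);
        newly_dirty = !TestSetPageDirty(page);          /* ...then the page */
        spin_unlock(&mapping->private_lock);

        if (newly_dirty)
                __set_page_dirty(page, mapping, 1);

        unlock_page_memcg(page);

        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        return newly_dirty;
}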
831 * Create the appropriate buffers when given a page for data area and
839 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
850 memcg = get_mem_cgroup_from_page(page);
866 /* Link the buffer to its page */
867 set_bh_page(bh, page, offset);
890 link_dev_buffers(struct page *page, struct buffer_head *head)
900 attach_page_private(page, head);
916 * Initialise the state of a blockdev page's buffers.
919 init_page_buffers(struct page *page, struct block_device *bdev,
922 struct buffer_head *head = page_buffers(page);
924 int uptodate = PageUptodate(page);
949 * Create the page-cache page that contains the requested block.
958 struct page *page;
974 page = find_or_create_page(inode->i_mapping, index, gfp_mask);
976 BUG_ON(!PageLocked(page));
978 if (page_has_buffers(page)) {
979 bh = page_buffers(page);
981 end_block = init_page_buffers(page, bdev,
986 if (!try_to_free_buffers(page))
991 * Allocate some buffers for this page
993 bh = alloc_page_buffers(page, size, true);
996 * Link the page to the buffers and initialise them. Take the
998 * run under the page lock.
1001 link_dev_buffers(page, bh);
1002 end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
1008 unlock_page(page);
1009 put_page(page);
1014 * Create buffers for the specified block device block's page. If
1015 * that page was dirty, the buffers are set dirty also.
1042 /* Create a page with the proper size buffers. */
1079 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1080 * the page is tagged dirty in the page cache.
1083 * subsections of the page. If the page has buffers, the page dirty bit is
1086 * When a page is set dirty in its entirety, all its buffers are marked dirty
1087 * (if the page has buffers).
1089 * When a buffer is marked dirty, its page is dirtied, but the page's other
1093 * individually become uptodate. But their backing page remains not
1095 * block_read_full_page() against that page will discover all the uptodate
1096 * buffers, will set the page uptodate and will perform no I/O.
1104 * its backing page dirty, then tag the page as dirty in the page cache
1130 struct page *page = bh->b_page;
1133 lock_page_memcg(page);
1134 if (!TestSetPageDirty(page)) {
1135 mapping = page_mapping(page);
1137 __set_page_dirty(page, mapping, 0);
1139 unlock_page_memcg(page);
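The comment block above (lines 1079-1104) states the invariant from the buffer side: dirtying one buffer dirties its page and tags the page dirty in the page cache, but leaves the page's other buffers alone. A short usage sketch, assuming kernel context; sketch_modify_block and its arguments are illustrative:

static void sketch_modify_block(struct buffer_head *bh,
                                const void *src, size_t len)
{
        lock_buffer(bh);
        memcpy(bh->b_data, src, min_t(size_t, len, bh->b_size));
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        /* Dirties this buffer; if it was clean, mark_buffer_dirty() also
         * dirties its page and tags the page dirty in the page cache
         * (the lines around 1130-1139 above). Other buffers on the page
         * are left untouched. */
        mark_buffer_dirty(bh);
}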
1165 * Decrement a buffer_head's reference count. If all buffers against a page
1166 * have zero reference count, are clean and unlocked, and if the page is clean
1167 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1169 * a page but it ends up not being freed, and buffers may later be reattached).
1327 /* __find_get_block_slow will mark the page accessed */
1388 * @gfp: page allocation flag
1391 * The page cache can be allocated from non-movable area
1392 * not to prevent page migration if you set gfp to zero.
1444 struct page *page, unsigned long offset)
1446 bh->b_page = page;
1448 if (PageHighMem(page))
1454 bh->b_data = page_address(page) + offset;
1459 * Called when truncating a buffer on a page completely.
1486 * block_invalidatepage - invalidate part or all of a buffer-backed page
1488 * @page: the page which is affected
1492 * block_invalidatepage() is called when all or part of the page has become
1501 void block_invalidatepage(struct page *page, unsigned int offset,
1508 BUG_ON(!PageLocked(page));
1509 if (!page_has_buffers(page))
1517 head = page_buffers(page);
1539 * We release buffers only if the entire page is being invalidated.
1544 try_to_release_page(page, 0);
1554 * is already excluded via the page lock.
1556 void create_empty_buffers(struct page *page,
1561 head = alloc_page_buffers(page, blocksize, true);
1570 spin_lock(&page->mapping->private_lock);
1571 if (PageUptodate(page) || PageDirty(page)) {
1574 if (PageDirty(page))
1576 if (PageUptodate(page))
1581 attach_page_private(page, head);
1582 spin_unlock(&page->mapping->private_lock);
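The create_empty_buffers() matches above show the allocation and the state propagation but not the step in between: the freshly allocated, NULL-terminated buffer list is closed into a ring before being attached. A hedged reconstruction of the whole flow (not verbatim kernel code):

static void sketch_create_empty_buffers(struct page *page,
                                        unsigned long blocksize,
                                        unsigned long b_state)
{
        struct buffer_head *bh, *head, *tail;

        head = alloc_page_buffers(page, blocksize, true);
        bh = head;
        do {
                bh->b_state |= b_state;
                tail = bh;
                bh = bh->b_this_page;
        } while (bh);
        tail->b_this_page = head;       /* close the per-page list into a ring */

        /* private_lock excludes other buffer users; the page lock excludes
         * try_to_free_buffers(), as noted around line 1554 */
        spin_lock(&page->mapping->private_lock);
        if (PageDirty(page) || PageUptodate(page)) {
                bh = head;
                do {
                        if (PageDirty(page))
                                set_buffer_dirty(bh);
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        attach_page_private(page, head);
        spin_unlock(&page->mapping->private_lock);
}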
1622 struct page *page = pvec.pages[i];
1624 if (!page_has_buffers(page))
1627 * We use page lock instead of bd_mapping->private_lock
1631 lock_page(page);
1632 /* Recheck when the page is locked which pins bhs */
1633 if (!page_has_buffers(page))
1635 head = page_buffers(page);
1649 unlock_page(page);
1673 static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
1675 BUG_ON(!PageLocked(page));
1677 if (!page_has_buffers(page))
1678 create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
1680 return page_buffers(page);
1698 * the page lock, whoever dirtied the buffers may decide to clean them
1703 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1712 int __block_write_full_page(struct inode *inode, struct page *page,
1724 head = create_page_buffers(page, inode,
1731 * then we just miss that fact, and the page stays dirty.
1741 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1752 * this page can be outside i_size when there is a
1782 * lock the buffer then redirty the page. Note that this can
1790 redirty_page_for_writepage(wbc, page);
1801 * The page and its buffers are protected by PageWriteback(), so we can
1804 BUG_ON(PageWriteback(page));
1805 set_page_writeback(page);
1816 unlock_page(page);
1822 * The page was marked dirty, but the buffers were
1826 end_page_writeback(page);
1829 * The page and buffer_heads can be released at any time from
1840 * The page is currently locked and not marked for writeback
1852 * attachment to a dirty page.
1857 SetPageError(page);
1858 BUG_ON(PageWriteback(page));
1859 mapping_set_error(page->mapping, err);
1860 set_page_writeback(page);
1871 unlock_page(page);
1877 * If a page has any new buffers, zero them out here, and mark them uptodate
1881 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1886 BUG_ON(!PageLocked(page));
1887 if (!page_has_buffers(page))
1890 bh = head = page_buffers(page);
1897 if (!PageUptodate(page)) {
1903 zero_user(page, start, size);
1973 int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
1978 struct inode *inode = page->mapping->host;
1985 BUG_ON(!PageLocked(page));
1990 head = create_page_buffers(page, inode, 0);
1994 block = (sector_t)page->index << (PAGE_SHIFT - bbits);
2000 if (PageUptodate(page)) {
2020 if (PageUptodate(page)) {
2027 zero_user_segments(page,
2033 if (PageUptodate(page)) {
2054 page_zero_new_buffers(page, from, to);
2058 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2061 return __block_write_begin_int(page, pos, len, get_block, NULL);
2065 static int __block_commit_write(struct inode *inode, struct page *page,
2073 bh = head = page_buffers(page);
2095 * the next read(). Here we 'discover' whether the page went
2099 SetPageUptodate(page);
2110 unsigned flags, struct page **pagep, get_block_t *get_block)
2113 struct page *page;
2116 page = grab_cache_page_write_begin(mapping, index, flags);
2117 if (!page)
2120 status = __block_write_begin(page, pos, len, get_block);
2122 unlock_page(page);
2123 put_page(page);
2124 page = NULL;
2127 *pagep = page;
2134 struct page *page, void *fsdata)
2151 * non uptodate page as a zero-length write, and force the
2154 if (!PageUptodate(page))
2157 page_zero_new_buffers(page, start+copied, start+len);
2159 flush_dcache_page(page);
2162 __block_commit_write(inode, page, start, start+copied);
2170 struct page *page, void *fsdata)
2176 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2182 * But it's important to update i_size while still holding page lock:
2183 * page writeout could otherwise come in and zero beyond i_size.
2190 unlock_page(page);
2191 put_page(page);
2196 * Don't mark the inode dirty under page lock. First, it unnecessarily
2197 * makes the holding time of page lock longer. Second, it forces lock
2198 * ordering of page lock and transaction start for journaling
2208 * block_is_partially_uptodate checks whether buffers within a page are
2214 int block_is_partially_uptodate(struct page *page, unsigned long from,
2222 if (!page_has_buffers(page))
2225 head = page_buffers(page);
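block_is_partially_uptodate() lets a read that falls entirely inside already-uptodate buffers skip I/O even though the page as a whole is not uptodate. A simplified sketch of that check, assuming kernel context and eliding the bounds checks against the page and blocksize (the sketch_ name is illustrative):

static int sketch_range_uptodate(struct page *page, unsigned from,
                                 unsigned count)
{
        struct buffer_head *head, *bh;
        unsigned block_start = 0, to = from + count;

        if (!page_has_buffers(page))
                return 0;

        head = page_buffers(page);
        bh = head;
        do {
                unsigned block_end = block_start + bh->b_size;

                /* only buffers overlapping [from, to) matter */
                if (block_end > from && block_start < to &&
                    !buffer_uptodate(bh))
                        return 0;
                if (block_start >= to)
                        break;
                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);

        return 1;
}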
2253 * Generic "read page" function for block devices that have the normal
2255 * Reads the page asynchronously --- the unlock_buffer() and
2257 * page struct once IO has completed.
2259 int block_read_full_page(struct page *page, get_block_t *get_block)
2261 struct inode *inode = page->mapping->host;
2268 head = create_page_buffers(page, inode, 0);
2272 iblock = (sector_t)page->index << (PAGE_SHIFT - bbits);
2290 SetPageError(page);
2293 zero_user(page, i * blocksize, blocksize);
2309 SetPageMappedToDisk(page);
2313 * All buffers are uptodate - we can set the page uptodate
2316 if (!PageError(page))
2317 SetPageUptodate(page);
2318 unlock_page(page);
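For a buffer_head-based filesystem, block_read_full_page() is normally wired straight into the readpage address_space operation. A minimal usage sketch; myfs_readpage and myfs_get_block are hypothetical names, and the get_block callback (which maps a file block to a disk block) is filesystem-specific and not shown:

int myfs_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create);

static int myfs_readpage(struct file *file, struct page *page)
{
        /* block_read_full_page() creates the page's buffers if needed,
         * zeroes holes, submits reads only for mapped, non-uptodate
         * buffers, and marks the page uptodate itself when no I/O
         * turned out to be necessary (lines 2313-2318 above). */
        return block_read_full_page(page, myfs_get_block);
}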
2352 struct page *page;
2361 AOP_FLAG_CONT_EXPAND, &page, &fsdata);
2365 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2378 struct page *page;
2397 &page, &fsdata);
2400 zero_user(page, zerofrom, len);
2402 page, fsdata);
2416 /* page covers the boundary, find the boundary offset */
2430 &page, &fsdata);
2433 zero_user(page, zerofrom, len);
2435 page, fsdata);
2451 struct page **pagep, void **fsdata,
2473 int block_commit_write(struct page *page, unsigned from, unsigned to)
2475 struct inode *inode = page->mapping->host;
2476 __block_commit_write(inode, page, from, to);
2483 * called from a page fault handler when a page is first dirtied. Hence we must
2484 * be careful to check for EOF conditions here. We set the page up correctly
2485 * for a written page which means we get ENOSPC checking when writing into
2490 * protect against truncate races as the page could now be beyond EOF. Because
2492 * page lock we can determine safely if the page is beyond EOF. If it is not
2493 * beyond EOF, then the page is guaranteed safe against truncation until we
2494 * unlock the page.
2502 struct page *page = vmf->page;
2508 lock_page(page);
2510 if ((page->mapping != inode->i_mapping) ||
2511 (page_offset(page) > size)) {
2512 /* We overload EFAULT to mean page got truncated */
2517 /* page is wholly or partially inside EOF */
2518 if (((page->index + 1) << PAGE_SHIFT) > size)
2523 ret = __block_write_begin(page, 0, end, get_block);
2525 ret = block_commit_write(page, 0, end);
2529 set_page_dirty(page);
2530 wait_for_stable_page(page);
2533 unlock_page(page);
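The block_page_mkwrite() matches above trace the sequence a page fault takes when a mapped page is first written: lock the page, revalidate it against truncation under that lock, then run the write_begin/commit_write machinery so blocks are allocated and the page is dirtied. A hedged sketch of that sequence, with the translation of errors to VM_FAULT_* codes elided (the sketch_ name is illustrative):

static int sketch_page_mkwrite(struct page *page, struct inode *inode,
                               get_block_t *get_block)
{
        loff_t size;
        unsigned long end;
        int ret;

        lock_page(page);
        size = i_size_read(inode);
        if (page->mapping != inode->i_mapping || page_offset(page) > size) {
                ret = -EFAULT;          /* page was truncated under us */
                goto out_unlock;
        }

        /* page wholly or partially inside EOF: only write up to EOF */
        if (((page->index + 1) << PAGE_SHIFT) > size)
                end = size & ~PAGE_MASK;
        else
                end = PAGE_SIZE;

        ret = __block_write_begin(page, 0, end, get_block);
        if (!ret)
                ret = block_commit_write(page, 0, end);
        if (ret < 0)
                goto out_unlock;

        set_page_dirty(page);
        wait_for_stable_page(page);
        return 0;                       /* page stays locked for the caller */

out_unlock:
        unlock_page(page);
        return ret;
}

Because the page is locked before the i_mapping and i_size checks, it cannot become stale to truncation until it is unlocked again, which is the guarantee the comment around line 2490 relies on.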
2540 * immediately, while under the page lock. So it needs a special end_io
2550 * the page (converting it to a circular linked list and taking care of page
2553 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2557 BUG_ON(!PageLocked(page));
2559 spin_lock(&page->mapping->private_lock);
2562 if (PageDirty(page))
2568 attach_page_private(page, head);
2569 spin_unlock(&page->mapping->private_lock);
2573 * On entry, the page is fully not uptodate.
2574 * On exit the page is fully uptodate in the areas outside (from,to)
2579 struct page **pagep, void **fsdata,
2586 struct page *page;
2600 page = grab_cache_page_write_begin(mapping, index, flags);
2601 if (!page)
2603 *pagep = page;
2606 if (page_has_buffers(page)) {
2607 ret = __block_write_begin(page, pos, len, get_block);
2613 if (PageMappedToDisk(page))
2618 * attach them to the page if an error occurs. In the common case of
2620 * to the page (which is all OK, because we're under the page lock).
2625 head = alloc_page_buffers(page, blocksize, false);
2631 block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
2634 * We loop across all blocks in the page, whether or not they are
2636 * page is fully mapped-to-disk.
2656 if (PageUptodate(page)) {
2661 zero_user_segments(page, block_start, from,
2677 * The page is locked, so these buffers are protected from
2691 SetPageMappedToDisk(page);
2702 * Buffers need to be attached to the page at this point, otherwise
2706 attach_nobh_buffers(page, head);
2707 page_zero_new_buffers(page, from, to);
2710 unlock_page(page);
2711 put_page(page);
2720 struct page *page, void *fsdata)
2722 struct inode *inode = page->mapping->host;
2725 BUG_ON(fsdata != NULL && page_has_buffers(page));
2728 attach_nobh_buffers(page, head);
2729 if (page_has_buffers(page))
2731 copied, page, fsdata);
2733 SetPageUptodate(page);
2734 set_page_dirty(page);
2740 unlock_page(page);
2741 put_page(page);
2756 * the page.
2758 int nobh_writepage(struct page *page, get_block_t *get_block,
2761 struct inode * const inode = page->mapping->host;
2767 /* Is the page fully inside i_size? */
2768 if (page->index < end_index)
2771 /* Is the page fully outside i_size? (truncate in progress) */
2773 if (page->index >= end_index+1 || !offset) {
2774 unlock_page(page);
2779 * The page straddles i_size. It must be zeroed out on each and every
2781 * in multiples of the page size. For a file that is not a multiple of
2782 * the page size, the remaining memory is zeroed when mapped, and
2785 zero_user_segment(page, offset, PAGE_SIZE);
2787 ret = mpage_writepage(page, get_block, wbc);
2789 ret = __block_write_full_page(inode, page, get_block, wbc,
2804 struct page *page;
2818 page = grab_cache_page(mapping, index);
2820 if (!page)
2823 if (page_has_buffers(page)) {
2825 unlock_page(page);
2826 put_page(page);
2847 if (!PageUptodate(page)) {
2848 err = mapping->a_ops->readpage(NULL, page);
2850 put_page(page);
2853 lock_page(page);
2854 if (!PageUptodate(page)) {
2858 if (page_has_buffers(page))
2861 zero_user(page, offset, length);
2862 set_page_dirty(page);
2866 unlock_page(page);
2867 put_page(page);
2882 struct page *page;
2896 page = grab_cache_page(mapping, index);
2898 if (!page)
2901 if (!page_has_buffers(page))
2902 create_empty_buffers(page, blocksize, 0);
2905 bh = page_buffers(page);
2925 if (PageUptodate(page))
2937 zero_user(page, offset, length);
2942 unlock_page(page);
2943 put_page(page);
2952 int block_write_full_page(struct page *page, get_block_t *get_block,
2955 struct inode * const inode = page->mapping->host;
2960 /* Is the page fully inside i_size? */
2961 if (page->index < end_index)
2962 return __block_write_full_page(inode, page, get_block, wbc,
2965 /* Is the page fully outside i_size? (truncate in progress) */
2967 if (page->index >= end_index+1 || !offset) {
2968 unlock_page(page);
2973 * The page straddles i_size. It must be zeroed out on each and every
2975 * in multiples of the page size. For a file that is not a multiple of
2976 * the page size, the remaining memory is zeroed when mapped, and
2979 zero_user_segment(page, offset, PAGE_SIZE);
2980 return __block_write_full_page(inode, page, get_block, wbc,
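On the write side, the same get_block callback plugs into ->writepage through block_write_full_page(). A minimal usage sketch; myfs_writepage is a hypothetical name and myfs_get_block is the same illustrative callback used in the readpage sketch earlier:

int myfs_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create);

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
        /* block_write_full_page() implements the i_size cases listed above:
         * pages fully inside i_size are written as-is, pages fully beyond
         * i_size are unlocked and skipped, and the single page straddling
         * i_size has its tail zeroed (lines 2973-2979) before its dirty
         * buffers are written. */
        return block_write_full_page(page, myfs_get_block, wbc);
}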
3173 * try_to_free_buffers() checks if all the buffers on this particular page
3177 * locking the page or by holding its mapping's private_lock.
3179 * If the page is dirty but all the buffers are clean then we need to
3180 * be sure to mark the page clean as well. This is because the page
3182 * to a dirty page will set *all* buffers dirty. Which would corrupt
3186 * clean then we set the page clean and proceed. To do that, we require
3199 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3201 struct buffer_head *head = page_buffers(page);
3219 detach_page_private(page);
3225 int try_to_free_buffers(struct page *page)
3227 struct address_space * const mapping = page->mapping;
3231 BUG_ON(!PageLocked(page));
3232 if (PageWriteback(page))
3236 ret = drop_buffers(page, &buffers_to_free);
3241 ret = drop_buffers(page, &buffers_to_free);
3245 * then we can have clean buffers against a dirty page. We
3246 * clean the page here; otherwise the VM will never notice
3250 * the page's buffers clean. We discover that here and clean
3251 * the page also.
3258 cancel_dirty_page(page);
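try_to_free_buffers() is usually reached from memory reclaim or page invalidation through the releasepage address_space operation. A minimal sketch of that hookup; myfs_releasepage is a hypothetical name:

static int myfs_releasepage(struct page *page, gfp_t gfp)
{
        /* Returns 0 (failure) if any buffer on the page is dirty, locked,
         * or still referenced; on success the buffer ring is detached and
         * freed, and a page left spuriously dirty by clean buffers is
         * cleaned via cancel_dirty_page(), as the comments above describe. */
        return try_to_free_buffers(page);
}

The gfp argument is accepted to satisfy the address_space operation's signature but is not needed by try_to_free_buffers() itself.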