Lines Matching defs:page
129 struct page *locked_page, u64 start,
411 struct page *locked_page,
417 struct page *page;
426 * For the locked page, we will call btrfs_mark_ordered_io_finished
429 * clear page Ordered and run the ordered extent accounting.
433 * for the page range, and the ordered extent will never finish.
439 page = find_get_page(inode->vfs_inode.i_mapping, index);
441 if (!page)
445 * Here we just clear all Ordered bits for every page in the
449 btrfs_page_clamp_clear_ordered(inode->root->fs_info, page,
451 put_page(page);
455 /* The locked page covers the full range, nothing needs to be done */
459 * In case this page belongs to the delalloc range being
460 * instantiated then skip it, since the first page of a range is
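
The hits at lines 439-451 above come from the error-path walk that drops the Ordered bit on every page of the range. A minimal sketch of that find_get_page()/put_page() pattern, not taken from inode.c itself (mapping, start and end are assumed to be in scope, and the per-page work is reduced to a placeholder):

	/* Illustrative sketch only; assumes <linux/pagemap.h>. */
	unsigned long index = start >> PAGE_SHIFT;
	const unsigned long end_index = end >> PAGE_SHIFT;

	while (index <= end_index) {
		struct page *page = find_get_page(mapping, index);

		index++;
		if (!page)
			continue;
		/* per-page work, e.g. clearing the Ordered bit */
		put_page(page);
	}

Pages missing from the page cache are simply skipped, which is why the NULL check at line 441 above exists.
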
507 struct page **compressed_pages,
512 struct page *page = NULL;
551 struct page *cpage;
569 page = find_get_page(inode->vfs_inode.i_mapping, 0);
571 kaddr = kmap_local_page(page);
574 put_page(page);
590 * without locking a page we already have locked.
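
Lines 569-574 above read inline extent data back through page 0 of the inode's mapping. A hedged sketch of that kmap_local_page() access pattern, not the actual btrfs code (buf and len are hypothetical, error handling omitted):

	/* Illustrative sketch only; assumes <linux/highmem.h> and <linux/pagemap.h>. */
	struct page *page = find_get_page(mapping, 0);

	if (page) {
		char *kaddr = kmap_local_page(page);

		memcpy(buf, kaddr, len);	/* buf and len are hypothetical */
		kunmap_local(kaddr);
		put_page(page);
	}

kmap_local_page() mappings are thread-local and must be released in the reverse order they were taken, which is why the copy happens immediately and kunmap_local() follows right away.
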
615 struct page **compressed_pages,
687 * And at reserve time, it's always aligned to page size, so
688 * just free one page here.
700 struct page **pages;
708 struct page *locked_page;
726 struct page **pages,
762 * We lock the full page then run each delalloc range in the page, thus
770 * page [0, 64K), so whichever range finishes later will find the page
771 * already unlocked, triggering various page lock requirement BUG_ON()s.
774 * if the range is fully page aligned.
776 * In theory we only need to ensure the first page is fully covered, but
777 * the trailing partial page will be locked until the full compression
781 * first to prevent any submitted async extent to unlock the full page.
783 * will unlock the full page.
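
The comment block at lines 762-783 above requires the delalloc range handed to async compression to be fully page aligned in the subpage case, so that whoever unlocks the page owns the whole of it. A tiny sketch of that alignment test; the helper name is made up for illustration:

	/* Illustrative sketch only; not a btrfs helper. */
	static bool delalloc_range_is_page_aligned(u64 start, u64 end)
	{
		return IS_ALIGNED(start, PAGE_SIZE) && IS_ALIGNED(end + 1, PAGE_SIZE);
	}

end is treated as inclusive here, matching the [start, end] convention used throughout the file, hence the end + 1 in the check.
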
842 struct page **pages;
853 * We need to call clear_page_dirty_for_io on each page in the range.
855 * the page contents while we are compressing them.
862 * unlock the page in truncate and fallocate, and then modify the i_size
901 * For the subpage case, we require full page alignment for the sector
924 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
946 * Zero the tail end of the last page, as we might be sending it down
1009 * the page count read with the blocks on disk, compression must free at
1063 struct page *locked_page)
1104 struct page *locked_page = NULL;
1251 * locked_page is the page that writepage had locked already. We use
1258 * (In reality inline extents are limited to a single page, so locked_page is
1259 * the only page handled anyway).
1261 * When this function succeeds and creates a normal extent, the page locking
1274 struct page *locked_page, u64 start, u64 end,
1306 * Due to the page size limit, for subpage we can only trigger the
1307 * writeback for the dirty sectors of the page, which means data writeback
1343 * Here we manually unlock the page, since the caller
1458 * We're not doing compressed IO, don't unlock the first page
1462 * Do set the Ordered (Private2) bit so we know this page was
1610 struct page *locked_page, u64 start,
1652 * the original page we were actually given. As we spread
1667 * against the first page.
1703 struct page *locked_page, u64 start,
1746 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1963 struct page *locked_page,
2250 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2677 struct page *page;
2689 struct page *page = fixup->page;
2692 u64 page_start = page_offset(page);
2693 u64 page_end = page_offset(page) + PAGE_SIZE - 1;
2699 * we take the page lock.
2704 lock_page(page);
2707 * Before we queued this fixup, we took a reference on the page.
2708 * page->mapping may go NULL, but it shouldn't be moved to a different
2711 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2715 * 1) We got here and our page had already been dealt with and
2720 * 2) Our page was already dealt with, but we happened to get an
2723 * because the page was already dealt with we don't want to
2724 * mark the page with an error, so make sure we're resetting
2727 * when the page was already properly dealt with.
2740 * We can't mess with the page state unless it is locked, so now that
2749 if (PageOrdered(page))
2756 unlock_page(page);
2768 * Everything went as planned, we're now the owner of a dirty page with
2772 * The page was dirty when we started, nothing should have cleaned it.
2774 BUG_ON(!PageDirty(page));
2785 * We hit ENOSPC or other errors. Update the mapping and page
2786 * to reflect the errors and clean the page.
2788 mapping_set_error(page->mapping, ret);
2789 btrfs_mark_ordered_io_finished(inode, page, page_start,
2791 clear_page_dirty_for_io(page);
2793 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
2794 unlock_page(page);
2795 put_page(page);
2808 * set the page dirty bit without asking the filesystem if it is a
2815 * the delalloc bit and make it safe to write the page.
2817 int btrfs_writepage_cow_fixup(struct page *page)
2819 struct inode *inode = page->mapping->host;
2823 /* This page has ordered extent covering it already */
2824 if (PageOrdered(page))
2828 * PageChecked is set below when we create a fixup worker for this page,
2831 * The extent_io writepage code will redirty the page if we send back
2834 if (PageChecked(page))
2844 * takes place outside of the page lock, and we can't trust
2845 * page->mapping outside of the page lock.
2848 btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
2849 get_page(page);
2851 fixup->page = page;
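
Lines 2844-2851 above show the deferral that the docblock at 2808-2815 describes: mark the page Checked, take an extra reference, and hand the page to a worker that can redo the delalloc accounting under the page lock. A generic sketch of that defer-to-worker shape using the stock workqueue API; btrfs actually uses its own fixup_workers queue, and struct fixup_ctx / fixup_fn below are made-up names:

	/* Illustrative sketch using the generic workqueue API. */
	struct fixup_ctx {
		struct work_struct work;
		struct page *page;
	};

	static void fixup_fn(struct work_struct *work)
	{
		struct fixup_ctx *ctx = container_of(work, struct fixup_ctx, work);

		lock_page(ctx->page);
		/* ... redo the delalloc accounting under the page lock ... */
		unlock_page(ctx->page);
		put_page(ctx->page);		/* drop the reference taken below */
		kfree(ctx);
	}

	/* Queueing side, called with the page locked and Checked already set: */
	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);	/* NULL check omitted */
	get_page(page);				/* keep the page alive for the worker */
	ctx->page = page;
	INIT_WORK(&ctx->work, fixup_fn);
	schedule_work(&ctx->work);

The extra reference is what makes it safe to look at page->mapping again inside the worker, as the comments at lines 2707-2708 point out.
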
3251 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
3261 kaddr = kmap_local_page(page) + pgoff;
4686 struct page *page;
4718 page = find_or_create_page(mapping, index, mask);
4719 if (!page) {
4727 if (!PageUptodate(page)) {
4728 ret = btrfs_read_folio(NULL, page_folio(page));
4729 lock_page(page);
4730 if (page->mapping != mapping) {
4731 unlock_page(page);
4732 put_page(page);
4735 if (!PageUptodate(page)) {
4742 * We unlock the page after the io is completed and then re-lock it
4744 * PagePrivate(), but left the page in the mapping. Set the page mapped
4747 ret = set_page_extent_mapped(page);
4751 wait_on_page_writeback(page);
4758 unlock_page(page);
4759 put_page(page);
4780 memzero_page(page, (block_start - page_offset(page)),
4783 memzero_page(page, (block_start - page_offset(page)) + offset,
4786 btrfs_page_clear_checked(fs_info, page, block_start,
4788 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start);
4804 unlock_page(page);
4805 put_page(page);
5813 * copy_to_user_inatomic so we don't have to worry about page faulting under the
6663 struct page *page,
6685 ret = btrfs_decompress(compress_type, tmp, page, 0, inline_size, max_size);
6696 memzero_page(page, max_size, PAGE_SIZE - max_size);
6702 struct page *page)
6708 if (!page || PageUptodate(page))
6711 ASSERT(page_offset(page) == 0);
6716 return uncompress_inline(path, page, fi);
6720 kaddr = kmap_local_page(page);
6725 memzero_page(page, copy_size, PAGE_SIZE - copy_size);
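
Lines 6696 and 6725 above pad everything past the inline data with zeroes so the rest of the page never exposes stale contents. A short sketch of that memzero_page() idiom (copy_size is assumed to be the number of bytes already copied into the page):

	/* Illustrative sketch only. */
	if (copy_size < PAGE_SIZE)
		memzero_page(page, copy_size, PAGE_SIZE - copy_size);
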
6733 * @page: page to read extent data into if the extent is inline
6734 * @pg_offset: offset into @page to copy to
6742 * If @page is not NULL and the extent is inline, this also reads the extent
6743 * data directly into the page and marks the extent up to date in the io_tree.
6748 struct page *page, size_t pg_offset,
6772 else if (em->block_start == EXTENT_MAP_INLINE && page)
6905 ret = read_inline_extent(inode, path, page);
7202 * deadlock with concurrent buffered writes on page
7226 * triggered a readahead) on a page lock due to an
7231 * that page.
7484 * to block on page locks. We also don't want to block on pages marked as
7590 * page fault error when trying to fault in pages for the buffer that is
7594 * those bios have completed by the time we get the page fault error,
7858 * If we continue to release/invalidate the page, we could cause use-after-free
7862 static void wait_subpage_spinlock(struct page *page)
7864 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
7867 if (!btrfs_is_subpage(fs_info, page))
7870 ASSERT(PagePrivate(page) && page->private);
7871 subpage = (struct btrfs_subpage *)page->private;
7877 * And since the page is neither dirty nor under writeback, and we have the page
7879 * function to clear page writeback.
7882 * should exit and we're safe to release/invalidate the page.
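
Lines 7862-7882 above describe waiting out any concurrent holder of the subpage spinlock (an endio handler clearing writeback) before the page is released. The usual shape of such a drain is to take and immediately drop the lock; a hedged sketch, with subpage->lock assumed to be the spinlock in question:

	/* Illustrative sketch: acquiring then releasing drains any current holder. */
	spin_lock_irq(&subpage->lock);
	spin_unlock_irq(&subpage->lock);

Once this pair returns, any holder that was inside the critical section has exited, so the page's private structure can be torn down safely.
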
7890 int ret = try_release_extent_mapping(&folio->page, gfp_flags);
7893 wait_subpage_spinlock(&folio->page);
7894 clear_page_extent_mapped(&folio->page);
7941 * page, nor can any bio be submitted for this folio.
7953 wait_subpage_spinlock(&folio->page);
7960 * shouldn't clear page extent mapped, as folio->private can still
8009 if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
8018 btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);
8021 * IO on this page will never be started, so we need to account
8071 * Since the IO will never happen for this page.
8083 * We have iterated through all ordered extents of the page, the page
8088 btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
8091 clear_page_extent_mapped(&folio->page);
8096 * called from a page fault handler when a page is first dirtied. Hence we must
8097 * be careful to check for EOF conditions here. We set the page up correctly
8098 * for a written page which means we get ENOSPC checking when writing into
8103 * protect against truncate races as the page could now be beyond EOF. Because
8105 * the page lock we can determine safely if the page is beyond EOF. If it is not
8106 * beyond EOF, then the page is guaranteed safe against truncation until we
8107 * unlock the page.
8111 struct page *page = vmf->page;
8131 page_start = page_offset(page);
8136 * Reserving delalloc space after obtaining the page lock can lead to
8137 * deadlock. For example, if a dirty page is locked by this function
8139 * dirty page write out, then the btrfs_writepages() function could
8140 * end up waiting indefinitely to get a lock on the page currently
8159 lock_page(page);
8162 if ((page->mapping != inode->i_mapping) ||
8164 /* page got truncated out from underneath us */
8167 wait_on_page_writeback(page);
8170 ret2 = set_page_extent_mapped(page);
8185 unlock_page(page);
8192 if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8204 * page_mkwrite gets called when the page is first dirtied after it's
8205 * faulted in, but write(2) could also dirty a page and set delalloc
8207 * clear any delalloc bits within this page range since we have to
8222 /* page is wholly or partially inside EOF */
8229 memzero_page(page, zero_start, PAGE_SIZE - zero_start);
8231 btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
8232 btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start);
8233 btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start);
8246 unlock_page(page);
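
The docblock at lines 8096-8107 and the check at lines 8162-8164 above guard the fault handler against a concurrent truncate: the page is only trusted after it has been locked and re-validated against the mapping and i_size. A condensed sketch of just that re-validation, not the full btrfs_page_mkwrite() logic (space reservation and extent locking are omitted):

	/* Illustrative sketch only. */
	loff_t size;

	lock_page(page);
	size = i_size_read(inode);
	if (page->mapping != inode->i_mapping || page_offset(page) >= size) {
		/* page was truncated away while we waited for the lock */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
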
9788 struct page *page;
9794 page = find_get_page(inode->vfs_inode.i_mapping, index);
9795 ASSERT(page); /* Pages should be in the extent_io_tree */
9797 btrfs_page_set_writeback(fs_info, page, start, len);
9798 put_page(page);
9942 u64 disk_io_size, struct page **pages)
9996 struct page **pages;
10003 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
10154 * Don't read beyond what we locked. This also limits the page
10210 struct page **pages;
10300 pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
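
The last three hits allocate the page-pointer arrays used for compression and encoded I/O. Two use kcalloc(), while the one at line 10300 uses kvcalloc(), which may fall back to vmalloc for large arrays and therefore has to be released with kvfree(). A tiny sketch of that allocate/free pairing (nr_pages is assumed to be in scope):

	/* Illustrative sketch only. */
	struct page **pages;

	pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
	if (!pages)
		return -ENOMEM;
	/* ... fill and use the array ... */
	kvfree(pages);
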