Lines Matching defs:page
85 struct page *locked_page,
109 struct page *locked_page,
117 struct page *page;
120 page = find_get_page(inode->vfs_inode.i_mapping, index);
122 if (!page)
124 ClearPagePrivate2(page);
125 put_page(page);
129 * In case this page belongs to the delalloc range being instantiated
130 * then skip it, since the first page of a range is going to be
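
The loop at 117-125 above is btrfs's ordered-extent cleanup walking a delalloc range. A minimal sketch of the pattern with the btrfs context stripped away (the helper name is mine; the mapping and index bounds come from the caller):

        #include <linux/pagemap.h>
        #include <linux/page-flags.h>

        /*
         * Clear the ordered (Private2) bit on every page cached in
         * [index, end_index], dropping the reference find_get_page() took.
         */
        static void clear_private2_range(struct address_space *mapping,
                                         pgoff_t index, pgoff_t end_index)
        {
                struct page *page;

                while (index <= end_index) {
                        page = find_get_page(mapping, index);
                        index++;
                        if (!page)      /* not cached, nothing to clear */
                                continue;
                        ClearPagePrivate2(page);
                        put_page(page);
                }
        }
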
165 struct page **compressed_pages)
168 struct page *page = NULL;
210 struct page *cpage;
228 page = find_get_page(inode->i_mapping,
231 kaddr = kmap_atomic(page);
235 put_page(page);
252 * a page we already have locked.
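
The fragment at 228-235 above copies file data out of a page-cache page under a short-lived atomic mapping. A sketch of that idiom (helper name is mine; it assumes the page is present and uptodate, as the inline-extent path above guarantees, and that the range does not cross a page boundary):

        #include <linux/highmem.h>
        #include <linux/pagemap.h>

        static void copy_from_cached_page(struct address_space *mapping,
                                          loff_t pos, char *dst, size_t len)
        {
                struct page *page = find_get_page(mapping, pos >> PAGE_SHIFT);
                char *kaddr;

                kaddr = kmap_atomic(page);      /* no sleeping until kunmap */
                memcpy(dst, kaddr + offset_in_page(pos), len);
                kunmap_atomic(kaddr);
                put_page(page);
        }
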
274 struct page **compressed_pages)
348 * And at reserve time, it's always aligned to page size, so
349 * just free one page here.
361 struct page **pages;
369 struct page *locked_page;
388 struct page **pages,
484 struct page **pages = NULL;
500 * unlock the page in truncate and fallocate, and then modify the i_size
552 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
566 * page in the range. Otherwise applications with the file
567 * mmap'd can wander in and change the page contents while
592 struct page *page = pages[nr_pages - 1];
595 /* zero the tail end of the last page, we might be
599 kaddr = kmap_atomic(page);
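
Lines 592-599 above guard against leaking stale memory: when the compressed data ends mid-page, the tail of the last page is zeroed before it can be written out. The idiom, as a sketch (helper name is mine):

        static void zero_page_tail(struct page *page, size_t valid_bytes)
        {
                unsigned int off = offset_in_page(valid_bytes);
                char *kaddr;

                if (off) {
                        kaddr = kmap_atomic(page);
                        memset(kaddr + off, 0, PAGE_SIZE - off);
                        kunmap_atomic(kaddr);
                }
        }
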
676 * win, compare the page count read with the blocks on disk,
704 * free any pages it allocated and our page pointer array
724 * we've been given so far. Redirty the locked page if it corresponds
896 struct page *p = async_extent->pages[0];
970 * locked_page is the page that writepage had locked already. We use
978 struct page *locked_page,
1106 * page (which the caller expects to stay locked), don't
1109 * Do set the Private2 bit so we know this page was properly
1238 struct page *locked_page,
1304 * the original page we were actually given. As we spread
1319 * against the first page.
1373 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1448 struct page *locked_page,
1815 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2109 * @page - The page we are about to add to the bio
2114 * return 1 if page cannot be added to the bio
2115 * return 0 if page can be added to the bio
2118 int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
2121 struct inode *inode = page->mapping->host;
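
btrfs_bio_fits_in_stripe() (2109-2121 above) answers one question: if @size more bytes were appended to @bio, would the I/O cross a RAID stripe boundary? The shape of the check, with the chunk-geometry lookup reduced to a hypothetical helper (stripe_bytes_left() is my stand-in, not a real btrfs function):

        #include <linux/bio.h>

        /* Hypothetical: contiguous bytes left in the stripe at @logical. */
        static u64 stripe_bytes_left(struct btrfs_fs_info *fs_info, u64 logical);

        static int bio_crosses_stripe(struct btrfs_fs_info *fs_info,
                                      struct bio *bio, u64 logical, size_t size)
        {
                u64 length = bio->bi_iter.bi_size + size;

                /* 1: page cannot be added; 0: it fits the current stripe */
                return stripe_bytes_left(fs_info, logical) < length;
        }
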
2324 struct page *page;
2335 struct page *page;
2343 page = fixup->page;
2345 page_start = page_offset(page);
2346 page_end = page_offset(page) + PAGE_SIZE - 1;
2350 * we take the page lock.
2355 lock_page(page);
2358 * Before we queued this fixup, we took a reference on the page.
2359 * page->mapping may go NULL, but it shouldn't be moved to a different
2362 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2366 * 1) We got here and our page had already been dealt with and
2371 * 2) Our page was already dealt with, but we happened to get an
2374 * because the page was already dealt with we don't want to
2375 * mark the page with an error, so make sure we're resetting
2378 * when the page was already properly dealt with.
2391 * We can't mess with the page state unless it is locked, so now that
2400 if (PagePrivate2(page))
2407 unlock_page(page);
2419 * Everything went as planned, we're now the owner of a dirty page with
2423 * The page was dirty when we started, nothing should have cleaned it.
2425 BUG_ON(!PageDirty(page));
2437 * We hit ENOSPC or other errors. Update the mapping and page
2438 * to reflect the errors and clean the page.
2440 mapping_set_error(page->mapping, ret);
2441 end_extent_writepage(page, ret, page_start, page_end);
2442 clear_page_dirty_for_io(page);
2443 SetPageError(page);
2445 ClearPageChecked(page);
2446 unlock_page(page);
2447 put_page(page);
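
The fixup worker above (2335-2447) is a textbook "lock, then revalidate" sequence: any page state observed before lock_page() may be stale by the time the lock is held, so every assumption is re-checked under the lock. Its skeleton:

        static bool page_still_needs_fixup(struct page *page)
        {
                lock_page(page);
                if (!page->mapping ||           /* truncated while we waited */
                    !PageDirty(page) ||         /* someone already cleaned it */
                    !PageChecked(page)) {       /* fixup already processed */
                        unlock_page(page);
                        return false;
                }
                return true;    /* caller does the fixup, then unlock/put */
        }
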
2460 * set the page dirty bit without asking the filesystem if it is a
2467 * the delalloc bit and make it safe to write the page.
2469 int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
2471 struct inode *inode = page->mapping->host;
2475 /* this page is properly in the ordered list */
2476 if (TestClearPagePrivate2(page))
2480 * PageChecked is set below when we create a fixup worker for this page,
2483 * The extent_io writepage code will redirty the page if we send back
2486 if (PageChecked(page))
2496 * takes place outside of the page lock, and we can't trust
2497 * page->mapping outside of the page lock.
2500 SetPageChecked(page);
2501 get_page(page);
2503 fixup->page = page;
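
Lines 2500-2503 above queue the fixup: PageChecked marks the fixup as pending so the page is not queued twice, and get_page() keeps it alive until the worker's final put_page(). A sketch under those assumptions (struct page_fixup is my stand-in for btrfs's struct btrfs_writepage_fixup):

        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct page_fixup {
                struct work_struct work;
                struct page *page;
        };

        static int queue_page_fixup(struct page *page,
                                    struct workqueue_struct *wq, work_func_t fn)
        {
                struct page_fixup *fixup = kzalloc(sizeof(*fixup), GFP_NOFS);

                if (!fixup)
                        return -EAGAIN;
                SetPageChecked(page);
                get_page(page);
                INIT_WORK(&fixup->work, fn);
                fixup->page = page;
                queue_work(wq, &fixup->work);
                return -EAGAIN; /* writepage will retry this page later */
        }
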
2831 void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
2834 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
2839 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2841 ClearPagePrivate2(page);
2856 int icsum, struct page *page, int pgoff, u64 start,
2868 kaddr = kmap_atomic(page);
2885 flush_dcache_page(page);
2896 struct page *page, u64 start, u64 end, int mirror)
2898 size_t offset = start - page_offset(page);
2899 struct inode *inode = page->mapping->host;
2903 if (PageChecked(page)) {
2904 ClearPageChecked(page);
2918 return check_data_csum(inode, io_bio, phy_offset, page, offset, start,
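
check_data_csum() (2856-2885 above) checksums a sub-page range in place and, on mismatch, poisons the range so stale data is never handed to the reader. A sketch with crc32c() standing in for btrfs's configurable csum_shash (the seed and helper name are mine):

        #include <linux/crc32c.h>
        #include <linux/highmem.h>

        static int verify_page_range(struct page *page, unsigned int pgoff,
                                     unsigned int len, u32 expected)
        {
                char *kaddr = kmap_atomic(page);
                u32 csum = crc32c(~(u32)0, kaddr + pgoff, len);

                if (csum != expected) {
                        memset(kaddr + pgoff, 1, len);  /* poison, as btrfs does */
                        flush_dcache_page(page);
                        kunmap_atomic(kaddr);
                        return -EIO;
                }
                kunmap_atomic(kaddr);
                return 0;
        }
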
4581 struct page *page;
4614 page = find_or_create_page(mapping, index, mask);
4615 if (!page) {
4623 if (!PageUptodate(page)) {
4624 ret = btrfs_readpage(NULL, page);
4625 lock_page(page);
4626 if (page->mapping != mapping) {
4627 unlock_page(page);
4628 put_page(page);
4631 if (!PageUptodate(page)) {
4636 wait_on_page_writeback(page);
4639 set_page_extent_mapped(page);
4645 unlock_page(page);
4646 put_page(page);
4667 kaddr = kmap(page);
4669 memset(kaddr + (block_start - page_offset(page)),
4672 memset(kaddr + (block_start - page_offset(page)) + offset,
4674 flush_dcache_page(page);
4675 kunmap(page);
4677 ClearPageChecked(page);
4678 set_page_dirty(page);
4696 unlock_page(page);
4697 put_page(page);
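
Lines 4614-4646 above are the read-and-revalidate dance for block zeroing. A sketch of its control flow; read_fn stands in for btrfs_readpage(), which like any ->readpage unlocks the page when the read completes, and that window is exactly why page->mapping must be re-checked after relocking:

        #include <linux/err.h>
        #include <linux/pagemap.h>

        static struct page *get_locked_uptodate_page(struct address_space *mapping,
                        pgoff_t index, int (*read_fn)(struct file *, struct page *))
        {
                struct page *page;

        again:
                page = find_or_create_page(mapping, index, GFP_NOFS);
                if (!page)
                        return ERR_PTR(-ENOMEM);

                if (!PageUptodate(page)) {
                        if (read_fn(NULL, page)) {
                                put_page(page);
                                return ERR_PTR(-EIO);
                        }
                        lock_page(page);
                        if (page->mapping != mapping) { /* truncated meanwhile */
                                unlock_page(page);
                                put_page(page);
                                goto again;
                        }
                        if (!PageUptodate(page)) {      /* the read failed */
                                unlock_page(page);
                                put_page(page);
                                return ERR_PTR(-EIO);
                        }
                }
                wait_on_page_writeback(page);
                return page;    /* locked, uptodate, referenced */
        }
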
4976 * btrfs_invalidatepage() against each page of the inode. This is slow because
4982 * those expensive operations on a per page basis and do only the ordered io
5613 * copy_to_user_inatomic so we don't have to worry about page faulting under the
6526 struct page *page,
6551 ret = btrfs_decompress(compress_type, tmp, page,
6563 char *map = kmap(page);
6565 kunmap(page);
6574 * @page: page to read extent data into if the extent is inline
6575 * @pg_offset: offset into @page to copy to
6584 * If @page is not NULL and the extent is inline, this also reads the extent
6585 * data directly into the page and marks the extent up to date in the io_tree.
6590 struct page *page, size_t pg_offset,
6615 else if (em->block_start == EXTENT_MAP_INLINE && page)
6722 btrfs_extent_item_to_extent_map(inode, path, item, !page, em);
6734 if (!page)
6738 extent_offset = page_offset(page) + pg_offset - extent_start;
6748 if (!PageUptodate(page)) {
6751 ret = uncompress_inline(path, page, pg_offset,
6756 map = kmap(page);
6764 kunmap(page);
6766 flush_dcache_page(page);
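
The uncompressed inline-read branch at 6756-6766 above copies the inline bytes into the page and zeroes whatever the extent does not cover, since an inline extent is always smaller than a page. As a sketch (helper name is mine):

        static void fill_page_from_inline(struct page *page, size_t pg_offset,
                                          const void *data, size_t copy_size)
        {
                char *map = kmap(page);

                memcpy(map + pg_offset, data, copy_size);
                if (pg_offset + copy_size < PAGE_SIZE)
                        memset(map + pg_offset + copy_size, 0,
                               PAGE_SIZE - pg_offset - copy_size);
                kunmap(page);
                flush_dcache_page(page);
        }
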
7198 * deadlock with concurrent buffered writes on page
7222 * triggered a readahead) on a page lock due to an
7227 * that page.
8114 int btrfs_readpage(struct file *file, struct page *page)
8116 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
8117 u64 start = page_offset(page);
8125 ret = btrfs_do_readpage(page, NULL, &bio, &bio_flags, 0, NULL);
8131 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8133 struct inode *inode = page->mapping->host;
8137 redirty_page_for_writepage(wbc, page);
8138 unlock_page(page);
8148 redirty_page_for_writepage(wbc, page);
8151 ret = extent_write_full_page(page, wbc);
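
btrfs_writepage() above bails out twice (8137-8138 and 8148) by putting the dirty bit back so the VM retries later. A sketch of the first, PF_MEMALLOC, bail-out; extent_write_full_page() is btrfs's real write path, the wrapper name is mine:

        #include <linux/sched.h>
        #include <linux/writeback.h>

        static int writepage_if_safe(struct page *page,
                                     struct writeback_control *wbc)
        {
                /* Direct reclaim must not recurse into filesystem writeback. */
                if (current->flags & PF_MEMALLOC) {
                        redirty_page_for_writepage(wbc, page);
                        unlock_page(page);      /* ->writepage entered locked */
                        return 0;
                }
                return extent_write_full_page(page, wbc);
        }
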
8167 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8169 int ret = try_release_extent_mapping(page, gfp_flags);
8171 detach_page_private(page);
8175 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8177 if (PageWriteback(page) || PageDirty(page))
8179 return __btrfs_releasepage(page, gfp_flags);
8184 struct page *newpage, struct page *page,
8189 ret = migrate_page_move_mapping(mapping, newpage, page, 0);
8193 if (page_has_private(page))
8194 attach_page_private(newpage, detach_page_private(page));
8196 if (PagePrivate2(page)) {
8197 ClearPagePrivate2(page);
8202 migrate_page_copy(newpage, page);
8204 migrate_page_states(newpage, page);
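
btrfs_migratepage(), reconstructed from the fragments at 8184-8204; the gaps are filled with the standard ->migratepage shape, so treat the details as approximate. The point is that page->private and the Private2 (ordered) bit must follow the data to the new page:

        #include <linux/migrate.h>

        static int sketch_migratepage(struct address_space *mapping,
                                      struct page *newpage, struct page *page,
                                      enum migrate_mode mode)
        {
                int ret;

                ret = migrate_page_move_mapping(mapping, newpage, page, 0);
                if (ret != MIGRATEPAGE_SUCCESS)
                        return ret;

                if (page_has_private(page))
                        attach_page_private(newpage, detach_page_private(page));

                if (PagePrivate2(page)) {
                        ClearPagePrivate2(page);
                        SetPagePrivate2(newpage);
                }

                if (mode != MIGRATE_SYNC_NO_COPY)
                        migrate_page_copy(newpage, page);       /* data + state */
                else
                        migrate_page_states(newpage, page);     /* state only */
                return MIGRATEPAGE_SUCCESS;
        }
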
8209 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8212 struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
8216 u64 page_start = page_offset(page);
8223 * we have the page locked, so new writeback can't start,
8226 * Wait for IO on this page so that we can safely clear
8229 wait_on_page_writeback(page);
8235 * If the range doesn't cover the full page, we don't need to and
8236 * shouldn't clear page extent mapped, as page->private can still
8240 * cover the full page, like invalidating the last page, we're
8244 btrfs_releasepage(page, GFP_NOFS);
8258 * IO on this page will never be started, so we need
8270 if (TestClearPagePrivate2(page)) {
8311 * Since the IO will never happen for this page.
8320 __btrfs_releasepage(page, GFP_NOFS);
8323 ClearPageChecked(page);
8324 detach_page_private(page);
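
The invalidation path above (8229-8324) ends in page-state teardown. Reduced to its bookkeeping, and assuming any ordered extent covering the page has already been completed, as the full path ensures:

        static void drop_btrfs_page_state(struct page *page)
        {
                wait_on_page_writeback(page);   /* no I/O may be in flight */
                ClearPagePrivate2(page);        /* ordered accounting is done */
                ClearPageChecked(page);         /* no fixup pending */
                detach_page_private(page);      /* unhook and drop the ref */
        }
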
8329 * called from a page fault handler when a page is first dirtied. Hence we must
8330 * be careful to check for EOF conditions here. We set the page up correctly
8331 * for a written page which means we get ENOSPC checking when writing into
8336 * protect against truncate races as the page could now be beyond EOF. Because
8338 * the page lock we can determine safely if the page is beyond EOF. If it is not
8339 * beyond EOF, then the page is guaranteed safe against truncation until we
8340 * unlock the page.
8344 struct page *page = vmf->page;
8365 page_start = page_offset(page);
8370 * Reserving delalloc space after obtaining the page lock can lead to
8371 * deadlock. For example, if a dirty page is locked by this function
8373 * dirty page write out, then the btrfs_writepage() function could
8374 * end up waiting indefinitely to get a lock on the page currently
8392 lock_page(page);
8395 if ((page->mapping != inode->i_mapping) ||
8397 /* page got truncated out from underneath us */
8400 wait_on_page_writeback(page);
8403 set_page_extent_mapped(page);
8414 unlock_page(page);
8420 if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8432 * page_mkwrite gets called when the page is first dirtied after it's
8433 * faulted in, but write(2) could also dirty a page and set delalloc
8435 * clear any delalloc bits within this page range since we have to
8451 /* page is wholly or partially inside EOF */
8458 kaddr = kmap(page);
8460 flush_dcache_page(page);
8461 kunmap(page);
8463 ClearPageChecked(page);
8464 set_page_dirty(page);
8465 SetPageUptodate(page);
8477 unlock_page(page);
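
The tail of the fault path above (8451-8465): if the faulting page straddles EOF, everything past i_size is zeroed before the page is dirtied, so an mmap store near EOF never exposes or persists stale bytes. A sketch, assuming the caller already verified the page is at least partially inside EOF as the fault handler above does:

        static void finish_mkwrite_page(struct page *page, loff_t isize)
        {
                loff_t page_start = page_offset(page);
                unsigned int zero_start = PAGE_SIZE;
                char *kaddr;

                if (page_start + PAGE_SIZE > isize)
                        zero_start = offset_in_page(isize);

                if (zero_start != PAGE_SIZE) {
                        kaddr = kmap(page);
                        memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
                        flush_dcache_page(page);
                        kunmap(page);
                }
                ClearPageChecked(page);
                set_page_dirty(page);
                SetPageUptodate(page);
        }
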
9925 static int btrfs_set_page_dirty(struct page *page)
9927 return __set_page_dirty_nobuffers(page);
10014 struct page *page;
10017 page = find_get_page(inode->i_mapping, index);
10018 ASSERT(page); /* Pages should be in the extent_io_tree */
10019 set_page_writeback(page);
10020 put_page(page);
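
The loop body at 10014-10020 above relies on the pages being pinned elsewhere (the extent_io_tree holds them), so find_get_page() must succeed and only the writeback bit needs flipping. As a sketch (ASSERT is btrfs's assertion macro):

        static void start_writeback_on_cached_page(struct address_space *mapping,
                                                   pgoff_t index)
        {
                struct page *page = find_get_page(mapping, index);

                ASSERT(page);   /* pages should be in the extent_io_tree */
                set_page_writeback(page);
                put_page(page); /* the tree's reference keeps it alive */
        }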