Lines matching refs:page (each entry gives the line number in the searched source file, followed by the matching source line)

8 #include <linux/page-flags.h>
192 * Submit bio from extent page data via submit_one_bio
1495 struct page *page;
1498 page = find_get_page(inode->i_mapping, index);
1499 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1500 clear_page_dirty_for_io(page);
1501 put_page(page);
1510 struct page *page;
1513 page = find_get_page(inode->i_mapping, index);
1514 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1515 __set_page_dirty_nobuffers(page);
1516 account_page_redirty(page);
1517 put_page(page);
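
The two groups of matches above (source lines 1495-1501 and 1510-1517) come from a pair of small range helpers: one clears the dirty bit on every page in a byte range before I/O, the other re-dirties the same range afterwards. Below is a minimal sketch of the redirty side, reconstructed from the matched lines only; the function name extent_range_redirty_sketch and the loop bounds are assumptions, and the usual kernel headers (linux/pagemap.h, linux/mm.h, linux/writeback.h) are taken as given.

/*
 * Hypothetical reconstruction of the helper matched at lines 1510-1517.
 * Walks every page cache page in [start, end] and marks it dirty again.
 */
static void extent_range_redirty_sketch(struct inode *inode, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		/* The lookup takes a reference; the page must already be cached. */
		page = find_get_page(inode->i_mapping, index);
		BUG_ON(!page); /* Pages should be in the extent_io_tree */
		/* Re-mark the page dirty and fix up the dirty accounting. */
		__set_page_dirty_nobuffers(page);
		account_page_redirty(page);
		put_page(page); /* drop the find_get_page() reference */
		index++;
	}
}

The clear-dirty counterpart matched at 1495-1501 follows the same loop but calls clear_page_dirty_for_io() in place of the two redirty calls.
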
1811 struct page *locked_page,
1816 struct page *locked_page,
1831 struct page *locked_page,
1861 struct page *locked_page, u64 *start,
1900 /* step two, lock all the pages after the page that has start */
1942 struct page *locked_page,
1949 struct page *pages[16];
2020 struct page *locked_page,
2208 * helper function to set a given page up to date if all the
2209 * extents in the tree for that page are up to date
2211 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
2213 u64 start = page_offset(page);
2216 SetPageUptodate(page);
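
The matches at 2208-2216 belong to the helper described by the comment fragment just above: a page is only marked uptodate once every extent covering it is uptodate. A sketch under that assumption follows; the test_range_bit()/EXTENT_UPTODATE check is inferred from the comment rather than shown in the matches, so treat it as illustrative.

/* Set PG_uptodate only if the whole extent range backing the page is uptodate. */
static void check_page_uptodate_sketch(struct extent_io_tree *tree, struct page *page)
{
	u64 start = page_offset(page);
	u64 end = start + PAGE_SIZE - 1;

	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
		SetPageUptodate(page);
}
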
2254 u64 length, u64 logical, struct page *page,
2315 bio_add_page(bio, page, length, pg_offset);
2345 struct page *p = eb->pages[i];
2364 struct page *page, u64 ino, unsigned int pg_offset)
2406 failrec->logical, page, pg_offset,
2644 struct page *page, unsigned int pgoff,
2693 bio_add_page(repair_bio, page, failrec->len, pgoff);
2712 void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2717 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
2720 ClearPageUptodate(page);
2721 SetPageError(page);
2723 mapping_set_error(page->mapping, ret);
2731 * end_page_writeback if the page has no more pending IO
2746 struct page *page = bvec->bv_page;
2747 struct inode *inode = page->mapping->host;
2750 /* We always issue full-page reads, but if some block
2751 * in a page fails to read, blk_update_request() will
2754 * if they don't add up to a full page. */
2758 "partial page write in btrfs with offset %u and length %u",
2762 "incomplete page write in btrfs with offset %u and length %u",
2766 start = page_offset(page);
2769 end_extent_writepage(page, error, start, end);
2770 end_page_writeback(page);
2792 * set the page up to date if all extents in the tree are uptodate
2794 * unlock the page if there are no other extents locked for it
2817 struct page *page = bvec->bv_page;
2818 struct inode *inode = page->mapping->host;
2828 /* We always issue full-page reads, but if some block
2829 * in a page fails to read, blk_update_request() will
2832 * if they don't add up to a full page. */
2836 "partial page read in btrfs with offset %u and length %u",
2840 "incomplete page read in btrfs with offset %u and length %u",
2844 start = page_offset(page);
2851 ret = btrfs_verify_data_csum(io_bio, offset, page,
2855 offset, page, start, end, mirror);
2861 page,
2876 * 0 and we just go on with the next page in our bio.
2878 * we remain responsible for that page.
2880 if (!btrfs_submit_read_repair(inode, bio, offset, page,
2881 start - page_offset(page),
2891 eb = (struct extent_buffer *)page->private;
2905 /* Zero out the end if this page straddles i_size */
2907 if (page->index == end_index && off)
2908 zero_user_segment(page, off, PAGE_SIZE);
2909 SetPageUptodate(page);
2911 ClearPageUptodate(page);
2912 SetPageError(page);
2914 unlock_page(page);
3015 * @page: page to add to the bio
3017 * a contiguous page to the previous one
3018 * @size: portion of page that we want to write
3019 * @offset: starting offset in the page
3028 struct page *page, u64 offset,
3041 struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
3055 if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
3060 bio_add_page(bio, page, page_size, pg_offset) < page_size) {
3069 wbc_account_cgroup_owner(wbc, page, page_size);
3075 bio_add_page(bio, page, page_size, pg_offset);
3078 bio->bi_write_hint = page->mapping->host->i_write_hint;
3083 bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
3086 wbc_account_cgroup_owner(wbc, page, page_size);
3095 struct page *page)
3097 if (!PagePrivate(page))
3098 attach_page_private(page, eb);
3100 WARN_ON(page->private != (unsigned long)eb);
3103 void set_page_extent_mapped(struct page *page)
3105 if (!PagePrivate(page))
3106 attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3110 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3127 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3139 * XXX JDM: This needs looking at to ensure proper page locking
3142 int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
3146 struct inode *inode = page->mapping->host;
3147 u64 start = page_offset(page);
3164 set_page_extent_mapped(page);
3166 if (!PageUptodate(page)) {
3167 if (cleancache_get_page(page) == 0) {
3174 if (page->index == last_byte >> PAGE_SHIFT) {
3180 userpage = kmap_atomic(page);
3182 flush_dcache_page(page);
3195 userpage = kmap_atomic(page);
3197 flush_dcache_page(page);
3205 em = __get_extent_map(inode, page, pg_offset, cur,
3208 SetPageError(page);
3286 userpage = kmap_atomic(page);
3288 flush_dcache_page(page);
3299 /* the get_extent function already copied into the page */
3302 check_page_uptodate(tree, page);
3312 SetPageError(page);
3320 page, offset, disk_io_size,
3330 SetPageError(page);
3339 if (!PageError(page))
3340 SetPageUptodate(page);
3341 unlock_page(page);
3346 static inline void contiguous_readpages(struct page *pages[], int nr_pages,
3375 * to write the page (copy into inline extent). In this case the IO has
3376 * been started and the page is already unlocked.
3378 * This returns 0 if all went well (page still locked)
3379 * This returns < 0 if there were errors (page still locked)
3382 struct page *page, struct writeback_control *wbc,
3394 found = find_lock_delalloc_range(&inode->vfs_inode, page,
3401 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3404 SetPageError(page);
3435 * we've unlocked the page, so we can't update
3448 * and does the loop to map the page into extents and bios.
3450 * We return 1 if the IO is started and the page is unlocked,
3451 * 0 if all went well (page still locked)
3452 * < 0 if there were errors (page still locked)
3455 struct page *page,
3463 u64 start = page_offset(page);
3478 ret = btrfs_writepage_cow_fixup(page, start, page_end);
3481 redirty_page_for_writepage(wbc, page);
3483 unlock_page(page);
3488 * we don't want to touch the inode after unlocking the page,
3501 btrfs_writepage_endio_finish_ordered(page, cur,
3507 SetPageError(page);
3533 btrfs_writepage_endio_finish_ordered(page, cur,
3541 if (!PageWriteback(page)) {
3543 "page %lu not writeback, cur %llu end %llu",
3544 page->index, cur, end);
3548 page, offset, iosize, pg_offset,
3553 SetPageError(page);
3554 if (PageWriteback(page))
3555 end_page_writeback(page);
3575 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3578 struct inode *inode = page->mapping->host;
3579 u64 start = page_offset(page);
3588 trace___extent_writepage(page, inode, wbc);
3590 WARN_ON(!PageLocked(page));
3592 ClearPageError(page);
3595 if (page->index > end_index ||
3596 (page->index == end_index && !pg_offset)) {
3597 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3598 unlock_page(page);
3602 if (page->index == end_index) {
3605 userpage = kmap_atomic(page);
3609 flush_dcache_page(page);
3612 set_page_extent_mapped(page);
3615 ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
3623 ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
3630 /* make sure the mapping tag for page dirty gets cleared */
3631 set_page_writeback(page);
3632 end_page_writeback(page);
3634 if (PageError(page)) {
3636 end_extent_writepage(page, ret, start, page_end);
3638 unlock_page(page);
3661 * Return <0 if something went wrong, no page is locked
3723 struct page *p = eb->pages[i];
3763 static void set_btree_ioerr(struct page *page)
3765 struct extent_buffer *eb = (struct extent_buffer *)page->private;
3768 SetPageError(page);
3848 struct page *page = bvec->bv_page;
3850 eb = (struct extent_buffer *)page->private;
3856 ClearPageUptodate(page);
3857 set_btree_ioerr(page);
3860 end_page_writeback(page);
3903 struct page *p = eb->pages[i];
3928 struct page *p = eb->pages[i];
3984 struct page *page = pvec.pages[i];
3986 if (!PagePrivate(page))
3990 if (!PagePrivate(page)) {
3995 eb = (struct extent_buffer *)page->private;
4049 * We hit the last page and there is more work to be done: wrap
4103 * If a page is already under I/O, write_cache_pages() skips it, even
4183 struct page *page = pvec.pages[i];
4185 done_index = page->index + 1;
4188 * the page lock: the page may be truncated or
4189 * invalidated (changing page->mapping to NULL),
4193 if (!trylock_page(page)) {
4196 lock_page(page);
4199 if (unlikely(page->mapping != mapping)) {
4200 unlock_page(page);
4205 if (PageWriteback(page)) {
4209 wait_on_page_writeback(page);
4212 if (PageWriteback(page) ||
4213 !clear_page_dirty_for_io(page)) {
4214 unlock_page(page);
4218 ret = __extent_writepage(page, wbc, epd);
4236 * We hit the last page and there is more work to be done: wrap
4243 * If we're looping we could run into a page that is locked by a
4245 * page in our current bio, and thus deadlock, so flush the
4260 int extent_write_full_page(struct page *page, struct writeback_control *wbc)
4269 ret = __extent_writepage(page, wbc, &epd);
4286 struct page *page;
4307 page = find_get_page(mapping, start >> PAGE_SHIFT);
4308 if (clear_page_dirty_for_io(page))
4309 ret = __extent_writepage(page, &wbc_writepages, &epd);
4311 btrfs_writepage_endio_finish_ordered(page, start,
4313 unlock_page(page);
4315 put_page(page);
4353 struct page *pagepool[16];
4379 * ranges corresponding to the page, and then deletes any extent state
4383 struct page *page, unsigned long offset)
4386 u64 start = page_offset(page);
4388 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4395 wait_on_page_writeback(page);
4402 * a helper for releasepage, this tests for areas of the page that
4404 * to drop the page.
4407 struct page *page, gfp_t mask)
4409 u64 start = page_offset(page);
4437 * in the range corresponding to the page, both state records and extent
4440 int try_release_extent_mapping(struct page *page, gfp_t mask)
4443 u64 start = page_offset(page);
4445 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4450 page->mapping->host->i_size > SZ_16M) {
4517 return try_release_extent_state(tree, page, mask);
4909 struct page *page = eb->pages[i];
4911 if (!page)
4914 spin_lock(&page->mapping->private_lock);
4918 * and have this page now attached to the new eb. So
4922 if (PagePrivate(page) &&
4923 page->private == (unsigned long)eb) {
4925 BUG_ON(PageDirty(page));
4926 BUG_ON(PageWriteback(page));
4931 detach_page_private(page);
4935 spin_unlock(&page->mapping->private_lock);
4937 /* One for when we allocated the page */
4938 put_page(page);
4997 struct page *p;
5096 struct page *accessed)
5104 struct page *p = eb->pages[i];
5199 struct page *p;
5228 * We could have already allocated an eb for this page
5232 * overwrite page->private.
5262 * opens a race with btree_releasepage which can free a page
5294 * btree_releasepage will correctly detect that a page belongs to a
5407 struct page *page;
5412 page = eb->pages[i];
5413 if (!PageDirty(page))
5416 lock_page(page);
5417 WARN_ON(!PagePrivate(page));
5419 clear_page_dirty_for_io(page);
5420 xa_lock_irq(&page->mapping->i_pages);
5421 if (!PageDirty(page))
5422 __xa_clear_mark(&page->mapping->i_pages,
5423 page_index(page), PAGECACHE_TAG_DIRTY);
5424 xa_unlock_irq(&page->mapping->i_pages);
5425 ClearPageError(page);
5426 unlock_page(page);
5460 struct page *page;
5466 page = eb->pages[i];
5467 if (page)
5468 ClearPageUptodate(page);
5475 struct page *page;
5481 page = eb->pages[i];
5482 SetPageUptodate(page);
5489 struct page *page;
5504 page = eb->pages[i];
5506 if (!trylock_page(page))
5509 lock_page(page);
5519 page = eb->pages[i];
5520 if (!PageUptodate(page)) {
5540 page = eb->pages[i];
5542 if (!PageUptodate(page)) {
5545 unlock_page(page);
5549 ClearPageError(page);
5551 page, page_offset(page), PAGE_SIZE, 0,
5558 * i.e unlock page/set error bit.
5561 SetPageError(page);
5562 unlock_page(page);
5566 unlock_page(page);
5580 page = eb->pages[i];
5581 wait_on_page_locked(page);
5582 if (!PageUptodate(page))
5591 page = eb->pages[locked_pages];
5592 unlock_page(page);
5632 struct page *page;
5649 page = eb->pages[i];
5652 kaddr = page_address(page);
5668 struct page *page;
5680 page = eb->pages[i];
5683 kaddr = page_address(page);
5703 struct page *page;
5715 page = eb->pages[i];
5719 kaddr = page_address(page);
5758 struct page *page;
5769 page = eb->pages[i];
5770 WARN_ON(!PageUptodate(page));
5773 kaddr = page_address(page);
5788 struct page *page;
5798 page = eb->pages[i];
5799 WARN_ON(!PageUptodate(page));
5802 kaddr = page_address(page);
5833 struct page *page;
5846 page = dst->pages[i];
5847 WARN_ON(!PageUptodate(page));
5851 kaddr = page_address(page);
5862 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5867 * @page_index: return index of the page in the extent buffer that contains the
5869 * @page_offset: return offset into the page given by page_index
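
The doc-comment fragments at 5862-5869 describe how a bit number inside an extent-buffer bitmap is translated into a page index and an offset within that page. A minimal sketch of that arithmetic, ignoring any offset of the extent buffer within its first page, with hypothetical variable names:

	/* byte holding bit 'nr' of a bitmap that begins 'start' bytes into the eb */
	unsigned long byte = start + nr / BITS_PER_BYTE;
	unsigned long page_index = byte >> PAGE_SHIFT;  /* which eb->pages[] entry */
	size_t page_offset = offset_in_page(byte);      /* offset inside that page */
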
5903 struct page *page;
5908 page = eb->pages[i];
5909 WARN_ON(!PageUptodate(page));
5910 kaddr = page_address(page);
5925 struct page *page;
5933 page = eb->pages[i];
5934 WARN_ON(!PageUptodate(page));
5935 kaddr = page_address(page);
5944 page = eb->pages[++i];
5945 WARN_ON(!PageUptodate(page));
5946 kaddr = page_address(page);
5968 struct page *page;
5976 page = eb->pages[i];
5977 WARN_ON(!PageUptodate(page));
5978 kaddr = page_address(page);
5987 page = eb->pages[++i];
5988 WARN_ON(!PageUptodate(page));
5989 kaddr = page_address(page);
6004 static void copy_pages(struct page *dst_page, struct page *src_page,
6099 int try_release_extent_buffer(struct page *page)
6104 * We need to make sure nobody is attaching this page to an eb right
6107 spin_lock(&page->mapping->private_lock);
6108 if (!PagePrivate(page)) {
6109 spin_unlock(&page->mapping->private_lock);
6113 eb = (struct extent_buffer *)page->private;
6119 * this page.
6124 spin_unlock(&page->mapping->private_lock);
6127 spin_unlock(&page->mapping->private_lock);
6131 * so just return, this page will likely be freed soon anyway.
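
The final group of matches (6099-6131) shows the pattern try_release_extent_buffer() uses to decide whether a btree page can be freed: the page's private pointer is inspected under the mapping's private_lock so the check cannot race with a concurrent attach. A stripped-down sketch of just that pattern, with the extent_buffer refcount handling elided:

	spin_lock(&page->mapping->private_lock);
	if (!PagePrivate(page)) {
		/* No extent_buffer attached; nothing holds the page. */
		spin_unlock(&page->mapping->private_lock);
		return 1;
	}
	eb = (struct extent_buffer *)page->private;
	/* ... refcount checks decide whether eb (and the page) may be released ... */
	spin_unlock(&page->mapping->private_lock);
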