Lines Matching defs:page
45 struct page **prepared_pages,
56 struct page *page = prepared_pages[pg];
58 * Copy data from userspace to the current page
60 copied = copy_page_from_iter_atomic(page, offset, count, i);
62 /* Flush processor's dcache for this page */
63 flush_dcache_page(page);
72 * back to page-at-a-time copies after we return 0.
75 if (!PageUptodate(page)) {
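A minimal sketch of the copy loop these fragments outline (btrfs_copy_from_user()), assuming the caller has already faulted in and prepared the pages; copy_page_from_iter_atomic(), flush_dcache_page() and PageUptodate() are real kernel APIs, the wrapper itself is hypothetical:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

/* Sketch, not the btrfs implementation. */
static int sketch_copy_from_user(loff_t pos, size_t write_bytes,
				 struct page **prepared_pages,
				 struct iov_iter *i)
{
	size_t offset = offset_in_page(pos);
	size_t total_copied = 0;
	int pg = 0;

	while (write_bytes > 0) {
		size_t count = min_t(size_t, PAGE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		size_t copied;

		/* Copy data from userspace to the current page */
		copied = copy_page_from_iter_atomic(page, offset, count, i);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * A short copy into a page that is not uptodate would
		 * leave it partially uptodate, so bail out; the caller
		 * falls back to page-at-a-time copies after we return 0.
		 */
		if (copied < count && !PageUptodate(page))
			break;

		write_bytes -= copied;
		total_copied += copied;
		offset += copied;
		if (offset == PAGE_SIZE) {
			pg++;
			offset = 0;
		}
		if (copied == 0)
			break;	/* nothing faulted in, let the caller retry */
	}
	return total_copied;
}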
98 struct page **pages, size_t num_pages,
107 /* page checked is some magic around finding pages that
127 int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
168 struct page *p = pages[i];
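The btrfs_dirty_pages() fragments end with this per-page loop; what "dirtying" amounts to on the generic page-cache side is sketched below. SetPageUptodate(), ClearPageChecked() and set_page_dirty() are core APIs; btrfs's extent accounting and subpage bitmaps are omitted, and marking uptodate assumes each page was either fully written or read in beforehand:

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Sketch: make the written pages visible to writeback. */
static void sketch_dirty_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];

		SetPageUptodate(p);
		/* the "page checked" magic noted above: clear the flag
		 * here so dirtying goes through the normal path */
		ClearPageChecked(p);
		set_page_dirty(p);
	}
}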
840 * on error we return an unlocked page and the error value
841 * on success we return a locked page and 0
844 struct page *page, u64 pos,
847 struct folio *folio = page_folio(page);
851 !PageUptodate(page)) {
855 lock_page(page);
856 if (!PageUptodate(page)) {
857 unlock_page(page);
864 * called to release the page. Here we check both inode
865 * mapping and PagePrivate() to make sure the page was not
869 * to store an extra bitmap using page->private.
871 if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
872 unlock_page(page);
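The fragments above (prepare_uptodate_page()) describe a lock-and-revalidate pattern: read the page if it is not uptodate, relock it, then confirm it still belongs to this mapping and still has its private state, since releasepage may have run while the page was unlocked. A sketch of that pattern; read_this_page() is a hypothetical stand-in for the filesystem's readpage, which takes a locked page and unlocks it when the read completes:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch. On error we return an unlocked page and the error value;
 * on success we return a locked page and 0. The page arrives locked.
 */
static int sketch_prepare_uptodate_page(struct inode *inode,
					struct page *page,
					int (*read_this_page)(struct page *))
{
	if (!PageUptodate(page)) {
		int ret = read_this_page(page);	/* unlocks on completion */

		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
		/*
		 * While the page was unlocked, releasepage could have
		 * dropped its private state. Check both the mapping and
		 * PagePrivate() to make sure the page was not released;
		 * if it was, the caller should grab a fresh page and retry.
		 */
		if (page->mapping != inode->i_mapping ||
		    !PagePrivate(page)) {
			unlock_page(page);
			return -EAGAIN;
		}
	}
	return 0;
}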
903 * this just gets pages into the page cache and locks them down.
905 static noinline int prepare_pages(struct inode *inode, struct page **pages,
976 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
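"Gets pages into the page cache and locks them down" maps onto the core find_or_create_page() helper, which returns a locked, referenced page. A hedged sketch of the prepare_pages() loop with error unwinding, leaving out the uptodate and extent handling that prepare_uptodate_page() and lock_and_cleanup_extent_if_need() add:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch, not the btrfs implementation. */
static int sketch_prepare_pages(struct inode *inode, struct page **pages,
				size_t num_pages, loff_t pos)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	gfp_t mask = mapping_gfp_mask(inode->i_mapping);
	size_t i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping,
					       index + i, mask);
		if (!pages[i])
			goto fail;
	}
	return 0;

fail:
	/* drop the locks and references taken so far */
	while (i-- > 0) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	return -ENOMEM;
}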
1182 struct page **pages = NULL;
1214 PAGE_SIZE / (sizeof(struct page *)));
1217 pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
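The kmalloc_array() call sizes the pointer array: one slot per page the write will touch, capped so the array itself never exceeds one page (PAGE_SIZE / sizeof(struct page *), i.e. 512 entries with 4 KiB pages and 8-byte pointers). A sketch of that computation; the helper name is hypothetical:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* Sketch: bound the scratch array at a single page of pointers. */
static struct page **sketch_alloc_page_array(struct iov_iter *i,
					     size_t *nrptrs)
{
	*nrptrs = min_t(size_t,
			DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
			PAGE_SIZE / sizeof(struct page *));
	return kmalloc_array(*nrptrs, sizeof(struct page *), GFP_KERNEL);
}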
1343 * back to one page at a time
1535 * So here we disable page faults in the iov_iter and then retry if we
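That comment describes the nofault-and-retry pattern used for direct IO (and echoed by the comments around line 3758 below): page faults are forbidden while extent locks are held, so the iov_iter is flagged nofault, and an -EFAULT is resolved by faulting the user pages in manually, with no locks held, before retrying. A sketch; do_one_write() is a hypothetical placeholder for the locked write attempt:

#include <linux/pagemap.h>
#include <linux/uio.h>

/* Sketch of the nofault-and-retry pattern. Faulting inside
 * do_one_write() could deadlock, so faults are disabled there and
 * any -EFAULT is resolved here, outside the locks. */
static ssize_t sketch_write_retry(struct iov_iter *from,
				  ssize_t (*do_one_write)(struct iov_iter *))
{
	ssize_t ret;

	from->nofault = true;
	do {
		ret = do_one_write(from);
		if (ret != -EFAULT)
			break;
		/* Fault the user pages in with no locks held; the
		 * return value is the number of bytes NOT faulted in. */
		if (fault_in_iov_iter_readable(from, iov_iter_count(from)) ==
		    iov_iter_count(from))
			break;	/* nothing could be faulted in: give up */
	} while (iov_iter_count(from) > 0);
	from->nofault = false;

	return ret;
}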
2197 * For the subpage case, if the range is not at a page boundary, we could
2201 * So here we need to do extra page alignment for
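The fragment is cut off by the match, but the alignment it refers to is inward: with a sectorsize smaller than PAGE_SIZE, the hole range can be sector-aligned yet still end mid-page, so the range is shrunk to whole-page boundaries before any page-granularity check. A sketch of the arithmetic (round_up()/round_down() are kernel macros; the helper is hypothetical and assumes the range covers at least one full page):

#include <linux/math.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Sketch: shrink [lockstart, lockend] inward to page boundaries. */
static void sketch_page_align_inward(u64 lockstart, u64 lockend,
				     u64 *page_lockstart, u64 *page_lockend)
{
	*page_lockstart = round_up(lockstart, PAGE_SIZE);
	*page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
}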
2658 /* Check the aligned pages after the first unaligned page,
2659 * if offset != orig_start, which means the first unaligned page
2663 /* after truncating the page, check the hole again */
2684 /* zero the front end of the last page */
2728 * If we only end up zeroing part of a page, we still need to
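Zeroing "the front end of the last page" is ordinary partial-page zeroing: the page must already be uptodate (read in first) so the bytes outside the zeroed range stay valid, and it must be redirtied so the zeros reach disk. A sketch with hypothetical bounds; zero_user_segment() and set_page_dirty() are core APIs:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Sketch: zero bytes [from, to) of a locked, uptodate page and
 * mark it dirty for writeback. zero_user_segment() also flushes
 * the dcache for the page. */
static void sketch_zero_partial_page(struct page *page,
				     unsigned int from, unsigned int to)
{
	zero_user_segment(page, from, to);
	set_page_dirty(page);
}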
3076 * with the page truncated or the size expanded.
3758 * at btrfs_direct_write(), but we also disable page faults in addition
3761 * which can still trigger page fault ins despite having set ->nofault
3765 * to lock the extent range in the inode's tree during the page reads
3845 * So with compression we will find and lock a dirty page and clear the
3851 * since it will wait on the page lock, which won't be unlocked until