Lines Matching defs:locked_page
85 struct page *locked_page,
109 struct page *locked_page,
114 u64 page_start = page_offset(locked_page);
369 struct page *locked_page;
728 if (async_chunk->locked_page &&
729 (page_offset(async_chunk->locked_page) >= start &&
730 page_offset(async_chunk->locked_page) <= end) {
731 __set_page_dirty_nobuffers(async_chunk->locked_page);
793 ret = cow_file_range(inode, async_chunk->locked_page,
813 else if (ret && async_chunk->locked_page)
814 unlock_page(async_chunk->locked_page);
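The check at lines 728-731 re-dirties the caller's locked page, but only when that page's offset falls inside the chunk's [start, end] range. A minimal user-space sketch of that range test; struct toy_page, toy_page_offset() and the 4 KiB page size are stand-ins for the kernel's struct page and page_offset(), not btrfs code:

#include <stdbool.h>
#include <stdio.h>

struct toy_page { unsigned long index; };

static unsigned long toy_page_offset(const struct toy_page *p)
{
	return p->index << 12;	/* assume 4 KiB pages for the toy model */
}

static bool page_in_range(const struct toy_page *locked_page,
			  unsigned long start, unsigned long end)
{
	/* Same grouping as the check above: offset within [start, end]. */
	return locked_page &&
	       toy_page_offset(locked_page) >= start &&
	       toy_page_offset(locked_page) <= end;
}

int main(void)
{
	struct toy_page page = { .index = 3 };	/* offset 12288 */

	printf("%d\n", page_in_range(&page, 0, 16384));	/* 1: in range */
	printf("%d\n", page_in_range(&page, 0, 8192));	/* 0: out of range */
	return 0;
}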
970 * locked_page is the page that writepage had locked already. We use
973 * *page_started is set to one if we unlock locked_page and do everything
978 struct page *locked_page,
1116 locked_page,
1161 locked_page,
1168 extent_clear_unlock_delalloc(inode, start, end, locked_page,
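The comment fragments at lines 970 and 973 describe the cow_file_range() contract: locked_page is the page that writepage already locked, and *page_started tells the caller that the callee unlocked it and started IO itself, so the caller must not unlock it a second time. A minimal sketch of that caller-side contract, using made-up stand-ins (toy_page, toy_run_delalloc) rather than the real btrfs helpers:

#include <stdbool.h>
#include <stdio.h>

struct toy_page { bool locked; };

static void toy_unlock_page(struct toy_page *p)
{
	p->locked = false;
}

/* Pretend delalloc handler: may take over the page and unlock it itself. */
static int toy_run_delalloc(struct toy_page *locked_page, int *page_started)
{
	*page_started = 1;		/* callee started IO on the page */
	toy_unlock_page(locked_page);
	return 0;
}

int main(void)
{
	struct toy_page page = { .locked = true };
	int page_started = 0;

	toy_run_delalloc(&page, &page_started);

	/* Caller unlocks only if the callee did not already do so. */
	if (!page_started)
		toy_unlock_page(&page);

	printf("locked=%d page_started=%d\n", page.locked, page_started);
	return 0;
}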
1238 struct page *locked_page,
1276 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1303 * The locked_page comes all the way from writepage and its
1306 * structs, only the first struct needs a pointer to locked_page
1311 if (locked_page) {
1321 wbc_account_cgroup_owner(wbc, locked_page,
1323 async_chunk[i].locked_page = locked_page;
1324 locked_page = NULL;
1326 async_chunk[i].locked_page = NULL;
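The comment fragments at lines 1303 and 1306 note that when a large delalloc range is spread across several async_chunk structs, only the first struct keeps the locked_page pointer, which is what the assignments at lines 1311-1326 implement. A self-contained toy model of that split; toy_async_chunk and CHUNK_SIZE are invented names, with CHUNK_SIZE standing in for the fixed chunk size the real code uses:

#include <stdio.h>
#include <stddef.h>

#define CHUNK_SIZE (512 * 1024)	/* made-up stand-in for the real chunk size */

struct toy_page { unsigned long index; };

struct toy_async_chunk {
	unsigned long start;
	unsigned long end;
	struct toy_page *locked_page;
};

static void split_range(struct toy_async_chunk *chunks, int nr,
			unsigned long start, unsigned long end,
			struct toy_page *locked_page)
{
	for (int i = 0; i < nr; i++) {
		unsigned long cur_end = start + CHUNK_SIZE - 1;

		if (cur_end > end)
			cur_end = end;

		chunks[i].start = start;
		chunks[i].end = cur_end;

		/* Only the first chunk keeps a pointer to the locked page. */
		if (locked_page) {
			chunks[i].locked_page = locked_page;
			locked_page = NULL;
		} else {
			chunks[i].locked_page = NULL;
		}
		start = cur_end + 1;
	}
}

int main(void)
{
	struct toy_page page = { .index = 0 };
	struct toy_async_chunk chunks[3];

	split_range(chunks, 3, 0, 3 * CHUNK_SIZE - 1, &page);
	for (int i = 0; i < 3; i++)
		printf("chunk %d: [%lu, %lu] locked_page=%p\n", i,
		       chunks[i].start, chunks[i].end,
		       (void *)chunks[i].locked_page);
	return 0;
}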
1373 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1436 return cow_file_range(inode, locked_page, start, end, page_started,
1448 struct page *locked_page,
1467 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1687 ret = fallback_to_cow(inode, locked_page,
1746 locked_page, EXTENT_LOCKED |
1770 ret = fallback_to_cow(inode, locked_page, cow_start, end,
1782 locked_page, EXTENT_LOCKED |
1815 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
1823 ret = run_delalloc_nocow(inode, locked_page, start, end,
1826 ret = run_delalloc_nocow(inode, locked_page, start, end,
1830 ret = cow_file_range(inode, locked_page, start, end,
1834 ret = cow_file_range_async(inode, wbc, locked_page, start, end,
1838 btrfs_cleanup_ordered_extents(inode, locked_page, start,
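btrfs_run_delalloc_range() at line 1815 dispatches to run_delalloc_nocow(), cow_file_range() or cow_file_range_async(), and cleans up ordered extents on error at line 1838. A rough sketch of that dispatch order; the boolean flags here only approximate the real nodatacow/prealloc/compressibility predicates and are not the kernel's conditions:

#include <stdbool.h>
#include <stdio.h>

enum delalloc_path { PATH_NOCOW, PATH_COW, PATH_COW_ASYNC };

static enum delalloc_path pick_delalloc_path(bool nodatacow, bool prealloc,
					     bool can_compress)
{
	if (nodatacow || prealloc)
		return PATH_NOCOW;	/* run_delalloc_nocow() */
	if (!can_compress)
		return PATH_COW;	/* cow_file_range() */
	return PATH_COW_ASYNC;		/* cow_file_range_async() */
}

int main(void)
{
	printf("%d\n", pick_delalloc_path(false, false, true));	/* async path */
	printf("%d\n", pick_delalloc_path(true, false, true));	/* nocow path */
	return 0;
}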