Lines Matching defs:locked_page

129 				     struct page *locked_page, u64 start,
411 struct page *locked_page,
419 if (locked_page) {
420 page_start = page_offset(locked_page);
435 if (locked_page && index == (page_start >> PAGE_SHIFT)) {
454 if (locked_page) {
465 bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
466 offset = page_offset(locked_page) + PAGE_SIZE;
708 struct page *locked_page;
1063 struct page *locked_page)
1076 ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
1079 btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
1080 if (locked_page) {
1081 const u64 page_start = page_offset(locked_page);
1083 set_page_writeback(locked_page);
1084 end_page_writeback(locked_page);
1085 btrfs_mark_ordered_io_finished(inode, locked_page,
1088 mapping_set_error(locked_page->mapping, ret);
1089 unlock_page(locked_page);
1104 struct page *locked_page = NULL;
1114 * If async_chunk->locked_page is in the async_extent range, we need to
1117 if (async_chunk->locked_page) {
1118 u64 locked_page_start = page_offset(async_chunk->locked_page);
1122 locked_page = async_chunk->locked_page;
1127 submit_uncompressed_range(inode, async_extent, locked_page);
1251 * locked_page is the page that writepage had locked already. We use
1254 * When this function fails, it unlocks all pages except @locked_page.
1257 * unlocks all pages including locked_page and starts I/O on them.
1258 * (In reality inline extents are limited to a single page, so locked_page is
1265 * - Else all pages except for @locked_page are unlocked.
1274 struct page *locked_page, u64 start, u64 end,
1330 locked_page,
1336 * locked_page is locked by the caller of
1347 unlock_page(locked_page);
1469 locked_page,
1522 * (except @locked_page) to ensure all the pages are unlocked.
1525 if (!locked_page)
1528 locked_page, 0, page_ops);
1544 locked_page,
1558 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1610 struct page *locked_page, u64 start,
1651 * The locked_page comes all the way from writepage and its
1654 * structs, only the first struct needs a pointer to locked_page
1659 if (locked_page) {
1669 wbc_account_cgroup_owner(wbc, locked_page,
1671 async_chunk[i].locked_page = locked_page;
1672 locked_page = NULL;
1674 async_chunk[i].locked_page = NULL;
1703 struct page *locked_page, u64 start,
1711 ret = cow_file_range(inode, locked_page, start, end, &done_offset,
1715 extent_write_locked_range(&inode->vfs_inode, locked_page, start,
1746 static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
1813 ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
1963 struct page *locked_page,
2125 ret = fallback_to_cow(inode, locked_page,
2182 locked_page, EXTENT_LOCKED |
2206 ret = fallback_to_cow(inode, locked_page, cow_start, end);
2225 locked_page, EXTENT_LOCKED |
2250 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
2257 * The range must cover part of the @locked_page, or a return of 1
2260 ASSERT(!(end <= page_offset(locked_page) ||
2261 start >= page_offset(locked_page) + PAGE_SIZE));
2264 ret = run_delalloc_nocow(inode, locked_page, start, end);
2270 run_delalloc_compressed(inode, locked_page, start, end, wbc))
2274 ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
2277 ret = cow_file_range(inode, locked_page, start, end, NULL,
2282 btrfs_cleanup_ordered_extents(inode, locked_page, start,
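
The matches around 1080-1089 show the cleanup a caller does when running delalloc fails while it still owns locked_page: writeback is started and immediately ended by hand (the page never reaches bio submission), the ordered extent covering the page is finished, the mapping error is recorded, and only then is the page unlocked. A condensed sketch of that path follows; the arguments the listing truncates (the PAGE_SIZE length and the !ret uptodate flag passed to btrfs_mark_ordered_io_finished()) are assumptions, not quotes from the source.

	if (locked_page) {
		/* File offset of the page writepage handed us (match at 1081). */
		const u64 page_start = page_offset(locked_page);

		/*
		 * The page never went through bio submission, so set and
		 * clear writeback by hand before finishing the ordered
		 * extent that covers it.
		 */
		set_page_writeback(locked_page);
		end_page_writeback(locked_page);
		/* Length and uptodate arguments assumed; the match at 1085 is truncated. */
		btrfs_mark_ordered_io_finished(inode, locked_page,
					       page_start, PAGE_SIZE, !ret);
		mapping_set_error(locked_page->mapping, ret);
		unlock_page(locked_page);
	}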
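
The matches at 1651-1674 show how the compressed path hands locked_page off to the async machinery: only one async_chunk may own the page, so the chunk that takes it also clears the local pointer, and every later chunk gets NULL. A minimal sketch of that handoff, assuming the surrounding loop variables (i, cur_end, wbc) and that the byte count truncated at 1669 is cur_end - start:

	if (locked_page) {
		/*
		 * Attribute the page to the writeback cgroup here; the byte
		 * count is an assumption, the match at 1669 is cut off.
		 */
		wbc_account_cgroup_owner(wbc, locked_page, cur_end - start);
		/* Hand ownership to this chunk so no later chunk touches the page. */
		async_chunk[i].locked_page = locked_page;
		locked_page = NULL;
	} else {
		async_chunk[i].locked_page = NULL;
	}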