Lines Matching refs:page

14  * - Only 64K page size is supported for now
15 * This makes metadata handling easier, as a 64K page ensures that
16 * every nodesize fits inside one page, so we don't need to handle
22 * - Metadata can't cross a 64K page boundary
32 * needed range; other unrelated ranges in the same page will not be touched.
35 * The writeback is still for the full page, but we will only submit
36 * the dirty extent buffers in the page.
38 * This means, if we have a metadata page like this:
54 * record the status of each sector inside a page. This provides the extra
58 * Since we have multiple tree blocks inside one page, we can't rely on page
61 * the same page).
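
The comments above describe keeping a bitmap of per-sector state so that writeback can submit only the dirty tree blocks inside a page. A standalone sketch of that idea follows; the 64K page, 16K nodesize, and bitmap value are illustrative assumptions, not taken from the kernel:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE   (64 * 1024)   /* assumed 64K page, per the comment above */
#define NODESIZE    (16 * 1024)   /* example nodesize; any nodesize <= 64K fits */

int main(void)
{
	/* One bit per tree block in the page; bit set == block is dirty. */
	const unsigned int blocks_per_page = PAGE_SIZE / NODESIZE;   /* 4 */
	uint32_t dirty_bitmap = 0x5;   /* blocks 0 and 2 dirty, 1 and 3 clean */

	for (unsigned int i = 0; i < blocks_per_page; i++) {
		if (!(dirty_bitmap & (1u << i)))
			continue;   /* clean block: skip, do not submit */
		printf("submit tree block at page offset 0x%x\n", i * NODESIZE);
	}
	return 0;
}
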
67 bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
74 * mapping. And if page->mapping->host is a data inode, it's subpage.
77 if (!page->mapping || !page->mapping->host ||
78 is_data_inode(page->mapping->host))
119 struct page *page, enum btrfs_subpage_type type)
124 * We have cases like a dummy extent buffer page, which is not mapped
127 if (page->mapping)
128 ASSERT(PageLocked(page));
130 /* Either not subpage, or the page already has private attached */
131 if (!btrfs_is_subpage(fs_info, page) || PagePrivate(page))
138 attach_page_private(page, subpage);
143 struct page *page)
148 if (!btrfs_is_subpage(fs_info, page) || !PagePrivate(page))
151 subpage = detach_page_private(page);
189 * of the same page.
191 * detach_extent_buffer_page() won't detach the page private while we're still
195 struct page *page)
199 if (!btrfs_is_subpage(fs_info, page))
202 ASSERT(PagePrivate(page) && page->mapping);
203 lockdep_assert_held(&page->mapping->private_lock);
205 subpage = (struct btrfs_subpage *)page->private;
210 struct page *page)
214 if (!btrfs_is_subpage(fs_info, page))
217 ASSERT(PagePrivate(page) && page->mapping);
218 lockdep_assert_held(&page->mapping->private_lock);
220 subpage = (struct btrfs_subpage *)page->private;
226 struct page *page, u64 start, u32 len)
229 ASSERT(PagePrivate(page) && page->private);
233 * The range check only works for mapped pages; we can still have
234 * unmapped pages like dummy extent buffer pages.
236 if (page->mapping)
237 ASSERT(page_offset(page) <= start &&
238 start + len <= page_offset(page) + PAGE_SIZE);
242 struct page *page, u64 start, u32 len)
244 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
247 btrfs_subpage_assert(fs_info, page, start, len);
253 struct page *page, u64 start, u32 len)
255 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
260 btrfs_subpage_assert(fs_info, page, start, len);
261 is_data = is_data_inode(page->mapping->host);
266 * For data we need to unlock the page if the last read has finished.
273 unlock_page(page);
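
The reader-side helpers excerpted above take a per-page reader count on start and, for data inodes, unlock the page once the last read has finished (line 273). A minimal userspace sketch of that dec-and-test pattern, assuming an atomic counter; the printf stands in for unlock_page():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint readers;	/* stands in for the per-page reader counter */

static void start_reader(unsigned int nr_sectors)
{
	atomic_fetch_add(&readers, nr_sectors);
}

/* Returns true when this call released the last outstanding reader. */
static bool end_reader(unsigned int nr_sectors)
{
	return atomic_fetch_sub(&readers, nr_sectors) == nr_sectors;
}

int main(void)
{
	start_reader(4);		/* two reads covering 6 sectors in total */
	start_reader(2);
	end_reader(2);			/* 4 sectors still under read */
	if (end_reader(4))		/* last reader gone ... */
		printf("unlock the page\n");	/* ... so a data page gets unlocked */
	return 0;
}
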
276 static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len)
281 *start = max_t(u64, page_offset(page), orig_start);
287 if (page_offset(page) >= orig_start + orig_len)
290 *len = min_t(u64, page_offset(page) + PAGE_SIZE,
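
btrfs_subpage_clamp_range() trims a caller's byte range to the boundaries of the given page before the bitmap is touched. A self-contained sketch of the same arithmetic with a worked example; the 64K page size and the offsets are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE (64 * 1024ULL)

/* Clamp [*start, *start + *len) to the page that begins at page_start. */
static void clamp_range(uint64_t page_start, uint64_t *start, uint32_t *len)
{
	uint64_t orig_start = *start;
	uint32_t orig_len = *len;

	*start = (page_start > orig_start) ? page_start : orig_start;
	/* Range ends before this page: leave an empty range. */
	if (page_start >= orig_start + orig_len) {
		*len = 0;
		return;
	}
	uint64_t end = orig_start + orig_len;
	uint64_t page_end = page_start + PAGE_SIZE;

	*len = ((end < page_end) ? end : page_end) - *start;
}

int main(void)
{
	/* Range 0x2F000..0x32000 against the page covering 0x30000..0x40000. */
	uint64_t start = 0x2F000;
	uint32_t len = 0x3000;

	clamp_range(0x30000, &start, &len);
	printf("start=0x%llx len=0x%x\n",
	       (unsigned long long)start, (unsigned int)len);	/* start=0x30000 len=0x2000 */
	return 0;
}
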
295 struct page *page, u64 start, u32 len)
297 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
301 btrfs_subpage_assert(fs_info, page, start, len);
309 struct page *page, u64 start, u32 len)
311 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
314 btrfs_subpage_assert(fs_info, page, start, len);
331 * Lock a page for delalloc page writeback.
333 * Return -EAGAIN if the page is not properly initialized.
334 * Return 0 with the page locked, and writer counter updated.
336 * Even with 0 returned, the page still needs an extra check to make sure
337 * it's really the correct page, as the caller is using
338 * filemap_get_folios_contig(), which can race with page invalidation.
341 struct page *page, u64 start, u32 len)
343 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
344 lock_page(page);
347 lock_page(page);
348 if (!PagePrivate(page) || !page->private) {
349 unlock_page(page);
352 btrfs_subpage_clamp_range(page, &start, &len);
353 btrfs_subpage_start_writer(fs_info, page, start, len);
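
The comment at lines 331-338 describes the delalloc writer lock: take the page lock, re-check that the subpage private is still attached (the caller can race with invalidation), and only then clamp the range and call btrfs_subpage_start_writer(); otherwise unlock and return -EAGAIN. A toy sketch of that ordering; the struct and field names are illustrative, not the kernel's:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool locked;
	void *private;	/* subpage state; can vanish if the page is invalidated */
};

/* Lock for delalloc writeback; -EAGAIN if the subpage state is already gone. */
static int page_start_writer_lock(struct toy_page *page)
{
	page->locked = true;		/* lock_page() */
	if (!page->private) {
		page->locked = false;	/* unlock_page() */
		return -EAGAIN;		/* caller skips or retries this page */
	}
	/* ...clamp the range and bump the subpage writer count here... */
	return 0;
}

int main(void)
{
	int subpage_state;
	struct toy_page stale = { .private = NULL };
	struct toy_page good  = { .private = &subpage_state };

	printf("%d %d\n", page_start_writer_lock(&stale),	/* -EAGAIN */
			  page_start_writer_lock(&good));	/* 0 */
	return 0;
}
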
358 struct page *page, u64 start, u32 len)
360 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
361 return unlock_page(page);
362 btrfs_subpage_clamp_range(page, &start, &len);
363 if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
364 unlock_page(page);
367 #define subpage_calc_start_bit(fs_info, page, name, start, len) \
371 btrfs_subpage_assert(fs_info, page, start, len); \
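
subpage_calc_start_bit() converts a byte offset inside the page into a bit index within the per-state slice of the subpage bitmap. A sketch of that mapping, assuming a 64K page, 4K sectors, and an illustrative per-state offset into one shared bitmap:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       (64 * 1024ULL)
#define SECTORSIZE_BITS 12	/* 4K sectors */

/*
 * Each state (uptodate, dirty, writeback, ...) occupies its own slice of a
 * shared bitmap; state_offset is where that slice begins.
 */
static unsigned int calc_start_bit(uint64_t file_start, unsigned int state_offset)
{
	unsigned int start_bit = (file_start & (PAGE_SIZE - 1)) >> SECTORSIZE_BITS;

	return start_bit + state_offset;
}

int main(void)
{
	/*
	 * Byte 0x38000 of the file falls at offset 0x8000 inside its 64K page,
	 * i.e. sector 8.  With the dirty slice starting at bit 32, the range
	 * begins at bit 40 of the shared bitmap.
	 */
	printf("start bit = %u\n", calc_start_bit(0x38000, 32));	/* 40 */
	return 0;
}
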
388 struct page *page, u64 start, u32 len)
390 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
391 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
398 SetPageUptodate(page);
403 struct page *page, u64 start, u32 len)
405 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
406 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
412 ClearPageUptodate(page);
417 struct page *page, u64 start, u32 len)
419 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
420 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
427 set_page_dirty(page);
437 * NOTE: Callers should manually clear the page dirty flag in the true case, as we have
441 struct page *page, u64 start, u32 len)
443 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
444 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
458 struct page *page, u64 start, u32 len)
462 last = btrfs_subpage_clear_and_test_dirty(fs_info, page, start, len);
464 clear_page_dirty_for_io(page);
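
btrfs_subpage_clear_and_test_dirty() clears the requested sectors and reports whether any dirty sectors remain in the page; only then does the caller clear the page-level flag via clear_page_dirty_for_io(), per the NOTE at line 437. A sketch of that clear-then-test pattern over a plain bitmap; the bitmap width and values are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t dirty_bitmap;	/* one bit per sector in the page */

/* Clear @nbits starting at @start_bit; return true if no dirty bits remain. */
static bool clear_and_test_dirty(unsigned int start_bit, unsigned int nbits)
{
	uint64_t mask = ((nbits >= 64) ? ~0ULL : ((1ULL << nbits) - 1)) << start_bit;

	dirty_bitmap &= ~mask;
	return dirty_bitmap == 0;
}

int main(void)
{
	dirty_bitmap = 0x0F0;			/* sectors 4-7 dirty */

	if (clear_and_test_dirty(4, 2))		/* sectors 6-7 are still dirty */
		printf("clear page-level dirty flag\n");
	if (clear_and_test_dirty(6, 2))		/* last dirty sectors gone */
		printf("clear page-level dirty flag\n");
	return 0;
}
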
468 struct page *page, u64 start, u32 len)
470 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
471 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
477 set_page_writeback(page);
482 struct page *page, u64 start, u32 len)
484 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
485 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
492 ASSERT(PageWriteback(page));
493 end_page_writeback(page);
499 struct page *page, u64 start, u32 len)
501 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
502 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
508 SetPageOrdered(page);
513 struct page *page, u64 start, u32 len)
515 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
516 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
523 ClearPageOrdered(page);
528 struct page *page, u64 start, u32 len)
530 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
531 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
538 SetPageChecked(page);
543 struct page *page, u64 start, u32 len)
545 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
546 unsigned int start_bit = subpage_calc_start_bit(fs_info, page,
552 ClearPageChecked(page);
557 * Unlike set/clear, which depend on each page's status, for test all bits
562 struct page *page, u64 start, u32 len) \
564 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; \
565 unsigned int start_bit = subpage_calc_start_bit(fs_info, page, \
590 struct page *page, u64 start, u32 len) \
592 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
593 set_page_func(page); \
596 btrfs_subpage_set_##name(fs_info, page, start, len); \
599 struct page *page, u64 start, u32 len) \
601 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
602 clear_page_func(page); \
605 btrfs_subpage_clear_##name(fs_info, page, start, len); \
608 struct page *page, u64 start, u32 len) \
610 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) \
611 return test_page_func(page); \
612 return btrfs_subpage_test_##name(fs_info, page, start, len); \
615 struct page *page, u64 start, u32 len) \
617 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
618 set_page_func(page); \
621 btrfs_subpage_clamp_range(page, &start, &len); \
622 btrfs_subpage_set_##name(fs_info, page, start, len); \
625 struct page *page, u64 start, u32 len) \
627 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
628 clear_page_func(page); \
631 btrfs_subpage_clamp_range(page, &start, &len); \
632 btrfs_subpage_clear_##name(fs_info, page, start, len); \
635 struct page *page, u64 start, u32 len) \
637 if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) \
638 return test_page_func(page); \
639 btrfs_subpage_clamp_range(page, &start, &len); \
640 return btrfs_subpage_test_##name(fs_info, page, start, len); \
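
The wrapper generators above fall back to the whole-page flag helpers when the page is not subpage, and otherwise clamp the range and use the per-sector bitmap. A stripped-down sketch of that code-generation pattern; the struct, helper names, and bitmap layout are illustrative stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_page {
	bool     is_subpage;	/* sectorsize < page size */
	bool     flag;		/* whole-page flag (e.g. dirty) */
	uint64_t bitmap;	/* per-sector bits, used only when is_subpage */
};

/* Generate set/test wrappers that pick whole-page vs. subpage handling. */
#define IMPLEMENT_TOY_PAGE_OPS(name)					\
static void toy_page_set_##name(struct toy_page *p,			\
				unsigned int bit, unsigned int nbits)	\
{									\
	uint64_t mask = ((1ULL << nbits) - 1) << bit;			\
									\
	if (!p->is_subpage) {						\
		p->flag = true;		/* whole-page fallback */	\
		return;							\
	}								\
	p->bitmap |= mask;						\
	p->flag = true;	/* any subpage bit also sets the page flag */	\
}									\
static bool toy_page_test_##name(const struct toy_page *p,		\
				 unsigned int bit, unsigned int nbits)	\
{									\
	uint64_t mask = ((1ULL << nbits) - 1) << bit;			\
									\
	if (!p->is_subpage)						\
		return p->flag;						\
	return (p->bitmap & mask) == mask; /* all bits must be set */	\
}

IMPLEMENT_TOY_PAGE_OPS(dirty)

int main(void)
{
	struct toy_page p = { .is_subpage = true };

	toy_page_set_dirty(&p, 4, 2);
	printf("%d %d\n", toy_page_test_dirty(&p, 4, 2),	/* 1 */
			  toy_page_test_dirty(&p, 4, 4));	/* 0 */
	return 0;
}
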
653 * Make sure that not only the page dirty bit is cleared, but also the subpage dirty bit
657 struct page *page)
659 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
664 ASSERT(!PageDirty(page));
665 if (!btrfs_is_subpage(fs_info, page))
668 ASSERT(PagePrivate(page) && page->private);
673 * Handle different locked pages with different page sizes:
678 * This is the most common locked page for __extent_writepage() called
687 void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
692 ASSERT(PageLocked(page));
693 /* For non-subpage case, we just unlock the page */
694 if (!btrfs_is_subpage(fs_info, page))
695 return unlock_page(page);
697 ASSERT(PagePrivate(page) && page->private);
698 subpage = (struct btrfs_subpage *)page->private;
701 * For the subpage case, there are two types of locked pages. With or
704 * Since we own the page lock, no one else could touch subpage::writers
709 return unlock_page(page);
712 btrfs_page_end_writer_lock(fs_info, page, start, len);
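
btrfs_page_unlock_writer() has to distinguish a page locked the plain way (writer count of zero) from one locked through the writer counter: the former is simply unlocked, the latter drops its writers and is unlocked by the last one via btrfs_page_end_writer_lock(). A sketch of that branch, assuming an atomic writer counter as described in the comments above; printf stands in for unlock_page():

#include <stdatomic.h>
#include <stdio.h>

struct toy_subpage {
	atomic_uint writers;	/* stands in for the subpage writer counter */
};

static void page_unlock_writer(struct toy_subpage *sp, unsigned int nr_sectors)
{
	if (atomic_load(&sp->writers) == 0) {
		/* Page was locked with plain lock_page(), no writer accounting. */
		printf("unlock_page()\n");
		return;
	}
	/* Drop our writers; the last one out unlocks the page. */
	if (atomic_fetch_sub(&sp->writers, nr_sectors) == nr_sectors)
		printf("last writer gone: unlock_page()\n");
}

int main(void)
{
	struct toy_subpage plain   = { .writers = 0 };
	struct toy_subpage counted = { .writers = 4 };

	page_unlock_writer(&plain, 4);		/* plain-locked page */
	page_unlock_writer(&counted, 4);	/* 4 -> 0, page gets unlocked */
	return 0;
}
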
720 struct page *page, u64 start, u32 len)
732 ASSERT(PagePrivate(page) && page->private);
734 subpage = (struct btrfs_subpage *)page->private;
744 dump_page(page, "btrfs subpage dump");
746 "start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl error=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl",
747 start, len, page_offset(page),