Lines Matching defs:page in mm/page-writeback.c
3 * mm/page-writeback.c
249 * available for the page cache, which is essentially the number of
270 * page cache. This is the base value for the per-node dirty limits.
355 * page cache. This is the base value for the global dirty limits.
436 * Dirty throttling logic assumes the limits in page units fit into
494 * Dirty throttling logic assumes the limits in page units fit into
759 * when sleeping max_pause per page is not enough to keep the dirty pages under
1830 * In theory 1 page is enough to keep the consumer-producer
1831 * pipe going: the flusher cleans 1 page => the task dirties 1
1832 * more page. However wb_dirty has accounting errors. So use
1873 * called to throttle the page dirties. The solution is to save the not yet
1874 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1885 * Processes which are dirtying memory should call in here once for each page
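Lines 1873-1885 relate to balance_dirty_pages_ratelimited(): processes dirtying page-cache memory call it once per dirtied page so heavy dirtiers get throttled. A minimal sketch of such a call site, assuming a hypothetical helper name (example_after_dirtying); only the throttling call itself is taken from this file:

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/writeback.h>

	/* Hypothetical helper: finish up after dirtying one cached page. */
	static void example_after_dirtying(struct address_space *mapping,
					   struct page *page)
	{
		set_page_dirty(page);
		unlock_page(page);

		/*
		 * Once per newly dirtied page; internal ratelimiting keeps
		 * this cheap while dirty levels stay below the thresholds.
		 */
		balance_dirty_pages_ratelimited(mapping);
	}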
2093 * Called early on to tune the page writeback dirty limits.
2121 * @start: starting page index
2122 * @end: ending page index (inclusive)
2124 * This function scans the page range from @start to @end (inclusive) and tags
2137 void *page;
2140 xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
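Lines 2121-2140 belong to tag_pages_for_writeback(), which a data-integrity writeback pass uses to mark the currently dirty pages as TOWRITE before scanning, so pages dirtied during the scan cannot livelock it. A sketch of that pattern under those assumptions, with a hypothetical example_sync_range() as the caller:

	#include <linux/pagemap.h>
	#include <linux/pagevec.h>
	#include <linux/sched.h>
	#include <linux/writeback.h>

	/* Hypothetical integrity-sync scan over the index range [start, end]. */
	static void example_sync_range(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
	{
		struct pagevec pvec;
		pgoff_t index = start;
		unsigned int i, nr;

		/* Tag the dirty pages once up front. */
		tag_pages_for_writeback(mapping, start, end);

		pagevec_init(&pvec);
		while (index <= end) {
			nr = pagevec_lookup_range_tag(&pvec, mapping, &index,
						      end, PAGECACHE_TAG_TOWRITE);
			if (!nr)
				break;
			for (i = 0; i < nr; i++) {
				struct page *page = pvec.pages[i];

				lock_page(page);
				/* ... clear the dirty bit and submit I/O ... */
				unlock_page(page);
			}
			pagevec_release(&pvec);
			cond_resched();
		}
	}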
2158 * @writepage: function called for each page
2161 * If a page is already under I/O, write_cache_pages() skips it, even
2174 * by the process clearing the DIRTY tag (and submitting the page for IO).
2178 * we do not loop back to the start of the file. Doing so causes a page
2179 * lock/page writeback access order inversion - we should only ever lock
2180 * multiple pages in ascending page->index order, and looping back to the start
2226 struct page *page = pvec.pages[i];
2228 done_index = page->index;
2230 lock_page(page);
2234 * then, even for data integrity operations: the page
2237 * even if there is now a new, dirty page at the same
2240 if (unlikely(page->mapping != mapping)) {
2242 unlock_page(page);
2246 if (!PageDirty(page)) {
2251 if (PageWriteback(page)) {
2253 wait_on_page_writeback(page);
2258 BUG_ON(PageWriteback(page));
2259 if (!clear_page_dirty_for_io(page))
2263 error = (*writepage)(page, wbc, data);
2269 * past this page so media errors won't choke
2273 * still have state to clear for each page. In
2278 unlock_page(page);
2282 done_index = page->index + 1;
2307 * If we hit the last page and there is more work to be done: wrap
2324 static int __writepage(struct page *page, struct writeback_control *wbc,
2328 int ret = mapping->a_ops->writepage(page, wbc);
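Lines 2158-2328 excerpt write_cache_pages() and the __writepage() helper it pairs with in generic_writepages(). A filesystem's ->writepages method can drive its own per-page callback through the same loop; a sketch with hypothetical examplefs names:

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/writeback.h>

	/*
	 * Hypothetical per-page callback. write_cache_pages() hands us the
	 * page locked and with its dirty bit already cleared for I/O.
	 */
	static int examplefs_writepage_cb(struct page *page,
					  struct writeback_control *wbc,
					  void *data)
	{
		struct address_space *mapping = data;

		return mapping->a_ops->writepage(page, wbc);
	}

	static int examplefs_writepages(struct address_space *mapping,
					struct writeback_control *wbc)
	{
		return write_cache_pages(mapping, wbc, examplefs_writepage_cb,
					 mapping);
	}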
2381 * write_one_page - write out a single page and wait on I/O
2382 * @page: the page to write
2384 * The page must be locked by the caller and will be unlocked upon return.
2391 int write_one_page(struct page *page)
2393 struct address_space *mapping = page->mapping;
2400 BUG_ON(!PageLocked(page));
2402 wait_on_page_writeback(page);
2404 if (clear_page_dirty_for_io(page)) {
2405 get_page(page);
2406 ret = mapping->a_ops->writepage(page, &wbc);
2408 wait_on_page_writeback(page);
2409 put_page(page);
2411 unlock_page(page);
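Lines 2381-2411 show write_one_page(): the caller locks the page, the function writes it out, waits for the I/O and unlocks it. A sketch of a hypothetical example_flush_index() helper built on top of it:

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Hypothetical: synchronously flush one cached page by index. */
	static int example_flush_index(struct address_space *mapping,
				       pgoff_t index)
	{
		struct page *page = find_lock_page(mapping, index);
		int ret;

		if (!page)
			return 0;

		/* Writes and waits; drops the page lock but not our ref. */
		ret = write_one_page(page);
		put_page(page);
		return ret;
	}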
2423 int __set_page_dirty_no_writeback(struct page *page)
2425 if (!PageDirty(page))
2426 return !TestSetPageDirty(page);
2437 void account_page_dirtied(struct page *page, struct address_space *mapping)
2441 trace_writeback_dirty_page(page, mapping);
2446 inode_attach_wb(inode, page);
2449 __inc_lruvec_page_state(page, NR_FILE_DIRTY);
2450 __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2451 __inc_node_page_state(page, NR_DIRTIED);
2458 mem_cgroup_track_foreign_dirty(page, wb);
2463 * Helper function for de-accounting a dirty page without writeback.
2467 void account_page_cleaned(struct page *page, struct address_space *mapping,
2471 dec_lruvec_page_state(page, NR_FILE_DIRTY);
2472 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2479 * For address_spaces which do not use buffers. Just tag the page as dirty in
2483 * page dirty in that case, but not all the buffers. This is a "bottom-up"
2487 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
2490 int __set_page_dirty_nobuffers(struct page *page)
2492 lock_page_memcg(page);
2493 if (!TestSetPageDirty(page)) {
2494 struct address_space *mapping = page_mapping(page);
2498 unlock_page_memcg(page);
2503 BUG_ON(page_mapping(page) != mapping);
2504 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
2505 account_page_dirtied(page, mapping);
2506 __xa_set_mark(&mapping->i_pages, page_index(page),
2509 unlock_page_memcg(page);
2517 unlock_page_memcg(page);
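Lines 2479-2517 are from __set_page_dirty_nobuffers(), the set_page_dirty implementation for address_spaces that do not use buffer heads. Filesystems of that kind typically just point their address_space_operations at it; a sketch with a hypothetical examplefs_aops (other methods omitted):

	#include <linux/fs.h>
	#include <linux/mm.h>

	static const struct address_space_operations examplefs_aops = {
		/* Dirty state lives only in the page flag and the xarray tag. */
		.set_page_dirty	= __set_page_dirty_nobuffers,
		/* .readpage, .writepage, ... left out of this sketch. */
	};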
2523 * Call this whenever redirtying a page, to de-account the dirty counters
2529 void account_page_redirty(struct page *page)
2531 struct address_space *mapping = page->mapping;
2540 dec_node_page_state(page, NR_DIRTIED);
2549 * page for some reason, it should redirty the locked page via
2550 * redirty_page_for_writepage() and it should then unlock the page and return 0
2552 int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
2557 ret = __set_page_dirty_nobuffers(page);
2558 account_page_redirty(page);
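Lines 2549-2558 describe the contract for a ->writepage that cannot make progress: redirty the locked page with redirty_page_for_writepage(), unlock it and return 0. A sketch of that error path in a hypothetical examplefs_writepage(), where examplefs_can_write_now() stands in for whatever resource check a real filesystem would make:

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/writeback.h>

	/* Hypothetical predicate: can we start I/O on this page right now? */
	static bool examplefs_can_write_now(struct page *page)
	{
		return false;	/* placeholder for a real check */
	}

	static int examplefs_writepage(struct page *page,
				       struct writeback_control *wbc)
	{
		if (!examplefs_can_write_now(page)) {
			/* Keep the page dirty; writeback will retry later. */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}

		/* ... normal path: start writeback, submit I/O, unlock ... */
		return 0;
	}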
2564 * Dirty a page.
2566 * For pages with a mapping this should be done under the page lock
2574 int set_page_dirty(struct page *page)
2576 struct address_space *mapping = page_mapping(page);
2578 page = compound_head(page);
2580 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
2584 * About readahead, if the page is written, the flags would be
2586 * About lru_deactivate_page, if the page is redirtied, the flag
2587 * will be reset. So no problem, but if the page is used by readahead
2591 if (PageReclaim(page))
2592 ClearPageReclaim(page);
2597 return (*spd)(page);
2599 if (!PageDirty(page)) {
2600 if (!TestSetPageDirty(page))
2609 * page->mapping->host, and if the page is unlocked. This is because another
2610 * CPU could truncate the page off the mapping and then free the mapping.
2612 * Usually, the page _is_ locked, or the caller is a user-space process which
2615 * In other cases, the page should be locked before running set_page_dirty().
2617 int set_page_dirty_lock(struct page *page)
2621 lock_page(page);
2622 ret = set_page_dirty(page);
2623 unlock_page(page);
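Lines 2609-2623 explain why set_page_dirty() callers normally hold the page lock, and why set_page_dirty_lock() exists for the ones that do not, such as drivers dirtying user pages they pinned earlier. A sketch of that release pattern with a hypothetical example_release_user_pages():

	#include <linux/mm.h>

	/* Hypothetical: drop pinned user pages a device has written into. */
	static void example_release_user_pages(struct page **pages,
					       unsigned long nr)
	{
		unsigned long i;

		for (i = 0; i < nr; i++) {
			/* No page lock held here, so take the locking path. */
			set_page_dirty_lock(pages[i]);
			put_page(pages[i]);
		}
	}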
2629 * This cancels just the dirty bit on the kernel page itself, it does NOT
2631 * leaves the page tagged dirty, so any sync activity will still find it on
2635 * Doing this should *normally* only ever be done when a page is truncated,
2638 * page without actually doing it through the VM. Can you say "ext3 is
2641 void __cancel_dirty_page(struct page *page)
2643 struct address_space *mapping = page_mapping(page);
2650 lock_page_memcg(page);
2653 if (TestClearPageDirty(page))
2654 account_page_cleaned(page, mapping, wb);
2657 unlock_page_memcg(page);
2659 ClearPageDirty(page);
2665 * Clear a page's dirty flag, while caring for dirty memory accounting.
2666 * Returns true if the page was previously dirty.
2668 * This is for preparing to put the page under writeout. We leave the page
2672 * at which stage we bring the page's dirty flag and xarray dirty tag
2675 * This incoherency between the page's dirty flag and xarray tag is
2676 * unfortunate, but it only exists while the page is locked.
2678 int clear_page_dirty_for_io(struct page *page)
2680 struct address_space *mapping = page_mapping(page);
2683 VM_BUG_ON_PAGE(!PageLocked(page), page);
2696 * mark the whole page dirty if it was
2698 * (c) clean the page again and return 1 to
2705 * Note! Normally the "set_page_dirty(page)"
2711 * We basically use the page "master dirty bit"
2715 if (page_mkclean(page))
2716 set_page_dirty(page);
2719 * installing a dirty pte and marking the page dirty
2721 * page lock while dirtying the page, and pages are
2726 if (TestClearPageDirty(page)) {
2727 dec_lruvec_page_state(page, NR_FILE_DIRTY);
2728 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2735 return TestClearPageDirty(page);
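Lines 2665-2735 excerpt clear_page_dirty_for_io(), which a writeout path calls on a locked page just before marking it as under writeback and submitting I/O. A sketch of that ordering, with the I/O submission left abstract and a hypothetical example_start_writeout() as the wrapper:

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Hypothetical: begin writeout on one locked, dirty page. */
	static void example_start_writeout(struct page *page)
	{
		/* Returns 0 if someone else already cleaned the page. */
		if (!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			return;
		}

		set_page_writeback(page);	/* visible via the writeback tag */
		unlock_page(page);

		/*
		 * ... submit the I/O; the completion handler is expected to
		 * call end_page_writeback(page) once the data is on disk ...
		 */
	}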
2739 int test_clear_page_writeback(struct page *page)
2741 struct address_space *mapping = page_mapping(page);
2746 memcg = lock_page_memcg(page);
2747 lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
2754 ret = TestClearPageWriteback(page);
2756 __xa_clear_mark(&mapping->i_pages, page_index(page),
2772 ret = TestClearPageWriteback(page);
2776 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2777 inc_node_page_state(page, NR_WRITTEN);
2783 int __test_set_page_writeback(struct page *page, bool keep_write)
2785 struct address_space *mapping = page_mapping(page);
2788 lock_page_memcg(page);
2790 XA_STATE(xas, &mapping->i_pages, page_index(page));
2797 ret = TestSetPageWriteback(page);
2816 if (!PageDirty(page))
2822 ret = TestSetPageWriteback(page);
2825 inc_lruvec_page_state(page, NR_WRITEBACK);
2826 inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2828 unlock_page_memcg(page);
2829 access_ret = arch_make_page_accessible(page);
2831 * If writeback has been triggered on a page that cannot be made
2834 VM_BUG_ON_PAGE(access_ret != 0, page);
2842 * Wait for a page to complete writeback
2844 void wait_on_page_writeback(struct page *page)
2846 while (PageWriteback(page)) {
2847 trace_wait_on_page_writeback(page, page_mapping(page));
2848 wait_on_page_bit(page, PG_writeback);
2855 * @page: The page to wait on.
2857 * This function determines if the given page is related to a backing device
2858 * that requires page contents to be held stable during writeback. If so, then
2861 void wait_for_stable_page(struct page *page)
2863 page = thp_head(page);
2864 if (page->mapping->host->i_sb->s_iflags & SB_I_STABLE_WRITES)
2865 wait_on_page_writeback(page);
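Lines 2842-2865 cover wait_on_page_writeback() and wait_for_stable_page(). Filesystems call the latter from paths that are about to modify a locked page (page_mkwrite, write_begin) so that devices which checksum or compute parity over in-flight pages never see the contents change mid-I/O. A sketch with a hypothetical examplefs_page_mkwrite_locked(), assuming the caller already locked the page:

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Hypothetical ->page_mkwrite core; the page arrives locked. */
	static vm_fault_t examplefs_page_mkwrite_locked(struct page *page)
	{
		/* Sleeps only if the backing device requires stable pages. */
		wait_for_stable_page(page);

		set_page_dirty(page);
		return VM_FAULT_LOCKED;
	}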