Lines Matching defs:folio (mm/page-writeback.c)
2436 struct folio *folio = fbatch.folios[i];
2439 done_index = folio->index;
2441 folio_lock(folio);
2451 if (unlikely(folio->mapping != mapping)) {
2453 folio_unlock(folio);
2457 if (!folio_test_dirty(folio)) {
2462 if (folio_test_writeback(folio)) {
2464 folio_wait_writeback(folio);
2469 BUG_ON(folio_test_writeback(folio));
2470 if (!folio_clear_dirty_for_io(folio))
2474 error = writepage(folio, wbc, data);
2475 nr = folio_nr_pages(folio);
2490 folio_unlock(folio);
2494 done_index = folio->index + nr;
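The loop above is the body of write_cache_pages(): it locks each folio from the batch, rechecks the mapping, skips folios that are no longer dirty, waits out (or skips) writeback, clears the dirty bit for I/O and hands the still-locked folio to the writepage callback. A minimal sketch of a filesystem ->writepages built on it, assuming the folio-based writepage_t callback shown above; myfs_write_folio, myfs_writepages and myfs_submit_folio are hypothetical names:

static int myfs_write_folio(struct folio *folio, struct writeback_control *wbc,
			    void *data)
{
	/*
	 * The folio arrives locked, with its dirty bit already cleared
	 * for I/O by the loop above.  Mark it under writeback and drop
	 * the lock before queueing the I/O.
	 */
	folio_start_writeback(folio);
	folio_unlock(folio);
	return myfs_submit_folio(folio, wbc);	/* hypothetical I/O submission */
}

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, myfs_write_folio, NULL);
}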
2533 static int writepage_cb(struct folio *folio, struct writeback_control *wbc,
2537 int ret = mapping->a_ops->writepage(&folio->page, wbc);
2591 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
2593 if (!folio_test_dirty(folio))
2594 return !folio_test_set_dirty(folio);
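noop_dirty_folio() only sets the dirty flag and does no writeback accounting, so an address space with no backing store to write to can, roughly, wire it straight into its operations (myfs_aops is an illustrative name):

static const struct address_space_operations myfs_aops = {
	.dirty_folio	= noop_dirty_folio,
};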
2606 static void folio_account_dirtied(struct folio *folio,
2611 trace_writeback_dirty_folio(folio, mapping);
2615 long nr = folio_nr_pages(folio);
2617 inode_attach_wb(inode, folio);
2620 __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
2621 __zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
2622 __node_stat_mod_folio(folio, NR_DIRTIED, nr);
2629 mem_cgroup_track_foreign_dirty(folio, wb);
2638 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
2640 long nr = folio_nr_pages(folio);
2642 lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2643 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2649 * Mark the folio dirty, and set it dirty in the page cache, and mark
2652 * If warn is true, then emit a warning if the folio is not uptodate and has
2655 * The caller must hold folio_memcg_lock(). Most callers have the folio
2656 * locked. A few have the folio blocked from truncation through other
2661 void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
2667 if (folio->mapping) { /* Race with truncate? */
2668 WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
2669 folio_account_dirtied(folio, mapping);
2670 __xa_set_mark(&mapping->i_pages, folio_index(folio),
2677 * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
2678 * @mapping: Address space this folio belongs to.
2679 * @folio: Folio to be marked as dirty.
2687 * a single buffer is being dirtied: we want to set the folio dirty in
2692 * simply hold the folio lock, but e.g. zap_pte_range() calls with the
2693 * folio mapped and the pte lock held, which also locks out truncation.
2695 bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
2697 folio_memcg_lock(folio);
2698 if (folio_test_set_dirty(folio)) {
2699 folio_memcg_unlock(folio);
2703 __folio_mark_dirty(folio, mapping, !folio_test_private(folio));
2704 folio_memcg_unlock(folio);
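filemap_dirty_folio() is the ->dirty_folio implementation for filesystems that do not use buffer_heads. A sketch of a filesystem that wants a hook of its own but still delegates the flag, tag and accounting work to it; myfs_dirty_folio and myfs_note_dirty are hypothetical:

static bool myfs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	myfs_note_dirty(mapping->host, folio);		/* private bookkeeping */
	return filemap_dirty_folio(mapping, folio);	/* dirty flag, xarray tag, accounting */
}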
2715 * folio_redirty_for_writepage - Decline to write a dirty folio.
2717 * @folio: The folio.
2720 * @folio for some reason, it should call this function, unlock @folio and
2723 * Return: True if we redirtied the folio. False if someone else dirtied
2727 struct folio *folio)
2729 struct address_space *mapping = folio->mapping;
2730 long nr = folio_nr_pages(folio);
2734 ret = filemap_dirty_folio(mapping, folio);
2742 node_stat_mod_folio(folio, NR_DIRTIED, -nr);
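A writeback callback that decides it cannot write the folio after all is expected to put the dirty accounting back and unlock the folio itself, roughly along these lines (myfs_can_write_now is a hypothetical predicate):

	if (!myfs_can_write_now(folio)) {
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		return 0;
	}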
2751 * folio_mark_dirty - Mark a folio as being modified.
2752 * @folio: The folio.
2754 * The folio may not be truncated while this function is running.
2755 * Holding the folio lock is sufficient to prevent truncation, but some
2758 * in this folio. Truncation will block on the page table lock as it
2759 * unmaps pages before removing the folio from its mapping.
2761 * Return: True if the folio was newly dirtied, false if it was already dirty.
2763 bool folio_mark_dirty(struct folio *folio)
2765 struct address_space *mapping = folio_mapping(folio);
2771 * About readahead, if the folio is written, the flags would be
2773 * About folio_deactivate, if the folio is redirtied,
2775 * folio is used by readahead it will confuse readahead
2779 if (folio_test_reclaim(folio))
2780 folio_clear_reclaim(folio);
2781 return mapping->a_ops->dirty_folio(mapping, folio);
2784 return noop_dirty_folio(mapping, folio);
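A minimal sketch of a folio_mark_dirty() caller: the folio must be protected from truncation while it is dirtied, most simply by holding its lock (offset, src and len are hypothetical, and folio_address() assumes the folio is not in highmem):

	folio_lock(folio);
	memcpy(folio_address(folio) + offset, src, len);
	folio_mark_dirty(folio);
	folio_unlock(folio);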
2822 void __folio_cancel_dirty(struct folio *folio)
2824 struct address_space *mapping = folio_mapping(folio);
2831 folio_memcg_lock(folio);
2834 if (folio_test_clear_dirty(folio))
2835 folio_account_cleaned(folio, wb);
2838 folio_memcg_unlock(folio);
2840 folio_clear_dirty(folio);
2846 * Clear a folio's dirty flag, while caring for dirty memory accounting.
2847 * Returns true if the folio was previously dirty.
2849 * This is for preparing to put the folio under writeout. We leave
2850 * the folio tagged as dirty in the xarray so that a concurrent
2853 * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
2856 * This incoherency between the folio's dirty flag and xarray tag is
2857 * unfortunate, but it only exists while the folio is locked.
2859 bool folio_clear_dirty_for_io(struct folio *folio)
2861 struct address_space *mapping = folio_mapping(folio);
2864 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2877 * mark the whole folio dirty if it was
2879 * (c) clean the folio again and return 1 to
2886 * Note! Normally the "folio_mark_dirty(folio)"
2892 * We basically use the folio "master dirty bit"
2896 if (folio_mkclean(folio))
2897 folio_mark_dirty(folio);
2900 * installing a dirty pte and marking the folio dirty
2902 * page lock while dirtying the folio, and folios are
2907 if (folio_test_clear_dirty(folio)) {
2908 long nr = folio_nr_pages(folio);
2909 lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2910 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2917 return folio_test_clear_dirty(folio);
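The usual pattern around folio_clear_dirty_for_io() is the one the write_cache_pages() loop above follows: submit the folio only if it was still dirty, and leave the xarray dirty tag in place so a concurrent re-dirty is not lost. Roughly:

	if (folio_clear_dirty_for_io(folio)) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		/* ... submit the I/O for this folio ... */
	} else {
		/* someone else cleaned it; nothing to write */
		folio_unlock(folio);
	}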
2943 bool __folio_end_writeback(struct folio *folio)
2945 long nr = folio_nr_pages(folio);
2946 struct address_space *mapping = folio_mapping(folio);
2949 folio_memcg_lock(folio);
2956 ret = folio_test_clear_writeback(folio);
2958 __xa_clear_mark(&mapping->i_pages, folio_index(folio),
2977 ret = folio_test_clear_writeback(folio);
2980 lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
2981 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2982 node_stat_mod_folio(folio, NR_WRITTEN, nr);
2984 folio_memcg_unlock(folio);
2988 bool __folio_start_writeback(struct folio *folio, bool keep_write)
2990 long nr = folio_nr_pages(folio);
2991 struct address_space *mapping = folio_mapping(folio);
2995 folio_memcg_lock(folio);
2997 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
3004 ret = folio_test_set_writeback(folio);
3028 if (!folio_test_dirty(folio))
3034 ret = folio_test_set_writeback(folio);
3037 lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
3038 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
3040 folio_memcg_unlock(folio);
3041 access_ret = arch_make_folio_accessible(folio);
3046 VM_BUG_ON_FOLIO(access_ret != 0, folio);
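These two helpers are normally reached through their wrappers: folio_start_writeback() (which passes keep_write == false) on the submission side and folio_end_writeback() from the I/O completion path. A rough sketch of that life cycle; myfs_submit and myfs_write_end_io are hypothetical:

static void myfs_submit(struct folio *folio)
{
	folio_start_writeback(folio);	/* __folio_start_writeback(folio, false) */
	folio_unlock(folio);
	/* ... queue the bio for this folio ... */
}

static void myfs_write_end_io(struct folio *folio, int err)
{
	if (err)
		mapping_set_error(folio->mapping, err);
	folio_end_writeback(folio);	/* calls __folio_end_writeback() */
}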
3053 * folio_wait_writeback - Wait for a folio to finish writeback.
3054 * @folio: The folio to wait for.
3056 * If the folio is currently being written back to storage, wait for the
3060 * no spinlocks held. Caller should hold a reference on the folio.
3061 * If the folio is not locked, writeback may start again after writeback
3064 void folio_wait_writeback(struct folio *folio)
3066 while (folio_test_writeback(folio)) {
3067 trace_folio_wait_writeback(folio, folio_mapping(folio));
3068 folio_wait_bit(folio, PG_writeback);
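A typical folio_wait_writeback() caller is an invalidation or truncation path that must not discard a folio while I/O is still in flight; holding the folio lock across the wait prevents new writeback from starting afterwards. Sketch:

	folio_lock(folio);
	folio_wait_writeback(folio);
	/* ... now safe to remove or invalidate the folio's contents ... */
	folio_unlock(folio);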
3074 * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3075 * @folio: The folio to wait for.
3077 * If the folio is currently being written back to storage, wait for the
3081 * no spinlocks held. Caller should hold a reference on the folio.
3082 * If the folio is not locked, writeback may start again after writeback
3086 int folio_wait_writeback_killable(struct folio *folio)
3088 while (folio_test_writeback(folio)) {
3089 trace_folio_wait_writeback(folio, folio_mapping(folio));
3090 if (folio_wait_bit_killable(folio, PG_writeback))
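The killable variant returns nonzero (-EINTR) if a fatal signal cuts the wait short, so callers have to check the result; a page_mkwrite-style handler, for example, might do something like:

	if (folio_wait_writeback_killable(folio)) {
		folio_unlock(folio);
		return VM_FAULT_RETRY;	/* or propagate an error, depending on context */
	}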
3100 * @folio: The folio to wait on.
3102 * This function determines if the given folio is related to a backing
3103 * device that requires folio contents to be held stable during writeback.
3107 * no spinlocks held. Caller should hold a reference on the folio.
3108 * If the folio is not locked, writeback may start again after writeback
3111 void folio_wait_stable(struct folio *folio)
3113 if (mapping_stable_writes(folio_mapping(folio)))
3114 folio_wait_writeback(folio);
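folio_wait_stable() only waits when mapping_stable_writes() says the backing device needs folio contents held stable during writeback; otherwise it returns immediately. A buffered-write style caller would use it before letting new data land in a folio that may still be under writeback. Sketch:

	folio_lock(folio);
	folio_wait_stable(folio);
	/* ... copy new data into the folio, then folio_mark_dirty() ... */
	folio_unlock(folio);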