Lines Matching defs:folio
53 struct folio *folio = page_folio(page);
56 if (is_zero_page(page) || !folio_test_anon(folio))
58 if (!folio_test_large(folio) || folio_test_hugetlb(folio))
59 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
62 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) && !PageAnonExclusive(page), page);
68 * Return the folio with ref appropriately incremented, or NULL if that failed.
71 static inline struct folio *try_get_folio(struct page *page, int refs)
73 struct folio *folio;
76 folio = page_folio(page);
77 if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
79 if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
83 * At this point we have a stable reference to the folio; but it
84 * could be that between calling page_folio() and the refcount
85 * increment, the folio was split, in which case we'd end up
86 * holding a reference on a folio that has nothing to do with the page
87 * we were given anymore.
88 * So now that the folio is stable, recheck that the page still
89 * belongs to this folio.
91 if (unlikely(page_folio(page) != folio)) {
92 if (!put_devmap_managed_page_refs(&folio->page, refs))
93 folio_put_refs(folio, refs);
97 return folio;
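Taken together, these try_get_folio() matches implement GUP's speculative grab-and-recheck pattern. A condensed sketch of the function body, omitting the devmap release handled via put_devmap_managed_page_refs():

	struct folio *folio = page_folio(page);

	/* A negative refcount means the folio is being freed: back off. */
	if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
		return NULL;
	/* Speculatively take @refs references under RCU. */
	if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
		return NULL;
	/*
	 * The folio could have been split (or freed and reused) between
	 * page_folio() and the refcount bump. The reference now keeps it
	 * stable, so recheck the linkage and back out if it changed.
	 */
	if (unlikely(page_folio(page) != folio)) {
		folio_put_refs(folio, refs);
		return NULL;
	}
	return folio;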
101 * try_grab_folio() - Attempt to get or pin a folio.
103 * @refs: the value to (effectively) add to the folio's refcount
107 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
113 * FOLL_GET: folio's refcount will be incremented by @refs.
115 * FOLL_PIN on large folios: folio's refcount will be incremented by @refs, and its pincount will be incremented by @refs.
118 * FOLL_PIN on single-page folios: folio's refcount will be incremented by @refs * GUP_PIN_COUNTING_BIAS.
121 * Return: The folio containing @page (with refcount appropriately incremented) for success, or NULL upon failure.
126 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
128 struct folio *folio;
148 folio = try_get_folio(page, refs);
149 if (!folio)
158 if (unlikely((flags & FOLL_LONGTERM) && !folio_is_longterm_pinnable(folio))) {
159 if (!put_devmap_managed_page_refs(&folio->page, refs))
160 folio_put_refs(folio, refs);
165 * When pinning a large folio, use an exact count to track it.
167 * However, be sure to *also* increment the normal folio
168 * refcount field at least once, so that the folio really is pinned.
172 if (folio_test_large(folio))
173 atomic_add(refs, &folio->_pincount);
175 folio_ref_add(folio, refs * (GUP_PIN_COUNTING_BIAS - 1));
184 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
186 return folio;
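The kernel-doc above reduces to the following net accounting. This is a simplified sketch of the two flavors (exactly one of FOLL_GET and FOLL_PIN may be set; try_grab_page(), matched below, applies the same rules with a count of one):

	if (flags & FOLL_GET) {
		/* Plain references: just add @refs to the refcount. */
		folio_ref_add(folio, refs);
	} else if (flags & FOLL_PIN) {
		if (folio_test_large(folio)) {
			/* Large folio: exact pin count, plus @refs refs. */
			folio_ref_add(folio, refs);
			atomic_add(refs, &folio->_pincount);
		} else {
			/* Single page: encode pins in the refcount itself. */
			folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
		}
		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
	}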
189 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
192 if (is_zero_folio(folio))
194 node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
195 if (folio_test_large(folio))
196 atomic_sub(refs, &folio->_pincount);
201 if (!put_devmap_managed_page_refs(&folio->page, refs))
202 folio_put_refs(folio, refs);
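gup_put_folio() mirrors that accounting on release. The point of the bias/_pincount scheme is that the rest of mm can cheaply ask whether a folio might be DMA-pinned; folio_maybe_dma_pinned() in include/linux/mm.h boils down to roughly:

	static inline bool maybe_dma_pinned(struct folio *folio)
	{
		if (folio_test_large(folio))
			return atomic_read(&folio->_pincount) > 0;
		/* Small folios: pins arrive in units of the bias. */
		return folio_ref_count(folio) >= GUP_PIN_COUNTING_BIAS;
	}

The small-folio check can false-positive once a folio accumulates GUP_PIN_COUNTING_BIAS (1024) plain references, a known imprecision that callers of the predicate tolerate.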
227 struct folio *folio = page_folio(page);
229 if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
236 folio_ref_inc(folio);
250 if (folio_test_large(folio)) {
251 folio_ref_add(folio, 1);
252 atomic_add(1, &folio->_pincount);
254 folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
257 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
280 * folio_add_pin - Try to get an additional pin on a pinned folio
281 * @folio: The folio to be pinned
283 * Get an additional pin on a folio we already have a pin on. Makes no change
284 * if the folio is a zero_page.
286 void folio_add_pin(struct folio *folio)
288 if (is_zero_folio(folio))
296 if (folio_test_large(folio)) {
297 WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
298 folio_ref_inc(folio);
299 atomic_inc(&folio->_pincount);
301 WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
302 folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
306 static inline struct folio *gup_folio_range_next(struct page *start, unsigned long npages, unsigned long i, unsigned int *ntails)
310 struct folio *folio = page_folio(next);
313 if (folio_test_large(folio))
315 nr = min_t(unsigned int, npages - i, folio_nr_pages(folio) - folio_page_idx(folio, next));
318 return folio;
321 static inline struct folio *gup_folio_next(struct page **list, unsigned long npages, unsigned long i, unsigned int *ntails)
324 struct folio *folio = page_folio(list[i]);
328 if (page_folio(list[nr]) != folio)
333 return folio;
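Both helpers exist to batch the unpin loops that follow: consecutive entries of a pages[] array that belong to one folio are coalesced so their pins can be dropped with a single gup_put_folio() call. The core of gup_folio_next() is just:

	struct folio *folio = page_folio(list[i]);
	unsigned int nr;

	/* Count how many following entries still point into this folio. */
	for (nr = i + 1; nr < npages; nr++) {
		if (page_folio(list[nr]) != folio)
			break;
	}
	*ntails = nr - i;
	return folio;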
362 struct folio *folio;
372 folio = gup_folio_next(pages, npages, i, &nr);
393 if (!folio_test_dirty(folio)) {
394 folio_lock(folio);
395 folio_mark_dirty(folio);
396 folio_unlock(folio);
398 gup_put_folio(folio, nr, FOLL_PIN);
428 struct folio *folio;
432 folio = gup_folio_range_next(page, npages, i, &nr);
433 if (make_dirty && !folio_test_dirty(folio)) {
434 folio_lock(folio);
435 folio_mark_dirty(folio);
436 folio_unlock(folio);
438 gup_put_folio(folio, nr, FOLL_PIN);
446 struct folio *folio;
455 folio = gup_folio_next(pages, npages, i, &nr);
456 gup_put_folio(folio, nr, FOLL_PIN);
472 struct folio *folio;
485 folio = gup_folio_next(pages, npages, i, &nr);
486 gup_put_folio(folio, nr, FOLL_PIN);
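For callers, the dirty-then-unpin sequence above is packaged as unpin_user_pages_dirty_lock(). A hypothetical driver-style usage (the function name and surrounding error handling are illustrative, not from gup.c):

	#include <linux/mm.h>

	static int fill_user_buffer(unsigned long uaddr, int nr_pages,
				    struct page **pages)
	{
		int pinned = pin_user_pages_fast(uaddr, nr_pages,
						 FOLL_WRITE, pages);
		if (pinned < 0)
			return pinned;
		/* A real caller must also cope with pinned < nr_pages. */

		/* ... device DMA writes into the pinned pages here ... */

		/* Mark the data dirty and drop the pins, folio by folio. */
		unpin_user_pages_dirty_lock(pages, pinned, true);
		return 0;
	}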
994 * Writing to file-backed mappings which require folio dirty tracking using GUP is a fundamentally broken operation, as kernel write access to GUP mappings does not adhere to the semantics expected by a file system.
1000 * 1. A folio is written to via GUP which write-faults the memory, notifying
1001 * the file system and dirtying the folio.
1002 * 2. Later, writeback is triggered, resulting in the folio being cleaned and then write-protected by the file system.
1004 * 3. The GUP caller writes to the folio, as it is mapped read/write via the page tables.
1009 * This results in both data being written to a folio without writenotify, and
1010 * the folio being dirtied unexpectedly (if the caller decides to do so).
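The upshot of that scenario is a policy: a long-term writable pin is only safe when no filesystem dirty tracking is involved. A hypothetical distillation (the kernel spreads the real checks across the slow path and folio_fast_pin_allowed(), matched further below):

	/* Hypothetical helper: anon and hugetlb folios need no writenotify. */
	static bool writable_longterm_pin_ok(struct folio *folio)
	{
		if (folio_test_anon(folio) || folio_test_hugetlb(folio))
			return true;
		/* File-backed: dirty tracking may be required; refuse. */
		return false;
	}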
1279 * This must be a large folio (and doesn't need to
1280 * be the whole folio; it can be part of it), do the refcount work for all the subpages too.
1289 struct folio *folio;
1293 * Since we already hold a refcount on the large folio, this should never fail.
1295 folio = try_grab_folio(page, page_increm - 1, foll_flags);
1297 if (WARN_ON_ONCE(!folio)) {
1300 * Release the 1st page ref if the folio is problematic, fail hard.
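The fragment above batches refcounting for large folios in the slow path: the first subpage already carries one reference, so the remaining page_increm - 1 subpages' worth is added in a single call. Condensed:

	if (page_increm > 1) {
		/* One ref is held; add the rest for the other subpages. */
		struct folio *folio = try_grab_folio(page, page_increm - 1,
						     foll_flags);
		if (WARN_ON_ONCE(!folio)) {
			/* Drop the first ref too; fail hard. */
			gup_put_folio(page_folio(page), 1, foll_flags);
			return -EFAULT;
		}
	}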
2034 struct folio *prev_folio = NULL;
2038 struct folio *folio = page_folio(pages[i]);
2040 if (folio == prev_folio)
2042 prev_folio = folio;
2044 if (folio_is_longterm_pinnable(folio))
2049 if (folio_is_device_coherent(folio))
2052 if (folio_test_hugetlb(folio)) {
2053 isolate_hugetlb(folio, movable_page_list);
2057 if (!folio_test_lru(folio) && drain_allow) {
2062 if (!folio_isolate_lru(folio))
2065 list_add_tail(&folio->lru, movable_page_list);
2066 node_stat_mod_folio(folio,
2067 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2068 folio_nr_pages(folio));
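Stripped of the LRU draining and statistics, the collection loop above classifies each folio once and queues the ones that cannot stay pinned for migration:

	struct folio *prev_folio = NULL;

	for (i = 0; i < nr_pages; i++) {
		struct folio *folio = page_folio(pages[i]);

		if (folio == prev_folio)
			continue;		/* one decision per folio */
		prev_folio = folio;

		if (folio_is_longterm_pinnable(folio))
			continue;		/* may stay where it is */

		if (folio_test_hugetlb(folio))
			isolate_hugetlb(folio, movable_page_list);
		else if (folio_isolate_lru(folio))
			list_add_tail(&folio->lru, movable_page_list);
	}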
2088 struct folio *folio = page_folio(pages[i]);
2090 if (folio_is_device_coherent(folio)) {
2096 folio_get(folio);
2097 gup_put_folio(folio, 1, FOLL_PIN);
2099 if (migrate_device_coherent_page(&folio->page)) {
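Note the conversion trick in the device-coherent branch: the FOLL_PIN pin is swapped for a plain reference before migrating, since the migration helper expects an ordinary reference rather than pin accounting. Condensed from the lines above:

	folio_get(folio);			/* take a plain reference */
	gup_put_folio(folio, 1, FOLL_PIN);	/* drop the pin accounting */
	if (migrate_device_coherent_page(&folio->page))
		ret = -EBUSY;			/* leave the rest to the caller */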
2446 * Used in the GUP-fast path to determine whether a pin is permitted for a specific folio.
2448 * This call assumes the caller has pinned the folio, that the lowest page table
2449 * level still points to this folio, and that interrupts have been disabled.
2460 static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags)
2473 /* The folio is pinned, so we can safely access folio fields. */
2475 if (WARN_ON_ONCE(folio_test_slab(folio)))
2479 if (folio_test_hugetlb(folio))
2497 mapping = READ_ONCE(folio->mapping);
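A condensed sketch of the checks behind these matches (the real function also deals with the KSM and swapcache mapping-flag cases, and relies on the caller's pin plus disabled interrupts to make the folio fields safe to read):

	static bool fast_pin_allowed_sketch(struct folio *folio,
					    unsigned int flags)
	{
		struct address_space *mapping;
		unsigned long mapping_flags;

		/* Only a long-term writable pin is problematic. */
		if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) !=
		    (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
			return true;

		if (WARN_ON_ONCE(folio_test_slab(folio)))
			return false;
		if (folio_test_hugetlb(folio))	/* no dirty tracking */
			return true;

		mapping = READ_ONCE(folio->mapping);
		if (!mapping)		/* truncated? let slow path decide */
			return false;

		/* Anonymous (non-KSM) folios are fine; file-backed are not. */
		mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
		return mapping_flags == PAGE_MAPPING_ANON;
	}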
2569 struct folio *folio;
2599 folio = try_grab_folio(page, 1, flags);
2600 if (!folio)
2603 if (unlikely(folio_is_secretmem(folio))) {
2604 gup_put_folio(folio, 1, flags);
2610 gup_put_folio(folio, 1, flags);
2614 if (!folio_fast_pin_allowed(folio, flags)) {
2615 gup_put_folio(folio, 1, flags);
2620 gup_put_folio(folio, 1, flags);
2633 gup_put_folio(folio, 1, flags);
2637 folio_set_referenced(folio);
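These matches trace GUP-fast's per-PTE shape: grab speculatively, then re-validate everything that interrupts-off protection allowed to change, backing out with gup_put_folio() on any doubt. Condensed (the real code performs the checks as separate branches):

	folio = try_grab_folio(page, 1, flags);
	if (!folio)
		goto pte_unmap;			/* fall back to slow path */

	/* Re-validate now that the folio is stable. */
	if (unlikely(folio_is_secretmem(folio)) ||
	    unlikely(pte_val(pte) != pte_val(ptep_get(ptep))) ||
	    !folio_fast_pin_allowed(folio, flags)) {
		gup_put_folio(folio, 1, flags);
		goto pte_unmap;
	}

	folio_set_referenced(folio);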
2783 struct folio *folio;
2802 folio = try_grab_folio(page, refs, flags);
2803 if (!folio)
2807 gup_put_folio(folio, refs, flags);
2811 if (!folio_fast_pin_allowed(folio, flags)) {
2812 gup_put_folio(folio, refs, flags);
2816 if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
2817 gup_put_folio(folio, refs, flags);
2822 folio_set_referenced(folio);
2857 struct folio *folio;
2873 folio = try_grab_folio(page, refs, flags);
2874 if (!folio)
2878 gup_put_folio(folio, refs, flags);
2882 if (!folio_fast_pin_allowed(folio, flags)) {
2883 gup_put_folio(folio, refs, flags);
2886 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2887 gup_put_folio(folio, refs, flags);
2892 folio_set_referenced(folio);
2901 struct folio *folio;
2917 folio = try_grab_folio(page, refs, flags);
2918 if (!folio)
2922 gup_put_folio(folio, refs, flags);
2926 if (!folio_fast_pin_allowed(folio, flags)) {
2927 gup_put_folio(folio, refs, flags);
2931 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2932 gup_put_folio(folio, refs, flags);
2937 folio_set_referenced(folio);
2947 struct folio *folio;
2957 folio = try_grab_folio(page, refs, flags);
2958 if (!folio)
2962 gup_put_folio(folio, refs, flags);
2966 if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2967 gup_put_folio(folio, refs, flags);
2971 if (!folio_fast_pin_allowed(folio, flags)) {
2972 gup_put_folio(folio, refs, flags);
2977 folio_set_referenced(folio);
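The pmd, pud, and pgd leaf helpers matched above all repeat one pattern; only the entry re-check differs. Using the pmd case as the template (condensed sketch):

	folio = try_grab_folio(page, refs, flags);
	if (!folio)
		return 0;

	/* Did the huge entry change while we were unprotected? */
	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	if (!folio_fast_pin_allowed(folio, flags) ||
	    (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page))) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}

	folio_set_referenced(folio);
	return 1;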