Lines Matching defs:folio (mm/huge_memory.c)

560 struct deferred_split *get_deferred_split_queue(struct folio *folio)
562 struct mem_cgroup *memcg = folio_memcg(folio);
563 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
572 struct deferred_split *get_deferred_split_queue(struct folio *folio)
574 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
580 void folio_prep_large_rmappable(struct folio *folio)
582 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
583 INIT_LIST_HEAD(&folio->_deferred_list);
584 folio_set_large_rmappable(folio);
587 static inline bool is_transparent_hugepage(struct folio *folio)
589 if (!folio_test_large(folio))
592 return is_huge_zero_page(&folio->page) ||
593 folio_test_large_rmappable(folio);
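
Note: the lines above are from get_deferred_split_queue(), which picks the per-memcg or the per-node deferred-split queue, folio_prep_large_rmappable(), which initialises _deferred_list and sets the large-rmappable flag on a newly prepared THP, and is_transparent_hugepage(). As a minimal sketch, the THP test can be read as the helper below (the name folio_looks_like_thp is hypothetical; every call in it appears in the listing):

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Hypothetical restatement of the is_transparent_hugepage() test:
 * a THP here is any large folio that is either the huge zero page
 * or was prepared as large-rmappable. */
static bool folio_looks_like_thp(struct folio *folio)
{
        if (!folio_test_large(folio))
                return false;
        return is_huge_zero_page(&folio->page) ||
               folio_test_large_rmappable(folio);
}
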
650 struct folio *folio = page_folio(page);
655 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
657 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
658 folio_put(folio);
663 folio_throttle_swaprate(folio, gfp);
677 __folio_mark_uptodate(folio);
692 folio_put(folio);
701 folio_add_new_anon_rmap(folio, vma, haddr);
702 folio_add_lru_vma(folio, vma);
719 folio_put(folio);
778 struct folio *folio;
827 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
828 if (unlikely(!folio)) {
832 return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
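
Note: lines 650-832 are the anonymous THP fault path (__do_huge_pmd_anonymous_page() and its caller): a PMD-order folio is allocated with vma_alloc_folio(), charged to the memcg, zeroed and marked uptodate, then mapped via folio_add_new_anon_rmap()/folio_add_lru_vma() (lines 701-702), with folio_put() on every failure path. Below is a condensed, hypothetical sketch of the allocate-and-prepare half, assuming the ~6.6 folio API shown above; the page-table installation under the PMD lock is deliberately elided.

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/memcontrol.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Hypothetical sketch: allocate, charge and prepare a PMD-sized
 * anonymous folio for @vma at @haddr.  Returns the folio or NULL;
 * the caller still has to map it under the page-table lock. */
static struct folio *alloc_anon_pmd_folio(struct vm_area_struct *vma,
                                          unsigned long haddr, gfp_t gfp)
{
        struct folio *folio;

        folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
        if (unlikely(!folio))
                return NULL;

        if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
                folio_put(folio);
                return NULL;
        }
        folio_throttle_swaprate(folio, gfp);

        clear_huge_page(&folio->page, haddr, HPAGE_PMD_NR);
        /* The barrier in __folio_mark_uptodate() makes the zeroing
         * visible before the PMD is made present. */
        __folio_mark_uptodate(folio);
        return folio;
}
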
1293 struct folio *folio;
1312 folio = page_folio(page);
1319 if (!folio_trylock(folio)) {
1320 folio_get(folio);
1322 folio_lock(folio);
1326 folio_unlock(folio);
1327 folio_put(folio);
1330 folio_put(folio);
1335 folio_unlock(folio);
1340 * See do_wp_page(): we can only reuse the folio exclusively if
1344 if (folio_ref_count(folio) >
1345 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1347 if (folio_test_swapcache(folio))
1348 folio_free_swap(folio);
1349 if (folio_ref_count(folio) == 1) {
1353 folio_unlock(folio);
1368 folio_unlock(folio);
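
Note: lines 1293-1368 are the huge-PMD write-protect fault (COW) path. The key decision is the reuse test at 1344-1349: the anonymous THP may be reused in place only if the faulting task holds the sole reference, remembering that the swap cache holds one extra reference per subpage. A hypothetical stand-alone version of that check is sketched below; the helper name is invented, and the caller must hold the folio lock, as the code above arranges via folio_trylock()/folio_lock().

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Hypothetical helper mirroring the reuse test at lines 1344-1349:
 * true means the locked anon folio is exclusively ours, so the wp
 * fault can simply make the existing PMD writable again. */
static bool anon_folio_is_exclusive(struct folio *folio)
{
        VM_WARN_ON_ONCE(!folio_test_locked(folio));

        /* The swap cache pins one reference per subpage. */
        if (folio_ref_count(folio) >
            1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
                return false;

        /* Try to drop the swap cache references ... */
        if (folio_test_swapcache(folio))
                folio_free_swap(folio);

        /* ... and re-check: only our own reference may remain. */
        return folio_ref_count(folio) == 1;
}
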
1585 struct folio *folio;
1605 folio = pfn_folio(pmd_pfn(orig_pmd));
1607 * If other processes are mapping this folio, we couldn't discard
1608 * the folio unless they all do MADV_FREE so let's skip the folio.
1610 if (folio_estimated_sharers(folio) != 1)
1613 if (!folio_trylock(folio))
1621 folio_get(folio);
1623 split_folio(folio);
1624 folio_unlock(folio);
1625 folio_put(folio);
1629 if (folio_test_dirty(folio))
1630 folio_clear_dirty(folio);
1631 folio_unlock(folio);
1642 folio_mark_lazyfree(folio);
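
Note: lines 1585-1642 are madvise_free_huge_pmd(). A lazyfree candidate THP is skipped when it is mapped by more than one process (folio_estimated_sharers() != 1, line 1610), split when the madvise range covers only part of it (lines 1621-1625), and otherwise cleaned and marked lazyfree. The sketch below condenses the fully-covered case; it is hypothetical, and the PMD-level young/dirty clearing the real code does after line 1631 is elided.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Hypothetical sketch of the MADV_FREE handling for one PMD-mapped
 * folio that lies entirely inside the madvise range. */
static void madv_free_one_thp(struct folio *folio)
{
        /* Don't discard data behind other mappers' backs. */
        if (folio_estimated_sharers(folio) != 1)
                return;
        if (!folio_trylock(folio))
                return;

        /* Clean it so reclaim can drop it without writeback ... */
        if (folio_test_dirty(folio))
                folio_clear_dirty(folio);
        folio_unlock(folio);

        /* ... and flag it as lazily freeable on the LRU. */
        folio_mark_lazyfree(folio);
}
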
2267 unsigned long address, bool freeze, struct folio *folio)
2279 * If caller asks to setup a migration entry, we need a folio to check
2280 * pmd against. Otherwise we can end up replacing wrong folio.
2282 VM_BUG_ON(freeze && !folio);
2283 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2288 * It's safe to call pmd_page when folio is set because it's
2291 if (folio && folio != page_folio(pmd_page(*pmd)))
2302 bool freeze, struct folio *folio)
2309 __split_huge_pmd(vma, pmd, address, freeze, folio);
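
Note: lines 2267-2309 are __split_huge_pmd() and split_huge_pmd_address(). The optional folio argument exists so that a caller which is freezing a folio for migration can make sure the PMD still maps that exact folio before it is replaced by migration entries; the VM_WARN_ON_ONCE at 2283 insists the folio be locked. A hypothetical caller-side sketch:

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/pagemap.h>

/* Hypothetical wrapper: split the PMD covering @addr in @vma and, with
 * freeze == true, have the present PMD replaced by migration entries
 * for @folio.  The folio must be locked (see line 2283). */
static void freeze_pmd_for_folio(struct vm_area_struct *vma,
                                 unsigned long addr, struct folio *folio)
{
        VM_WARN_ON_ONCE(!folio_test_locked(folio));
        split_huge_pmd_address(vma, addr, true, folio);
}
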
2347 static void unmap_folio(struct folio *folio)
2352 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2359 if (folio_test_anon(folio))
2360 try_to_migrate(folio, ttu_flags);
2362 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2365 static void remap_page(struct folio *folio, unsigned long nr)
2370 if (!folio_test_anon(folio))
2373 remove_migration_ptes(folio, folio, true);
2374 i += folio_nr_pages(folio);
2377 folio = folio_next(folio);
2405 static void __split_huge_page_tail(struct folio *folio, int tail,
2408 struct page *head = &folio->page;
2411 * Careful: new_folio is not a "real" folio before we cleared PageTail.
2414 struct folio *new_folio = (struct folio *)page_tail;
2463 if (folio_test_swapcache(folio))
2464 new_folio->swap.val = folio->swap.val + tail;
2499 struct folio *folio = page_folio(page);
2500 struct page *head = &folio->page;
2510 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2511 offset = swp_offset(folio->swap);
2512 swap_cache = swap_address_space(folio->swap);
2517 lruvec = folio_lruvec_lock(folio);
2522 __split_huge_page_tail(folio, i, lruvec, list);
2525 struct folio *tail = page_folio(head + i);
2531 inode_to_wb(folio->mapping->host));
2567 remap_page(folio, nr);
2569 if (folio_test_swapcache(folio))
2570 split_swap_cluster(folio->swap);
2590 bool can_split_folio(struct folio *folio, int *pextra_pins)
2595 if (folio_test_anon(folio))
2596 extra_pins = folio_test_swapcache(folio) ?
2597 folio_nr_pages(folio) : 0;
2599 extra_pins = folio_nr_pages(folio);
2602 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
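
Note: can_split_folio() (lines 2590-2602) is the pin test run before any split. The expected reference count is folio_mapcount() for the mappings, plus one reference per subpage held by the page cache (or, for anon folios, by the swap cache), plus one reference held by the caller of the split; anything above that is an unexpected pin (GUP, O_DIRECT, ...) and vetoes the split. A worked example for a hypothetical PMD-sized, 512-page anonymous folio:

/*
 * Hypothetical example: a 512-page anonymous THP that sits in the
 * swap cache and is PMD-mapped by exactly one task, with one extra
 * reference held by the would-be splitter:
 *
 *     folio_mapcount()   = 1              (one PMD mapping)
 *     extra_pins         = 512            (one swap cache ref per subpage)
 *     expected refcount  = 1 + 512 + 1    = 514
 *
 * can_split_folio() returns true only if folio_ref_count() is exactly
 * 514; any higher count means an unexpected pin and the split fails.
 */
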
2626 struct folio *folio = page_folio(page);
2627 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2628 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
2635 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2636 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2638 is_hzp = is_huge_zero_page(&folio->page);
2644 if (folio_test_writeback(folio))
2647 if (folio_test_anon(folio)) {
2656 anon_vma = folio_get_anon_vma(folio);
2667 mapping = folio->mapping;
2678 if (!filemap_release_folio(folio, gfp)) {
2683 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
2697 * folio lock is good enough to serialize the trimming.
2708 if (!can_split_folio(folio, &extra_pins)) {
2713 unmap_folio(folio);
2719 * Check if the folio is present in page cache.
2720 * We assume all tail are present too, if folio is there.
2724 if (xas_load(&xas) != folio)
2730 if (folio_ref_freeze(folio, 1 + extra_pins)) {
2731 if (!list_empty(&folio->_deferred_list)) {
2733 list_del(&folio->_deferred_list);
2737 int nr = folio_nr_pages(folio);
2739 xas_split(&xas, folio, folio_order(folio));
2740 if (folio_test_pmd_mappable(folio)) {
2741 if (folio_test_swapbacked(folio)) {
2742 __lruvec_stat_mod_folio(folio,
2745 __lruvec_stat_mod_folio(folio,
2760 remap_page(folio, folio_nr_pages(folio));
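
Note: lines 2626-2760 are the heart of split_huge_page_to_list(): the locked, pinned folio has all of its mappings removed by unmap_folio(), its reference count is frozen, the tail pages are carved off, and on any failure remap_page() (line 2760) restores the mappings. The freeze at line 2730 is what makes the operation safe; a minimal, hypothetical wrapper to illustrate its semantics:

#include <linux/mm.h>

/* Hypothetical illustration of the freeze step at line 2730:
 * folio_ref_freeze(folio, n) atomically drops the refcount to zero
 * only if it is exactly n.  With n = 1 + extra_pins the split goes
 * ahead only when the caller's pin plus the expected page/swap cache
 * pins are the sole remaining references; if a racing pin shows up,
 * the freeze fails and the split is abandoned. */
static bool freeze_for_split(struct folio *folio, int extra_pins)
{
        return folio_ref_freeze(folio, 1 + extra_pins);
}
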
2777 void folio_undo_large_rmappable(struct folio *folio)
2783 * At this point, there is no one trying to add the folio to
2784 * deferred_list. If folio is not in deferred_list, it's safe
2787 if (data_race(list_empty(&folio->_deferred_list)))
2790 ds_queue = get_deferred_split_queue(folio);
2792 if (!list_empty(&folio->_deferred_list)) {
2794 list_del(&folio->_deferred_list);
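
Note: folio_undo_large_rmappable() (lines 2777-2794) runs when a large rmappable folio is being freed and must take it off the deferred-split queue if it is still queued. The unlocked list_empty() check is annotated with data_race() and then repeated under the queue lock before anything is removed. A hypothetical, generic sketch of that check-then-recheck pattern (the helper name and list_del_init() are mine; the real code uses list_del() and also drops the queue length counter):

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical sketch of the unqueue pattern: skip the lock in the
 * common already-empty case, then recheck under the lock before
 * actually unlinking the entry. */
static void maybe_unqueue(struct list_head *entry, spinlock_t *lock)
{
        unsigned long flags;

        if (data_race(list_empty(entry)))
                return;

        spin_lock_irqsave(lock, flags);
        if (!list_empty(entry))
                list_del_init(entry);
        spin_unlock_irqrestore(lock, flags);
}
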
2799 void deferred_split_folio(struct folio *folio)
2801 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2803 struct mem_cgroup *memcg = folio_memcg(folio);
2807 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
2812 * And, if page reclaim is already handling the same folio, it is
2815 * Check the swapcache flag to determine if the folio is being
2816 * handled by page reclaim since THP swap would add the folio into
2819 if (folio_test_swapcache(folio))
2822 if (!list_empty(&folio->_deferred_list))
2826 if (list_empty(&folio->_deferred_list)) {
2828 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
2832 set_shrinker_bit(memcg, folio_nid(folio),
2859 struct folio *folio, *next;
2869 list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
2871 if (folio_try_get(folio)) {
2872 list_move(&folio->_deferred_list, &list);
2875 list_del_init(&folio->_deferred_list);
2883 list_for_each_entry_safe(folio, next, &list, _deferred_list) {
2884 if (!folio_trylock(folio))
2887 if (!split_folio(folio))
2889 folio_unlock(folio);
2891 folio_put(folio);
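
Note: deferred_split_folio() (lines 2799-2832) queues a partially unmapped THP on the per-memcg (or per-node) split queue instead of splitting it synchronously, and deferred_split_scan() (lines 2859-2891) is the shrinker callback that drains it later: entries are moved to a private list under the queue lock with a reference taken via folio_try_get(), and only then, with the lock dropped, is each folio trylocked and split. A condensed, hypothetical sketch of that drain loop:

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/pagemap.h>

/* Hypothetical sketch of the drain loop in deferred_split_scan():
 * @list holds folios already moved off the split queue, each with a
 * reference taken via folio_try_get().  Returns how many were split. */
static unsigned long drain_deferred_list(struct list_head *list)
{
        struct folio *folio, *next;
        unsigned long split = 0;

        list_for_each_entry_safe(folio, next, list, _deferred_list) {
                if (folio_trylock(folio)) {
                        /* split_folio() returns 0 on success and takes
                         * the folio off the deferred list itself. */
                        if (!split_folio(folio))
                                split++;
                        folio_unlock(folio);
                }
                folio_put(folio);
        }
        return split;
}

The real scan then splices whatever could not be split back onto ds_queue->split_queue under the lock and reports the count to the shrinker.
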
2920 struct folio *folio;
2935 folio = page_folio(page);
2936 if (!folio_try_get(folio))
2939 if (unlikely(page_folio(page) != folio))
2942 if (zone != folio_zone(folio))
2945 if (!folio_test_large(folio)
2946 || folio_test_hugetlb(folio)
2947 || !folio_test_lru(folio))
2951 folio_lock(folio);
2952 nr_pages = folio_nr_pages(folio);
2953 if (!split_folio(folio))
2956 folio_unlock(folio);
2958 folio_put(folio);
3015 struct folio *folio;
3032 folio = page_folio(page);
3033 if (!is_transparent_hugepage(folio))
3037 if (!can_split_folio(folio, NULL))
3040 if (!folio_trylock(folio))
3043 if (!split_folio(folio))
3046 folio_unlock(folio);
3048 folio_put(folio);
3085 struct folio *folio = filemap_get_folio(mapping, index);
3088 if (IS_ERR(folio))
3091 if (!folio_test_large(folio))
3095 nr_pages = folio_nr_pages(folio);
3097 if (!folio_trylock(folio))
3100 if (!split_folio(folio))
3103 folio_unlock(folio);
3105 folio_put(folio);
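
Note: the last three blocks (lines 2920-2958, 3015-3048 and 3085-3105) are the debugfs split_huge_pages walkers: one scans every PFN in every populated zone, one walks a process's virtual address range, and one walks a file's page cache. All three funnel into the same per-folio pattern: pin the folio, filter out anything that is not a splittable large folio, lock it, call split_folio(), unlock and drop the pin. A hypothetical helper distilling that shared pattern (the hugetlb filter comes from line 2946; the walkers differ only in how they find the folio in the first place):

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/pagemap.h>

/* Hypothetical helper: per-folio body shared by the split_huge_pages
 * debugfs walkers above.  Returns true if the folio was split. */
static bool try_get_and_split_folio(struct folio *folio)
{
        bool split = false;

        if (!folio_try_get(folio))
                return false;

        if (folio_test_large(folio) &&
            !folio_test_hugetlb(folio) &&
            folio_trylock(folio)) {
                if (!split_folio(folio))        /* 0 on success */
                        split = true;
                folio_unlock(folio);
        }

        folio_put(folio);
        return split;
}
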