Lines Matching defs:folio
58 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
60 return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
64 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
1317 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1319 int nid = folio_nid(folio);
1322 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1324 list_move(&folio->lru, &h->hugepage_freelists[nid]);
1327 folio_set_hugetlb_freed(folio);
1330 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1333 struct folio *folio;
1337 list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1338 if (pin && !folio_is_longterm_pinnable(folio))
1341 if (folio_test_hwpoison(folio))
1344 list_move(&folio->lru, &h->hugepage_activelist);
1345 folio_ref_unfreeze(folio, 1);
1346 folio_clear_hugetlb_freed(folio);
1349 return folio;
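For readability, here is a condensed sketch of the free-list dequeue that the matched lines 1330-1349 come from: walk the per-node hugetlb free list, skip folios that cannot be long-term pinned or are hardware-poisoned, then move the winner to the active list and give it a reference. The function name, the explicit pin parameter, and the pool-counter updates are assumptions for illustration; the hugetlb_lock is only asserted, not taken, here.

	static struct folio *dequeue_node_exact_sketch(struct hstate *h, int nid, bool pin)
	{
		struct folio *folio;

		lockdep_assert_held(&hugetlb_lock);

		list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
			if (pin && !folio_is_longterm_pinnable(folio))
				continue;
			if (folio_test_hwpoison(folio))
				continue;

			/* free hugetlb folios sit on the list with a frozen (zero) refcount */
			list_move(&folio->lru, &h->hugepage_activelist);
			folio_ref_unfreeze(folio, 1);
			folio_clear_hugetlb_freed(folio);
			h->free_huge_pages--;			/* assumed pool accounting */
			h->free_huge_pages_node[nid]--;
			return folio;
		}

		return NULL;
	}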
1355 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1369 struct folio *folio;
1381 folio = dequeue_hugetlb_folio_node_exact(h, node);
1382 if (folio)
1383 return folio;
1396 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1401 struct folio *folio = NULL;
1423 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1430 if (!folio)
1431 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1434 if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
1435 folio_set_hugetlb_restore_reserve(folio);
1440 return folio;
1518 static void __destroy_compound_gigantic_folio(struct folio *folio,
1525 atomic_set(&folio->_entire_mapcount, 0);
1526 atomic_set(&folio->_nr_pages_mapped, 0);
1527 atomic_set(&folio->_pincount, 0);
1530 p = folio_page(folio, i);
1538 __folio_clear_head(folio);
1541 static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
1544 __destroy_compound_gigantic_folio(folio, order, true);
1548 static void destroy_compound_gigantic_folio(struct folio *folio,
1551 __destroy_compound_gigantic_folio(folio, order, false);
1554 static void free_gigantic_folio(struct folio *folio, unsigned int order)
1561 int nid = folio_nid(folio);
1563 if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
1567 free_contig_range(folio_pfn(folio), 1 << order);
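The gigantic-folio free path at 1554-1567, condensed into a sketch (function name illustrative): hand the range back to the per-node hugetlb CMA area if it came from there, otherwise release it with free_contig_range().

	static void free_gigantic_sketch(struct folio *folio, unsigned int order)
	{
	#ifdef CONFIG_CMA
		int nid = folio_nid(folio);

		/* cma_release() returns false if the range was not CMA-allocated */
		if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
			return;
	#endif
		free_contig_range(folio_pfn(folio), 1 << order);
	}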
1571 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1609 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1617 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1622 static inline void free_gigantic_folio(struct folio *folio,
1624 static inline void destroy_compound_gigantic_folio(struct folio *folio,
1629 struct folio *folio)
1633 folio_clear_hugetlb(folio);
1637 * Remove hugetlb folio from lists.
1638 * If vmemmap exists for the folio, update dtor so that the folio appears
1642 * A reference is held on the folio, except in the case of demote.
1646 static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1650 int nid = folio_nid(folio);
1652 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1653 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
1659 list_del(&folio->lru);
1661 if (folio_test_hugetlb_freed(folio)) {
1675 if (!folio_test_hugetlb_vmemmap_optimized(folio))
1676 __clear_hugetlb_destructor(h, folio);
1683 folio_ref_unfreeze(folio, 1);
1689 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1692 __remove_hugetlb_folio(h, folio, adjust_surplus, false);
1695 static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
1698 __remove_hugetlb_folio(h, folio, adjust_surplus, true);
1701 static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1705 int nid = folio_nid(folio);
1707 VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
1711 INIT_LIST_HEAD(&folio->lru);
1720 folio_set_hugetlb(folio);
1721 folio_change_private(folio, NULL);
1724 * folio_change_private(folio, NULL) cleared it.
1726 folio_set_hugetlb_vmemmap_optimized(folio);
1729 * This folio is about to be managed by the hugetlb allocator and
1733 zeroed = folio_put_testzero(folio);
1737 * on the folio. In this case, we simply return as
1743 arch_clear_hugepage_flags(&folio->page);
1744 enqueue_hugetlb_folio(h, folio);
1748 struct folio *folio)
1750 bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
1759 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1762 if (hugetlb_vmemmap_restore(h, &folio->page)) {
1769 add_hugetlb_folio(h, folio, true);
1778 if (unlikely(folio_test_hwpoison(folio)))
1779 folio_clear_hugetlb_hwpoison(folio);
1787 __clear_hugetlb_destructor(h, folio);
1796 hugetlb_cma_folio(folio, huge_page_order(h))) {
1797 destroy_compound_gigantic_folio(folio, huge_page_order(h));
1798 free_gigantic_folio(folio, huge_page_order(h));
1800 __free_pages(&folio->page, huge_page_order(h));
1832 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1852 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1855 if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
1856 __update_and_free_hugetlb_folio(h, folio);
1867 if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1874 struct folio *folio;
1877 folio = page_folio(page);
1878 update_and_free_hugetlb_folio(h, folio, false);
1894 void free_huge_folio(struct folio *folio)
1900 struct hstate *h = folio_hstate(folio);
1901 int nid = folio_nid(folio);
1902 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1906 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1907 VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1909 hugetlb_set_folio_subpool(folio, NULL);
1910 if (folio_test_anon(folio))
1911 __ClearPageAnonExclusive(&folio->page);
1912 folio->mapping = NULL;
1913 restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1914 folio_clear_hugetlb_restore_reserve(folio);
1936 folio_clear_hugetlb_migratable(folio);
1938 pages_per_huge_page(h), folio);
1940 pages_per_huge_page(h), folio);
1944 if (folio_test_hugetlb_temporary(folio)) {
1945 remove_hugetlb_folio(h, folio, false);
1947 update_and_free_hugetlb_folio(h, folio, true);
1950 remove_hugetlb_folio(h, folio, true);
1952 update_and_free_hugetlb_folio(h, folio, true);
1954 arch_clear_hugepage_flags(&folio->page);
1955 enqueue_hugetlb_folio(h, folio);
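A reduced sketch of the tail of free_huge_folio() as it shows up at 1936-1955 (cgroup uncharging taken as given; helper name and parameter list invented for the sketch): temporary and surplus folios leave the pool entirely and are freed back to the allocator, anything else is re-queued on the per-node free list.

	static void free_huge_folio_tail_sketch(struct hstate *h, struct folio *folio,
						int nid, unsigned long flags)
	{
		if (folio_test_hugetlb_temporary(folio)) {
			remove_hugetlb_folio(h, folio, false);
			spin_unlock_irqrestore(&hugetlb_lock, flags);
			update_and_free_hugetlb_folio(h, folio, true);
		} else if (h->surplus_huge_pages_node[nid]) {
			/* shrink the surplus pool instead of keeping the folio */
			remove_hugetlb_folio(h, folio, true);
			spin_unlock_irqrestore(&hugetlb_lock, flags);
			update_and_free_hugetlb_folio(h, folio, true);
		} else {
			arch_clear_hugepage_flags(&folio->page);
			enqueue_hugetlb_folio(h, folio);
			spin_unlock_irqrestore(&hugetlb_lock, flags);
		}
	}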
1970 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1972 hugetlb_vmemmap_optimize(h, &folio->page);
1973 INIT_LIST_HEAD(&folio->lru);
1974 folio_set_hugetlb(folio);
1975 hugetlb_set_folio_subpool(folio, NULL);
1976 set_hugetlb_cgroup(folio, NULL);
1977 set_hugetlb_cgroup_rsvd(folio, NULL);
1980 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
1982 __prep_new_hugetlb_folio(h, folio);
1988 static bool __prep_compound_gigantic_folio(struct folio *folio,
1995 __folio_clear_reserved(folio);
1997 p = folio_page(folio, i);
2039 set_compound_head(p, &folio->page);
2041 __folio_set_head(folio);
2043 folio_set_order(folio, order);
2044 atomic_set(&folio->_entire_mapcount, -1);
2045 atomic_set(&folio->_nr_pages_mapped, 0);
2046 atomic_set(&folio->_pincount, 0);
2052 p = folio_page(folio, j);
2059 p = folio_page(folio, j);
2065 static bool prep_compound_gigantic_folio(struct folio *folio,
2068 return __prep_compound_gigantic_folio(folio, order, false);
2071 static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
2074 return __prep_compound_gigantic_folio(folio, order, true);
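What __prep_compound_gigantic_folio() at 1988-2074 boils down to, with the per-page refcount freezing and the error unwind at 2052-2059 elided (a simplified sketch, not the full function): chain every tail page to the head with set_compound_head(), then mark the folio as a compound head of the right order and reset its mapcount and pin counters.

	static bool prep_gigantic_sketch(struct folio *folio, unsigned int order)
	{
		int i, nr_pages = 1 << order;
		struct page *p;

		__folio_clear_reserved(folio);
		for (i = 1; i < nr_pages; i++) {
			p = folio_page(folio, i);
			__ClearPageReserved(p);
			set_compound_head(p, &folio->page);
		}

		__folio_set_head(folio);
		folio_set_order(folio, order);
		atomic_set(&folio->_entire_mapcount, -1);
		atomic_set(&folio->_nr_pages_mapped, 0);
		atomic_set(&folio->_pincount, 0);
		return true;
	}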
2084 struct folio *folio;
2088 folio = page_folio(page);
2089 return folio_test_hugetlb(folio);
2127 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
2197 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2201 struct folio *folio;
2206 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
2208 folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
2210 if (!folio)
2213 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
2218 free_gigantic_folio(folio, huge_page_order(h));
2226 prep_new_hugetlb_folio(h, folio, folio_nid(folio));
2228 return folio;
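The allocation side at 2197-2228, condensed (retry loop and node_alloc_noretry bookkeeping dropped, names assumed): gigantic hstates go through the CMA/contig allocator and then need explicit compound setup, buddy-sized ones come back from the page allocator already prepared, and both end with prep_new_hugetlb_folio().

	static struct folio *alloc_fresh_sketch(struct hstate *h, gfp_t gfp_mask,
						int nid, nodemask_t *nmask)
	{
		struct folio *folio;

		if (hstate_is_gigantic(h))
			folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
		else
			folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
		if (!folio)
			return NULL;

		if (hstate_is_gigantic(h) &&
		    !prep_compound_gigantic_folio(folio, huge_page_order(h))) {
			/* rare failure: a constituent page held an unexpected reference */
			free_gigantic_folio(folio, huge_page_order(h));
			return NULL;
		}

		prep_new_hugetlb_folio(h, folio, folio_nid(folio));
		return folio;
	}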
2238 struct folio *folio;
2243 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2245 if (folio) {
2246 free_huge_folio(folio); /* free it into the hugepage allocator */
2267 struct folio *folio;
2279 folio = page_folio(page);
2280 remove_hugetlb_folio(h, folio, acct_surplus);
2305 struct folio *folio = page_folio(page);
2309 if (!folio_test_hugetlb(folio))
2313 if (!folio_test_hugetlb(folio)) {
2318 if (!folio_ref_count(folio)) {
2319 struct hstate *h = folio_hstate(folio);
2327 if (unlikely(!folio_test_hugetlb_freed(folio))) {
2342 remove_hugetlb_folio(h, folio, false);
2354 rc = hugetlb_vmemmap_restore(h, &folio->page);
2356 update_and_free_hugetlb_folio(h, folio, false);
2359 add_hugetlb_folio(h, folio, false);
2407 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2410 struct folio *folio = NULL;
2420 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2421 if (!folio)
2433 folio_set_hugetlb_temporary(folio);
2435 free_huge_folio(folio);
2440 h->surplus_huge_pages_node[folio_nid(folio)]++;
2445 return folio;
2448 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2451 struct folio *folio;
2456 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2457 if (!folio)
2461 folio_ref_unfreeze(folio, 1);
2466 folio_set_hugetlb_temporary(folio);
2468 return folio;
2475 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2478 struct folio *folio = NULL;
2489 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2495 if (!folio)
2496 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2498 return folio;
2501 /* folio migration callback function */
2502 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2507 struct folio *folio;
2509 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2511 if (folio) {
2513 return folio;
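The migration callback at 2502-2513 in sketch form (hedged: the surplus accounting inside alloc_migrate_hugetlb_folio() is not shown): prefer a folio already sitting in the free pool, and only allocate a fresh temporary one when the reserve-adjusted pool is empty.

	struct folio *alloc_hugetlb_folio_nodemask_sketch(struct hstate *h, int preferred_nid,
							  nodemask_t *nmask, gfp_t gfp_mask)
	{
		spin_lock_irq(&hugetlb_lock);
		if (h->free_huge_pages - h->resv_huge_pages) {	/* pages not claimed by reservations */
			struct folio *folio;

			folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							       preferred_nid, nmask);
			if (folio) {
				spin_unlock_irq(&hugetlb_lock);
				return folio;
			}
		}
		spin_unlock_irq(&hugetlb_lock);

		return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
	}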
2522 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
2527 struct folio *folio;
2533 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
2536 return folio;
2547 struct folio *folio, *tmp;
2566 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2568 if (!folio) {
2572 list_add(&folio->lru, &surplus_list);
2607 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
2611 enqueue_hugetlb_folio(h, folio);
2620 list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2621 free_huge_folio(folio);
2830 * 1) A reservation was in place and the folio consumed the reservation.
2831 * hugetlb_restore_reserve is set in the folio.
2845 unsigned long address, struct folio *folio)
2849 if (folio_test_hugetlb_restore_reserve(folio)) {
2856 * as though the reservation for this folio was
2858 * faulting in the folio at a later time. This
2862 folio_clear_hugetlb_restore_reserve(folio);
2873 * hugetlb_restore_reserve would be set on the folio.
2883 * count will be incremented when the folio
2887 folio_set_hugetlb_restore_reserve(folio);
2904 * on the folio so reserve count will be
2908 folio_set_hugetlb_restore_reserve(folio);
2918 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2921 * @old_folio: Old folio to dissolve
2926 struct folio *old_folio, struct list_head *list)
2930 struct folio *new_folio;
2934 * Before dissolving the folio, we need to allocate a new one for the
2935 * pool to remain stable. Here, we allocate the folio and 'prep' it
2956 * Someone has grabbed the folio, try to isolate it here.
3011 struct folio *folio = page_folio(page);
3020 if (folio_test_hugetlb(folio)) {
3021 h = folio_hstate(folio);
3036 if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
3038 else if (!folio_ref_count(folio))
3039 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
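Putting lines 3011-3039 back together as a sketch of isolate_or_dissolve_huge_page() (the gigantic-hstate early return and some locking detail simplified): an in-use hugetlb folio is isolated onto the caller's list, while a free one is replaced and dissolved via alloc_and_dissolve_hugetlb_folio().

	static int isolate_or_dissolve_sketch(struct page *page, struct list_head *list)
	{
		struct folio *folio = page_folio(page);
		struct hstate *h;
		int ret = -EBUSY;

		spin_lock_irq(&hugetlb_lock);
		if (!folio_test_hugetlb(folio)) {
			/* not (or no longer) a hugetlb folio: nothing to do */
			spin_unlock_irq(&hugetlb_lock);
			return 0;
		}
		h = folio_hstate(folio);
		spin_unlock_irq(&hugetlb_lock);

		if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
			ret = 0;
		else if (!folio_ref_count(folio))
			ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);

		return ret;
	}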
3044 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
3049 struct folio *folio;
3112 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3113 if (!folio) {
3115 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3116 if (!folio)
3120 folio_set_hugetlb_restore_reserve(folio);
3123 list_add(&folio->lru, &h->hugepage_activelist);
3124 folio_ref_unfreeze(folio, 1);
3128 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
3134 h_cg, folio);
3139 hugetlb_set_folio_subpool(folio, spool);
3158 pages_per_huge_page(h), folio);
3160 return folio;
3223 struct folio *folio = page_folio(page);
3227 WARN_ON(folio_ref_count(folio) != 1);
3228 if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
3229 WARN_ON(folio_test_reserved(folio));
3230 prep_new_hugetlb_folio(h, folio, folio_nid(folio));
3231 free_huge_folio(folio); /* add to the hugepage allocator */
3234 free_gigantic_folio(folio, huge_page_order(h));
3256 struct folio *folio;
3259 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3261 if (!folio)
3263 free_huge_folio(folio); /* free it into the hugepage allocator */
3609 static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
3611 int i, nid = folio_nid(folio);
3614 struct folio *inner_folio;
3619 remove_hugetlb_folio_for_demote(h, folio, false);
3622 rc = hugetlb_vmemmap_restore(h, &folio->page);
3624 /* Allocation of vmemmap failed, we cannot demote folio */
3626 folio_ref_unfreeze(folio, 1);
3627 add_hugetlb_folio(h, folio, false);
3635 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
3648 subpage = folio_page(folio, i);
3678 struct folio *folio;
3689 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
3690 if (folio_test_hwpoison(folio))
3692 return demote_free_hugetlb_folio(h, folio);
5022 struct folio *new_folio, pte_t old, unsigned long sz)
5040 struct folio *pte_folio;
5154 struct folio *new_folio;
5174 /* Install the new hugetlb folio if src pte stable */
5569 struct folio *pagecache_folio, spinlock_t *ptl)
5574 struct folio *old_folio;
5575 struct folio *new_folio;
5763 struct folio *folio;
5765 folio = filemap_get_folio(mapping, idx);
5766 if (IS_ERR(folio))
5768 folio_put(folio);
5772 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
5779 __folio_set_locked(folio);
5780 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5783 __folio_clear_locked(folio);
5786 folio_clear_hugetlb_restore_reserve(folio);
5789 * mark folio dirty so that it will not be removed from cache/file
5792 folio_mark_dirty(folio);
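The page-cache insertion helper at 5772-5792 reads naturally once the matched lines are joined up; a sketch follows (the inode block accounting under i_lock is an assumption from older versions of this helper, not taken from the listing):

	int hugetlb_add_to_page_cache_sketch(struct folio *folio,
					     struct address_space *mapping, pgoff_t idx)
	{
		struct inode *inode = mapping->host;
		struct hstate *h = hstate_inode(inode);
		int err;

		__folio_set_locked(folio);
		err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
		if (unlikely(err)) {
			__folio_clear_locked(folio);
			return err;
		}
		folio_clear_hugetlb_restore_reserve(folio);

		/* keep the folio dirty so non-hugetlbfs paths never drop it from the cache */
		folio_mark_dirty(folio);

		spin_lock(&inode->i_lock);
		inode->i_blocks += blocks_per_huge_page(h);	/* assumed accounting, see lead-in */
		spin_unlock(&inode->i_lock);

		return 0;
	}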
5862 struct folio *folio;
5886 folio = filemap_lock_folio(mapping, idx);
5887 if (IS_ERR(folio)) {
5920 folio = alloc_hugetlb_folio(vma, haddr, 0);
5921 if (IS_ERR(folio)) {
5935 ret = vmf_error(PTR_ERR(folio));
5940 clear_huge_page(&folio->page, address, pages_per_huge_page(h));
5941 __folio_mark_uptodate(folio);
5945 int err = hugetlb_add_to_page_cache(folio, mapping, idx);
5954 restore_reserve_on_error(h, vma, haddr, folio);
5955 folio_put(folio);
5960 folio_lock(folio);
5973 if (unlikely(folio_test_hwpoison(folio))) {
5981 folio_unlock(folio);
5982 folio_put(folio);
6016 hugepage_add_new_anon_rmap(folio, vma, haddr);
6018 page_dup_file_rmap(&folio->page, true);
6019 new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
6032 ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
6043 folio_set_hugetlb_migratable(folio);
6045 folio_unlock(folio);
6055 restore_reserve_on_error(h, vma, haddr, folio);
6057 folio_unlock(folio);
6058 folio_put(folio);
6094 struct folio *folio = NULL;
6095 struct folio *pagecache_folio = NULL;
6229 * when folio != pagecache_folio or !pagecache_folio.
6231 folio = page_folio(pte_page(entry));
6232 if (folio != pagecache_folio)
6233 if (!folio_trylock(folio)) {
6238 folio_get(folio);
6254 if (folio != pagecache_folio)
6255 folio_unlock(folio);
6256 folio_put(folio);
6275 folio_wait_locked(folio);
6289 struct folio **foliop)
6302 struct folio *folio;
6328 folio = filemap_lock_folio(mapping, idx);
6329 if (IS_ERR(folio))
6333 /* If a folio already exists, then it's UFFDIO_COPY for
6342 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6343 if (IS_ERR(folio)) {
6348 ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6354 /* Free the allocated folio which may have
6357 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6358 folio_put(folio);
6360 /* Allocate a temporary folio to hold the copied
6363 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6364 if (!folio) {
6368 *foliop = folio;
6371 * folio.
6384 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6385 if (IS_ERR(folio)) {
6391 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
6395 folio_put(folio);
6405 __folio_mark_uptodate(folio);
6420 ret = hugetlb_add_to_page_cache(folio, mapping, idx);
6429 if (folio_test_hwpoison(folio))
6442 page_dup_file_rmap(&folio->page, true);
6444 hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr);
6455 _dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
6477 folio_set_hugetlb_migratable(folio);
6479 folio_unlock(folio);
6486 folio_unlock(folio);
6489 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6490 folio_put(folio);
7207 bool isolate_hugetlb(struct folio *folio, struct list_head *list)
7212 if (!folio_test_hugetlb(folio) ||
7213 !folio_test_hugetlb_migratable(folio) ||
7214 !folio_try_get(folio)) {
7218 folio_clear_hugetlb_migratable(folio);
7219 list_move_tail(&folio->lru, list);
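Finally, the isolation helper at 7207-7219, reconstructed as a sketch (the lock is assumed to be the usual hugetlb_lock irq variant): a folio is isolated only if it is still a hugetlb folio, is marked migratable, and a reference can be taken.

	bool isolate_hugetlb_sketch(struct folio *folio, struct list_head *list)
	{
		bool ret = true;

		spin_lock_irq(&hugetlb_lock);
		if (!folio_test_hugetlb(folio) ||
		    !folio_test_hugetlb_migratable(folio) ||
		    !folio_try_get(folio)) {
			ret = false;
			goto unlock;
		}
		folio_clear_hugetlb_migratable(folio);
		list_move_tail(&folio->lru, list);
	unlock:
		spin_unlock_irq(&hugetlb_lock);
		return ret;
	}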
7225 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
7231 if (folio_test_hugetlb(folio)) {
7233 if (folio_test_hugetlb_freed(folio))
7235 else if (folio_test_hugetlb_migratable(folio) || unpoison)
7236 ret = folio_try_get(folio);
7255 void folio_putback_active_hugetlb(struct folio *folio)
7258 folio_set_hugetlb_migratable(folio);
7259 list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
7261 folio_put(folio);
7264 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7272 * transfer temporary state of the new hugetlb folio. This is