Lines matching defs:folio (apparently mm/mempolicy.c, Linux ≈6.6; the leading numbers are line positions in that file)
423 static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
437 * Check if the folio's nid is in qp->nmask.
442 static inline bool queue_folio_required(struct folio *folio,
445 int nid = folio_nid(folio);
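
Lines 442-445 open the node-mask predicate used by every queue_folios_* walker below. A reconstruction of the whole function, assuming the standard mm/mempolicy.c definition; the MPOL_MF_INVERT handling comes from the comment block that line 437 belongs to, not from the matches themselves:

    static inline bool queue_folio_required(struct folio *folio,
                                            struct queue_pages *qp)
    {
            int nid = folio_nid(folio);
            unsigned long flags = qp->flags;

            /*
             * Queue the folio when its node is in qp->nmask, or, when
             * MPOL_MF_INVERT is set, when its node is NOT in qp->nmask.
             */
            return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
    }
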
457 * existing folio was already on a node that does not follow the
465 struct folio *folio;
473 folio = pfn_folio(pmd_pfn(*pmd));
474 if (is_huge_zero_page(&folio->page)) {
478 if (!queue_folio_required(folio, qp))
482 /* go to folio migration */
485 migrate_folio_add(folio, qp->pagelist, flags)) {
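
Lines 465-485 all fall inside the PMD-level walk callback. A sketch of its control flow, reconstructed around the matched lines; the migration-entry check, the locking, and the return-value conventions are assumptions taken from the same source:

    static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
    {
            struct queue_pages *qp = walk->private;
            struct folio *folio;
            unsigned long flags;
            int ret = 0;

            if (unlikely(is_pmd_migration_entry(*pmd))) {
                    ret = -EIO;
                    goto unlock;
            }
            folio = pfn_folio(pmd_pfn(*pmd));
            if (is_huge_zero_page(&folio->page)) {
                    /* Nothing to migrate; skip the rest of this PMD. */
                    walk->action = ACTION_CONTINUE;
                    goto unlock;
            }
            if (!queue_folio_required(folio, qp))
                    goto unlock;

            flags = qp->flags;
            /* go to folio migration */
            if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                    if (!vma_migratable(walk->vma) ||
                        migrate_folio_add(folio, qp->pagelist, flags)) {
                            ret = 1;        /* unmovable folio found */
                            goto unlock;
                    }
            } else
                    ret = -EIO;     /* MPOL_MF_STRICT alone: misplaced folio */
    unlock:
            spin_unlock(ptl);
            return ret;
    }
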
504 * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
511 struct folio *folio;
531 folio = vm_normal_folio(vma, addr, ptent);
532 if (!folio || folio_is_zone_device(folio))
538 if (folio_test_reserved(folio))
540 if (!queue_folio_required(folio, qp))
555 if (migrate_folio_add(folio, qp->pagelist, flags))
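
Lines 504-555 come from the PTE-range walker and its return-value comment (0: well placed or queued; 1: unmovable folio under MPOL_MF_MOVE* with MPOL_MF_STRICT; -EIO: misplaced folio under MPOL_MF_STRICT alone). A condensed reconstruction; the locking and the has_unmovable bookkeeping are assumptions from the same source:

    static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
                                      unsigned long end, struct mm_walk *walk)
    {
            struct vm_area_struct *vma = walk->vma;
            struct queue_pages *qp = walk->private;
            unsigned long flags = qp->flags;
            bool has_unmovable = false;
            pte_t *pte, *mapped_pte, ptent;
            spinlock_t *ptl;
            struct folio *folio;

            ptl = pmd_trans_huge_lock(pmd, vma);
            if (ptl)
                    return queue_folios_pmd(pmd, ptl, addr, end, walk);

            mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
            if (!pte) {
                    walk->action = ACTION_AGAIN;
                    return 0;
            }
            for (; addr != end; pte++, addr += PAGE_SIZE) {
                    ptent = ptep_get(pte);
                    if (!pte_present(ptent))
                            continue;
                    folio = vm_normal_folio(vma, addr, ptent);
                    if (!folio || folio_is_zone_device(folio))
                            continue;
                    /* vm_normal_folio() filters the zero page, but reserved
                     * folios (e.g. in a VDSO) may still show up. */
                    if (folio_test_reserved(folio))
                            continue;
                    if (!queue_folio_required(folio, qp))
                            continue;
                    if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                            if (!vma_migratable(vma)) {
                                    has_unmovable = true;
                                    break;
                            }
                            /* Do not abort: off-LRU folios may be temporary,
                             * and other folios still need to be queued. */
                            if (migrate_folio_add(folio, qp->pagelist, flags))
                                    has_unmovable = true;
                    } else
                            break;  /* MPOL_MF_STRICT alone */
            }
            pte_unmap_unlock(mapped_pte, ptl);
            cond_resched();

            if (has_unmovable)
                    return 1;
            return addr != end ? -EIO : 0;
    }
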
574 struct folio *folio;
582 folio = pfn_folio(pte_pfn(entry));
583 if (!queue_folio_required(folio, qp))
588 * STRICT alone means only detecting misplaced folio and no
599 * Detecting misplaced folio but allow migrating folios which
610 * To check if the folio is shared, ideally we want to make sure
612 * expensive, so check the estimated mapcount of the folio instead.
615 (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
617 if (!isolate_hugetlb(folio, qp->pagelist) &&
620 * Failed to isolate folio but allow migrating pages
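
Lines 574-620 belong to the hugetlb walker. A sketch of its decision sequence around the matches; the pte_present() check, the locking, the CONFIG_HUGETLB_PAGE guard (omitted here), and the hugetlb_pmd_shared() test are assumptions from the same source:

    static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
                                    unsigned long addr, unsigned long end,
                                    struct mm_walk *walk)
    {
            struct queue_pages *qp = walk->private;
            unsigned long flags = qp->flags & MPOL_MF_VALID;
            struct folio *folio;
            spinlock_t *ptl;
            pte_t entry;
            int ret = 0;

            ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
            entry = huge_ptep_get(pte);
            if (!pte_present(entry))
                    goto unlock;
            folio = pfn_folio(pte_pfn(entry));
            if (!queue_folio_required(folio, qp))
                    goto unlock;

            if (flags == MPOL_MF_STRICT) {
                    /* STRICT alone: just report the misplaced folio. */
                    ret = -EIO;
                    goto unlock;
            }
            if (!vma_migratable(walk->vma)) {
                    /* Misplaced folio in an unmovable VMA; folios already
                     * queued may still migrate. */
                    ret = 1;
                    goto unlock;
            }
            /* Migrate only folios that look unshared, judged by the
             * estimated mapcount, unless MPOL_MF_MOVE_ALL overrides. */
            if (flags & (MPOL_MF_MOVE_ALL) ||
                (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
                 !hugetlb_pmd_shared(pte))) {
                    if (!isolate_hugetlb(folio, qp->pagelist) &&
                        (flags & MPOL_MF_STRICT))
                            /* Isolation failed; flag it, but let folios
                             * already queued migrate. */
                            ret = 1;
            }
    unlock:
            spin_unlock(ptl);
            return ret;
    }
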
1035 static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1042 * To check if the folio is shared, ideally we want to make sure
1044 * expensive, so check the estimated mapcount of the folio instead.
1046 if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
1047 if (folio_isolate_lru(folio)) {
1048 list_add_tail(&folio->lru, foliolist);
1049 node_stat_mod_folio(folio,
1050 NR_ISOLATED_ANON + folio_is_file_lru(folio),
1051 folio_nr_pages(folio));
1054 * Non-movable folio may reach here. And, there may be
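
Lines 1035-1054 show most of migrate_folio_add() itself. The function completed; only the else-branch returning -EIO under MPOL_MF_STRICT goes beyond the matches, as an assumption from the same source:

    static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
                                 unsigned long flags)
    {
            /*
             * We try to migrate only unshared folios. If it is shared it
             * is likely not worth migrating.
             *
             * To check if the folio is shared, ideally we want to make sure
             * every page is mapped to the same process. Doing that is very
             * expensive, so check the estimated mapcount of the folio instead.
             */
            if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
                    if (folio_isolate_lru(folio)) {
                            list_add_tail(&folio->lru, foliolist);
                            node_stat_mod_folio(folio,
                                    NR_ISOLATED_ANON + folio_is_file_lru(folio),
                                    folio_nr_pages(folio));
                    } else if (flags & MPOL_MF_STRICT) {
                            /*
                             * Non-movable folio may reach here, as may
                             * temporarily off-LRU or non-LRU movable folios.
                             * They cannot be isolated, so they cannot be
                             * moved now; report -EIO as for any other
                             * unmovable folio.
                             */
                            return -EIO;
                    }
            }
            return 0;
    }

The estimated-sharers test is a deliberate trade-off: checking that every page of the folio maps into one process would be exact but expensive, so the code settles for the estimate.
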
1218 static struct folio *new_folio(struct folio *src, unsigned long start)
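
Line 1218 is the migration-target allocator used once the folios queued above are actually moved. A sketch of its body under CONFIG_MIGRATION; the VMA scan and the hugetlb/large-folio special cases are assumptions from the same source:

    static struct folio *new_folio(struct folio *src, unsigned long start)
    {
            struct vm_area_struct *vma;
            unsigned long address;
            VMA_ITERATOR(vmi, current->mm, start);
            gfp_t gfp = GFP_HIGHUSER;

            /* Find the VMA that maps @src so its policy can be honoured. */
            for_each_vma(vmi, vma) {
                    address = page_address_in_vma(&src->page, vma);
                    if (address != -EFAULT)
                            break;
            }
            if (!vma)
                    return NULL;    /* page no longer mapped; not a bug */

            if (folio_test_hugetlb(src))
                    return alloc_hugetlb_folio_vma(folio_hstate(src),
                                    vma, address);

            if (folio_test_large(src))
                    gfp = GFP_TRANSHUGE;

            return vma_alloc_folio(gfp, folio_order(src), vma, address,
                            folio_test_large(src));
    }
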
1247 static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1259 static struct folio *new_folio(struct folio *src, unsigned long start)
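
Lines 1247 and 1259 repeat the two signatures, which marks the !CONFIG_MIGRATION stubs. Presumably:

    #else /* !CONFIG_MIGRATION */

    static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
                                 unsigned long flags)
    {
            return -EIO;
    }

    static struct folio *new_folio(struct folio *src, unsigned long start)
    {
            return NULL;
    }

    #endif /* CONFIG_MIGRATION */
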
2170 * vma_alloc_folio - Allocate a folio for a VMA.
2172 * @order: Order of the folio.
2177 * Allocate a folio for a specific address in @vma, using the appropriate
2182 * Return: The folio on success or NULL if allocation fails.
2184 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2189 struct folio *folio;
2203 folio = (struct folio *)page;
2204 if (folio && order > 1)
2205 folio_prep_large_rmappable(folio);
2216 folio = (struct folio *)page;
2217 if (folio && order > 1)
2218 folio_prep_large_rmappable(folio);
2245 folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2254 if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2255 folio = __folio_alloc(gfp, order, hpage_node,
2264 folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2267 return folio;
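
Lines 2170-2267 span the kernel-doc and body of vma_alloc_folio(). A condensed reconstruction of the policy dispatch; the MPOL_PREFERRED_MANY branch is collapsed into a comment, gotos are rewritten as returns, and helpers such as alloc_page_interleave() come from the same source rather than from the matches:

    struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
                    unsigned long addr, bool hugepage)
    {
            struct mempolicy *pol = get_vma_policy(vma, addr);
            int node = numa_node_id();
            int preferred_nid;
            nodemask_t *nmask;
            struct folio *folio;

            if (pol->mode == MPOL_INTERLEAVE) {
                    struct page *page;
                    unsigned nid = interleave_nid(pol, vma, addr,
                                                  PAGE_SHIFT + order);

                    mpol_cond_put(pol);
                    page = alloc_page_interleave(gfp | __GFP_COMP, order, nid);
                    folio = (struct folio *)page;
                    if (folio && order > 1)
                            folio_prep_large_rmappable(folio);
                    return folio;
            }

            /* The MPOL_PREFERRED_MANY branch (lines 2216-2218) converts a
             * page from alloc_pages_preferred_many() the same way. */

            if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage) {
                    int hpage_node = node; /* first_node(pol->nodes) under
                                            * MPOL_PREFERRED */

                    nmask = policy_nodemask(gfp, pol);
                    if (!nmask || node_isset(hpage_node, *nmask)) {
                            mpol_cond_put(pol);
                            /* Try the preferred node only: compact, don't
                             * retry or reclaim. */
                            folio = __folio_alloc_node(gfp | __GFP_THISNODE |
                                            __GFP_NORETRY, order, hpage_node);
                            /* Fall back to the whole mask only if the
                             * caller may direct-reclaim. */
                            if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
                                    folio = __folio_alloc(gfp, order,
                                                    hpage_node, nmask);
                            return folio;
                    }
            }

            nmask = policy_nodemask(gfp, pol);
            preferred_nid = policy_node(gfp, pol, node);
            folio = __folio_alloc(gfp, order, preferred_nid, nmask);
            mpol_cond_put(pol);
            return folio;
    }

The THP path deliberately restricts the first attempt to one node (__GFP_THISNODE | __GFP_NORETRY): remote-node access costs would likely offset the benefit of a huge page.
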
2311 struct folio *folio_alloc(gfp_t gfp, unsigned order)
2314 struct folio *folio = (struct folio *)page;
2316 if (folio && order > 1)
2317 folio_prep_large_rmappable(folio);
2318 return folio;
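
Lines 2311-2318 give folio_alloc() almost whole; only the allocation between lines 2311 and 2314 is missing from the matches. Presumably:

    struct folio *folio_alloc(gfp_t gfp, unsigned order)
    {
            struct page *page = alloc_pages(gfp | __GFP_COMP, order);
            struct folio *folio = (struct folio *)page;

            if (folio && order > 1)
                    folio_prep_large_rmappable(folio);
            return folio;
    }
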