Lines Matching defs:folio (each match is prefixed with its line number in the searched file, apparently fs/hugetlbfs/inode.c)

429 static void hugetlb_delete_from_page_cache(struct folio *folio)
431 folio_clear_dirty(folio);
432 folio_clear_uptodate(folio);
433 filemap_remove_folio(folio);
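Lines 429 and 431-433 above are essentially the whole helper (the brace-only lines in between simply do not contain "folio"). Reassembled here for readability, with comments that are added in this sketch and are not in the source, and assuming the usual includes of the surrounding file:

/*
 * Reassembled from the matched lines above.  The caller holds the folio
 * lock (filemap_remove_folio() requires it); dirty and uptodate are
 * cleared first so a truncated hugetlb folio is neither written back nor
 * handed out again with stale contents.
 */
static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}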
492 * this folio can be created while executing the routine.
496 struct folio *folio, pgoff_t index)
500 struct page *page = &folio->page;
615 struct folio *folio, pgoff_t index,
621 * If folio is mapped, it was faulted in after being
624 * until we finish removing the folio.
626 if (unlikely(folio_mapped(folio)))
627 hugetlb_unmap_file_folio(h, mapping, folio, index);
629 folio_lock(folio);
631 * We must remove the folio from page cache before removing
637 VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
638 hugetlb_delete_from_page_cache(folio);
646 folio_unlock(folio);
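Lines 615-646 outline the teardown order in remove_inode_single_folio(): unmap if needed, lock, sanity-check the restore-reserve flag, delete from the page cache, unlock. Below is a condensed sketch of just that order; the name, parameter list and return type are illustrative (the real function takes further arguments, as the trailing commas at lines 615 and 696 show, and also does reserve/subpool accounting that is omitted here):

/*
 * Condensed sketch of the ordering shown at lines 615-646.
 */
static void remove_inode_single_folio_sketch(struct hstate *h,
					     struct address_space *mapping,
					     struct folio *folio, pgoff_t index)
{
	/*
	 * If the folio is mapped, it was faulted in again after an earlier
	 * unmap; unmap it once more before removing it.
	 */
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	folio_lock(folio);
	/*
	 * The folio must leave the page cache before the reserve map is
	 * updated; a folio reaching this point is expected not to carry
	 * the hugetlb restore-reserve flag.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	folio_unlock(folio);
}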
686 struct folio *folio = fbatch.folios[i];
689 index = folio->index;
694 * Remove folio that was part of folio_batch.
696 if (remove_inode_single_folio(h, inode, mapping, folio,
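The loop fragments at lines 686-696 show remove_inode_hugepages() walking the page cache in folio batches. A minimal sketch of that walk follows, reusing the remove_inode_single_folio_sketch() helper from the previous sketch and assuming the folio_batch API from <linux/pagevec.h>; the freed-folio accounting and truncate handling of the real loop are omitted:

/*
 * Sketch of the batched page-cache walk around lines 686-696.
 */
static void remove_inode_hugepages_sketch(struct hstate *h, struct inode *inode,
					  loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t next = lstart >> huge_page_shift(h);
	pgoff_t end = lend >> huge_page_shift(h);
	struct folio_batch fbatch;
	int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			pgoff_t index = folio->index;

			/* Remove this folio that was part of the batch. */
			remove_inode_single_folio_sketch(h, mapping, folio,
							 index);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}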
753 struct folio *folio;
755 folio = filemap_lock_folio(mapping, idx);
756 if (IS_ERR(folio))
764 folio_zero_segment(folio, (size_t)start, (size_t)end);
766 folio_unlock(folio);
767 folio_put(folio);
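Lines 753-767 show the hole-punch helper's per-folio work: take the folio locked straight from the page cache, zero the partial byte range inside it, then unlock and drop the lookup reference. A small sketch of that sequence; the function name and parameters are illustrative:

/*
 * Sketch of the partial-folio zeroing at lines 753-767.
 */
static void zero_partial_folio_sketch(struct address_space *mapping,
				      pgoff_t idx, size_t start, size_t end)
{
	struct folio *folio;

	folio = filemap_lock_folio(mapping, idx);
	if (IS_ERR(folio))
		return;		/* nothing cached at this index */

	/* Zero bytes [start, end) within the folio. */
	folio_zero_segment(folio, start, end);

	folio_unlock(folio);
	folio_put(folio);
}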
878 struct folio *folio;
900 folio = filemap_get_folio(mapping, index);
901 if (!IS_ERR(folio)) {
902 folio_put(folio);
908 * Allocate folio without setting the avoid_reserve argument.
916 folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
918 if (IS_ERR(folio)) {
920 error = PTR_ERR(folio);
923 clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
924 __folio_mark_uptodate(folio);
925 error = hugetlb_add_to_page_cache(folio, mapping, index);
927 restore_reserve_on_error(h, &pseudo_vma, addr, folio);
928 folio_put(folio);
935 folio_set_hugetlb_migratable(folio);
940 folio_unlock(folio);
941 folio_put(folio);
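Lines 878-941 are the fallocate preallocation path: skip indices already present in the page cache, otherwise allocate a hugetlb folio, zero it, mark it uptodate, insert it into the page cache (undoing the reservation on failure), mark it migratable, then unlock and drop the allocation reference. Below is a condensed sketch of one index of that loop; the name, parameters and return convention are illustrative, and the fault-mutex serialization, memory-policy setup and size checks of the real hugetlbfs_fallocate() are omitted:

/*
 * Condensed sketch of the per-index fallocate flow at lines 878-941.
 */
static int fallocate_one_index_sketch(struct hstate *h,
				      struct address_space *mapping,
				      struct vm_area_struct *pseudo_vma,
				      unsigned long addr, pgoff_t index)
{
	struct folio *folio;
	int error;

	/* Nothing to do if this index already has a folio in the cache. */
	folio = filemap_get_folio(mapping, index);
	if (!IS_ERR(folio)) {
		folio_put(folio);
		return 0;
	}

	/* Allocate without setting the avoid_reserve argument (0). */
	folio = alloc_hugetlb_folio(pseudo_vma, addr, 0);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
	__folio_mark_uptodate(folio);

	error = hugetlb_add_to_page_cache(folio, mapping, index);
	if (error) {
		restore_reserve_on_error(h, pseudo_vma, addr, folio);
		folio_put(folio);
		return error;
	}

	folio_set_hugetlb_migratable(folio);
	/*
	 * The folio is still locked and we still hold the reference from
	 * alloc_hugetlb_folio(); the page cache now has its own reference,
	 * so release both.
	 */
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}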
1142 struct folio *dst, struct folio *src,