Lines Matching defs:folio

183  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
199 * the folio's entire_mapcount.
201 static inline int folio_nr_pages_mapped(struct folio *folio)
203 return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
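
A 16GB hugetlb folio contains 0x400000 (4,194,304) 4kB pages, so the COMPOUND_MAPPED bit is chosen above that range and FOLIO_PAGES_MAPPED masks it off again. As a sketch of how the matched lines fit together in recent trees (constants may differ in the tree these line numbers were taken from):

    #define COMPOUND_MAPPED     0x800000
    #define FOLIO_PAGES_MAPPED  (COMPOUND_MAPPED - 1)

    /* How many individual pages have an elevated _mapcount, excluding
     * the folio's entire_mapcount. */
    static inline int folio_nr_pages_mapped(struct folio *folio)
    {
            return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
    }
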
206 static inline void *folio_raw_mapping(struct folio *folio)
208 unsigned long mapping = (unsigned long)folio->mapping;
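
These two lines open folio_raw_mapping(); the rest of the body (not matched above, sketched here) masks off the PAGE_MAPPING_* tag bits to recover the raw mapping pointer:

    static inline void *folio_raw_mapping(struct folio *folio)
    {
            unsigned long mapping = (unsigned long)folio->mapping;

            /* Clear the anon/movable/KSM tag bits stored in the low bits. */
            return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
    }
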
213 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
215 static inline void acct_reclaim_writeback(struct folio *folio)
217 pg_data_t *pgdat = folio_pgdat(folio);
221 __acct_reclaim_writeback(pgdat, folio, nr_throttled);
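
Assembled, the acct_reclaim_writeback() wrapper only takes the out-of-line __acct_reclaim_writeback() path when reclaimers are currently throttled on the folio's node; roughly (sketch, field names as in recent trees):

    static inline void acct_reclaim_writeback(struct folio *folio)
    {
            pg_data_t *pgdat = folio_pgdat(folio);
            int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

            /* Fast path: nothing to do unless a reclaimer is throttled. */
            if (nr_throttled)
                    __acct_reclaim_writeback(pgdat, folio, nr_throttled);
    }
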
234 void folio_rotate_reclaimable(struct folio *folio);
235 bool __folio_end_writeback(struct folio *folio);
236 void deactivate_file_folio(struct folio *folio);
237 void folio_activate(struct folio *folio);
264 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
265 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
266 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
273 * folio_evictable - Test whether a folio is evictable.
274 * @folio: The folio to test.
276 * Test whether @folio is evictable -- i.e., should be placed on
279 * Reasons folio might not be evictable:
280 * 1. folio's mapping marked unevictable
281 * 2. One of the pages in the folio is part of an mlocked VMA
283 static inline bool folio_evictable(struct folio *folio)
289 ret = !mapping_unevictable(folio_mapping(folio)) &&
290 !folio_test_mlocked(folio);
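
The complete folio_evictable() wraps those two checks in an RCU read-side section so the inode or swap cache address_space cannot be freed under it; approximately (sketch):

    static inline bool folio_evictable(struct folio *folio)
    {
            bool ret;

            /* Prevent address_space of inode and swap cache from being freed */
            rcu_read_lock();
            ret = !mapping_unevictable(folio_mapping(folio)) &&
                            !folio_test_mlocked(folio);
            rcu_read_unlock();
            return ret;
    }
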
307 * Return true if a folio needs ->release_folio() calling upon it.
309 static inline bool folio_needs_release(struct folio *folio)
311 struct address_space *mapping = folio_mapping(folio);
313 return folio_has_private(folio) ||
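
In trees that have AS_RELEASE_ALWAYS, the return expression continues with a mapping_release_always() check, so the whole helper reads roughly (sketch, assuming that flag is present):

    static inline bool folio_needs_release(struct folio *folio)
    {
            struct address_space *mapping = folio_mapping(folio);

            /* Private data (e.g. buffer heads) or a mapping that always
             * wants ->release_folio() means the call is needed. */
            return folio_has_private(folio) ||
                    (mapping && mapping_release_always(mapping));
    }
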
332 bool folio_isolate_lru(struct folio *folio);
334 void folio_putback_lru(struct folio *folio);
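
A typical caller pattern for this pair (a hedged sketch, not taken from the file): isolate while holding a reference, operate on the folio off the LRU, then put it back, which also drops the reference taken by the isolation:

    /* Caller already holds a reference on the folio. */
    if (folio_isolate_lru(folio)) {
            /* Folio is off its LRU list; inspect or migrate it here. */

            /* Re-add it to the appropriate LRU and drop the isolate ref. */
            folio_putback_lru(folio);
    }
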
544 * caller passes in a non-large folio.
546 static inline void folio_set_order(struct folio *folio, unsigned int order)
548 if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
551 folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
553 folio->_folio_nr_pages = 1U << order;
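
With the lines that do not mention 'folio' filled back in, folio_set_order() reads roughly as below; note that _folio_nr_pages only exists on 64-bit (sketch):

    static inline void folio_set_order(struct folio *folio, unsigned int order)
    {
            if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
                    return;

            /* The order lives in the low byte of the second flags word. */
            folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
    #ifdef CONFIG_64BIT
            folio->_folio_nr_pages = 1U << order;
    #endif
    }
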
557 void folio_undo_large_rmappable(struct folio *folio);
561 struct folio *folio = (struct folio *)page;
563 folio_set_order(folio, order);
564 atomic_set(&folio->_entire_mapcount, -1);
565 atomic_set(&folio->_nr_pages_mapped, 0);
566 atomic_set(&folio->_pincount, 0);
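
Lines 561-566 appear to come from prep_compound_head(), which turns a head page into a large folio of the given order and resets its mapcount and pin counters; roughly (sketch):

    static inline void prep_compound_head(struct page *page, unsigned int order)
    {
            struct folio *folio = (struct folio *)page;

            folio_set_order(folio, order);
            atomic_set(&folio->_entire_mapcount, -1);  /* not mapped as a whole */
            atomic_set(&folio->_nr_pages_mapped, 0);   /* no PTE-mapped pages yet */
            atomic_set(&folio->_pincount, 0);          /* no FOLL_PIN references */
    }
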
719 struct anon_vma *folio_anon_vma(struct folio *folio);
722 void unmap_mapping_folio(struct folio *folio);
743 void mlock_folio(struct folio *folio);
744 static inline void mlock_vma_folio(struct folio *folio,
756 (compound || !folio_test_large(folio)))
757 mlock_folio(folio);
760 void munlock_folio(struct folio *folio);
761 static inline void munlock_vma_folio(struct folio *folio,
765 (compound || !folio_test_large(folio)))
766 munlock_folio(folio);
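
mlock_vma_folio() and munlock_vma_folio() share one shape: test the VMA flags, skip large folios unless they are mapped as a whole (compound), and hand off to the out-of-line worker. A sketch of both, as they look in trees where they still take a 'compound' argument:

    static inline void mlock_vma_folio(struct folio *folio,
                            struct vm_area_struct *vma, bool compound)
    {
            /* Skip VM_SPECIAL (e.g. VM_IO) VMAs and partially mapped large folios. */
            if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
                (compound || !folio_test_large(folio)))
                    mlock_folio(folio);
    }

    static inline void munlock_vma_folio(struct folio *folio,
                            struct vm_area_struct *vma, bool compound)
    {
            /* Always munlock on unmap from a VM_LOCKED VMA; reclaim corrects
             * any folio munlocked too eagerly. */
            if (unlikely(vma->vm_flags & VM_LOCKED) &&
                (compound || !folio_test_large(folio)))
                    munlock_folio(folio);
    }
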
769 void mlock_new_folio(struct folio *folio);
856 static inline void unmap_mapping_folio(struct folio *folio) { }
857 static inline void mlock_new_folio(struct folio *folio) { }
1044 struct folio *folio, loff_t fpos, size_t size);
1083 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
1134 * folio, that part is required to be marked exclusive.