
Searched refs:reclaiming (Results 1 - 2 of 2) sorted by relevance

/kernel/linux/linux-6.6/include/linux/mm_inline.h
  224  static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)  in lru_gen_add_folio() argument
  255  else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)  in lru_gen_add_folio()
  267  if (reclaiming)  in lru_gen_add_folio()
  275  static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)  in lru_gen_del_folio() argument
  287  flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;  in lru_gen_del_folio()
  309  static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)  in lru_gen_add_folio() argument
  314  static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)  in lru_gen_del_folio() argument
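The mm_inline.h hits are the multi-gen LRU add/remove helpers, where the reclaiming flag influences which generation a folio joins, whether it is queued at the head or tail of that generation's list, and whether PG_active is restored on removal. The following is a minimal user-space sketch of that logic, not kernel code: gen_state, pick_seq(), placement() and restore_active() are invented names, MIN_NR_GENS is reused from the kernel, and the effects of the branches at lines 255 and 267 are assumptions based on the mainline implementation, since their bodies are not part of the snippet.

    /*
     * Minimal user-space sketch (not kernel code) of how the "reclaiming"
     * flag steers the helpers above.  All names except MIN_NR_GENS are
     * invented for illustration.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_NR_GENS 2                   /* minimum number of generations */

    struct gen_state {
            unsigned long max_seq;          /* youngest generation */
            unsigned long min_seq;          /* oldest generation */
    };

    /*
     * Condition shown at line 255: when adding during reclaim, or when too
     * few generations exist, the folio is assumed to join the oldest
     * generation; otherwise a younger one (the branch bodies are not shown).
     */
    static unsigned long pick_seq(const struct gen_state *gs, bool reclaiming)
    {
            if (reclaiming || gs->min_seq + MIN_NR_GENS >= gs->max_seq)
                    return gs->min_seq;     /* assumed: oldest generation */
            return gs->min_seq + 1;         /* assumed: a younger generation */
    }

    /* Line 267: during reclaim the folio is presumably queued at the tail. */
    static const char *placement(bool reclaiming)
    {
            return reclaiming ? "tail" : "head";
    }

    /*
     * Line 287 (shown in full): on removal, PG_active is restored only when
     * the folio is not being reclaimed and its generation was active.
     */
    static bool restore_active(bool reclaiming, bool gen_is_active)
    {
            return !reclaiming && gen_is_active;
    }

    int main(void)
    {
            struct gen_state gs = { .max_seq = 8, .min_seq = 4 };

            printf("add (reclaiming): seq=%lu, %s\n", pick_seq(&gs, true), placement(true));
            printf("add (normal):     seq=%lu, %s\n", pick_seq(&gs, false), placement(false));
            printf("del (reclaiming, active gen): PG_active=%d\n", restore_active(true, true));
            printf("del (normal, active gen):     PG_active=%d\n", restore_active(false, true));
            return 0;
    }

The real helpers also update per-zone size counters and the folio's flags word; the sketch only covers the decisions that the reclaiming flag drives.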
/kernel/linux/linux-6.6/mm/vmscan.c
   450  * overestimating the reclaimed amount (potentially under-reclaiming).  in flush_reclaim_state()
   452  * Only count such pages for global reclaim to prevent under-reclaiming  in flush_reclaim_state()
  1811  * Before reclaiming the folio, try to relocate  in shrink_folio_list()
  2310  * this disrupts the LRU order when reclaiming for lower zones but  in isolate_lru_folios()
  2604  * pressure reclaiming all the clean cache. And in some cases,  in shrink_inactive_list()
  3047  * proportional to the cost of reclaiming each list, as  in get_scan_count()
  3755  static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)  in folio_inc_gen() argument
  3775  if (reclaiming)  in folio_inc_gen()
  6293  * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal  in shrink_lruvec()
  6298  * reclaiming implie  in shrink_lruvec()
  [all...]
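The strongest vmscan.c hit is folio_inc_gen(), which takes the same reclaiming flag; the snippet only shows the parameter and the if (reclaiming) test at line 3775. Below is a small sketch, again not kernel code, of a folio_inc_gen()-style update: advance the generation encoded in an atomic flags word with a compare-and-swap loop and, when called from reclaim, additionally set a reclaim bit. The bit layout (GEN_SHIFT, GEN_MASK, FLAG_RECLAIM) and the reclaim-bit side effect are assumptions made for illustration.

    /*
     * Minimal sketch (not kernel code) of a folio_inc_gen()-style update.
     * The bit layout and FLAG_RECLAIM are made up for illustration.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NR_GENS   4UL
    #define GEN_SHIFT     8
    #define GEN_MASK      (0xfUL << GEN_SHIFT)  /* made-up generation field */
    #define FLAG_RECLAIM  (1UL << 0)            /* stand-in for PG_reclaim */

    /* Advance the generation stored in *flags; returns the new generation. */
    static unsigned long inc_gen(_Atomic unsigned long *flags, bool reclaiming)
    {
            unsigned long old = atomic_load(flags);
            unsigned long new_gen, new_flags;

            do {
                    unsigned long old_gen = (old & GEN_MASK) >> GEN_SHIFT;

                    new_gen = (old_gen + 1) % MAX_NR_GENS;
                    new_flags = (old & ~GEN_MASK) | (new_gen << GEN_SHIFT);
                    if (reclaiming)
                            new_flags |= FLAG_RECLAIM;  /* assumed side effect */
            } while (!atomic_compare_exchange_weak(flags, &old, new_flags));

            return new_gen;
    }

    int main(void)
    {
            _Atomic unsigned long flags = 2UL << GEN_SHIFT; /* start in gen 2 */
            unsigned long gen;

            gen = inc_gen(&flags, false);
            printf("normal:  gen=%lu flags=0x%lx\n", gen, atomic_load(&flags));

            gen = inc_gen(&flags, true);
            printf("reclaim: gen=%lu flags=0x%lx\n", gen, atomic_load(&flags));
            return 0;
    }

In the kernel the generation lives in dedicated bits of folio->flags; setting PG_reclaim on the reclaim path is, as far as I recall, what lets writeback completion rotate the folio, but that detail is not visible in the hits above.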
