Lines matching refs:lruvec

563  * @lruvec: lru vector
567 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
574 if (!mem_cgroup_disabled() && is_node_lruvec(lruvec)) {
576 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
589 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
595 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
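
The matches at 563-595 trace lruvec_lru_size(), which sums one per-zone LRU counter for every zone up to the requested zone index. A minimal standalone sketch of that accumulation, in which MAX_NR_ZONES and zone_lru_size[] are illustrative stand-ins for the kernel's per-zone counters:

    /* Sketch: sum per-zone LRU sizes up to zone_idx, as lruvec_lru_size()
     * does; zone_lru_size[] stands in for the kernel's counters. */
    #define MAX_NR_ZONES 4

    static unsigned long zone_lru_size[MAX_NR_ZONES]; /* hypothetical */

    static unsigned long model_lru_size(int zone_idx)
    {
        unsigned long size = 0;

        for (int zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++)
            size += zone_lru_size[zid]; /* one counter per zone */

        return size;
    }

The branches at 574-595 differ only in which per-zone counter is read (the memcg-aware one at 595 versus the node-wide one).
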
2187 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
2196 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
2221 * Isolate pages from the lruvec to fill the @dst list, scanning up to nr_to_scan pages.
2223 * lruvec->lru_lock is heavily contended. Some of the functions that
2233 * @lruvec: The LRU vector to pull pages from.
2242 struct lruvec *lruvec, struct list_head *dst,
2246 struct list_head *src = &lruvec->lists[lru];
2331 update_lru_sizes(lruvec, lru, nr_zone_taken);
2365 struct lruvec *lruvec;
2368 lruvec = folio_lruvec_lock_irq(folio);
2369 lruvec_del_folio(lruvec, folio);
2370 unlock_page_lruvec_irq(lruvec);
2425 * Returns the number of pages moved to the given lruvec.
2427 unsigned int move_folios_to_lru(struct lruvec *lruvec,
2443 spin_unlock_irq(&lruvec->lru_lock);
2445 spin_lock_irq(&lruvec->lru_lock);
2466 spin_unlock_irq(&lruvec->lru_lock);
2468 spin_lock_irq(&lruvec->lru_lock);
2476 * All pages were isolated from the same lruvec (and isolation
2479 VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio);
2480 lruvec_add_folio(lruvec, folio);
2488 lruvec = folio_lruvec(folio);
2489 workingset_age_nonresident(lruvec,
2492 workingset_age_nonresident(lruvec,
2498 workingset_age_nonresident(lruvec, nr_pages);
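
Lines 2427-2498 show move_folios_to_lru() putting an isolated batch back on the LRU while twice dropping lruvec->lru_lock (2443-2445, 2466-2468) around the freeing paths, and asserting at 2479 that every folio still matches the lruvec. A hedged pthread model of that drop-the-lock-around-slow-work shape; free_item() and add_to_lru() are hypothetical stand-ins:

    #include <pthread.h>

    struct item { struct item *next; int needs_free; };

    static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

    static void free_item(struct item *it) { (void)it; /* slow, lock not held */ }
    static void add_to_lru(struct item *it) { (void)it; /* fast, lock held */ }

    /* Drain @list under lru_lock, dropping the lock around each free,
     * as move_folios_to_lru() drops lruvec->lru_lock around freeing. */
    static unsigned int drain(struct item *list)
    {
        unsigned int moved = 0;

        pthread_mutex_lock(&lru_lock);
        while (list) {
            struct item *it = list;

            list = it->next;
            if (it->needs_free) {
                pthread_mutex_unlock(&lru_lock); /* never free under the lock */
                free_item(it);
                pthread_mutex_lock(&lru_lock);
                continue;
            }
            add_to_lru(it); /* still under the lock */
            moved++;
        }
        pthread_mutex_unlock(&lru_lock);
        return moved;
    }
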
2525 struct lruvec *lruvec, struct scan_control *sc,
2535 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2556 spin_lock_irq(&lruvec->lru_lock);
2558 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
2565 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
2568 spin_unlock_irq(&lruvec->lru_lock);
2575 spin_lock_irq(&lruvec->lru_lock);
2576 move_folios_to_lru(lruvec, &folio_list);
2582 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
2584 spin_unlock_irq(&lruvec->lru_lock);
2590 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
2592 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed);
2655 struct lruvec *lruvec,
2668 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2672 spin_lock_irq(&lruvec->lru_lock);
2674 nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
2681 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2683 spin_unlock_irq(&lruvec->lru_lock);
2732 spin_lock_irq(&lruvec->lru_lock);
2734 nr_activate = move_folios_to_lru(lruvec, &l_active);
2735 nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
2740 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
2743 spin_unlock_irq(&lruvec->lru_lock);
2746 lru_note_cost(lruvec, file, 0, nr_rotated);
2811 struct lruvec *lruvec, struct scan_control *sc)
2815 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2821 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2852 bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2859 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2860 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
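
Lines 2852-2860 reference inactive_is_low(), which deactivates only while the inactive list is small relative to the active list; the target ratio grows roughly as the square root of the total size in gigabytes, floored at 1. A standalone model of that comparison; it assumes 4 KiB pages and uses libm sqrt() where the kernel derives the shift from PAGE_SHIFT and uses int_sqrt():

    #include <math.h>
    #include <stdbool.h>

    /* Model of inactive_is_low(): target inactive:active ratio is
     * sqrt(10 * total-in-GiB), floored at 1. Assumes 4 KiB pages,
     * so pages >> 18 converts to GiB (the kernel uses 30 - PAGE_SHIFT). */
    static bool model_inactive_is_low(unsigned long inactive, unsigned long active)
    {
        unsigned long gb = (inactive + active) >> 18;
        unsigned long ratio = gb ? (unsigned long)sqrt(10.0 * gb) : 1;

        return inactive * ratio < active;
    }
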
2874 struct lruvec *target_lruvec;
2883 * lruvec stats for heuristics.
2984 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2987 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2988 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3079 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
3222 #define DEFINE_MAX_SEQ(lruvec) \
3223 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
3225 #define DEFINE_MIN_SEQ(lruvec) \
3227 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
3228 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
3239 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
3245 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
3248 if (!lruvec->pgdat)
3249 lruvec->pgdat = pgdat;
3251 return lruvec;
3259 static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
3261 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3262 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
3274 static int get_nr_gens(struct lruvec *lruvec, int type)
3276 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
3279 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
3282 return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
3283 get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
3284 get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
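
Lines 3222-3284 are the MGLRU sequence machinery: DEFINE_MAX_SEQ/DEFINE_MIN_SEQ snapshot the counters with READ_ONCE, get_nr_gens() derives the generation count as max_seq - min_seq + 1, and seq_is_valid() checks the window bounds. A self-contained model of that arithmetic, using the upstream bounds MIN_NR_GENS = 2 and MAX_NR_GENS = 4:

    #include <stdbool.h>

    #define MIN_NR_GENS 2
    #define MAX_NR_GENS 4

    enum { GEN_ANON, GEN_FILE, GEN_TYPES };

    struct gen_window {
        unsigned long max_seq;            /* youngest generation, shared */
        unsigned long min_seq[GEN_TYPES]; /* oldest generation, per type */
    };

    static int nr_gens(const struct gen_window *w, int type)
    {
        return (int)(w->max_seq - w->min_seq[type] + 1);
    }

    /* Mirrors the seq_is_valid() invariant at 3279-3284: both windows
     * stay within [MIN_NR_GENS, MAX_NR_GENS] and file never has more
     * generations than anon. */
    static bool window_is_valid(const struct gen_window *w)
    {
        return nr_gens(w, GEN_FILE) >= MIN_NR_GENS &&
               nr_gens(w, GEN_FILE) <= nr_gens(w, GEN_ANON) &&
               nr_gens(w, GEN_ANON) <= MAX_NR_GENS;
    }
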
3330 static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
3336 filter = READ_ONCE(lruvec->mm_state.filters[gen]);
3345 static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
3351 filter = READ_ONCE(lruvec->mm_state.filters[gen]);
3363 static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
3368 filter = lruvec->mm_state.filters[gen];
3376 WRITE_ONCE(lruvec->mm_state.filters[gen], filter);
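
Lines 3330-3376 reference the mm-walk Bloom filters: two bitmaps indexed by the low bit of the sequence number, so the filter for the current walk can be tested while the next walk's filter is being populated, and reset_bloom_filter() installs a cleared bitmap for a new sequence. A self-contained sketch with two hash probes; the size and the hash mixing are illustrative, not the kernel's (which sizes by BLOOM_FILTER_SHIFT and hashes with hash_ptr()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define FILTER_BITS 15 /* illustrative capacity */
    #define FILTER_WORDS ((1u << FILTER_BITS) / 64)

    static uint64_t filters[2][FILTER_WORDS]; /* indexed by seq & 1 */

    static void two_hashes(const void *item, uint32_t *h1, uint32_t *h2)
    {
        uintptr_t p = (uintptr_t)item;

        *h1 = (uint32_t)(p * 0x9E3779B9u) >> (32 - FILTER_BITS);
        *h2 = (uint32_t)(p * 0x85EBCA6Bu) >> (32 - FILTER_BITS);
    }

    static void filter_add(unsigned long seq, const void *item)
    {
        uint32_t h1, h2;
        uint64_t *f = filters[seq & 1];

        two_hashes(item, &h1, &h2);
        f[h1 / 64] |= 1ull << (h1 % 64);
        f[h2 / 64] |= 1ull << (h2 % 64);
    }

    static bool filter_test(unsigned long seq, const void *item)
    {
        uint32_t h1, h2;
        uint64_t *f = filters[seq & 1];

        two_hashes(item, &h1, &h2);
        return (f[h1 / 64] >> (h1 % 64) & 1) && (f[h2 / 64] >> (h2 % 64) & 1);
    }

    static void filter_reset(unsigned long seq)
    {
        memset(filters[seq & 1], 0, sizeof(filters[0]));
    }

In the listing the items are PMD pointers: a walk records PMDs that had young entries (3345, 4190) so the next walk descends only into those (4179).
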
3413 struct lruvec *lruvec = get_lruvec(memcg, nid);
3416 if (lruvec->mm_state.tail == &mm_list->fifo)
3417 lruvec->mm_state.tail = &mm->lru_gen.list;
3442 struct lruvec *lruvec = get_lruvec(memcg, nid);
3445 if (lruvec->mm_state.head == &mm->lru_gen.list)
3446 lruvec->mm_state.head = lruvec->mm_state.head->prev;
3449 if (lruvec->mm_state.tail == &mm->lru_gen.list)
3450 lruvec->mm_state.tail = lruvec->mm_state.tail->next;
3493 static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
3498 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
3504 WRITE_ONCE(lruvec->mm_state.stats[hist][i],
3505 lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]);
3511 hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
3514 WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0);
3522 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3542 static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
3548 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3550 struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
3595 reset_mm_stats(lruvec, walk, last);
3600 reset_bloom_filter(lruvec, walk->max_seq + 1);
3610 static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
3613 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3615 struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
3625 reset_mm_stats(lruvec, NULL, true);
3663 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
3666 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3678 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
3681 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3685 lockdep_assert_held(&lruvec->lru_lock);
3755 static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
3758 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3779 lru_gen_update_size(lruvec, folio, old_gen, new_gen);
3800 static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
3803 struct lru_gen_folio *lrugen = &lruvec->lrugen;
3818 if (lru_gen_is_active(lruvec, gen))
3820 __update_lru_size(lruvec, lru, zone, delta);
3972 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3973 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
4040 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
4041 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
4153 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
4179 if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
4190 update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
4241 static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
4250 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4255 DEFINE_MAX_SEQ(lruvec);
4277 spin_lock_irq(&lruvec->lru_lock);
4278 reset_batch_size(lruvec, walk);
4279 spin_unlock_irq(&lruvec->lru_lock);
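
Lines 4241-4279 show walk_mm() taking lruvec->lru_lock only to flush the walk's batched counters through reset_batch_size(); the deltas themselves are accumulated lock-free in the walk structure (see 3800-3820). A hedged model of that accumulate-then-flush shape, with NR_BUCKETS standing in for the kernel's (generation, type, zone) index space:

    #include <pthread.h>

    #define NR_BUCKETS 8 /* stand-in for (gen, type, zone) combinations */

    static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
    static long lru_size[NR_BUCKETS]; /* shared, written under lru_lock */

    struct walk { long nr_pages[NR_BUCKETS]; /* private deltas, lock-free */ };

    /* Called throughout the page-table walk, without the lock. */
    static void batch_account(struct walk *w, int bucket, long delta)
    {
        w->nr_pages[bucket] += delta;
    }

    /* Like reset_batch_size(): apply and clear all batched deltas in
     * one short critical section. */
    static void batch_flush(struct walk *w)
    {
        pthread_mutex_lock(&lru_lock);
        for (int i = 0; i < NR_BUCKETS; i++) {
            lru_size[i] += w->nr_pages[i];
            w->nr_pages[i] = 0;
        }
        pthread_mutex_unlock(&lru_lock);
    }
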
4318 static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
4322 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4340 new_gen = folio_inc_gen(lruvec, folio, false);
4348 reset_ctrl_pos(lruvec, type, true);
4354 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
4358 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4359 DEFINE_MIN_SEQ(lruvec);
4361 VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
4389 reset_ctrl_pos(lruvec, type, true);
4397 static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
4401 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4403 spin_lock_irq(&lruvec->lru_lock);
4405 VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
4408 if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
4413 if (inc_min_seq(lruvec, type, can_swap))
4416 spin_unlock_irq(&lruvec->lru_lock);
4439 __update_lru_size(lruvec, lru, zone, delta);
4440 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
4445 reset_ctrl_pos(lruvec, type, false);
4451 spin_unlock_irq(&lruvec->lru_lock);
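
Lines 4318-4451 cover aging. inc_max_seq() may only create a new generation once no type is already at MAX_NR_GENS wide (4408), so it first retires the oldest generations via inc_min_seq() (4413), all under lruvec->lru_lock. A sketch of that ordering, repeating the window type from the earlier sketch so it stays self-contained; folio migration and statistics are elided:

    #define MAX_NR_GENS 4

    enum { GEN_ANON, GEN_FILE, GEN_TYPES };

    struct gen_window { unsigned long max_seq, min_seq[GEN_TYPES]; };

    static int nr_gens(const struct gen_window *w, int type)
    {
        return (int)(w->max_seq - w->min_seq[type] + 1);
    }

    /* Sketch of the inc_max_seq() ordering: make room first, then
     * publish the new youngest generation. */
    static void model_inc_max_seq(struct gen_window *w)
    {
        for (int type = 0; type < GEN_TYPES; type++) {
            if (nr_gens(w, type) == MAX_NR_GENS)
                w->min_seq[type]++; /* inc_min_seq(): retire the oldest */
        }
        w->max_seq++;
    }
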
4454 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
4460 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4465 if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) {
4477 success = iterate_mm_list_nowalk(lruvec, max_seq);
4483 success = iterate_mm_list_nowalk(lruvec, max_seq);
4487 walk->lruvec = lruvec;
4493 success = iterate_mm_list(lruvec, walk, &mm);
4495 walk_mm(lruvec, mm, walk);
4499 inc_max_seq(lruvec, can_swap, force_scan);
4508 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
4512 bool can_swap = get_swappiness(lruvec, sc);
4513 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4514 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4515 DEFINE_MAX_SEQ(lruvec);
4516 DEFINE_MIN_SEQ(lruvec);
4533 static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc,
4538 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4539 DEFINE_MIN_SEQ(lruvec);
4543 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
4548 if (!lruvec_is_sizable(lruvec, sc))
4572 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4574 if (lruvec_is_reclaimable(lruvec, sc, min_ttl)) {
4623 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4624 DEFINE_MAX_SEQ(lruvec);
4707 update_bloom_filter(lruvec, max_seq, pvmw->pmd);
4725 static int lru_gen_memcg_seg(struct lruvec *lruvec)
4727 return READ_ONCE(lruvec->lrugen.seg);
4730 static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
4736 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4740 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
4743 new = old = lruvec->lrugen.gen;
4757 WRITE_ONCE(lruvec->lrugen.seg, seg);
4758 WRITE_ONCE(lruvec->lrugen.gen, new);
4760 hlist_nulls_del_rcu(&lruvec->lrugen.list);
4763 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4765 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4784 struct lruvec *lruvec = get_lruvec(memcg, nid);
4788 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
4792 lruvec->lrugen.gen = gen;
4794 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
4806 struct lruvec *lruvec = get_lruvec(memcg, nid);
4808 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD);
4819 struct lruvec *lruvec = get_lruvec(memcg, nid);
4823 if (hlist_nulls_unhashed(&lruvec->lrugen.list))
4826 gen = lruvec->lrugen.gen;
4828 hlist_nulls_del_init_rcu(&lruvec->lrugen.list);
4840 struct lruvec *lruvec = get_lruvec(memcg, nid);
4843 if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD)
4844 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD);
4849 static int lru_gen_memcg_seg(struct lruvec *lruvec)
4860 static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc,
4870 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4876 success = lru_gen_del_folio(lruvec, folio, true);
4879 lruvec_add_folio(lruvec, folio);
4886 success = lru_gen_del_folio(lruvec, folio, true);
4889 lruvec_add_folio_tail(lruvec, folio);
4903 gen = folio_inc_gen(lruvec, folio, false);
4913 gen = folio_inc_gen(lruvec, folio, false);
4921 gen = folio_inc_gen(lruvec, folio, true);
4929 static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc)
4957 success = lru_gen_del_folio(lruvec, folio, true);
4963 static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
4973 struct lru_gen_folio *lrugen = &lruvec->lrugen;
4974 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4978 if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
5000 if (sort_folio(lruvec, folio, sc, tier))
5002 else if (isolate_folio(lruvec, folio, sc)) {
5039 static int get_tier_idx(struct lruvec *lruvec, int type)
5049 read_ctrl_pos(lruvec, type, 0, 1, &sp);
5051 read_ctrl_pos(lruvec, type, tier, 2, &pv);
5059 static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx)
5071 read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
5072 read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
5075 read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
5077 read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
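
Lines 5039-5077 reference the feedback loop that picks a tier and a type: read_ctrl_pos() summarizes each candidate as refault and scan counts plus a gain, and two candidates are compared by cross-multiplying the rates so no division is needed. This model reproduces the upstream comparison; MIN_BATCH is a smoothing term (MIN_LRU_BATCH in the kernel, value illustrative here):

    #include <stdbool.h>

    #define MIN_BATCH 64 /* smoothing term; illustrative value */

    struct ctrl_pos { unsigned long refaulted, total, gain; };

    /* True if @pv has few refaults, or a gain-weighted refault rate no
     * higher than @sp's; + MIN_BATCH damps noisy small samples. */
    static bool positive_ctrl_err(const struct ctrl_pos *sp, const struct ctrl_pos *pv)
    {
        return pv->refaulted < MIN_BATCH ||
               pv->refaulted * (sp->total + MIN_BATCH) * sp->gain <=
               sp->refaulted * (pv->total + MIN_BATCH) * pv->gain;
    }

get_tier_idx() (5039-5051) keeps raising the tier while this test holds against tier 0, and get_type_to_scan() (5059-5077) uses the same test to arbitrate between anon and file.
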
5087 static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
5094 DEFINE_MIN_SEQ(lruvec);
5110 type = get_type_to_scan(lruvec, swappiness, &tier);
5114 tier = get_tier_idx(lruvec, type);
5116 scanned = scan_folios(lruvec, sc, type, tier, list);
5129 static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
5142 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5143 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
5145 spin_lock_irq(&lruvec->lru_lock);
5147 scanned = isolate_folios(lruvec, sc, swappiness, &type, &list);
5149 scanned += try_to_inc_min_seq(lruvec, swappiness);
5151 if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
5154 spin_unlock_irq(&lruvec->lru_lock);
5191 spin_lock_irq(&lruvec->lru_lock);
5193 move_folios_to_lru(lruvec, &list);
5197 reset_batch_size(lruvec, walk);
5205 spin_unlock_irq(&lruvec->lru_lock);
5221 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
5228 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5229 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5230 DEFINE_MIN_SEQ(lruvec);
5232 /* whether this lruvec is completely out of cold folios */
5293 static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap)
5296 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5297 DEFINE_MAX_SEQ(lruvec);
5302 if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
5309 /* skip this lruvec as it's low on cold folios */
5310 return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? -1 : 0;
5313 static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
5333 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
5344 static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5348 int swappiness = get_swappiness(lruvec, sc);
5357 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
5361 delta = evict_folios(lruvec, sc, swappiness);
5369 if (should_abort_scan(lruvec, sc))
5375 /* whether this lruvec should be rotated */
5379 static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
5384 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5385 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
5394 if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL)
5400 success = try_to_shrink_lruvec(lruvec, sc);
5413 if (!success && lruvec_is_sizable(lruvec, sc))
5417 return lru_gen_memcg_seg(lruvec) != MEMCG_LRU_TAIL ?
5429 struct lruvec *lruvec;
5444 lru_gen_rotate_memcg(lruvec, op);
5454 lruvec = container_of(lrugen, struct lruvec, lrugen);
5455 memcg = lruvec_memcg(lruvec);
5465 op = shrink_one(lruvec, sc);
5469 if (should_abort_scan(lruvec, sc))
5476 lru_gen_rotate_memcg(lruvec, op);
5494 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5507 if (try_to_shrink_lruvec(lruvec, sc))
5508 lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG);
5524 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
5536 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
5546 if (get_swappiness(lruvec, sc))
5601 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
5603 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5609 if (!list_empty(&lruvec->lists[lru]))
5624 static bool fill_evictable(struct lruvec *lruvec)
5632 struct list_head *head = &lruvec->lists[lru];
5643 lruvec_del_folio(lruvec, folio);
5644 success = lru_gen_add_folio(lruvec, folio, false);
5655 static bool drain_evictable(struct lruvec *lruvec)
5661 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];
5672 success = lru_gen_del_folio(lruvec, folio, false);
5674 lruvec_add_folio(lruvec, folio);
5708 struct lruvec *lruvec = get_lruvec(memcg, nid);
5710 spin_lock_irq(&lruvec->lru_lock);
5712 VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
5713 VM_WARN_ON_ONCE(!state_is_valid(lruvec));
5715 lruvec->lrugen.enabled = enabled;
5717 while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
5718 spin_unlock_irq(&lruvec->lru_lock);
5720 spin_lock_irq(&lruvec->lru_lock);
5723 spin_unlock_irq(&lruvec->lru_lock);
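
Lines 5601-5723 show how toggling lru_gen converts folios between the classic lists and the generation lists: fill_evictable()/drain_evictable() process a bounded batch and return false while work remains, and the caller releases lru_lock between batches (5717-5720) to keep the critical sections short. A pthread model of that yield loop; do_one_batch() is a toy stand-in:

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>

    static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Toy stand-in: converts one bounded batch, true when nothing is left. */
    static bool do_one_batch(void)
    {
        static int batches_left = 3;

        return --batches_left <= 0;
    }

    static void convert_all(void)
    {
        pthread_mutex_lock(&lru_lock);
        while (!do_one_batch()) {
            pthread_mutex_unlock(&lru_lock); /* bound lock hold time */
            sched_yield();                   /* stands in for cond_resched() */
            pthread_mutex_lock(&lru_lock);
        }
        pthread_mutex_unlock(&lru_lock);
    }
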
5871 static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
5878 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5911 n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
5914 n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
5927 struct lruvec *lruvec = v;
5928 struct lru_gen_folio *lrugen = &lruvec->lrugen;
5929 int nid = lruvec_pgdat(lruvec)->node_id;
5930 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5931 DEFINE_MAX_SEQ(lruvec);
5932 DEFINE_MIN_SEQ(lruvec);
5956 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
5973 lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
5986 static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
5989 DEFINE_MAX_SEQ(lruvec);
5990 DEFINE_MIN_SEQ(lruvec);
6001 try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan);
6006 static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
6009 DEFINE_MAX_SEQ(lruvec);
6017 DEFINE_MIN_SEQ(lruvec);
6025 if (!evict_folios(lruvec, sc, swappiness))
6037 struct lruvec *lruvec;
6060 lruvec = get_lruvec(memcg, nid);
6063 swappiness = get_swappiness(lruvec, sc);
6069 err = run_aging(lruvec, seq, sc, swappiness, opt);
6072 err = run_eviction(lruvec, seq, sc, swappiness, opt);
6178 void lru_gen_init_lruvec(struct lruvec *lruvec)
6182 struct lru_gen_folio *lrugen = &lruvec->lrugen;
6193 lruvec->mm_state.seq = MIN_NR_GENS;
6224 struct lruvec *lruvec = get_lruvec(memcg, nid);
6226 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
6227 sizeof(lruvec->lrugen.nr_pages)));
6229 lruvec->lrugen.list.next = LIST_POISON1;
6232 bitmap_free(lruvec->mm_state.filters[i]);
6233 lruvec->mm_state.filters[i] = NULL;
6261 static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
6271 void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
6283 lru_gen_shrink_lruvec(lruvec, sc);
6287 get_scan_count(lruvec, sc, nr);
6318 lruvec, sc);
6383 if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) &&
6384 inactive_is_low(lruvec, LRU_INACTIVE_ANON))
6385 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
6467 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6504 shrink_lruvec(lruvec, sc);
6521 struct lruvec *target_lruvec;
6605 * Stall direct reclaim for IO completions if the lruvec is
6798 struct lruvec *target_lruvec;
6801 struct lruvec *lruvec;
6808 lruvec = node_lruvec(pgdat);
6809 lruvec->refaults[0] = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE_ANON); /* modified */
6810 lruvec->refaults[1] = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE_FILE); /* modified */
6880 struct lruvec *lruvec;
6882 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
6884 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
7105 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
7134 nr[LRU_ACTIVE_ANON] = lruvec_lru_size(lruvec,
7136 nr[LRU_INACTIVE_ANON] = lruvec_lru_size(lruvec,
7142 shrink_lruvec(lruvec, &sc);
7195 struct lruvec *lruvec;
7205 lruvec = mem_cgroup_lruvec(NULL, pgdat);
7206 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
7211 lruvec = mem_cgroup_lruvec(memcg, pgdat);
7212 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
7284 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
7286 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags);
7287 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
8159 struct lruvec *lruvec = NULL;
8174 lruvec = folio_lruvec_relock_irq(folio, lruvec);
8176 lruvec_del_folio(lruvec, folio);
8178 lruvec_add_folio(lruvec, folio);
8184 if (lruvec) {
8187 unlock_page_lruvec_irq(lruvec);
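
Lines 8159-8187 show the relock idiom: folio_lruvec_relock_irq() keeps the current lruvec's lock across consecutive folios and switches locks only when the next folio belongs to a different lruvec; the trailing check at 8184-8187 drops whichever lock is still held. A hedged model over hypothetical per-bucket mutexes:

    #include <pthread.h>
    #include <stddef.h>

    #define NR_BUCKETS 4

    struct bucket { pthread_mutex_t lock; }; /* per-lruvec lock stand-in */

    static struct bucket buckets[NR_BUCKETS] = {
        { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
        { PTHREAD_MUTEX_INITIALIZER }, { PTHREAD_MUTEX_INITIALIZER },
    };

    /* Like folio_lruvec_relock_irq(): return @want locked, releasing
     * @held first only if it is a different bucket. */
    static struct bucket *relock(struct bucket *held, struct bucket *want)
    {
        if (held == want)
            return held; /* already holding the right lock */
        if (held)
            pthread_mutex_unlock(&held->lock);
        pthread_mutex_lock(&want->lock);
        return want;
    }

    static void process(const int *bucket_of, int n)
    {
        struct bucket *held = NULL;

        for (int i = 0; i < n; i++) {
            held = relock(held, &buckets[bucket_of[i]]);
            /* ... operate on item i under its bucket lock ... */
        }
        if (held) /* mirrors the trailing unlock at 8184-8187 */
            pthread_mutex_unlock(&held->lock);
    }

Batching this way avoids one unlock/lock pair per folio whenever runs of folios share a lruvec.
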
8203 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdata);
8205 shrink_list(LRU_ACTIVE_PURGEABLE, -1, lruvec, sc);
8206 nr += shrink_list(LRU_INACTIVE_PURGEABLE, -1, lruvec, sc);