Lines matching refs: lruvec
222 * @lruvec: lru vector
226 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
232 if (!mem_cgroup_disabled() && is_node_lruvec(lruvec)) {
234 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
246 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
252 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
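Note: lines 222-252 are lruvec_lru_size(), which returns the size of one LRU list summed over the zones up to zone_idx. The is_node_lruvec() branch at line 232 is not upstream; mainline (around v5.4-v5.9) computes the sum in a single loop, roughly as below. Treat this as a sketch of the logic, not this tree's exact code:

    /* Sketch of upstream lruvec_lru_size(): sum zones 0..zone_idx. */
    unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                  int zone_idx)
    {
            unsigned long size = 0;
            int zid;

            for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
                    struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

                    if (!managed_zone(zone))
                            continue;

                    if (!mem_cgroup_disabled())
                            size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
                    else
                            size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
            }
            return size;
    }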
1560 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1569 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1585 * @lruvec: The LRU vector to pull pages from.
1594 struct lruvec *lruvec, struct list_head *dst,
1598 struct list_head *src = &lruvec->lists[lru];
1676 update_lru_sizes(lruvec, lru, nr_zone_taken);
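Note: isolate_lru_pages() (lines 1585-1676) detaches up to nr_to_scan pages from &lruvec->lists[lru] onto the caller-supplied dst list, counting per zone how many were taken; update_lru_sizes() (lines 1560-1569) then debits those counts from the LRU accounting. A sketch of the bookkeeping helper, matching the upstream shape:

    static __always_inline void update_lru_sizes(struct lruvec *lruvec,
                            enum lru_list lru, unsigned long *nr_zone_taken)
    {
            int zid;

            for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                    if (!nr_zone_taken[zid])
                            continue;

                    /* Negative delta: the pages now sit on a private list. */
                    update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
            }
    }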
1715 struct lruvec *lruvec;
1718 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1723 del_page_from_lru_list(page, lruvec, lru);
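Note: lines 1715-1723 are the single-page isolate path. The page's own lruvec must be resolved via mem_cgroup_page_lruvec() because the owning memcg determines which lruvec the page sits on. The core, with locking and error paths elided (a sketch, not this tree's exact code):

    /* Single-page isolation under pgdat->lru_lock (sketch). */
    if (PageLRU(page)) {
            struct lruvec *lruvec = mem_cgroup_page_lruvec(page, pgdat);
            int lru = page_lru(page);

            get_page(page);
            ClearPageLRU(page);
            del_page_from_lru_list(page, lruvec, lru);
            ret = 0;
    }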
1785 * Returns the number of pages moved to the given lruvec.
1788 unsigned move_pages_to_lru(struct lruvec *lruvec, struct list_head *list)
1790 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1810 lruvec = mem_cgroup_page_lruvec(page, pgdat);
1816 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
1817 list_move(&page->lru, &lruvec->lists[lru]);
1822 del_page_from_lru_list(page, lruvec, lru);
1837 lruvec = node_lruvec(pgdat);
1838 workingset_age_nonresident(lruvec,
1841 workingset_age_nonresident(lruvec,
1847 workingset_age_nonresident(lruvec, nr_pages);
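Note: move_pages_to_lru() (lines 1785-1847) is the putback half of reclaim. Each surviving page is re-looked-up at line 1810 because pages on the private list may belong to different memcgs; lines 1816-1817 credit the LRU size and splice the page back, line 1822 unlinks pages that dropped their last reference, and lines 1837-1847 age nonresident workingset state for pages that stayed resident. Per-page loop, simplified (unevictable and free-page handling elided; thp_nr_pages() is hpage_nr_pages() on older trees):

    /* Putback loop, simplified; assumes pgdat->lru_lock is held. */
    while (!list_empty(list)) {
            struct page *page = lru_to_page(list);
            enum lru_list lru;
            int nr_pages;

            list_del(&page->lru);
            lruvec = mem_cgroup_page_lruvec(page, pgdat);
            SetPageLRU(page);

            lru = page_lru(page);
            nr_pages = thp_nr_pages(page);
            update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
            list_move(&page->lru, &lruvec->lists[lru]);
            nr_moved += nr_pages;
    }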
1877 unsigned long shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1887 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1910 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1917 __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
1929 move_pages_to_lru(lruvec, &page_list);
1936 lru_note_cost(lruvec, file, stat.nr_pageout);
1938 lru_note_cost(lruvec, file, stat.nr_pageout);
1945 __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
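Note: shrink_inactive_list() (lines 1877-1945) is the core reclaim pipeline: isolate a batch (line 1910), account the scan against the memcg (line 1917), run the batch through page reclaim, put survivors back (line 1929), charge the pageout cost so the anon/file balance adapts (lines 1936/1938, apparently two variants in this tree), and account the reclaimed count (line 1945). Control-flow sketch following the upstream v5.8-v5.9 shape (shrink_page_list() internals elided):

    spin_lock_irq(&pgdat->lru_lock);
    nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
                                 &nr_scanned, sc, lru);
    spin_unlock_irq(&pgdat->lru_lock);

    nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);

    spin_lock_irq(&pgdat->lru_lock);
    move_pages_to_lru(lruvec, &page_list);  /* survivors rejoin the LRU */
    spin_unlock_irq(&pgdat->lru_lock);

    lru_note_cost(lruvec, file, stat.nr_pageout);  /* bias anon/file balance */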
1982 struct lruvec *lruvec,
1996 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2002 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
2009 __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
2059 nr_activate = move_pages_to_lru(lruvec, &l_active);
2060 nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
2065 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate);
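Note: shrink_active_list() (lines 1982-2065) ages pages rather than freeing them: isolate from the active list (line 2002), account PGREFILL (line 2009), sort the batch into l_active (referenced executable file pages stay hot) and l_inactive, requeue both halves (lines 2059-2060), and report PGDEACTIVATE (line 2065). Sorting loop, simplified from upstream:

    /* Sort one isolated batch (sketch; rmap locking elided). */
    while (!list_empty(&l_hold)) {
            struct page *page = lru_to_page(&l_hold);

            cond_resched();
            list_del(&page->lru);

            /* Referenced executable file pages stay on the active list. */
            if (page_referenced(page, 0, sc->target_mem_cgroup, &vm_flags) &&
                (vm_flags & VM_EXEC) && page_is_file_lru(page)) {
                    list_add(&page->lru, &l_active);
                    continue;
            }

            ClearPageActive(page);
            list_add(&page->lru, &l_inactive);
    }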
2131 struct lruvec *lruvec, struct scan_control *sc)
2142 shrink_active_list(nr_to_scan, lruvec, sc, lru);
2152 nr_reclaimed = shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
2156 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
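Note: shrink_list() (lines 2131-2156) routes one scan request: active lists are only aged into their inactive counterparts when the inactive side has become too small, while inactive lists go straight to shrink_inactive_list(). The two separate shrink_inactive_list() calls (lines 2152 and 2156) suggest an extra branch in this tree; the dispatch is roughly as below (the gating condition varies by version; newer upstream gates on sc->may_deactivate instead):

    static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
                                     struct lruvec *lruvec, struct scan_control *sc)
    {
            if (is_active_lru(lru)) {
                    if (inactive_is_low(lruvec, lru - LRU_ACTIVE))
                            shrink_active_list(nr_to_scan, lruvec, sc, lru);
                    return 0;
            }

            return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
    }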
2188 bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
2195 inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
2196 active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
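Note: inactive_is_low() (lines 2188-2196) decides when the active list should be aged. The inactive size is weighted by a ratio that grows with the square root of the total list size, so larger lruvecs keep a proportionally smaller inactive list. This matches the upstream v5.8 implementation (this tree exports it non-static):

    bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
    {
            enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
            unsigned long inactive, active;
            unsigned long inactive_ratio;
            unsigned long gb;

            inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
            active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);

            /* Ratio scales with sqrt of the list size in gigabytes. */
            gb = (inactive + active) >> (30 - PAGE_SHIFT);
            if (gb)
                    inactive_ratio = int_sqrt(10 * gb);
            else
                    inactive_ratio = 1;

            return inactive * inactive_ratio < active;
    }

For a 1 GB list pair the ratio is int_sqrt(10) = 3, so deactivation starts once the inactive list drops below roughly a quarter of the total.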
2217 static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
2220 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2311 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
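Note: get_scan_count() (line 2217) decides how many pages to scan from each LRU of this lruvec. It starts from lruvec_lru_size(lruvec, lru, sc->reclaim_idx) (line 2311) and weights anon against file using swappiness and recent reclaim cost. A hedged sketch of the proportional (SCAN_FRACT) arithmetic only; fraction[] and denominator are computed earlier from the cost model:

    for_each_evictable_lru(lru) {
            unsigned long lruvec_size, scan;

            lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
            scan = lruvec_size >> sc->priority;     /* one priority slice */
            scan = div64_u64(scan * fraction[is_file_lru(lru)],
                             denominator);
            nr[lru] = scan;
    }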
2413 void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
2424 get_scan_count(lruvec, sc, nr);
2455 lruvec, sc);
2520 if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
2521 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
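Note: shrink_lruvec() (lines 2413-2521) drives one lruvec: fetch per-LRU scan budgets (line 2424), round-robin the evictable lists in SWAP_CLUSTER_MAX chunks via shrink_list() (line 2455), and finally pre-age the anon active list when swap is available (lines 2520-2521) so anon pages keep getting their referenced bits scanned. Main loop, simplified (upstream also rebalances nr[] once the reclaim target is met):

    unsigned long nr[NR_LRU_LISTS];
    enum lru_list lru;

    get_scan_count(lruvec, sc, nr);

    while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
           nr[LRU_INACTIVE_FILE]) {
            for_each_evictable_lru(lru) {
                    if (nr[lru]) {
                            unsigned long nr_to_scan;

                            nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
                            nr[lru] -= nr_to_scan;
                            sc->nr_reclaimed += shrink_list(lru, nr_to_scan,
                                                            lruvec, sc);
                    }
            }
    }

    if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
            shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc,
                               LRU_ACTIVE_ANON);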
2605 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2642 shrink_lruvec(lruvec, sc);
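Note: at lines 2605-2642 the node-level path walks the memcg hierarchy, resolving each memcg's lruvec on this node and shrinking it. A sketch assuming the upstream shrink_node_memcgs() shape (memory.low/min protection checks elided):

    struct mem_cgroup *memcg;

    memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
    do {
            struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

            /* ... protection and limit checks elided ... */
            shrink_lruvec(lruvec, sc);
            shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
    } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));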
2659 struct lruvec *target_lruvec;
2979 struct lruvec *target_lruvec;
2983 struct lruvec *lruvec;
2985 lruvec = node_lruvec(pgdat);
2986 lruvec->refaults[0] = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE_ANON); /* modified */
2987 lruvec->refaults[1] = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE_FILE); /* modified */
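Note: lines 2979-2987 snapshot the workingset activation counters into lruvec->refaults[] at the end of reclaim (anon in slot 0, file in slot 1) so the next pass can compute the refault delta. The /* modified */ tags mark a vendor change: this tree reads the node lruvec via node_lruvec(pgdat), where upstream v5.9 snapshots per target memcg, roughly:

    static void snapshot_refaults(struct mem_cgroup *target_memcg,
                                  pg_data_t *pgdat)
    {
            struct lruvec *target_lruvec;
            unsigned long refaults;

            target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
            refaults = lruvec_page_state(target_lruvec,
                                         WORKINGSET_ACTIVATE_ANON);
            target_lruvec->refaults[0] = refaults;
            refaults = lruvec_page_state(target_lruvec,
                                         WORKINGSET_ACTIVATE_FILE);
            target_lruvec->refaults[1] = refaults;
    }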
3056 struct lruvec *lruvec;
3058 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
3060 clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
3285 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
3314 nr[LRU_ACTIVE_ANON] = lruvec_lru_size(lruvec,
3316 nr[LRU_INACTIVE_ANON] = lruvec_lru_size(lruvec,
3322 shrink_lruvec(lruvec, &sc);
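Note: the block at lines 3285-3322 is not an upstream function; from the listed calls it sizes both anon lists of a memcg's lruvec (the zone-index argument continues on the next source line and is not shown) and then reclaims through shrink_lruvec(). A hypothetical reconstruction of just the listed calls; the MAX_NR_ZONES argument and how nr[] feeds the scan target are assumptions:

    struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
    unsigned long nr[NR_LRU_LISTS] = { 0 };

    nr[LRU_ACTIVE_ANON] = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON,
                                          MAX_NR_ZONES);
    nr[LRU_INACTIVE_ANON] = lruvec_lru_size(lruvec, LRU_INACTIVE_ANON,
                                            MAX_NR_ZONES);
    /* ... presumably derive the reclaim target from nr[] ... */
    shrink_lruvec(lruvec, &sc);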
3375 struct lruvec *lruvec;
3380 lruvec = mem_cgroup_lruvec(NULL, pgdat);
3381 if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
3386 lruvec = mem_cgroup_lruvec(memcg, pgdat);
3387 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
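Note: lines 3375-3387 are kswapd's anon aging: if swap exists and the node-level inactive anon list is low, walk every memcg and deactivate one batch from each anon active list. This matches upstream age_active_anon() (the vendor tree may add conditions):

    static void age_active_anon(struct pglist_data *pgdat,
                                struct scan_control *sc)
    {
            struct mem_cgroup *memcg;
            struct lruvec *lruvec;

            if (!total_swap_pages)
                    return;

            /* One node-level check gates the whole memcg walk. */
            lruvec = mem_cgroup_lruvec(NULL, pgdat);
            if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
                    return;

            memcg = mem_cgroup_iter(NULL, NULL, NULL);
            do {
                    lruvec = mem_cgroup_lruvec(memcg, pgdat);
                    shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc,
                                       LRU_ACTIVE_ANON);
                    memcg = mem_cgroup_iter(NULL, memcg, NULL);
            } while (memcg);
    }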
3456 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
3458 clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
4314 struct lruvec *lruvec;
4337 lruvec = mem_cgroup_page_lruvec(page, pgdat);
4347 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
4348 add_page_to_lru_list(page, lruvec, lru);
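Note: lines 4314-4348 are the unevictable-rescue path (upstream check_move_unevictable_pages()): each page is re-checked and, if it has become evictable, moved from LRU_UNEVICTABLE to its proper list. Per-page core, simplified:

    /* Rescue one page (sketch; assumes the LRU lock is held). */
    if (PageLRU(page) && PageUnevictable(page) && page_evictable(page)) {
            enum lru_list lru = page_lru_base_type(page);

            ClearPageUnevictable(page);
            del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
            add_page_to_lru_list(page, lruvec, lru);
            pgrescued++;
    }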
4370 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdata);
4372 shrink_list(LRU_ACTIVE_PURGEABLE, -1, lruvec, sc);
4373 nr += shrink_list(LRU_INACTIVE_PURGEABLE, -1, lruvec, sc);
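Note: LRU_ACTIVE_PURGEABLE / LRU_INACTIVE_PURGEABLE (lines 4372-4373) are not upstream LRU lists; they look like a vendor purgeable-memory feature that reuses the regular shrink_list() machinery, apparently passing -1 as nr_to_scan to drain each list completely. Hypothetical reconstruction of just the listed calls (the surrounding loop and the meaning of -1 are assumptions; pgdata is this tree's variable name):

    struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdata);

    shrink_list(LRU_ACTIVE_PURGEABLE, -1, lruvec, sc);
    nr += shrink_list(LRU_INACTIVE_PURGEABLE, -1, lruvec, sc);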