Lines Matching refs:list

562  * lruvec_lru_size -  Returns the number of pages on the given LRU list.
565 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
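
A minimal caller sketch for context, assuming kernel-internal code that already has an lruvec and (optionally) a struct scan_control *sc from the surrounding reclaim path; LRU_INACTIVE_FILE is just an illustrative choice:

    unsigned long nr;

    /* Whole inactive file list of this lruvec, all zones. */
    nr = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES - 1);

    /* Or only the zones eligible for the current reclaim request. */
    nr = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx);
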
676 list_add_tail(&shrinker->list, &shrinker_list);
731 list_del(&shrinker->list);
981 list_for_each_entry(shrinker, &shrinker_list, list) {
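
Lines 676, 731 and 981 are the add, remove and iterate sites for the global shrinker_list. A hedged sketch of the client side, assuming the two-argument, printf-style register_shrinker() of recent kernels; my_count, my_scan, my_cached and the "my-cache" name are placeholders:

    #include <linux/init.h>
    #include <linux/shrinker.h>

    static atomic_long_t my_cached;     /* hypothetical count of reclaimable objects */

    static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
    {
        return atomic_long_read(&my_cached);    /* 0 tells vmscan to skip us */
    }

    static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
    {
        /* Free up to sc->nr_to_scan objects here and return how many were freed. */
        return 0;    /* placeholder: this sketch frees nothing */
    }

    static struct shrinker my_shrinker = {
        .count_objects = my_count,
        .scan_objects  = my_scan,
        .seeks         = DEFAULT_SEEKS,
    };

    static int __init my_cache_init(void)
    {
        /* register_shrinker() links &my_shrinker onto shrinker_list (line 676);
         * unregister_shrinker() (line 731) unlinks it again at teardown. */
        return register_shrinker(&my_shrinker, "my-cache");
    }
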
1221 /* move folio to the active list, folio is locked */
1431 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
1432 * @folio: Folio to be returned to an LRU list.
1434 * Add previously isolated @folio to appropriate LRU list.
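
A hedged sketch of the caller contract: the folios were isolated earlier, lru_lock is not held, interrupts are enabled, and folio_putback_lru() drops the reference that isolation took. The failed_list name is hypothetical (modeled on how migration failure paths return folios):

    struct folio *folio, *next;

    list_for_each_entry_safe(folio, next, &failed_list, lru) {
        list_del(&folio->lru);
        folio_putback_lru(folio);   /* back onto the right LRU, isolation ref dropped */
    }
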
1465 * Let the folio, now marked Mlocked, be moved to the unevictable list.
1488 * inactive list. Another page table reference will
1525 * MADV_FREE anonymous folios are put into inactive file list too.
1725 * list has been processed.
1748 * inactive list and refilling from the active list. The
2221 * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
2232 * @nr_to_scan: The number of eligible pages to look through on the list.
2234 * @dst: The temp list to put pages on to.
2237 * @lru: LRU list id for isolating
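
A hedged sketch of how this internal helper is invoked, with the parameter order taken from the kernel-doc above and the lru_lock held across the call (as shrink_inactive_list() does); nr_to_scan, sc and lru are assumed to come from the surrounding reclaim code:

    LIST_HEAD(folio_list);
    unsigned long nr_scanned, nr_taken;

    spin_lock_irq(&lruvec->lru_lock);
    nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
                                  &nr_scanned, sc, lru);
    spin_unlock_irq(&lruvec->lru_lock);
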
2309 * Splice any skipped folios to the start of the LRU list. Note that
2336 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
2337 * @folio: Folio to isolate from its LRU list.
2339 * Isolate a @folio from an LRU list and adjust the vmstat statistic
2340 * corresponding to whatever LRU list the folio was on.
2343 * active list, it will have the Active flag set. If it was found on the
2344 * unevictable list, it will have the Unevictable flag set. These flags
2355 * Return: true if the folio was removed from an LRU list.
2356 * false if the folio was not on an LRU list.
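
A hedged sketch of the typical caller pattern; the stable reference on the folio is assumed to come from an earlier lookup, and do_something_with() is a hypothetical stand-in for the caller's real work:

    /* Caller already holds a reference on @folio; lru_lock not held, IRQs on. */
    if (folio_isolate_lru(folio)) {
        /*
         * The folio is off its LRU: the Active/Unevictable flags still say
         * which list it came from, and folio_isolate_lru() took an extra
         * reference for the isolation.
         */
        do_something_with(folio);     /* hypothetical work on the isolated folio */
        folio_putback_lru(folio);     /* re-add to the right LRU, drop that ref */
    } else {
        /* Not on any LRU: already isolated elsewhere, or on its way to being freed. */
    }
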
2378 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
2381 * the LRU list will go small and be scanned faster than necessary, leading to
2422 * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
2423 * On return, @list is reused as a list of folios to be freed by the caller.
2428 struct list_head *list)
2437 while (!list_empty(list)) {
2438 struct folio *folio = lru_to_folio(list);
2450 * The folio_set_lru needs to be kept here for list integrity.
2503 * To save our caller's stack, now use input list for pages to free.
2505 list_splice(&folios_to_free, list);
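
Putting those fragments together, a hedged sketch of the caller-side pattern (modeled on shrink_inactive_list(): lru_lock held around the move, then the same list head reused for the batch free):

    spin_lock_irq(&lruvec->lru_lock);
    move_folios_to_lru(lruvec, &folio_list);    /* survivors go back to their LRUs */
    spin_unlock_irq(&lruvec->lru_lock);

    /* On return, @folio_list holds only the folios that became free. */
    mem_cgroup_uncharge_list(&folio_list);
    free_unref_page_list(&folio_list);
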
2710 * give them one more trip around the active list. So
2730 * Move folios back to the lru list.
2736 /* Keep all free folios in l_active list */
2825 * The inactive anon list should be small enough that the VM never has
2828 * The inactive file list should be small enough to leave most memory
2829 * to the established workingset on the scan-resistant active list,
2835 * If that fails and refaulting is observed, the inactive list grows.
2839 * of 3 means 3:1 or 25% of the folios are kept on the inactive list.
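
A hedged sketch of the arithmetic this comment describes, matching the int_sqrt(10 * gb) heuristic used by inactive_is_low() to the best of my reading; inactive and active are the per-list folio counts:

    unsigned long gb, inactive_ratio;
    bool low;

    gb = (inactive + active) >> (30 - PAGE_SHIFT);   /* total list size in GB */
    inactive_ratio = gb ? int_sqrt(10 * gb) : 1;     /* e.g. 1GB -> 3, 10GB -> 10 */

    /* "inactive is low" once active outweighs inactive by more than the ratio. */
    low = inactive * inactive_ratio < active;
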
2896 * Target desirable inactive:active list ratios for the anon
3047 * proportional to the cost of reclaiming each list, as
3052 * Although we limit that influence to ensure no list gets
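
A hedged sketch of the cost-proportional split those two comment lines describe, using the variable names from get_scan_count() to the best of my reading; folding total_cost back into each side is what keeps either list from being starved completely:

    total_cost = sc->anon_cost + sc->file_cost;
    anon_cost = total_cost + sc->anon_cost;
    file_cost = total_cost + sc->file_cost;
    total_cost = anon_cost + file_cost;

    ap = swappiness * (total_cost + 1);
    ap /= anon_cost + 1;
    fp = (200 - swappiness) * (total_cost + 1);
    fp /= file_cost + 1;

    /* Scan targets are then scaled by ap / (ap + fp) and fp / (ap + fp). */
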
3380 * mm_struct list
3405 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
3417 lruvec->mm_state.tail = &mm->lru_gen.list;
3420 list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
3431 if (list_empty(&mm->lru_gen.list))
3445 if (lruvec->mm_state.head == &mm->lru_gen.list)
3449 if (lruvec->mm_state.tail == &mm->lru_gen.list)
3453 list_del_init(&mm->lru_gen.list);
3486 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
3589 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
4740 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
4760 hlist_nulls_del_rcu(&lruvec->lrugen.list);
4763 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4765 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4788 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
4794 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
4823 if (hlist_nulls_unhashed(&lruvec->lrugen.list))
4828 hlist_nulls_del_init_rcu(&lruvec->lrugen.list);
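
A hedged sketch that reorders those fragments into the three hlist_nulls operations they implement (locking and generation bookkeeping stripped out); add_to_head is an illustrative flag:

    /* Rotate: unhash from the old bin, requeue in the target generation's bin. */
    hlist_nulls_del_rcu(&lruvec->lrugen.list);
    if (add_to_head)
        hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
    else
        hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);

    /* Online: a fresh lruvec must still be unhashed before it is queued. */
    VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
    hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);

    /* Offline: skip if already unhashed, otherwise unhash and reinitialize. */
    if (!hlist_nulls_unhashed(&lruvec->lrugen.list))
        hlist_nulls_del_init_rcu(&lruvec->lrugen.list);
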
4964 int type, int tier, struct list_head *list)
4976 VM_WARN_ON_ONCE(!list_empty(list));
5003 list_add(&folio->lru, list);
5088 int *type_scanned, struct list_head *list)
5116 scanned = scan_folios(lruvec, sc, type, tier, list);
5134 LIST_HEAD(list);
5147 scanned = isolate_folios(lruvec, sc, swappiness, &type, &list);
5156 if (list_empty(&list))
5159 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
5162 list_for_each_entry_safe_reverse(folio, next, &list, lru) {
5193 move_folios_to_lru(lruvec, &list);
5207 mem_cgroup_uncharge_list(&list);
5208 free_unref_page_list(&list);
5210 INIT_LIST_HEAD(&list);
5211 list_splice_init(&clean, &list);
5213 if (!list_empty(&list)) {
5442 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) {
6229 lruvec->lrugen.list.next = LIST_POISON1;
6424 * allocations through requiring that the full LRU list has been scanned
6823 * If a full scan of the inactive list fails to free enough memory then we
8150 * lru list
8154 * lru list, moves it to the appropriate evictable lru list. This function
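
A hedged sketch of the typical caller, modeled on how shmem rescans a mapping after SHM_UNLOCK; mapping is assumed to come from the caller:

    struct folio_batch fbatch;
    pgoff_t index = 0;

    folio_batch_init(&fbatch);
    while (filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
        /* Moves folios that became evictable while stranded on the
         * unevictable list back onto an evictable LRU list. */
        check_move_unevictable_folios(&fbatch);
        folio_batch_release(&fbatch);
        cond_resched();
    }
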