Lines Matching refs:scanned

773 long scanned = 0, next_deferred;
835 scanned += shrinkctl->nr_scanned;
846 next_deferred = max_t(long, (nr + delta - scanned), 0);
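
The three hits above (source lines 773-846) show a shrinker-style deferred-work pattern: previously deferred work (nr) plus newly computed work (delta) form the target, and whatever the scan loop did not get through is carried to the next pass, clamped at zero. The following is a minimal userspace sketch of that accounting only; the driver loop, parameter names, and the max_t stand-in are illustrative assumptions, not kernel code.

```c
#include <stdio.h>

/* Userspace stand-in for the kernel's max_t(long, ...). */
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

/*
 * One hypothetical shrink pass: work owed from last time (nr) plus new
 * work (delta); whatever the scan loop does not get through is deferred
 * to the next pass, never negative.
 */
static long shrink_pass(long nr, long delta, long batch, long freeable)
{
	long scanned = 0;

	/* Illustrative scan loop: consume the target in batches. */
	while (scanned < nr + delta && freeable > 0) {
		long this_scan = batch < freeable ? batch : freeable;

		scanned += this_scan;   /* mirrors: scanned += shrinkctl->nr_scanned */
		freeable -= this_scan;
	}

	/* Carry over the shortfall, clamped at zero. */
	return max_t(long, (nr + delta - scanned), 0);
}

int main(void)
{
	/* 100 deferred + 50 new, but only 120 objects scannable: 30 stay deferred. */
	printf("next_deferred = %ld\n", shrink_pass(100, 50, 32, 120));
	return 0;
}
```

Clamping at zero matters because a pass can scan more than nr + delta (e.g. when a batch overshoots); without the clamp, the surplus would be double-counted as negative debt on the next pass.
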
1723 * so instead note that the LRU is being scanned too
1916 * dirty folios, and when we've already scanned
2235 * @nr_scanned: The number of pages that were scanned.
2381 * the LRU list will go small and be scanned faster than necessary, leading to
2598 * If dirty folios are scanned that are not queued for IO, it
2979 * scanned.
4970 int scanned = 0;
4998 scanned += delta;
5036 return isolated || !remaining ? scanned : 0;
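
The hits at source lines 4970-5036 show a reporting convention: the scan count is returned only if the pass isolated something or ran its budget (remaining) to zero; a short, fruitless pass reports 0. The sketch below illustrates that convention under assumed semantics; the budget constant, the folio array, and the loop body are all illustrative, not the kernel's actual scan logic.

```c
#include <stdbool.h>
#include <stdio.h>

#define SCAN_BUDGET 64  /* hypothetical per-pass budget */

static int scan_pass(const bool *reclaimable, int nfolios, int *isolated_out)
{
	int scanned = 0;
	int isolated = 0;
	int remaining = SCAN_BUDGET;

	for (int i = 0; i < nfolios && remaining; i++) {
		int delta = 1;          /* each folio costs one budget unit here */

		scanned += delta;       /* mirrors: scanned += delta */
		remaining -= delta;
		if (reclaimable[i])
			isolated++;
	}

	*isolated_out = isolated;
	/*
	 * Report progress only when the pass isolated folios or exhausted
	 * its budget; otherwise report 0 so the caller treats this pass
	 * as "nothing to do here" and moves on.
	 */
	return isolated || !remaining ? scanned : 0;
}

int main(void)
{
	bool folios[8] = { false };     /* nothing reclaimable */
	int isolated;

	/* Prints 0: the pass neither isolated nor exhausted its budget. */
	printf("scanned reported: %d\n", scan_pass(folios, 8, &isolated));
	return 0;
}
```
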
5092 int scanned;
5116 scanned = scan_folios(lruvec, sc, type, tier, list);
5117 if (scanned)
5126 return scanned;
5132 int scanned;
5147 scanned = isolate_folios(lruvec, sc, swappiness, &type, &list);
5149 scanned += try_to_inc_min_seq(lruvec, swappiness);
5152 scanned = 0;
5157 return scanned;
5218 return scanned;
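
The hits at source lines 5092-5218 suggest a two-level flow: an isolate step tries one type after another and returns the first pass that scanned anything, and an evict step folds in extra bookkeeping work but zeroes the count when it must bail. The sketch below is a loose userspace reimagining of that shape only; none of the helpers are kernel functions, and the bail condition is an assumption.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-type scan: type 0 has nothing, type 1 has work. */
static int scan_type(int type)
{
	return type == 0 ? 0 : 32;
}

/* Mirrors the isolate step: try each type, return the first nonzero scan. */
static int isolate(int *type_out)
{
	for (int type = 0; type < 2; type++) {
		int scanned = scan_type(type);

		if (scanned) {
			*type_out = type;
			return scanned;
		}
	}
	*type_out = -1;
	return 0;
}

/*
 * Mirrors the evict step: add bookkeeping work on top of the isolate
 * result, but report zero when the pass has to bail.
 */
static int evict(bool *bailed)
{
	int type;
	int scanned = isolate(&type);

	scanned += 1;           /* stand-in for the try_to_inc_min_seq() work */
	if (type < 0) {         /* hypothetical bail condition */
		*bailed = true;
		scanned = 0;
	}
	return scanned;
}

int main(void)
{
	bool bailed = false;

	printf("evict scanned %d (bailed=%d)\n", evict(&bailed), bailed);
	return 0;
}
```
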
5347 unsigned long scanned = 0;
5365 scanned += delta;
5366 if (scanned >= nr_to_scan)
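
The hits at source lines 5347-5366 are a plain scan-budget loop: accumulate per-iteration work into scanned and stop once the caller's nr_to_scan target is met. A minimal sketch of that loop follows; the inner work function is an illustrative stub, not kernel code.

```c
#include <stdio.h>

static unsigned long shrink_one(void)
{
	return 13;      /* hypothetical work done by one inner pass */
}

static unsigned long scan_until(unsigned long nr_to_scan)
{
	unsigned long scanned = 0;

	for (;;) {
		unsigned long delta = shrink_one();

		scanned += delta;               /* mirrors: scanned += delta */
		if (scanned >= nr_to_scan)
			break;                  /* budget met: stop scanning */
	}
	return scanned;
}

int main(void)
{
	/* The loop may overshoot the target by up to one pass's worth. */
	printf("scanned %lu of a %d target\n", scan_until(100), 100);
	return 0;
}
```
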
5382 unsigned long scanned = sc->nr_scanned;
5405 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
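
These two hits (source lines 5382-5405), and again the cluster at source lines 6469-6512 below, show a snapshot/delta idiom: sc->nr_scanned is a running total across the whole reclaim, so a caller snapshots it before a call and reports only the difference afterwards. The sketch assumes a pared-down scan_control and a stand-in reporting function; neither is the kernel's actual interface.

```c
#include <stdio.h>

struct scan_control {
	unsigned long nr_scanned;       /* running total across the whole reclaim */
};

static void do_some_reclaim(struct scan_control *sc)
{
	sc->nr_scanned += 42;           /* hypothetical work done by this call */
}

static void report_pressure(unsigned long delta)
{
	/* Stand-in for the vmpressure() reporting seen above. */
	printf("this call scanned %lu pages\n", delta);
}

int main(void)
{
	struct scan_control sc = { .nr_scanned = 1000 };
	unsigned long scanned = sc.nr_scanned;  /* snapshot before the call */

	do_some_reclaim(&sc);
	/* Pass only this call's share, not the running total. */
	report_pressure(sc.nr_scanned - scanned);
	return 0;
}
```
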
6329 * requested. Ensure that the anon and file LRUs are scanned
6421 * number of pages that were scanned. This will return to the caller
6424 * allocations through requiring that the full LRU list has been scanned
6427 * where always a non-zero amount of pages were scanned.
6469 unsigned long scanned;
6502 scanned = sc->nr_scanned;
6512 sc->nr_scanned - scanned,
6760 * scanned pages. This works for global memory pressure
7333 * Returns true if kswapd scanned at least the requested number of pages to
7548 * enough pages are already being scanned that that high