Lines matching refs: shadow
176 * slot of the evicted page. This is called a shadow entry.
178 * On cache misses for which there are shadow entries, an eligible
209 static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
212 unsigned long entry = xa_to_value(shadow);
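Together with the pack_shadow() counterpart elsewhere in the file, these lines show the core trick: the eviction information is packed into a single unsigned long and stored in the page cache slot as an xarray value entry (xa_mk_value() on store, xa_to_value() on load, as on line 212), so no allocation is needed per evicted page. A minimal, self-contained sketch of that packing, with illustrative field widths rather than the kernel's real MEM_CGROUP_ID_SHIFT/NODES_SHIFT-derived layout:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative widths; the kernel sizes these from the memcg ID,
     * the NUMA node ID, and whatever bits remain for the eviction
     * counter. */
    #define WORKINGSET_SHIFT 1
    #define NODE_BITS        6
    #define MEMCG_BITS       16

    static unsigned long pack_shadow(int memcgid, int node,
                                     unsigned long eviction, bool workingset)
    {
            unsigned long entry = eviction;

            entry = (entry << MEMCG_BITS) | (unsigned long)memcgid;
            entry = (entry << NODE_BITS) | (unsigned long)node;
            entry = (entry << WORKINGSET_SHIFT) | workingset;
            return entry;
    }

    static void unpack_shadow(unsigned long entry, int *memcgid, int *node,
                              unsigned long *eviction, bool *workingset)
    {
            *workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1);
            entry >>= WORKINGSET_SHIFT;
            *node = (int)(entry & ((1UL << NODE_BITS) - 1));
            entry >>= NODE_BITS;
            *memcgid = (int)(entry & ((1UL << MEMCG_BITS) - 1));
            entry >>= MEMCG_BITS;
            *eviction = entry;
    }

    int main(void)
    {
            int memcgid, node;
            unsigned long eviction;
            bool ws;

            unpack_shadow(pack_shadow(42, 3, 123456, true),
                          &memcgid, &node, &eviction, &ws);
            assert(memcgid == 42 && node == 3 && eviction == 123456 && ws);
            printf("memcg=%d node=%d eviction=%lu workingset=%d\n",
                   memcgid, node, eviction, (int)ws);
            return 0;
    }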
259 * Tests if the shadow entry is for a folio that was recently evicted.
260 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
262 static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
270 unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);
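On this multi-gen LRU path, the token unpacked here snapshots the generation counter, and a shadow entry is "recent" only while the generation it was evicted from has not yet been recycled. A sketch of that comparison, with an assumed token layout (the kernel's exact bit placement differs):

    /* Hypothetical layout: the token's low SEQ_BITS snapshot min_seq for
     * the folio's LRU type at eviction time.  "Recent" means that
     * generation is still the oldest live one. */
    #define SEQ_BITS 12
    #define SEQ_MASK ((1UL << SEQ_BITS) - 1)

    static bool token_is_recent(unsigned long token, unsigned long min_seq)
    {
            return (token & SEQ_MASK) == (min_seq & SEQ_MASK);
    }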
279 static void lru_gen_refault(struct folio *folio, void *shadow)
292 recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);
333 static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
339 static void lru_gen_refault(struct folio *folio, void *shadow)
378 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
416 * workingset_test_recent - tests if the shadow entry is for a folio that was
418 * shadow.
419 * @shadow: the shadow entry to be tested.
421 * @workingset: where the workingset value unpacked from shadow should
424 * Return: true if the shadow is for a recently evicted folio; false otherwise.
426 bool workingset_test_recent(void *shadow, bool file, bool *workingset)
438 return lru_gen_test_recent(shadow, file, &eviction_lruvec, &eviction, workingset);
440 unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
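On the classic (non-MGLRU) path, the eviction value unpacked here is a snapshot of a per-memcg "nonresident age" counter, and recency is a bounded refault distance: how many pages were aged out between eviction and refault, computed modulo the counter width. A compact sketch of that test, where EVICTION_BITS and workingset_size stand in for the kernel's derived values:

    /* Illustrative counter width; the kernel uses whatever bits of the
     * shadow entry are left after the memcg/node/workingset fields. */
    #define EVICTION_BITS 40
    #define EVICTION_MASK ((1UL << EVICTION_BITS) - 1)

    static bool test_recent(unsigned long nonresident_age, /* counter now */
                            unsigned long eviction,        /* snapshot then */
                            unsigned long workingset_size) /* in pages */
    {
            /* Modular subtraction keeps the distance meaningful across
             * counter wraparound. */
            unsigned long refault_distance =
                    (nonresident_age - eviction) & EVICTION_MASK;

            /* The folio would still have been resident had the
             * workingset been refault_distance pages bigger. */
            return refault_distance <= workingset_size;
    }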
479 * special case: usually, shadow entries have a short lifetime
482 * nonresident_age to lap a shadow entry in the field, which
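To make the "lap" concrete with small numbers: with an 8-bit eviction counter, an entry snapshotted at 10 that refaults only after the counter has advanced by 260 reads back a distance of (14 - 10) mod 256 = 4, which looks recent even though roughly 260 pages were aged out in between.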
530 * @shadow: Shadow entry of the evicted folio.
536 void workingset_refault(struct folio *folio, void *shadow)
546 lru_gen_refault(folio, shadow);
578 if (!workingset_test_recent(shadow, file, &workingset))
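Lines 536-578 together give the shape of the refault path: hand off to lru_gen_refault() when MGLRU is enabled, otherwise bail out unless the shadow entry tests recent, then reactivate the folio. A trimmed sketch of that flow, as it would sit inside this file (lru_gen_refault() is file-local); statistics, RCU locking, and memcg handling are omitted, and the flag helpers are real kernel APIs:

    #include <linux/mm_inline.h>
    #include <linux/swap.h>

    static void refault_sketch(struct folio *folio, void *shadow)
    {
            bool workingset;

            if (lru_gen_enabled()) {
                    lru_gen_refault(folio, shadow);
                    return;
            }

            if (!workingset_test_recent(shadow, folio_is_file_lru(folio),
                                        &workingset))
                    return;

            /* Recently evicted: go straight back to the active list, and
             * re-mark as workingset if it was part of it when evicted. */
            folio_set_active(folio);
            if (workingset)
                    folio_set_workingset(folio);
    }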
658 * create excessive amounts of shadow nodes. To keep a lid on this,
659 * track shadow nodes and reclaim them when they grow way past the
670 * Track non-empty nodes that contain only shadow entries;
706 * containing shadow entries. We don't need to keep more
707 * shadow entries than possible pages on the active list,
714 * Nodes might be sparsely populated, with only one shadow
716 * node for every eligible shadow entry, so compromise on a
721 * each, this will reclaim shadow entries when they consume
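Under the assumptions stated in that comment block (64-slot xarray nodes and a tolerated worst-case density of one-eighth), the compromise reduces to a one-line budget: allow about as many shadow entries as eligible pages, at no fewer than 8 entries per tracked node. A sketch of the arithmetic, with a hypothetical helper name:

    /* With 64 slots per node (XA_CHUNK_SHIFT == 6) and a worst-case
     * density of 1/8, each tracked node is good for at least 8 shadow
     * entries, so the node budget is
     * pages / 8 == pages >> (XA_CHUNK_SHIFT - 3). */
    static unsigned long shadow_node_budget(unsigned long pages)
    {
            return pages >> (6 - 3);
    }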
764 * the shadow node LRU under the i_pages lock and the
799 * The nodes should only contain one or more shadow entries,
866 ret = prealloc_shrinker(&workingset_shadow_shrinker, "mm-shadow");
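prealloc_shrinker() is the first half of a two-step registration, used when the shrinker must exist (here, for list_lru accounting of the shadow nodes) before it is allowed to run; register_shrinker_prepared() arms it afterwards. A minimal sketch of that pattern with placeholder callbacks standing in for count_shadow_nodes()/scan_shadow_nodes(); this uses the shrinker API matching the line above (later kernels replaced it with shrinker_alloc()):

    #include <linux/module.h>
    #include <linux/shrinker.h>

    static unsigned long demo_count(struct shrinker *s,
                                    struct shrink_control *sc)
    {
            return 0;               /* 0 == nothing to do, skip the scan */
    }

    static unsigned long demo_scan(struct shrinker *s,
                                   struct shrink_control *sc)
    {
            return SHRINK_STOP;     /* this sketch reclaims nothing */
    }

    static struct shrinker demo_shrinker = {
            .count_objects = demo_count,
            .scan_objects  = demo_scan,
            .seeks         = DEFAULT_SEEKS,
    };

    static int __init demo_init(void)
    {
            int ret = prealloc_shrinker(&demo_shrinker, "mm-demo");

            if (ret)
                    return ret;
            /* ... set up the structures the callbacks walk ... */
            register_shrinker_prepared(&demo_shrinker);
            return 0;
    }
    module_init(demo_init);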