Lines matching refs: lruvec (mm/swap.c, Linux kernel)
84 struct lruvec *lruvec;
87 lruvec = folio_lruvec_lock_irqsave(folio, &flags);
88 lruvec_del_folio(lruvec, folio);
90 unlock_page_lruvec_irqrestore(lruvec, flags);
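Lines 84-90 are the canonical take-lock/delete/unlock sequence used when the last reference to an LRU folio is dropped (the folio-release path, __page_cache_release() in recent kernels). A hedged sketch of the surrounding context; the lines the match listing omits are reconstructed and may differ slightly between kernel versions:

static void __page_cache_release(struct folio *folio)
{
        if (folio_test_lru(folio)) {
                struct lruvec *lruvec;
                unsigned long flags;

                /* Pin the folio's lruvec and take its lru_lock, IRQ-safe. */
                lruvec = folio_lruvec_lock_irqsave(folio, &flags);
                lruvec_del_folio(lruvec, folio);
                /* The kernel clears the LRU state with a combined
                 * flag-clearing helper here; plain clear shown for brevity. */
                __folio_clear_lru(folio);
                unlock_page_lruvec_irqrestore(lruvec, flags);
        }
}

The IRQ-saving lock variant is required because the lru_lock can also be taken with interrupts involved, e.g. when writeback completion rotates folios on the LRU.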
161 typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
163 static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
199 lruvec_add_folio(lruvec, folio);
206 struct lruvec *lruvec = NULL;
216 lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
217 move_fn(lruvec, folio);
222 if (lruvec)
223 unlock_page_lruvec_irqrestore(lruvec, flags);
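Lines 161-223 are the batched move machinery: callers stage folios in a per-CPU folio_batch, and folio_batch_move_lru() applies a move_fn_t callback to each one. folio_lruvec_relock_irqsave() keeps the same lru_lock across consecutive folios from the same lruvec and only drops/retakes it on a lruvec change, which is what the "lruvec != NULL" bookkeeping at lines 206-223 implements. A hedged sketch of the loop; the skip guard and the batch-release tail are reconstructed and vary by version:

static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;
        int i;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                /* Skip folios another CPU has isolated; clearing the LRU
                 * flag also blocks memcg migration during the move. */
                if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
                        continue;

                /* Relock only if this folio belongs to a different lruvec. */
                lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
                move_fn(lruvec, folio);

                folio_set_lru(folio);
        }

        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        /* Drop the batch's folio references and reset it. */
        folio_batch_release(fbatch);
}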
237 static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
240 lruvec_del_folio(lruvec, folio);
242 lruvec_add_folio_tail(lruvec, folio);
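lru_move_tail_fn() at lines 237-242 is the move_fn used for rotation: when writeback completes on a folio reclaim already passed over, it is moved to the tail of the inactive list so it becomes the next eviction candidate. A minimal sketch; the flag handling between the del/add pair is reconstructed:

static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
{
        if (!folio_test_unevictable(folio)) {
                lruvec_del_folio(lruvec, folio);
                folio_clear_active(folio);
                lruvec_add_folio_tail(lruvec, folio);
                __count_vm_events(PGROTATED, folio_nr_pages(folio));
        }
}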
269 void lru_note_cost(struct lruvec *lruvec, bool file,
287 * Holding lruvec->lru_lock is safe here, since
288 * 1) The pinned lruvec in reclaim, or
289 * 2) From a pre-LRU page during refault (which also holds the
290 *    rcu lock, so would be safe even if the page was on the LRU
291 *    and could move simultaneously to a new lruvec).
293 spin_lock_irq(&lruvec->lru_lock);
296 lruvec->file_cost += cost;
298 lruvec->anon_cost += cost;
308 lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
309 lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
310 lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
311 lruvec_page_state(lruvec, NR_ACTIVE_FILE);
313 if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
314 lruvec->file_cost /= 2;
315 lruvec->anon_cost /= 2;
317 spin_unlock_irq(&lruvec->lru_lock);
318 } while ((lruvec = parent_lruvec(lruvec)));
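lru_note_cost() (lines 269-318) feeds the anon/file reclaim-cost model: each charge is added to file_cost or anon_cost under lru_lock, and the do/while at line 318 propagates the same charge up the cgroup hierarchy via parent_lruvec(). The check at lines 313-315 is a decay rule: once the combined cost exceeds a quarter of the lruvec's total LRU pages, both counters are halved, turning them into exponentially decaying averages so recent pressure outweighs old history. A hedged reconstruction of the whole function; the signature's tail and the cost weighting are version-dependent:

void lru_note_cost(struct lruvec *lruvec, bool file,
                   unsigned int nr_io, unsigned int nr_rotated)
{
        /* Weight I/O above CPU-only rotation; the exact weighting is
         * reconstructed and differs across kernel versions. */
        unsigned long cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;

        do {
                unsigned long lrusize;

                spin_lock_irq(&lruvec->lru_lock);
                if (file)
                        lruvec->file_cost += cost;
                else
                        lruvec->anon_cost += cost;

                lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
                          lruvec_page_state(lruvec, NR_ACTIVE_FILE);

                /* Decay: halve both counters once their sum passes a
                 * quarter of the LRU size, keeping a moving average. */
                if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
                        lruvec->file_cost /= 2;
                        lruvec->anon_cost /= 2;
                }
                spin_unlock_irq(&lruvec->lru_lock);
        } while ((lruvec = parent_lruvec(lruvec)));    /* charge ancestors too */
}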
334 static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
339 lruvec_del_folio(lruvec, folio);
341 lruvec_add_folio(lruvec, folio);
345 __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
380 struct lruvec *lruvec;
383 lruvec = folio_lruvec_lock_irq(folio);
384 folio_activate_fn(lruvec, folio);
385 unlock_page_lruvec_irq(lruvec);
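folio_activate_fn() (lines 334-345) promotes a referenced folio from the inactive to the active list and accounts PGACTIVATE against the owning memcg via lruvec_memcg(). Lines 380-385 are a direct application path that takes the lruvec lock itself rather than going through the batched machinery (on SMP kernels activation is normally staged through a per-CPU batch instead). A hedged sketch of that direct path; the surrounding guard is reconstructed:

void folio_activate(struct folio *folio)
{
        struct lruvec *lruvec;

        /* Clearing the LRU flag isolates the folio from concurrent users. */
        if (folio_test_clear_lru(folio)) {
                lruvec = folio_lruvec_lock_irq(folio);
                folio_activate_fn(lruvec, folio);
                unlock_page_lruvec_irq(lruvec);
                folio_set_lru(folio);
        }
}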
568 static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
580 lruvec_del_folio(lruvec, folio);
591 lruvec_add_folio(lruvec, folio);
598 lruvec_add_folio_tail(lruvec, folio);
604 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
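lru_deactivate_file_fn() (lines 568-604) demotes a file folio off the active list and chooses between two placements: a folio still dirty or under writeback is re-added at the head of the inactive list (line 591) with its reclaim flag set, so writeback completion will rotate it to the tail; a folio that is already clean goes straight to the tail (line 598), where reclaim will find it first. A condensed, hedged sketch; guards and flag handling are reconstructed:

static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
{
        bool active = folio_test_active(folio);
        long nr_pages = folio_nr_pages(folio);

        /* Leave unevictable or still-mapped folios alone. */
        if (folio_test_unevictable(folio) || folio_mapped(folio))
                return;

        lruvec_del_folio(lruvec, folio);
        folio_clear_active(folio);
        folio_clear_referenced(folio);

        if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
                /* Head of inactive; rotate to tail when writeback ends. */
                lruvec_add_folio(lruvec, folio);
                folio_set_reclaim(folio);
        } else {
                /* Already clean: straight to the tail, reclaimed next. */
                lruvec_add_folio_tail(lruvec, folio);
                __count_vm_events(PGROTATED, nr_pages);
        }

        if (active) {
                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}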
609 static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
614 lruvec_del_folio(lruvec, folio);
617 lruvec_add_folio(lruvec, folio);
620 __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
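lru_deactivate_fn() (lines 609-620) is the simpler variant used by paths like MADV_COLD: strip the active and referenced state and re-add the folio at the head of the inactive list, counting PGDEACTIVATE both globally and against the memcg. A hedged sketch with the guard reconstructed:

static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
{
        if (folio_test_active(folio) && !folio_test_unevictable(folio)) {
                long nr_pages = folio_nr_pages(folio);

                lruvec_del_folio(lruvec, folio);
                folio_clear_active(folio);
                folio_clear_referenced(folio);
                lruvec_add_folio(lruvec, folio);

                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}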
625 static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
631 lruvec_del_folio(lruvec, folio);
640 lruvec_add_folio(lruvec, folio);
643 __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
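lru_lazyfree_fn() (lines 625-643) implements the LRU side of MADV_FREE: a clean, swap-backed anonymous folio has its swapbacked flag cleared and is re-added so reclaim treats it like clean file cache and can discard it without swap I/O, counting PGLAZYFREE per memcg. A hedged sketch; the guard conditions are reconstructed:

static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
{
        if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
            !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
                long nr_pages = folio_nr_pages(folio);

                lruvec_del_folio(lruvec, folio);
                folio_clear_active(folio);
                folio_clear_referenced(folio);
                /* Mark it discardable: a clean anon folio without the
                 * swapbacked flag is treated like clean file cache. */
                folio_clear_swapbacked(folio);
                lruvec_add_folio(lruvec, folio);

                __count_vm_events(PGLAZYFREE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
                                     nr_pages);
        }
}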
972 struct lruvec *lruvec = NULL;
985 * same lruvec. The lock is held only if lruvec != NULL.
987 if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
988 unlock_page_lruvec_irqrestore(lruvec, flags);
989 lruvec = NULL;
996 if (lruvec) {
997 unlock_page_lruvec_irqrestore(lruvec, flags);
998 lruvec = NULL;
1011 if (lruvec) {
1012 unlock_page_lruvec_irqrestore(lruvec, flags);
1013 lruvec = NULL;
1020 struct lruvec *prev_lruvec = lruvec;
1022 lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
1024 if (prev_lruvec != lruvec)
1027 lruvec_del_folio(lruvec, folio);
1045 if (lruvec)
1046 unlock_page_lruvec_irqrestore(lruvec, flags);
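Lines 972-1046 are the bulk release path (release_pages()). It reuses the relock-on-demand idiom from folio_batch_move_lru() and adds one safeguard: a lock_batch counter drops the lru_lock after SWAP_CLUSTER_MAX folios (lines 987-989) even when every folio maps to the same lruvec, bounding how long IRQs stay disabled, and the counter resets whenever the lock actually changes (lines 1020-1024). The repeated unlock/NULL pairs at lines 996-998 and 1011-1013 drop the lock before folio types whose freeing must not run under lru_lock. A condensed, hedged sketch of just the LRU handling; the wrapper name release_lru_folios() is hypothetical, and the special-case and freeing paths are omitted:

/* Hypothetical condensation of release_pages()'s LRU handling. */
static void release_lru_folios(struct page **pages, int nr)
{
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;
        unsigned int lock_batch = 0;
        int i;

        for (i = 0; i < nr; i++) {
                struct folio *folio = page_folio(pages[i]);

                /* Bound IRQ-off latency: after SWAP_CLUSTER_MAX folios
                 * under one lru_lock, drop it and start a new batch. */
                if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
                        unlock_page_lruvec_irqrestore(lruvec, flags);
                        lruvec = NULL;
                }

                if (!folio_put_testzero(folio))
                        continue;       /* not the last reference */

                if (folio_test_lru(folio)) {
                        struct lruvec *prev_lruvec = lruvec;

                        /* Relock only when the lruvec changes; reset the
                         * batch counter on every lock switch. */
                        lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
                                                             &flags);
                        if (prev_lruvec != lruvec)
                                lock_batch = 0;

                        lruvec_del_folio(lruvec, folio);
                        __folio_clear_lru(folio);
                }
        }

        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
}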