Lines Matching defs:lruvec

473 	struct lruvec *lruvec = &mz->lruvec;
474 	unsigned long nr_pages = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON,
475 			MAX_NR_ZONES) + lruvec_lru_size(lruvec, LRU_INACTIVE_ANON,
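
The match at 473-475 (cut off by the listing) sums one lruvec's active and
inactive anon lists across all zones. A minimal sketch of that computation,
assuming the statement closes with MAX_NR_ZONES as the final argument; the
helper name lruvec_anon_pages() is hypothetical, not from the file:

	/* Hypothetical helper; the body mirrors lines 473-475. */
	static unsigned long lruvec_anon_pages(struct lruvec *lruvec)
	{
		return lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
		       lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
	}
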
583 * memcg and lruvec stats flushing
800 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
806 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
833 /* Update lruvec */
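
Lines 800-833 outline __mod_memcg_lruvec_state(): the enclosing
mem_cgroup_per_node is recovered from the embedded lruvec, then both the
memcg-wide and the per-lruvec counters are updated. A hedged reconstruction;
the per-cpu field names (vmstats_percpu, lruvec_stats_percpu) are assumptions
modeled on mainline, not taken from the matches:

	void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
				      int val)
	{
		struct mem_cgroup_per_node *pn;

		/* The lruvec is embedded in mem_cgroup_per_node (line 806). */
		pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

		/* Update memcg (assumed per-cpu layout, as in mainline). */
		__this_cpu_add(pn->memcg->vmstats_percpu->state[idx], val);

		/* Update lruvec (line 833). */
		__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
	}
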
841 * __mod_lruvec_state - update lruvec memory statistics
842 * @lruvec: the lruvec
846 * The lruvec is the intersection of the NUMA node and a cgroup. This
847 * function updates all three counters that are affected by a
848 * change of state at this level: per-node, per-cgroup, per-lruvec.
850 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
854 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
856 /* Update memcg and lruvec */
859 if (is_node_lruvec(lruvec))
862 __mod_memcg_lruvec_state(lruvec, idx, val);
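
Lines 850-862 give nearly the whole of __mod_lruvec_state(). Reassembling the
matches, with only the early return between lines 859 and 862 assumed: the
node-level counter is always updated, while the memcg side is skipped for the
node's own, memcg-less lruvec (is_node_lruvec() appears to be a helper of
this tree; mainline tests mem_cgroup_disabled() at this point instead).

	void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
				int val)
	{
		/* Update node */
		__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

		/* Update memcg and lruvec */
		if (is_node_lruvec(lruvec))
			return;	/* assumed: bare node lruvec, no memcg side */

		__mod_memcg_lruvec_state(lruvec, idx, val);
	}
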
872 struct lruvec *lruvec;
883 /* Untracked pages have no memcg, no lruvec. Update only the node */
890 lruvec = mem_cgroup_lruvec(memcg, pgdat);
891 __mod_lruvec_state(lruvec, idx, val);
900 struct lruvec *lruvec;
906 * Untracked pages have no memcg, no lruvec. Update only the
907 * node. If we reparent the slab objects to the root memcg,
908 * when we free the slab object, we need to update the per-memcg
909 * vmstats to keep it correct for the root memcg.
914 lruvec = mem_cgroup_lruvec(memcg, pgdat);
915 __mod_lruvec_state(lruvec, idx, val);
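
The two call sites at 872-891 and 900-915 share one shape: derive the memcg,
fall back to a bare node update for untracked pages, and otherwise resolve
the lruvec and delegate to __mod_lruvec_state(). A sketch of that shape,
assuming the page_memcg()/page_pgdat() entry points; the function name
mod_lruvec_page_stat() is hypothetical:

	/* Hypothetical caller following the pattern at 872-915. */
	static void mod_lruvec_page_stat(struct page *page,
					 enum node_stat_item idx, int val)
	{
		pg_data_t *pgdat = page_pgdat(page);
		struct mem_cgroup *memcg;
		struct lruvec *lruvec;

		rcu_read_lock();
		memcg = page_memcg(page);

		/* Untracked pages have no memcg, no lruvec: node only. */
		if (!memcg) {
			__mod_node_page_state(pgdat, idx, val);
		} else {
			lruvec = mem_cgroup_lruvec(memcg, pgdat);
			__mod_lruvec_state(lruvec, idx, val);
		}
		rcu_read_unlock();
	}
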
1313 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1323 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1325 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
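
Lines 1313-1325 are the sanity check used by the lock functions below: a
folio with no memcg must sit on the root lruvec, and a charged folio must sit
on its own memcg's lruvec. Reassembled, with the mem_cgroup_disabled()
bail-out assumed from mainline:

	void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
	{
		struct mem_cgroup *memcg;

		if (mem_cgroup_disabled())	/* assumed early exit */
			return;

		memcg = folio_memcg(folio);

		if (!memcg)
			VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
		else
			VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
	}
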
1330 * folio_lruvec_lock - Lock the lruvec for a folio.
1339 * Return: The lruvec this folio is on with its lock held.
1341 struct lruvec *folio_lruvec_lock(struct folio *folio)
1343 struct lruvec *lruvec = folio_lruvec(folio);
1345 spin_lock(&lruvec->lru_lock);
1346 lruvec_memcg_debug(lruvec, folio);
1348 return lruvec;
1352 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1361 * Return: The lruvec this folio is on with its lock held and interrupts
1362 * disabled.
1364 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1366 struct lruvec *lruvec = folio_lruvec(folio);
1368 spin_lock_irq(&lruvec->lru_lock);
1369 lruvec_memcg_debug(lruvec, folio);
1371 return lruvec;
1375 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1385 * Return: The lruvec this folio is on with its lock held and interrupts
1386 * disabled.
1388 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1391 struct lruvec *lruvec = folio_lruvec(folio);
1393 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1394 lruvec_memcg_debug(lruvec, folio);
1396 return lruvec;
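
The three entry points at 1341-1396 differ only in interrupt discipline:
spin_lock(), spin_lock_irq(), and spin_lock_irqsave() on the same
lruvec->lru_lock. A usage sketch for the strictest variant; the caller
touch_folio_lru() and its body are hypothetical:

	/* Hypothetical caller showing the lock/unlock pairing. */
	static void touch_folio_lru(struct folio *folio)
	{
		unsigned long flags;
		struct lruvec *lruvec;

		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
		/* ... manipulate the folio's LRU linkage under lru_lock ... */
		spin_unlock_irqrestore(&lruvec->lru_lock, flags);
	}
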
1401 * @lruvec: mem_cgroup per zone lru vector
1409 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1420 if (is_node_lruvec(lruvec))
1424 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1433 __func__, lruvec, lru, nr_pages, size)) {
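
Lines 1409-1433 sketch mem_cgroup_update_lru_size(): bail out for the bare
node lruvec, recover the per-node container, adjust the cached LRU size, and
warn if it ever goes negative. A hedged reconstruction; the lru_zone_size
field and the decrement-before/increment-after ordering are assumptions
modeled on mainline:

	void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
					int zid, int nr_pages)
	{
		struct mem_cgroup_per_node *mz;
		unsigned long *lru_size;
		long size;

		if (is_node_lruvec(lruvec))
			return;

		mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
		lru_size = &mz->lru_zone_size[zid][lru];	/* assumed field */

		/* Decrement first so the check below catches underflow. */
		if (nr_pages < 0)
			*lru_size += nr_pages;

		size = *lru_size;
		if (WARN_ONCE(size < 0, "%s(%p, %d, %d): lru_size %ld\n",
			      __func__, lruvec, lru, nr_pages, size))
			*lru_size = 0;

		if (nr_pages > 0)
			*lru_size += nr_pages;
	}
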
2909 struct lruvec *lruvec;
2913 lruvec = mem_cgroup_lruvec(memcg, pgdat);
2914 mod_memcg_lruvec_state(lruvec, idx, nr);
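
Lines 2909-2914 show the irq-safe wrapper in use: a caller that already holds
a (memcg, node) pair resolves the lruvec and applies a delta through
mod_memcg_lruvec_state(), which, unlike the __-prefixed form above, disables
interrupts around the update (at least in mainline). The enclosing helper
here is hypothetical:

	/* Hypothetical wrapper around the call-site pattern at 2909-2914. */
	static void memcg_account_on_node(struct mem_cgroup *memcg,
					  struct pglist_data *pgdat,
					  enum node_stat_item idx, int nr)
	{
		struct lruvec *lruvec;

		lruvec = mem_cgroup_lruvec(memcg, pgdat);
		mod_memcg_lruvec_state(lruvec, idx, nr);
	}
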
4007 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
4017 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
4019 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
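
Lines 4007-4019 walk one node's LRU lists and sum either the hierarchical
counters or the CPU-local ones. The shape matches mainline's
mem_cgroup_node_nr_lru_pages(); a reconstruction with the loop and the
lru_mask test assumed from that context:

	static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
							  int nid,
							  unsigned int lru_mask,
							  bool tree)
	{
		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
		unsigned long nr = 0;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))	/* assumed mask test */
				continue;
			if (tree)
				nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
			else
				nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
		}
		return nr;
	}
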
4197 anon_cost += mz->lruvec.anon_cost;
4198 file_cost += mz->lruvec.file_cost;
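
Lines 4197-4198 accumulate the reclaim cost counters that live directly in
each per-node lruvec. A sketch of the aggregation, assuming a loop over
online nodes as in mainline's memcg stat reporting; the helper name
sum_reclaim_costs() is hypothetical:

	/* Hypothetical helper; the += lines mirror 4197-4198. */
	static void sum_reclaim_costs(struct mem_cgroup *memcg,
				      unsigned long *anon_cost,
				      unsigned long *file_cost)
	{
		struct mem_cgroup_per_node *mz;
		pg_data_t *pgdat;

		*anon_cost = *file_cost = 0;
		for_each_online_pgdat(pgdat) {
			mz = memcg->nodeinfo[pgdat->node_id];
			*anon_cost += mz->lruvec.anon_cost;
			*file_cost += mz->lruvec.file_cost;
		}
	}
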
5284 lruvec_init(&pn->lruvec);
5286 pn->lruvec.pgdat = NODE_DATA(node);
5840 struct lruvec *from_vec, *to_vec;
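
The from_vec/to_vec pair at 5840 points at the charge-move path: statistics
are shifted between source and destination lruvecs with paired
negative/positive deltas, so the per-node totals stay unchanged. A sketch
assuming the mem_cgroup_move_account() context from mainline; the helper name
and the NR_FILE_PAGES example item are illustrative only:

	/* Hypothetical fragment of the move-charge pattern. */
	static void move_file_stat(struct mem_cgroup *from, struct mem_cgroup *to,
				   pg_data_t *pgdat, int nr_pages)
	{
		struct lruvec *from_vec = mem_cgroup_lruvec(from, pgdat);
		struct lruvec *to_vec = mem_cgroup_lruvec(to, pgdat);

		/* Paired deltas keep the node-level totals constant. */
		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
	}
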
6697 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6700 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
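
The helper at 6697-6700 normalizes a lruvec counter to bytes:
memcg_page_state_unit() is PAGE_SIZE for page-based items and 1 for
byte-based ones (such as the slab counters), so callers can print everything
in one unit. A usage sketch; the wrapper name is hypothetical:

	/* Hypothetical example: bytes of page cache on this lruvec. */
	static unsigned long lruvec_file_bytes(struct lruvec *lruvec)
	{
		return lruvec_page_state_output(lruvec, NR_FILE_PAGES);
	}
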
6719 struct lruvec *lruvec;
6721 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6722 size = lruvec_page_state_output(lruvec,
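
Lines 6719-6722 sit inside the memory.numa_stat emission loop: for each
memory node, the lruvec is resolved and its byte-normalized counter printed
per node. A sketch of that inner loop, assuming the seq_file context of
mainline's memory_numa_stat_show(); the helper name is hypothetical:

	/* Hypothetical helper wrapping the loop body at 6719-6722. */
	static void emit_numa_sizes(struct seq_file *m, struct mem_cgroup *memcg,
				    enum node_stat_item idx)
	{
		int nid;

		for_each_node_state(nid, N_MEMORY) {
			struct lruvec *lruvec;

			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
			seq_printf(m, " N%d=%llu", nid,
				   (u64)lruvec_page_state_output(lruvec, idx));
		}
		seq_putc(m, '\n');
	}
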