Lines matching refs: memcg
121 static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
124 return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
128 static int expand_one_shrinker_info(struct mem_cgroup *memcg,
139 pn = memcg->nodeinfo[nid];
140 old = shrinker_info_protected(memcg, nid);
141 /* Not yet online memcg */
172 void free_shrinker_info(struct mem_cgroup *memcg)
179 pn = memcg->nodeinfo[nid];
186 int alloc_shrinker_info(struct mem_cgroup *memcg)
199 free_shrinker_info(memcg);
206 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
219 struct mem_cgroup *memcg;
231 memcg = mem_cgroup_iter(NULL, NULL, NULL);
233 ret = expand_one_shrinker_info(memcg, map_size, defer_size,
237 mem_cgroup_iter_break(NULL, memcg);
240 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
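Editor's note: the expand_shrinker_info() matches above show the canonical full-hierarchy walk: start with mem_cgroup_iter(NULL, NULL, NULL), pass the previous position back in to advance, and bail out through mem_cgroup_iter_break() so the reference held by the iterator is dropped. A minimal sketch of that pattern, using a made-up for_each_memcg_do() helper (not a kernel API):

#include <linux/memcontrol.h>

/* Illustrative helper only: apply fn() to every memory cgroup. */
static int for_each_memcg_do(int (*fn)(struct mem_cgroup *memcg, void *arg),
			     void *arg)
{
	struct mem_cgroup *memcg;
	int ret = 0;

	memcg = mem_cgroup_iter(NULL, NULL, NULL);
	do {
		ret = fn(memcg, arg);
		if (ret) {
			/* drop the reference the iterator holds on 'memcg' */
			mem_cgroup_iter_break(NULL, memcg);
			break;
		}
	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

	return ret;
}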
248 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
250 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
254 info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
304 struct mem_cgroup *memcg)
308 info = shrinker_info_protected(memcg, nid);
313 struct mem_cgroup *memcg)
317 info = shrinker_info_protected(memcg, nid);
321 void reparent_shrinker_deferred(struct mem_cgroup *memcg)
328 parent = parent_mem_cgroup(memcg);
335 child_info = shrinker_info_protected(memcg, nid);
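Editor's note: the shrinker_info lines mix three RCU accessors: rcu_assign_pointer() publishes a new per-node array, rcu_dereference() reads it under rcu_read_lock() in set_shrinker_bit(), and rcu_dereference_protected() reads it on the update side where the shrinker lock (shrinker_rwsem in kernels of this era) is held. A self-contained sketch of the same publish/read discipline, with an illustrative 'foo' object and lock instead of the real shrinker structures:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int nr_items;
	struct rcu_head rcu;
};

static struct foo __rcu *foo_ptr;
static DEFINE_MUTEX(foo_lock);

/* Update side: swap in a new object, free the old one after a grace period. */
static int foo_replace(int nr_items)
{
	struct foo *new, *old;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->nr_items = nr_items;

	mutex_lock(&foo_lock);
	/* cf. shrinker_info_protected(): safe because foo_lock is held */
	old = rcu_dereference_protected(foo_ptr, lockdep_is_held(&foo_lock));
	rcu_assign_pointer(foo_ptr, new);
	mutex_unlock(&foo_lock);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

/* Read side: cf. set_shrinker_bit(), which only dereferences under RCU. */
static int foo_read(void)
{
	struct foo *p;
	int ret = 0;

	rcu_read_lock();
	p = rcu_dereference(foo_ptr);
	if (p)
		ret = p->nr_items;
	rcu_read_unlock();

	return ret;
}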
365 * completely broken with the legacy memcg and direct stalling in
394 struct mem_cgroup *memcg)
400 struct mem_cgroup *memcg)
447 * single memcg. For example, a memcg-aware shrinker can free one object
448 * charged to the target memcg, causing an entire page to be freed.
449 * If we count the entire page as reclaimed from the memcg, we end up
453 * from the target memcg; preventing unnecessary retries during memcg
457 * charged to the target memcg, we end up underestimating the reclaimed
464 * memcg reclaim, to make reporting more accurate and reduce
481 if (sc->memcg &&
484 sc->memcg);
498 if (sc->memcg &&
501 sc->memcg);
518 static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
522 if (memcg == NULL) {
524 * For non-memcg reclaim, is there
530 /* Is the memcg below its swap limit? */
531 if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
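Editor's note: the fragments above are from can_reclaim_anon_pages(): global reclaim (memcg == NULL) only needs free space in some swap device, while memcg reclaim must stay under the cgroup's swap limit; failing both, anonymous memory is reclaimable only if it can be demoted to another node. A rough reconstruction based on mainline (the can_demote() helper and details may differ in this tree):

static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
					  int nid, struct scan_control *sc)
{
	if (memcg == NULL) {
		/* For non-memcg reclaim, is there space in any swap device? */
		if (get_nr_swap_pages() > 0)
			return true;
	} else {
		/* Is the memcg below its swap limit? */
		if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
			return true;
	}

	/* Anon cannot be swapped; can it still be demoted to another node? */
	return can_demote(nid, sc);
}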
861 struct mem_cgroup *memcg, int priority)
867 if (!mem_cgroup_online(memcg))
873 info = shrinker_info_protected(memcg, nid);
881 .memcg = memcg,
903 * the memcg shrinker map, a new object might have been
920 set_shrinker_bit(memcg, nid, i);
935 struct mem_cgroup *memcg, int priority)
945 * @memcg: memory cgroup whose slab caches to target
953 * @memcg specifies the memory cgroup to target. Unaware shrinkers
962 struct mem_cgroup *memcg,
969 * The root memcg might be allocated even though memcg is disabled
971 * mem_cgroup_is_root() return false, then just run memcg slab
975 if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
976 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
985 .memcg = memcg,
1012 struct mem_cgroup *memcg = NULL;
1014 memcg = mem_cgroup_iter(NULL, NULL, NULL);
1016 freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
1017 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
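Editor's note: shrink_slab() hands memcg-aware work to shrink_slab_memcg() whenever memcg is enabled and the target is not the root cgroup; the root (and the memcg-disabled case) falls through to the global shrinker list, and drop_slab-style callers simply invoke it once per node per cgroup. A hedged sketch of a caller that reclaims slab on behalf of one target cgroup on every online node (reclaim_slab_for_memcg() is made up; shrink_slab() itself is static to this file, and priority 0 is the most aggressive scan, as drop_slab uses):

#include <linux/nodemask.h>

/* Illustrative only: a real caller would live in the same file as shrink_slab(). */
static unsigned long reclaim_slab_for_memcg(struct mem_cgroup *memcg,
					    int priority)
{
	unsigned long freed = 0;
	int nid;

	for_each_online_node(nid)
		freed += shrink_slab(GFP_KERNEL, nid, memcg, priority);

	return freed;
}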
1727 * 2) Global or new memcg reclaim encounters a folio that is
1740 * 3) Legacy memcg encounters a folio that already has the
1741 * reclaim flag set. memcg does not have any dirty folio
1776 * memcg reclaim reaches the tests above,
2477 * inhibits memcg migration).
2882 * Flush the memory cgroup stats, so that we read accurate per-memcg
2988 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2990 int swappiness = mem_cgroup_swappiness(memcg);
2998 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
3005 * swappiness, but memcg users want to use this knob to
3080 mem_cgroup_protection(sc->target_mem_cgroup, memcg,
3113 unsigned long cgroup_size = mem_cgroup_size(memcg);
3146 if (!scan && !mem_cgroup_online(memcg))
3161 scan = mem_cgroup_online(memcg) ?
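Editor's note: get_scan_count() scales each cgroup's scan target by how much of its usage is covered by memory.min/memory.low, so heavily protected cgroups are scanned less; the mem_cgroup_online() checks above separately make sure already-deleted cgroups still get their remaining cache scraped. A hedged sketch of the mainline proportional formula (the surrounding bookkeeping is omitted):

#include <linux/minmax.h>

/* Scan target after memory.min/low protection, cf. get_scan_count(). */
static unsigned long scale_by_protection(unsigned long lruvec_size,
					 unsigned long protection,
					 unsigned long cgroup_size)
{
	/* avoid over-protection if usage raced below the protected amount */
	cgroup_size = max(cgroup_size, protection);

	/*
	 * Example: lruvec_size = 1000 pages, protection covering 150 of
	 * 200 units of usage -> scan = 1000 - 1000 * 150 / 201 = 254 pages.
	 */
	return lruvec_size - lruvec_size * protection / (cgroup_size + 1);
}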
3239 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
3244 if (memcg) {
3245 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
3261 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3268 mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
3271 return mem_cgroup_swappiness(memcg);
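Editor's note: get_lruvec() is MGLRU's internal (memcg, node) lookup; outside this file the same mapping is available through mem_cgroup_lruvec(), which several other matches in this listing use, and the reverse direction is lruvec_memcg(). A minimal sketch (memcg_node_lruvec() is a made-up wrapper):

#include <linux/memcontrol.h>
#include <linux/mmzone.h>

/*
 * (memcg, nid) -> lruvec via the public helper; returns the root cgroup's
 * lruvec for a NULL memcg, or the node's own lruvec when memcg is disabled.
 */
static struct lruvec *memcg_node_lruvec(struct mem_cgroup *memcg, int nid)
{
	return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
}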
3383 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
3391 if (memcg)
3392 return &memcg->mm_list;
3402 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
3403 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3407 VM_WARN_ON_ONCE(mm->lru_gen.memcg);
3408 mm->lru_gen.memcg = memcg;
3413 struct lruvec *lruvec = get_lruvec(memcg, nid);
3429 struct mem_cgroup *memcg = NULL;
3435 memcg = mm->lru_gen.memcg;
3437 mm_list = get_mm_list(memcg);
3442 struct lruvec *lruvec = get_lruvec(memcg, nid);
3458 mem_cgroup_put(mm->lru_gen.memcg);
3459 mm->lru_gen.memcg = NULL;
3466 struct mem_cgroup *memcg;
3477 if (!mm->lru_gen.memcg)
3481 memcg = mem_cgroup_from_task(task);
3483 if (memcg == mm->lru_gen.memcg)
3548 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3549 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3613 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3614 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
3931 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
3944 if (folio_memcg_rcu(folio) != memcg)
3972 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
4003 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
4040 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
4084 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
4250 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4264 if (!mem_cgroup_trylock_pages(memcg))
4514 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4530 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
4538 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4551 mem_cgroup_calculate_protection(NULL, memcg);
4553 return !mem_cgroup_below_min(NULL, memcg);
4561 struct mem_cgroup *memcg;
4570 memcg = mem_cgroup_iter(NULL, NULL, NULL);
4572 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4575 mem_cgroup_iter_break(NULL, memcg);
4580 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
4621 struct mem_cgroup *memcg = folio_memcg(folio);
4623 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4655 if (!mem_cgroup_trylock_pages(memcg))
4673 folio = get_pfn_folio(pfn, memcg, pgdat, can_swap);
4711 * memcg LRU
4776 void lru_gen_online_memcg(struct mem_cgroup *memcg)
4784 struct lruvec *lruvec = get_lruvec(memcg, nid);
4801 void lru_gen_offline_memcg(struct mem_cgroup *memcg)
4806 struct lruvec *lruvec = get_lruvec(memcg, nid);
4812 void lru_gen_release_memcg(struct mem_cgroup *memcg)
4819 struct lruvec *lruvec = get_lruvec(memcg, nid);
4838 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
4840 struct lruvec *lruvec = get_lruvec(memcg, nid);
4974 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5028 __count_memcg_events(memcg, item, isolated);
5029 __count_memcg_events(memcg, PGREFILL, sorted);
5142 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5202 __count_memcg_events(memcg, item, reclaimed);
5229 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5257 /* try to scrape all its memory if this memcg was deleted */
5258 if (!mem_cgroup_online(memcg)) {
5290 * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
5296 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5299 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
5318 /* don't abort memcg reclaim to ensure fairness */
5384 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5387 mem_cgroup_calculate_protection(NULL, memcg);
5389 if (mem_cgroup_below_min(NULL, memcg))
5392 if (mem_cgroup_below_low(NULL, memcg)) {
5397 memcg_memory_event(memcg, MEMCG_LOW);
5402 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
5405 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
5410 if (success && mem_cgroup_online(memcg))
5431 struct mem_cgroup *memcg;
5438 memcg = NULL;
5448 mem_cgroup_put(memcg);
5449 memcg = NULL;
5455 memcg = lruvec_memcg(lruvec);
5457 if (!mem_cgroup_tryget(memcg)) {
5458 lru_gen_release_memcg(memcg);
5459 memcg = NULL;
5478 mem_cgroup_put(memcg);
5688 struct mem_cgroup *memcg;
5703 memcg = mem_cgroup_iter(NULL, NULL, NULL);
5708 struct lruvec *lruvec = get_lruvec(memcg, nid);
5727 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5823 struct mem_cgroup *memcg;
5830 memcg = mem_cgroup_iter(NULL, NULL, NULL);
5836 return get_lruvec(memcg, nid);
5838 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5855 struct mem_cgroup *memcg = lruvec_memcg(v);
5861 memcg = mem_cgroup_iter(NULL, memcg, NULL);
5862 if (!memcg)
5868 return get_lruvec(memcg, nid);
5930 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5935 const char *path = memcg ? m->private : "";
5938 if (memcg)
5939 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
5941 seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
6039 struct mem_cgroup *memcg = NULL;
6047 memcg = mem_cgroup_from_id(memcg_id);
6048 if (!mem_cgroup_tryget(memcg))
6049 memcg = NULL;
6053 if (!memcg)
6057 if (memcg_id != mem_cgroup_id(memcg))
6060 lruvec = get_lruvec(memcg, nid);
6076 mem_cgroup_put(memcg);
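Editor's note: the lru_gen debugfs code above resolves a user-supplied cgroup id into a pinned memcg: look the id up under RCU, take a reference with mem_cgroup_tryget() so the cgroup cannot vanish after rcu_read_unlock(), re-check the id, and drop the reference with mem_cgroup_put() when done. A hedged sketch of that lookup (the helper name is made up):

#include <linux/memcontrol.h>
#include <linux/rcupdate.h>

/*
 * Illustrative helper: returns a referenced memcg or NULL; the caller must
 * mem_cgroup_put() a non-NULL result.
 */
static struct mem_cgroup *memcg_get_from_id(unsigned short memcg_id)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(memcg_id);
	/* the cgroup may already be dying; only keep it if tryget succeeds */
	if (!mem_cgroup_tryget(memcg))
		memcg = NULL;
	rcu_read_unlock();

	/* mirror the listing's extra check that the id still matches */
	if (memcg && memcg_id != mem_cgroup_id(memcg)) {
		mem_cgroup_put(memcg);
		memcg = NULL;
	}

	return memcg;
}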
6210 void lru_gen_init_memcg(struct mem_cgroup *memcg)
6212 INIT_LIST_HEAD(&memcg->mm_list.fifo);
6213 spin_lock_init(&memcg->mm_list.lock);
6216 void lru_gen_exit_memcg(struct mem_cgroup *memcg)
6221 VM_WARN_ON_ONCE(!list_empty(&memcg->mm_list.fifo));
6224 struct lruvec *lruvec = get_lruvec(memcg, nid);
6299 * do a batch of work at once. For memcg reclaim one check is made to
6328 * For kswapd and memcg, reclaim at least the number of pages
6463 struct mem_cgroup *memcg;
6465 memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
6467 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6479 mem_cgroup_calculate_protection(target_memcg, memcg);
6481 if (mem_cgroup_below_min(target_memcg, memcg)) {
6487 } else if (mem_cgroup_below_low(target_memcg, memcg)) {
6498 memcg_memory_event(memcg, MEMCG_LOW);
6506 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
6511 vmpressure(sc->gfp_mask, memcg, false,
6515 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
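Editor's note: shrink_node_memcgs() walks only the subtree rooted at sc->target_mem_cgroup and applies memory.min/memory.low before reclaiming each cgroup: below min it always skips, below low it skips once and records memcg_low_skipped so a retry may ignore low protection, and only then emits MEMCG_LOW. A hedged skeleton of that loop (the nr_scanned/vmpressure bookkeeping is omitted; shrink_lruvec() and shrink_slab() are the in-file helpers):

static void shrink_subtree(pg_data_t *pgdat, struct scan_control *sc)
{
	struct mem_cgroup *target = sc->target_mem_cgroup;
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(target, NULL, NULL);
	do {
		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

		mem_cgroup_calculate_protection(target, memcg);

		if (mem_cgroup_below_min(target, memcg)) {
			/* hard protection: never reclaim from this cgroup */
			continue;
		} else if (mem_cgroup_below_low(target, memcg)) {
			/* soft protection: skip unless a retry already failed */
			if (!sc->memcg_low_reclaim) {
				sc->memcg_low_skipped = 1;
				continue;
			}
			memcg_memory_event(memcg, MEMCG_LOW);
		}

		shrink_lruvec(lruvec, sc);
		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
	} while ((memcg = mem_cgroup_iter(target, memcg, NULL)));
}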
6590 * Tag a node/memcg as congested if all the dirty pages were marked
6593 * Legacy memcg will stall in page writeback so avoid forcibly
6761 * and balancing, not for a memcg's limit.
7100 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
7105 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
7108 .target_mem_cgroup = memcg,
7140 shrink_anon_memcg(pgdat, memcg, &sc, nr);
7152 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
7164 .target_mem_cgroup = memcg,
7194 struct mem_cgroup *memcg;
7209 memcg = mem_cgroup_iter(NULL, NULL, NULL);
7211 lruvec = mem_cgroup_lruvec(memcg, pgdat);
7214 memcg = mem_cgroup_iter(NULL, memcg, NULL);
7215 } while (memcg);
8170 /* block memcg migration while the folio moves between lrus */
8197 struct mem_cgroup *memcg = NULL;
8200 while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)))
8203 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdata);