Lines matching refs:memcg

70 u64 memcg_data_size(struct mem_cgroup *memcg, int type)
77 size += gsdev->ops->group_data_size(memcg->id.id, type, gsdev->priv);
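
The two memcg_data_size() fragments above show the per-memcg accounting entry point: the function walks the registered group-swap devices and sums whatever statistic `type` selects, keyed by the memcg's id. The plain-C sketch below only models that aggregation; the struct names, the ops table and the singly linked device list are illustrative assumptions, not the definitions used in the source file.

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for the device list walked by memcg_data_size(). */
    struct group_swap_ops_sketch {
        uint64_t (*group_data_size)(uint16_t gid, int type, void *priv);
    };

    struct group_swap_device_sketch {
        const struct group_swap_ops_sketch *ops;
        void *priv;
        struct group_swap_device_sketch *next;  /* models the kernel list walk */
    };

    /* Sum one statistic (type) for a memcg id across every registered device,
     * mirroring the size += gsdev->ops->group_data_size(...) line above. */
    static uint64_t memcg_data_size_sketch(uint16_t memcg_id, int type,
                                           struct group_swap_device_sketch *head)
    {
        uint64_t size = 0;
        struct group_swap_device_sketch *gsdev;

        for (gsdev = head; gsdev != NULL; gsdev = gsdev->next)
            size += gsdev->ops->group_data_size(memcg_id, type, gsdev->priv);

        return size;
    }
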
83 u64 swapin_memcg(struct mem_cgroup *memcg, u64 req_size)
85 u64 swap_size = memcg_data_size(memcg, SWAP_SIZE);
87 u64 ratio = atomic64_read(&memcg->memcg_reclaimed.ub_ufs2zram_ratio);
94 read_size += gsdev->ops->group_read(memcg->id.id, req_size - read_size,
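
swapin_memcg() reads the memcg's eswap footprint (SWAP_SIZE) and its ub_ufs2zram_ratio, then issues group_read() calls for the still-missing remainder (req_size - read_size). One plausible reading of these fragments, sketched below, is that the read-back request is clamped to a percentage of the current eswap size; the helper name and the clamp itself are assumptions for illustration.

    #include <stdint.h>

    /* Assumed clamp: cap one swap-in pass at ub_ufs2zram_ratio percent of the
     * memcg's current eswap (SWAP_SIZE) footprint. */
    static uint64_t swapin_budget_sketch(uint64_t swap_size, uint64_t ratio,
                                         uint64_t req_size)
    {
        uint64_t budget = swap_size * ratio / 100;

        return req_size < budget ? req_size : budget;
    }
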
104 static u64 swapout_memcg(struct mem_cgroup *memcg, u64 req_size)
106 u64 cache_size = memcg_data_size(memcg, CACHE_SIZE);
107 u64 swap_size = memcg_data_size(memcg, SWAP_SIZE);
110 u32 ratio = atomic_read(&memcg->memcg_reclaimed.ub_zram2ufs_ratio);
119 write_size += gsdev->ops->group_write(memcg->id.id, req_size - write_size,
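
swapout_memcg() mirrors the read path: the amount to push from zram to eswap is derived from the memcg's compressed-cache footprint (CACHE_SIZE) and ub_zram2ufs_ratio, and each device is handed only the remainder, as the req_size - write_size argument on line 119 suggests. The sketch below models that loop with a flat array of writer callbacks; the callback type, the array, and the percentage clamp are assumptions.

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical writer callback standing in for gsdev->ops->group_write(). */
    typedef uint64_t (*group_write_fn_sketch)(uint16_t gid, uint64_t size,
                                              void *priv);

    /* Derive the swapout request from the compressed-cache footprint and the
     * assumed ub_zram2ufs_ratio percentage, then ask each device for only
     * what is still outstanding. */
    static uint64_t swapout_memcg_sketch(uint16_t memcg_id, uint64_t cache_size,
                                         uint32_t ratio,
                                         group_write_fn_sketch *writers,
                                         void **privs, int nr_writers)
    {
        uint64_t req_size = cache_size * ratio / 100;
        uint64_t write_size = 0;
        int i;

        for (i = 0; i < nr_writers && write_size < req_size; i++)
            write_size += writers[i](memcg_id, req_size - write_size, privs[i]);

        return write_size;
    }
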
131 struct mem_cgroup *memcg = NULL;
134 while ((memcg = get_next_memcg(memcg)) != NULL) {
135 write_size += swapout_memcg(memcg, req_size - write_size);
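
The `while ((memcg = get_next_memcg(memcg)) != NULL)` shape at lines 134-135 recurs throughout the file: get_next_memcg() takes the previous position, returns the next memcg with a reference held, and returns NULL at the end of the walk, while get_next_memcg_break() (lines 599 and 633 below) is what the later loops use to drop that reference when they bail out early. A compact userspace model of that iterator contract is sketched below; the node type and refcount field are assumptions.

    #include <stddef.h>

    /* Hypothetical refcounted node standing in for struct mem_cgroup. */
    struct memcg_node_sketch {
        struct memcg_node_sketch *next;
        int refcount;
    };

    /* Models get_next_memcg(): hand back the next node with a reference held,
     * dropping the reference on the previous one; NULL ends the walk. */
    static struct memcg_node_sketch *get_next_memcg_sketch(
            struct memcg_node_sketch *head, struct memcg_node_sketch *prev)
    {
        struct memcg_node_sketch *next = prev ? prev->next : head;

        if (prev)
            prev->refcount--;
        if (next)
            next->refcount++;
        return next;
    }

    /* Models get_next_memcg_break(): drop the held reference on early exit. */
    static void get_next_memcg_break_sketch(struct memcg_node_sketch *pos)
    {
        if (pos)
            pos->refcount--;
    }
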
145 struct mem_cgroup *memcg = NULL;
148 while ((memcg = get_next_memcg(memcg)) != NULL)
149 zram_pages += memcg_data_size(memcg, CACHE_PAGE);
156 struct mem_cgroup *memcg = NULL;
159 while ((memcg = get_next_memcg(memcg)) != NULL)
160 eswap_pages += memcg_data_size(memcg, SWAP_PAGE);
167 struct mem_cgroup *memcg = NULL;
170 while ((memcg = get_next_memcg(memcg)) != NULL)
171 cache_fault += memcg_data_size(memcg, CACHE_FAULT);
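
Source lines 145-171 repeat the same reduction three times: walk every memcg and accumulate one of its counters (CACHE_PAGE, SWAP_PAGE or CACHE_FAULT) into a system-wide total. The generic sketch below captures that pattern once; the callback standing in for memcg_data_size() and the index-based iteration are assumptions made so the example is self-contained.

    #include <stdint.h>

    /* Hypothetical per-memcg lookup standing in for memcg_data_size(). */
    typedef uint64_t (*memcg_stat_fn_sketch)(int memcg_idx, int type);

    /* One reduction covering all three loops in the listing: pass CACHE_PAGE
     * for the zram page total, SWAP_PAGE for the eswap total, or CACHE_FAULT
     * for the cumulative fault count. */
    static uint64_t memcg_total_sketch(memcg_stat_fn_sketch stat,
                                       int nr_memcgs, int type)
    {
        uint64_t total = 0;
        int i;

        for (i = 0; i < nr_memcgs; i++)
            total += stat(i, type);

        return total;
    }
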
239 struct mem_cgroup *memcg = NULL;
241 while ((memcg = get_next_memcg(memcg)) != NULL)
242 memcg->memcg_reclaimed.reclaimed_pagefault = memcg_data_size(memcg, CACHE_FAULT);
251 static bool get_memcg_anon_refault_status(struct mem_cgroup *memcg)
260 if (!memcg)
263 anon_pagefault = memcg_data_size(memcg, CACHE_FAULT);
264 if (anon_pagefault == memcg->memcg_reclaimed.reclaimed_pagefault)
267 mz = mem_cgroup_nodeinfo(memcg, 0);
277 memcg_data_size(memcg, SWAP_PAGE) + memcg_data_size(memcg, CACHE_PAGE);
279 ratio = div64_u64((anon_pagefault - memcg->memcg_reclaimed.reclaimed_pagefault) *
281 if (ratio > atomic_read(&memcg->memcg_reclaimed.refault_threshold))
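
Lines 239-242 snapshot each memcg's CACHE_FAULT counter into memcg_reclaimed.reclaimed_pagefault, and get_memcg_anon_refault_status() later reads the live counter again: if nothing changed it reports no refault pressure, otherwise it divides the new faults by the memcg's current anonymous footprint (resident anon pages plus zram plus eswap pages, per lines 277-279) and compares the result with refault_threshold. The arithmetic model below follows those fragments; the *100 percentage scale is an assumption, since the listing truncates the multiplier on line 279.

    #include <stdint.h>
    #include <stdbool.h>

    /* Model of get_memcg_anon_refault_status(): faults accumulated since the
     * zswapd snapshot, relative to resident anon + zram + eswap pages. */
    static bool anon_refault_sketch(uint64_t anon_pagefault,
                                    uint64_t reclaimed_pagefault,
                                    uint64_t anon_pages, uint64_t zram_pages,
                                    uint64_t eswap_pages,
                                    uint32_t refault_threshold)
    {
        uint64_t base = anon_pages + zram_pages + eswap_pages;
        uint64_t ratio;

        if (anon_pagefault == reclaimed_pagefault || base == 0)
            return false;

        ratio = (anon_pagefault - reclaimed_pagefault) * 100 / base;
        return ratio > refault_threshold;
    }
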
561 struct mem_cgroup *memcg, struct scan_control *sc, unsigned long *nr)
563 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
590 struct mem_cgroup *memcg = NULL;
593 while ((memcg = get_next_memcg(memcg)) != NULL) {
594 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
599 get_next_memcg_break(memcg);
603 if (get_memcg_anon_refault_status(memcg)) {
610 nr_zram = memcg_data_size(memcg, CACHE_PAGE);
611 nr_eswap = memcg_data_size(memcg, SWAP_PAGE);
615 if (zram_ratio >= (u32)atomic_read(&memcg->memcg_reclaimed.ub_mem2zram_ratio)) {
626 zswapd_shrink_anon_memcg(pgdat, memcg, sc, nr);
630 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
633 get_next_memcg_break(memcg);
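
The closing block (source lines 561-633) is zswapd's per-memcg reclaim walk: zswapd_shrink_anon_memcg() operates on mem_cgroup_lruvec(memcg, pgdat), and the caller skips memcgs whose anon-refault status is set, weighs how much of the memcg already sits in zram/eswap (nr_zram, nr_eswap) against ub_mem2zram_ratio, and only then shrinks the anon LRUs and calls shrink_slab(); get_next_memcg_break() at 599 and 633 drops the iterator reference on the early exits. The sketch below condenses that per-memcg decision; the helper name, the zram_ratio definition, and the skip directions are assumptions drawn from the fragments.

    #include <stdint.h>
    #include <stdbool.h>

    /* Per-memcg decision sketch for the zswapd walk, mirroring the refault
     * check (line 603) and the occupancy check against ub_mem2zram_ratio
     * (line 615). */
    static bool should_shrink_memcg_sketch(bool refaulting, uint64_t nr_anon,
                                           uint64_t nr_zram, uint64_t nr_eswap,
                                           uint32_t ub_mem2zram_ratio)
    {
        uint64_t compressed = nr_zram + nr_eswap;
        uint64_t total = nr_anon + compressed;
        uint32_t zram_ratio;

        if (refaulting)
            return false;       /* reclaim would likely refault straight back */
        if (total == 0)
            return false;

        /* Assumed: share of this memcg's anon memory already in zram/eswap. */
        zram_ratio = (uint32_t)(compressed * 100 / total);

        return zram_ratio < ub_mem2zram_ratio;  /* already at target -> skip */
    }
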