Lines Matching refs:memcg
71 u64 memcg_data_size(struct mem_cgroup *memcg, int type)
78 size += gsdev->ops->group_data_size(memcg->id.id, type, gsdev->priv);
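Taken together, the hits at source lines 71 and 78 suggest memcg_data_size() walks the registered group-swap devices and sums each device's per-memcg figure for the requested statistic. A minimal sketch, assuming a struct group_swap_device list named gsdev_list guarded by RCU; only the ops->group_data_size() call and the memcg->id.id argument come from the listing:

/* Sketch: sum one statistic (SWAP_SIZE, CACHE_SIZE, CACHE_PAGE, SWAP_PAGE,
 * CACHE_FAULT) across all registered group-swap devices for one memcg.
 * gsdev_list, the RCU protection and struct group_swap_device are assumptions. */
u64 memcg_data_size(struct mem_cgroup *memcg, int type)
{
	struct group_swap_device *gsdev;
	u64 size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(gsdev, &gsdev_list, list)
		size += gsdev->ops->group_data_size(memcg->id.id, type,
						    gsdev->priv);
	rcu_read_unlock();

	return size;
}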
84 u64 swapin_memcg(struct mem_cgroup *memcg, u64 req_size)
86 u64 swap_size = memcg_data_size(memcg, SWAP_SIZE);
88 u64 ratio = atomic64_read(&memcg->memcg_reclaimed.ub_ufs2zram_ratio);
95 read_size += gsdev->ops->group_read(memcg->id.id, req_size - read_size,
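swapin_memcg() (source lines 84-95) reads a memcg's data back from eswap into zram through the same device ops. How ub_ufs2zram_ratio bounds the request is not visible in the hits, so the clamp below is an assumed interpretation; the stat lookup, the ratio field and group_read() are from the listing, while the third group_read() argument is guessed:

/* Sketch: read one memcg's eswapped data back into zram.
 * The ratio clamp (assumed percent scale) and the third group_read()
 * argument are assumptions. */
u64 swapin_memcg(struct mem_cgroup *memcg, u64 req_size)
{
	u64 swap_size = memcg_data_size(memcg, SWAP_SIZE);
	u64 ratio = atomic64_read(&memcg->memcg_reclaimed.ub_ufs2zram_ratio);
	u64 limit = div64_u64(swap_size * ratio, 100);
	u64 read_size = 0;
	struct group_swap_device *gsdev;

	if (req_size > limit)
		req_size = limit;

	rcu_read_lock();
	list_for_each_entry_rcu(gsdev, &gsdev_list, list) {
		read_size += gsdev->ops->group_read(memcg->id.id,
						    req_size - read_size,
						    gsdev->priv);
		if (read_size >= req_size)
			break;
	}
	rcu_read_unlock();

	return read_size;
}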
105 static u64 swapout_memcg(struct mem_cgroup *memcg, u64 req_size)
107 u64 cache_size = memcg_data_size(memcg, CACHE_SIZE);
108 u64 swap_size = memcg_data_size(memcg, SWAP_SIZE);
111 u32 ratio = atomic_read(&memcg->memcg_reclaimed.ub_zram2ufs_ratio);
120 write_size += gsdev->ops->group_write(memcg->id.id, req_size - write_size,
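swapout_memcg() (source lines 105-120) is the mirror image: it pushes zram-resident data of one memcg out to eswap via group_write(), with ub_zram2ufs_ratio as the policy knob. The budget formula below (keep at most ratio percent of the memcg's anon data in eswap) is an assumption; only the two stat lookups, the ratio read and the group_write() call are from the hits:

/* Sketch: write one memcg's zram-cached data out to eswap.
 * The budget derivation from ub_zram2ufs_ratio is an assumption. */
static u64 swapout_memcg(struct mem_cgroup *memcg, u64 req_size)
{
	u64 cache_size = memcg_data_size(memcg, CACHE_SIZE);
	u64 swap_size = memcg_data_size(memcg, SWAP_SIZE);
	u32 ratio = atomic_read(&memcg->memcg_reclaimed.ub_zram2ufs_ratio);
	u64 budget = div64_u64((cache_size + swap_size) * ratio, 100);
	u64 write_size = 0;
	struct group_swap_device *gsdev;

	/* assumed policy: never hold more than ratio% of the anon data in eswap */
	if (budget <= swap_size)
		return 0;
	if (req_size > budget - swap_size)
		req_size = budget - swap_size;

	rcu_read_lock();
	list_for_each_entry_rcu(gsdev, &gsdev_list, list) {
		write_size += gsdev->ops->group_write(memcg->id.id,
						      req_size - write_size,
						      gsdev->priv);
		if (write_size >= req_size)
			break;
	}
	rcu_read_unlock();

	return write_size;
}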
132 struct mem_cgroup *memcg = NULL;
135 while ((memcg = get_next_memcg(memcg)) != NULL) {
136 write_size += swapout_memcg(memcg, req_size - write_size);
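The loop at source lines 132-136 fans a global swap-out request out over every memcg via get_next_memcg(). A sketch, with the enclosing function name and the early-exit test assumed; the get_next_memcg_break() cleanup mirrors the pattern visible at lines 600 and 634:

/* Sketch: spread a global swap-out request over all memcgs. */
static u64 swapout(u64 req_size)
{
	struct mem_cgroup *memcg = NULL;
	u64 write_size = 0;

	while ((memcg = get_next_memcg(memcg)) != NULL) {
		write_size += swapout_memcg(memcg, req_size - write_size);
		if (write_size >= req_size) {
			get_next_memcg_break(memcg);   /* drop the iterator's reference */
			break;
		}
	}

	return write_size;
}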
146 struct mem_cgroup *memcg = NULL;
149 while ((memcg = get_next_memcg(memcg)) != NULL)
150 zram_pages += memcg_data_size(memcg, CACHE_PAGE);
157 struct mem_cgroup *memcg = NULL;
160 while ((memcg = get_next_memcg(memcg)) != NULL)
161 eswap_pages += memcg_data_size(memcg, SWAP_PAGE);
168 struct mem_cgroup *memcg = NULL;
171 while ((memcg = get_next_memcg(memcg)) != NULL)
172 cache_fault += memcg_data_size(memcg, CACHE_FAULT);
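Source lines 146-172 are three copies of the same pattern: walk every memcg and total one memcg_data_size() statistic, CACHE_PAGE for zram-resident pages, SWAP_PAGE for eswapped pages, CACHE_FAULT for refault events. One generic sketch covers all three; the helper name is invented, the loop shape comes from the hits:

/* Sketch: total one memcg_data_size() statistic over every memcg. */
static u64 memcg_stat_total(int type)
{
	struct mem_cgroup *memcg = NULL;
	u64 total = 0;

	while ((memcg = get_next_memcg(memcg)) != NULL)
		total += memcg_data_size(memcg, type);

	return total;
}

In the listing the three instances feed zram_pages (CACHE_PAGE), eswap_pages (SWAP_PAGE) and cache_fault (CACHE_FAULT).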
240 struct mem_cgroup *memcg = NULL;
242 while ((memcg = get_next_memcg(memcg)) != NULL)
243 memcg->memcg_reclaimed.reclaimed_pagefault = memcg_data_size(memcg, CACHE_FAULT);
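The loop at source lines 240-243 snapshots the current CACHE_FAULT counter into memcg_reclaimed.reclaimed_pagefault for every memcg; get_memcg_anon_refault_status() later measures new refaults against this baseline. A sketch with an assumed function name:

/* Sketch: reset each memcg's refault baseline to the current CACHE_FAULT count. */
static void reset_memcg_refault_baseline(void)
{
	struct mem_cgroup *memcg = NULL;

	while ((memcg = get_next_memcg(memcg)) != NULL)
		memcg->memcg_reclaimed.reclaimed_pagefault =
			memcg_data_size(memcg, CACHE_FAULT);
}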
252 static bool get_memcg_anon_refault_status(struct mem_cgroup *memcg)
261 if (!memcg)
264 anon_pagefault = memcg_data_size(memcg, CACHE_FAULT);
265 if (anon_pagefault == memcg->memcg_reclaimed.reclaimed_pagefault)
268 mz = mem_cgroup_nodeinfo(memcg, 0);
278 memcg_data_size(memcg, SWAP_PAGE) + memcg_data_size(memcg, CACHE_PAGE);
280 ratio = div64_u64((anon_pagefault - memcg->memcg_reclaimed.reclaimed_pagefault) *
282 if (ratio > atomic_read(&memcg->memcg_reclaimed.refault_threshold))
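get_memcg_anon_refault_status() (source lines 252-282) decides whether a memcg has refaulted so heavily since the last baseline that further anon reclaim would be counterproductive. The two early returns, the nodeinfo lookup, the ratio division and the threshold compare are visible in the hits; how resident anon pages are counted between lines 268 and 278, and the percentage scale, are assumptions:

/* Sketch: is this memcg refaulting past its configured threshold? */
static bool get_memcg_anon_refault_status(struct mem_cgroup *memcg)
{
	u64 anon_pagefault, anon_total, ratio;
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (!memcg)
		return false;

	anon_pagefault = memcg_data_size(memcg, CACHE_FAULT);
	if (anon_pagefault == memcg->memcg_reclaimed.reclaimed_pagefault)
		return false;            /* no new refaults since the baseline */

	mz = mem_cgroup_nodeinfo(memcg, 0);
	if (!mz)
		return false;
	lruvec = &mz->lruvec;

	/* assumed: resident anon LRU pages plus zram-cached and eswapped pages */
	anon_total = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
		     lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES) +
		     memcg_data_size(memcg, SWAP_PAGE) +
		     memcg_data_size(memcg, CACHE_PAGE);

	/* refaults since the last baseline, as an assumed percentage of anon_total */
	ratio = div64_u64((anon_pagefault -
			   memcg->memcg_reclaimed.reclaimed_pagefault) * 100,
			  anon_total + 1);

	return ratio > atomic_read(&memcg->memcg_reclaimed.refault_threshold);
}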
562 struct mem_cgroup *memcg, struct scan_control *sc, unsigned long *nr)
564 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
591 struct mem_cgroup *memcg = NULL;
594 while ((memcg = get_next_memcg(memcg)) != NULL) {
595 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
600 get_next_memcg_break(memcg);
604 if (get_memcg_anon_refault_status(memcg)) {
611 nr_zram = memcg_data_size(memcg, CACHE_PAGE);
612 nr_eswap = memcg_data_size(memcg, SWAP_PAGE);
616 if (zram_ratio >= (u32)atomic_read(&memcg->memcg_reclaimed.ub_mem2zram_ratio)) {
627 zswapd_shrink_anon_memcg(pgdat, memcg, sc, nr);
631 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
634 get_next_memcg_break(memcg);
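The remaining hits (source lines 562-634) belong to zswapd's per-node anon shrink path: for each memcg it skips groups that are already refaulting or already sufficiently compressed, then calls zswapd_shrink_anon_memcg() and shrink_slab(). A sketch of that loop; the outer function name, the nr[] bookkeeping, the zram_ratio formula and the stop condition are assumptions, while the control flow follows the hits:

/* Sketch: zswapd's per-node anon shrink loop over all memcgs. */
static void zswapd_shrink_anon(pg_data_t *pgdat, struct scan_control *sc)
{
	unsigned long nr[NR_LRU_LISTS] = { 0 };
	struct mem_cgroup *memcg = NULL;

	while ((memcg = get_next_memcg(memcg)) != NULL) {
		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
		u64 nr_anon, nr_zram, nr_eswap;
		u32 zram_ratio;

		/* already refaulting past its threshold: leave this memcg alone */
		if (get_memcg_anon_refault_status(memcg))
			continue;

		nr_anon = lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES) +
			  lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES);
		nr_zram = memcg_data_size(memcg, CACHE_PAGE);
		nr_eswap = memcg_data_size(memcg, SWAP_PAGE);

		/* assumed: share of this memcg's anon already in zram or eswap */
		zram_ratio = (u32)div64_u64((nr_zram + nr_eswap) * 100,
					    nr_zram + nr_eswap + nr_anon + 1);
		if (zram_ratio >= (u32)atomic_read(&memcg->memcg_reclaimed.ub_mem2zram_ratio))
			continue;

		zswapd_shrink_anon_memcg(pgdat, memcg, sc, nr);
		shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);

		/* assumed: stop early once the reclaim target is met */
		if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
			get_next_memcg_break(memcg);   /* drop the iterator's reference */
			break;
		}
	}
}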