Lines Matching refs:memcg

78 * Per memcg event counter is incremented at every pagein/pageout. With THP,
81 * than using jiffies etc. to handle periodic memcg event.
99 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
100 * shrinkers, which have elements charged to this memcg.
147 struct mem_cgroup *memcg; /* Back pointer, we cannot */
189 int memcg_id; /* memcg->css.id of foreign inode */
202 struct mem_cgroup *memcg;
219 /* Private memcg ID. Used to ID objects that outlive the cgroup */
346 /* per-memcg mm_struct list */
376 * After the initialization objcg->memcg is always pointing at
377 * a valid memcg, but can be atomically swapped to the parent memcg.
379 * The caller must ensure that the returned memcg won't be released:
384 return READ_ONCE(objcg->memcg);
438 * For a non-kmem folio any of the following ensures folio and memcg binding
447 * For a kmem folio a caller should hold an rcu read lock to protect memcg
500 * For a non-kmem folio any of the following ensures folio and memcg binding
509 * For a kmem folio a caller should hold an rcu read lock to protect memcg
542 struct mem_cgroup *memcg;
546 memcg = obj_cgroup_memcg(objcg);
547 if (unlikely(!css_tryget(&memcg->css)))
551 return memcg;
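
The fragment at lines 542-551 is the standard pattern for pinning the memcg behind an obj_cgroup: since objcg->memcg can be reparented at any moment (lines 376-377), the pointer is only meaningful under RCU, and css_tryget() must be retried if the memcg it saw is already being released. A minimal sketch of the same pattern as a standalone helper (pin_objcg_memcg is my name, not a kernel symbol):

#include <linux/memcontrol.h>
#include <linux/rcupdate.h>

/* Sketch: take a css reference on the memcg currently backing @objcg. */
static struct mem_cgroup *pin_objcg_memcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	do {
		/* objcg->memcg may be swapped to the parent concurrently */
		memcg = obj_cgroup_memcg(objcg);
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();

	return memcg;	/* release with css_put(&memcg->css) */
}
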
584 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
586 return (memcg == root_mem_cgroup);
595 struct mem_cgroup *memcg,
637 if (root == memcg)
640 *min = READ_ONCE(memcg->memory.emin);
641 *low = READ_ONCE(memcg->memory.elow);
645 struct mem_cgroup *memcg);
648 struct mem_cgroup *memcg)
651 * The root memcg doesn't account charges, and doesn't support
652 * protection. The target memcg's protection is ignored, see
655 return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
656 memcg == target;
660 struct mem_cgroup *memcg)
662 if (mem_cgroup_unprotected(target, memcg))
665 return READ_ONCE(memcg->memory.elow) >=
666 page_counter_read(&memcg->memory);
670 struct mem_cgroup *memcg)
672 if (mem_cgroup_unprotected(target, memcg))
675 return READ_ONCE(memcg->memory.emin) >=
676 page_counter_read(&memcg->memory);
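
Lines 637-676 are the reclaim-side protection checks: mem_cgroup_protection() reports the effective memory.min/memory.low values, while mem_cgroup_below_min()/mem_cgroup_below_low() compare those against current usage and treat the root memcg and the reclaim target itself as unprotected. A hedged sketch of how a reclaim-style caller might consume them (report_protection is an illustrative name, and the printing stands in for a real reclaim decision):

#include <linux/memcontrol.h>
#include <linux/printk.h>

/* Sketch: classify @memcg's protection relative to the reclaim @target. */
static void report_protection(struct mem_cgroup *target,
			      struct mem_cgroup *memcg)
{
	if (mem_cgroup_below_min(target, memcg))
		pr_info("memcg ino %lu: below memory.min, skip reclaim\n",
			mem_cgroup_ino(memcg));
	else if (mem_cgroup_below_low(target, memcg))
		pr_info("memcg ino %lu: below memory.low, reclaim reluctantly\n",
			mem_cgroup_ino(memcg));
	else
		pr_info("memcg ino %lu: unprotected\n", mem_cgroup_ino(memcg));
}
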
687 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
689 * charge to the active memcg.
732 static inline struct mem_cgroup_per_node *mem_cgroup_nodeinfo(struct mem_cgroup *memcg,
735 return memcg->nodeinfo[nid];
739 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
740 * @memcg: memcg of the wanted lruvec
743 * Returns the lru list vector holding pages for a given @memcg &
747 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
758 if (!memcg)
759 memcg = root_mem_cgroup;
761 mz = memcg->nodeinfo[pgdat->node_id];
782 struct mem_cgroup *memcg = folio_memcg(folio);
784 VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
785 return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
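
Lines 747-785 give the two ways to resolve an LRU vector: mem_cgroup_lruvec() from an explicit (memcg, node) pair, with a NULL memcg falling back to root_mem_cgroup, and the folio-based body at 782-785 which combines folio_memcg() with folio_pgdat(). A small sketch spelling out the folio path (folio_lruvec_explicit is my name; the caller is assumed to hold whatever keeps the folio/memcg binding stable, per lines 438-447):

#include <linux/memcontrol.h>
#include <linux/mm.h>

/* Sketch: resolve the lruvec that holds @folio, step by step. */
static struct lruvec *folio_lruvec_explicit(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	/* a NULL memcg is mapped to root_mem_cgroup inside mem_cgroup_lruvec() */
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}
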
832 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
834 return !memcg || css_tryget(&memcg->css);
837 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
839 if (memcg)
840 css_put(&memcg->css);
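
Lines 832-840 are the reference helpers: mem_cgroup_tryget() takes a css reference only if the memcg is still alive (and treats NULL as success), while mem_cgroup_put() drops it and is a no-op for NULL. A sketch of the usual pairing (with_memcg_ref and the elided work are placeholders):

#include <linux/memcontrol.h>

/* Sketch: operate on @memcg only while holding a css reference. */
static void with_memcg_ref(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_tryget(memcg))
		return;		/* cgroup already released */

	/* ... work that needs @memcg to stay around goes here ... */

	mem_cgroup_put(memcg);
}
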
850 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
853 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
858 if (!memcg)
862 return memcg->id.id;
867 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
869 return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
893 return mz->memcg;
897 * parent_mem_cgroup - find the accounting parent of a memcg
898 * @memcg: memcg whose parent to find
900 * Returns the parent memcg, or NULL if this is the root.
902 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
904 return mem_cgroup_from_css(memcg->css.parent);
907 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
910 if (root == memcg)
912 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
916 struct mem_cgroup *memcg)
924 match = mem_cgroup_is_descendant(task_memcg, memcg);
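
Lines 897-924 cover hierarchy navigation: parent_mem_cgroup() walks one level up and returns NULL at the root, mem_cgroup_is_descendant() answers ancestry questions through the cgroup tree, and the body at 916-924 matches a task's mm against a memcg subtree. A purely illustrative sketch of an upward walk built from parent_mem_cgroup() (memcg_depth is not a kernel symbol):

#include <linux/memcontrol.h>

/* Sketch: count how many levels separate @memcg from the root memcg. */
static int memcg_depth(struct mem_cgroup *memcg)
{
	int depth = 0;

	while (memcg && !mem_cgroup_is_root(memcg)) {
		memcg = parent_mem_cgroup(memcg);
		depth++;
	}
	return depth;
}
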
932 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
936 return !!(memcg->css.flags & CSS_ONLINE);
954 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
956 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
958 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
961 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
983 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
988 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
990 /* try to stabilize folio_memcg() for all the pages in a memcg */
991 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
995 if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
1008 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1014 __mod_memcg_state(memcg, idx, val);
1021 struct mem_cgroup *memcg;
1027 memcg = page_memcg(page);
1028 if (memcg)
1029 mod_memcg_state(memcg, idx, val);
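
Lines 988-1029 show the stat-update path: __mod_memcg_state() is the raw counter update, mod_memcg_state() wraps it with the necessary protection, and the page-based body at 1021-1029 first resolves page_memcg() and silently skips uncharged pages. A hedged sketch of the same shape for a folio (folio_mod_memcg_stat is my name; @idx must be a stat index valid for memcg accounting):

#include <linux/memcontrol.h>
#include <linux/mm.h>

/* Sketch: adjust a per-memcg stat for a charged folio; does nothing
 * if the folio is not charged to any memcg. */
static void folio_mod_memcg_stat(struct folio *folio, int idx, int nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		mod_memcg_state(memcg, idx, nr);
}
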
1033 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
1118 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
1121 static inline void count_memcg_events(struct mem_cgroup *memcg,
1128 __count_memcg_events(memcg, idx, count);
1135 struct mem_cgroup *memcg = page_memcg(page);
1137 if (memcg)
1138 count_memcg_events(memcg, idx, 1);
1144 struct mem_cgroup *memcg = folio_memcg(folio);
1146 if (memcg)
1147 count_memcg_events(memcg, idx, nr);
1153 struct mem_cgroup *memcg;
1159 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1160 if (likely(memcg))
1161 count_memcg_events(memcg, idx, 1);
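
Lines 1118-1161 are the event-counting counterparts: count_memcg_events() for a known memcg, page/folio variants that resolve the owning memcg first, and an mm-based body at 1153-1161 that dereferences mm->owner under RCU. A sketch of the mm-based pattern (count_event_for_mm is an illustrative name):

#include <linux/memcontrol.h>
#include <linux/rcupdate.h>

/* Sketch: charge one vm event (e.g. PGMAJFAULT) to the memcg owning @mm. */
static void count_event_for_mm(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}
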
1165 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1171 atomic_long_inc(&memcg->memory_events_local[event]);
1173 cgroup_file_notify(&memcg->events_local_file);
1176 atomic_long_inc(&memcg->memory_events[event]);
1178 cgroup_file_notify(&memcg->swap_events_file);
1180 cgroup_file_notify(&memcg->events_file);
1186 } while ((memcg = parent_mem_cgroup(memcg)) &&
1187 !mem_cgroup_is_root(memcg));
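
Lines 1165-1187 show that memcg_memory_event() records an event both locally (memory_events_local plus an events_local_file notification) and hierarchically: the loop at 1186-1187 walks parent_mem_cgroup() upward, stopping before the root, so every ancestor's memory.events file reflects the event. Usage is a single call; a minimal sketch, assuming a context that has just decided @memcg is out of memory:

#include <linux/memcontrol.h>

/* Sketch: record an OOM event against @memcg; the helper itself
 * propagates it to all ancestors up to (not including) the root. */
static void note_memcg_oom(struct mem_cgroup *memcg)
{
	memcg_memory_event(memcg, MEMCG_OOM);
}
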
1193 struct mem_cgroup *memcg;
1199 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1200 if (likely(memcg))
1201 memcg_memory_event(memcg, event);
1251 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1261 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1272 struct mem_cgroup *memcg,
1280 struct mem_cgroup *memcg)
1285 struct mem_cgroup *memcg)
1290 struct mem_cgroup *memcg)
1296 struct mem_cgroup *memcg)
1329 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1346 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1352 struct mem_cgroup *memcg)
1372 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
1377 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1419 static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1424 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1437 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
1458 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1470 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1475 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1481 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1486 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1498 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
1538 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1542 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1548 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1559 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1605 static inline void count_memcg_events(struct mem_cgroup *memcg,
1611 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1657 struct mem_cgroup *memcg;
1659 memcg = lruvec_memcg(lruvec);
1660 if (!memcg)
1662 memcg = parent_mem_cgroup(memcg);
1663 if (!memcg)
1665 return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1684 /* Test requires a stable page->memcg binding, see page_memcg() */
1733 struct mem_cgroup *memcg;
1738 memcg = folio_memcg(folio);
1739 if (unlikely(memcg && &memcg->css != wb->memcg_css))
1772 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
1774 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1780 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1783 return !!memcg->tcpmem_pressure;
1785 if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
1787 } while ((memcg = parent_mem_cgroup(memcg)));
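
Lines 1772-1787 are the networking hooks: mem_cgroup_charge_skmem()/mem_cgroup_uncharge_skmem() account socket-buffer pages, and mem_cgroup_under_socket_pressure() reports pressure from the v1 tcpmem counter or, on the unified hierarchy, from socket_pressure timestamps anywhere up the tree. A hedged sketch of the pairing (charge_socket_pages is illustrative; the GFP_NOWAIT choice and the reaction to pressure are placeholders, not a recommendation):

#include <linux/memcontrol.h>
#include <linux/gfp.h>

/* Sketch: try to account @nr_pages of socket memory to @memcg. */
static bool charge_socket_pages(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!mem_cgroup_charge_skmem(memcg, nr_pages, GFP_NOWAIT))
		return false;	/* charge rejected */

	if (mem_cgroup_under_socket_pressure(memcg)) {
		/* a caller might shrink its buffers here */
	}

	return true;	/* later: mem_cgroup_uncharge_skmem(memcg, nr_pages) */
}
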
1791 int alloc_shrinker_info(struct mem_cgroup *memcg);
1792 void free_shrinker_info(struct mem_cgroup *memcg);
1793 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1794 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
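
Lines 1791-1794 belong to the memcg-aware shrinker infrastructure described at lines 99-100: each memcg keeps a per-node bitmap of shrinker IDs that have objects charged to it, and set_shrinker_bit() is how a cache marks its shrinker as having work for a given memcg and node. A sketch of that call site, assuming a shrinker already registered with SHRINKER_MEMCG_AWARE so that its ->id is valid (note_object_added is my name):

#include <linux/memcontrol.h>
#include <linux/shrinker.h>

/* Sketch: after adding an object charged to @memcg on node @nid,
 * tell reclaim the corresponding shrinker has something to scan. */
static void note_object_added(struct shrinker *shrinker,
			      struct mem_cgroup *memcg, int nid)
{
	if (memcg)
		set_shrinker_bit(memcg, nid, shrinker->id);
}
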
1799 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1804 static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1849 * A helper for accessing memcg's kmem_id, used for getting
1852 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1854 return memcg ? memcg->kmemcg_id : -1;
1863 struct mem_cgroup *memcg;
1869 memcg = obj_cgroup_memcg(objcg);
1870 count_memcg_events(memcg, idx, 1);
1915 static inline int memcg_kmem_id(struct mem_cgroup *memcg)