Lines matching defs:folio
361 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
362 * @folio: folio of interest
365 * with @folio is returned. The returned css remains associated with @folio
371 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
373 struct mem_cgroup *memcg = folio_memcg(folio);
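These matched lines read like mm/memcontrol.c. The helper above hands writeback code the css of the folio's memcg, the kind of lookup wbc_account_cgroup_owner() performs when attributing IO to a cgroup. A minimal sketch of such a lookup; wb_note_owner() is a hypothetical name, not a kernel API:

	#include <linux/cgroup.h>
	#include <linux/memcontrol.h>

	static void wb_note_owner(struct folio *folio)
	{
		struct cgroup_subsys_state *css = mem_cgroup_css_from_folio(folio);

		/* No reference is taken: per the kerneldoc above, the css
		 * stays valid while it remains associated with the folio. */
		pr_debug("folio owned by css id %d\n", css->id);
	}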
1313 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1320 memcg = folio_memcg(folio);
1323 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1325 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1330 * folio_lruvec_lock - Lock the lruvec for a folio.
1331 * @folio: Pointer to the folio.
1334 * - folio locked
1337 * - folio frozen (refcount of 0)
1339 * Return: The lruvec this folio is on with its lock held.
1341 struct lruvec *folio_lruvec_lock(struct folio *folio)
1343 struct lruvec *lruvec = folio_lruvec(folio);
1346 lruvec_memcg_debug(lruvec, folio);
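The conditions quoted above (folio locked, or frozen at refcount 0) are what pin the folio-to-lruvec binding so that the correct lruvec's lru_lock is taken. A minimal sketch assuming the caller holds the folio lock and the folio sits on an LRU list; rotate_folio() is a hypothetical name:

	#include <linux/memcontrol.h>
	#include <linux/mm_inline.h>

	static void rotate_folio(struct folio *folio)
	{
		struct lruvec *lruvec = folio_lruvec_lock(folio);

		/* The binding cannot change while one of the rules holds. */
		lruvec_del_folio(lruvec, folio);
		lruvec_add_folio_tail(lruvec, folio);
		unlock_page_lruvec(lruvec);
	}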
1352 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1353 * @folio: Pointer to the folio.
1356 * - folio locked
1359 * - folio frozen (refcount of 0)
1361 * Return: The lruvec this folio is on with its lock held and interrupts
1364 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1366 struct lruvec *lruvec = folio_lruvec(folio);
1369 lruvec_memcg_debug(lruvec, folio);
1375 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1376 * @folio: Pointer to the folio.
1380 * - folio locked
1383 * - folio frozen (refcount of 0)
1385 * Return: The lruvec this folio is on with its lock held and interrupts
1388 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1391 struct lruvec *lruvec = folio_lruvec(folio);
1394 lruvec_memcg_debug(lruvec, folio);
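The _irq and _irqsave variants take the same lock under the same rules but additionally manage interrupt state, for callers that cannot use a plain spin_lock(). A sketch of the irqsave form; lru_touch() is a hypothetical name:

	static void lru_touch(struct folio *folio)
	{
		unsigned long flags;
		struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);

		/* ... update the folio's LRU state under lru_lock ... */
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}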
2129 * folio_memcg_lock - Bind a folio to its memcg.
2130 * @folio: The folio.
2136 * for the lifetime of the folio.
2138 void folio_memcg_lock(struct folio *folio)
2153 memcg = folio_memcg(folio);
2167 if (memcg != folio_memcg(folio)) {
2197 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2198 * @folio: The folio.
2201 * not change the accounting of this folio to its memcg, but it does
2204 void folio_memcg_unlock(struct folio *folio)
2206 __folio_memcg_unlock(folio_memcg(folio));
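folio_memcg_lock()/folio_memcg_unlock() bracket a section in which the folio cannot be moved to another memcg, so a folio state change and the matching memcg statistic stay consistent; the dirty and writeback accounting paths rely on this. A minimal sketch with the actual stat update elided; account_folio_state() is a hypothetical name:

	static void account_folio_state(struct folio *folio)
	{
		folio_memcg_lock(folio);
		/* folio_memcg(folio) is stable until the unlock below. */
		/* ... set a folio flag and update the matching memcg stat ... */
		folio_memcg_unlock(folio);
	}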
2876 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2878 VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2888 folio->memcg_data = (unsigned long)memcg;
2954 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2961 if (folio_test_slab(folio)) {
2966 slab = folio_slab(folio);
2980 * a folio where the slab flag has been cleared already, but
2985 return folio_memcg_check(folio);
3004 struct folio *folio;
3010 folio = page_folio(vmalloc_to_page(p));
3012 folio = virt_to_folio(p);
3014 return mem_cgroup_from_obj_folio(folio, p);
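The wrapper above maps an arbitrary kernel address to its folio (via vmalloc_to_page() for vmalloc space, virt_to_folio() otherwise) before delegating to mem_cgroup_from_obj_folio(). A sketch of asking which memcg an object was charged to, assuming CONFIG_MEMCG_KMEM and that an RCU read-side section is enough to keep the returned pointer stable for the check:

	#include <linux/memcontrol.h>
	#include <linux/rcupdate.h>

	static bool obj_charged_to_memcg(void *p)
	{
		struct mem_cgroup *memcg;
		bool charged;

		rcu_read_lock();
		memcg = mem_cgroup_from_obj(p);	/* NULL for uncharged memory */
		charged = memcg && !mem_cgroup_is_root(memcg);
		rcu_read_unlock();

		return charged;
	}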
3066 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3073 if (folio_memcg_kmem(folio)) {
3074 objcg = __folio_objcg(folio);
3080 memcg = __folio_memcg(folio);
3180 struct folio *folio = page_folio(page);
3184 if (!folio_memcg_kmem(folio))
3187 objcg = __folio_objcg(folio);
3189 folio->memcg_data = 0;
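This fragment is the uncharge side for kmem pages: the objcg is looked up and memcg_data cleared before the page goes back to the allocator. Most code never calls it directly; allocating with __GFP_ACCOUNT charges the page, and the normal free path uncharges it through this helper. A sketch:

	#include <linux/gfp.h>

	/* Charged to the current task's memcg at allocation time. */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, 0);

	if (page)
		__free_pages(page, 0);	/* uncharged on free via the kmem path */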
3446 struct folio *folio = page_folio(head);
3447 struct mem_cgroup *memcg = folio_memcg(folio);
3454 folio_page(folio, i)->memcg_data = folio->memcg_data;
3456 if (folio_memcg_kmem(folio))
3457 obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
4698 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4701 struct mem_cgroup *memcg = folio_memcg(folio);
4708 trace_track_foreign_dirty(folio, wb);
5806 struct folio *folio;
5813 /* folio is moved even if it's not RSS of this task(page-faulted). */
5816 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5817 if (IS_ERR(folio))
5819 return folio_file_page(folio, index);
5839 struct folio *folio = page_folio(page);
5842 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5846 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5847 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5848 VM_BUG_ON(compound && !folio_test_large(folio));
5851 if (folio_memcg(folio) != from)
5854 pgdat = folio_pgdat(folio);
5858 folio_memcg_lock(folio);
5860 if (folio_test_anon(folio)) {
5861 if (folio_mapped(folio)) {
5864 if (folio_test_pmd_mappable(folio)) {
5875 if (folio_test_swapbacked(folio)) {
5880 if (folio_mapped(folio)) {
5885 if (folio_test_dirty(folio)) {
5886 struct address_space *mapping = folio_mapping(folio);
5898 if (folio_test_swapcache(folio)) {
5903 if (folio_test_writeback(folio)) {
5926 folio->memcg_data = (unsigned long)to;
5931 nid = folio_nid(folio);
7065 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7068 long nr_pages = folio_nr_pages(folio);
7076 commit_charge(folio, memcg);
7080 memcg_check_events(memcg, folio_nid(folio));
7086 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7092 ret = charge_memcg(folio, memcg, gfp);
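__mem_cgroup_charge() is normally reached through the mem_cgroup_charge() wrapper; the canonical caller is the anonymous-fault path, which charges a freshly allocated folio before mapping it, much as recent kernels' do_anonymous_page() does. A sketch of that pattern (error handling abbreviated; the label is illustrative):

	struct folio *folio = vma_alloc_zeroed_movable_folio(vma, addr);

	if (!folio)
		goto oom;
	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) {
		folio_put(folio);	/* charge failed against the limit */
		goto oom;
	}
	/* ... map the folio into the page tables ... */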
7099 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7100 * @folio: folio to charge.
7103 * @entry: swap entry for which the folio is allocated
7105 * This function charges a folio allocated for swapin. Please call this before
7106 * adding the folio to the swapcache.
7110 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7127 ret = charge_memcg(folio, memcg, gfp);
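As the kerneldoc says, the charge must land before the folio enters the swap cache, so the swapin readahead path charges first. A sketch of that ordering; add_to_swap_cache() is the mm-internal helper and the label is illustrative:

	/* NULL mm: prefer the memcg recorded for @entry at swapout time. */
	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp, entry))
		goto fail_put;	/* drop the newly allocated folio */
	if (add_to_swap_cache(folio, entry, gfp & GFP_RECLAIM_MASK, &shadow))
		goto fail_put;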
7202 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7208 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7212 * folio memcg or objcg at this point, we have fully
7213 * exclusive access to the folio.
7215 if (folio_memcg_kmem(folio)) {
7216 objcg = __folio_objcg(folio);
7223 memcg = __folio_memcg(folio);
7235 ug->nid = folio_nid(folio);
7241 nr_pages = folio_nr_pages(folio);
7243 if (folio_memcg_kmem(folio)) {
7247 folio->memcg_data = 0;
7255 folio->memcg_data = 0;
7261 void __mem_cgroup_uncharge(struct folio *folio)
7265 /* Don't touch folio->lru of any random page, pre-check: */
7266 if (!folio_memcg(folio))
7270 uncharge_folio(folio, &ug);
7284 struct folio *folio;
7287 list_for_each_entry(folio, page_list, lru)
7288 uncharge_folio(folio, &ug);
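uncharge_folio() only gathers into the uncharge_gather; the totals are flushed once per batch, which is why a list variant exists for bulk frees. A sketch of the batched pattern; free_unref_page_list() is mm-internal and the collection step is elided:

	LIST_HEAD(folios_to_free);

	/* ... move folios whose last reference dropped onto the list ... */
	mem_cgroup_uncharge_list(&folios_to_free);
	free_unref_page_list(&folios_to_free);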
7294 * mem_cgroup_migrate - Charge a folio's replacement.
7295 * @old: Currently circulating folio.
7296 * @new: Replacement folio.
7298 * Charge @new as a replacement folio for @old. @old will
7303 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7317 /* Page cache replacement: new folio already charged? */
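Typical callers are page-cache replacement (e.g. replace_page_cache_folio()) and migration, with both folios locked and @new fully set up to stand in for @old. A minimal sketch:

	/* Both folios are locked; @new->mapping is already set up. */
	mem_cgroup_migrate(old, new);
	/* @old keeps its charge until its last reference is put. */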
7509 * @folio: folio whose memsw charge to transfer
7512 * Transfer the memsw charge of @folio to @entry.
7514 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7520 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7521 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7529 memcg = folio_memcg(folio);
7531 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7541 nr_entries = folio_nr_pages(folio);
7547 VM_BUG_ON_FOLIO(oldid, folio);
7550 folio->memcg_data = 0;
7570 memcg_check_events(memcg, folio_nid(folio));
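This runs on the reclaim side under the conditions asserted above (folio off the LRU, refcount frozen). A sketch of the ordering as in __remove_mapping(): the memory+swap charge moves to the swap entry before the folio is deleted from the swap cache; __delete_from_swap_cache() and put_swap_folio() are mm-internal:

	mem_cgroup_swapout(folio, swap);
	__delete_from_swap_cache(folio, swap, shadow);
	xa_unlock_irq(&mapping->i_pages);
	put_swap_folio(folio, swap);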
7576 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7577 * @folio: folio being added to swap
7580 * Try to charge @folio's memcg for the swap space at @entry.
7584 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7586 unsigned int nr_pages = folio_nr_pages(folio);
7594 memcg = folio_memcg(folio);
7596 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7619 VM_BUG_ON_FOLIO(oldid, folio);
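Swap-entry allocation is where this gets called, through the mem_cgroup_try_charge_swap() wrapper (cf. folio_alloc_swap()); on failure the just-allocated entry is handed back. A sketch:

	/* entry was just allocated from a swap device for this folio */
	if (mem_cgroup_try_charge_swap(folio, entry)) {
		put_swap_folio(folio, entry);
		entry.val = 0;	/* report failure to the caller */
	}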
7664 bool mem_cgroup_swap_full(struct folio *folio)
7668 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7675 memcg = folio_memcg(folio);
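A true return means the folio's memcg is running short of swap, so callers drop the swap copy eagerly rather than keep it cached. A sketch of the post-swapin pattern (the folio must be locked, as the assertion above enforces):

	if (folio_test_swapcache(folio) && mem_cgroup_swap_full(folio))
		folio_free_swap(folio);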