/kernel/linux/linux-5.10/mm/ |
userfaultfd.c | 101 if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL)) in mcopy_atomic_pte()
|
swap_state.c | 531 if (mem_cgroup_charge(page, NULL, gfp_mask)) { in __read_swap_cache_async()
|
memory.c | 926 if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) { in page_copy_prealloc()
memory.c | 2907 if (mem_cgroup_charge(new_page, mm, GFP_KERNEL)) in wp_page_copy()
memory.c | 3425 err = mem_cgroup_charge(page, vma->vm_mm, in do_swap_page()
memory.c | 3675 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) in do_anonymous_page()
memory.c | 4202 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) { in do_cow_fault()
|
khugepaged.c | 1095 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) { in collapse_huge_page()
khugepaged.c | 1700 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) { in collapse_file()
|
filemap.c | 849 error = mem_cgroup_charge(page, current->mm, gfp); in __add_to_page_cache_locked()
|
migrate.c | 2923 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
|
ksm.c | 2594 if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) { in ksm_might_need_to_copy()
|
huge_memory.c | 591 if (mem_cgroup_charge(page, vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
|
shmem.c | 693 error = mem_cgroup_charge(page, charge_mm, gfp); in shmem_add_to_page_cache()
|
memcontrol.c | 6868 * mem_cgroup_charge - charge a newly allocated page to a cgroup
memcontrol.c | 6878 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) in mem_cgroup_charge() function
memcontrol.c | 7059 * Uncharge a page previously charged with mem_cgroup_charge().
memcontrol.c | 7082 * mem_cgroup_charge().
|
/kernel/linux/linux-5.10/include/linux/ |
memcontrol.h | 458 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
memcontrol.h | 1043 static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, in mem_cgroup_charge() function
|
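The linux-5.10 call sites above share one convention: the caller allocates a page first, then charges it to the memcg of the mm_struct that will own the mapping, and frees the page if the charge fails. Below is a minimal sketch of that pattern, modeled loosely on the do_anonymous_page() hit in memory.c; the function name charge_new_anon_page() is illustrative, not a kernel API.

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>

/*
 * Sketch of the 5.10 page-based charging convention (illustrative
 * helper, not a kernel function): allocate first, charge second,
 * free the page if the memcg charge fails.
 */
static vm_fault_t charge_new_anon_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;

	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
	if (!page)
		return VM_FAULT_OOM;

	/* Charge the new page against the memcg owning vma->vm_mm. */
	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) {
		put_page(page);
		return VM_FAULT_OOM;
	}

	/* ... the real fault handlers go on to map the page here ... */
	return 0;
}

The memcontrol.c hits at 7059 and 7082 refer to the matching uncharge side: in 5.10 the charge taken here is normally released when the page is finally freed.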
/kernel/linux/linux-6.6/include/linux/ |
memcontrol.h | 682 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
memcontrol.h | 695 static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, in mem_cgroup_charge() function
memcontrol.h | 713 * Uncharge a folio previously charged with mem_cgroup_charge().
memcontrol.h | 1301 static inline int mem_cgroup_charge(struct folio *folio, in mem_cgroup_charge() function
|
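In the linux-6.6 tree the interface is folio-based: the hit at memcontrol.h line 695 is a static inline wrapper that short-circuits when memcg is disabled, and the callers listed below pass a struct folio rather than a struct page. A hedged sketch of the 6.6 convention follows, again modeled on do_anonymous_page(); the function name charge_new_anon_folio() is illustrative.

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>

/*
 * Sketch of the 6.6 folio-based convention (illustrative helper, not a
 * kernel function): both the allocation and the charge operate on a
 * struct folio, and a failed charge frees the folio.
 */
static vm_fault_t charge_new_anon_folio(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;

	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
	if (!folio)
		return VM_FAULT_OOM;

	/* Charge the folio against the memcg owning vma->vm_mm. */
	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) {
		folio_put(folio);
		return VM_FAULT_OOM;
	}

	/* ... map the folio and finish the fault as memory.c does ... */
	return 0;
}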
/kernel/linux/linux-6.6/mm/ |
migrate_device.c | 599 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
|
userfaultfd.c | 202 if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL)) in mfill_atomic_pte_copy()
|
memory.c | 993 if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) { in page_copy_prealloc()
memory.c | 3110 if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL)) in wp_page_copy()
memory.c | 4165 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) in do_anonymous_page()
memory.c | 4656 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, in do_cow_fault()
|
khugepaged.c | 1070 if (unlikely(mem_cgroup_charge(folio, mm, gfp))) { in alloc_charge_hpage()
|
huge_memory.c | 657 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
|
filemap.c | 857 int error = mem_cgroup_charge(folio, NULL, gfp); in __filemap_add_folio()
|
ksm.c | 2817 mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) { in ksm_might_need_to_copy()
|
shmem.c | 780 error = mem_cgroup_charge(folio, charge_mm, gfp); in shmem_add_to_page_cache()
|
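Several of the 6.6 callers above (migrate_vma_insert_page(), ksm_might_need_to_copy(), do_cow_fault()) still hold a struct page at the charge point and bridge to the folio interface on the fly with page_folio(). A small sketch of that bridging pattern; the wrapper name charge_page_to_mm() is illustrative, not a kernel API.

#include <linux/mm.h>
#include <linux/memcontrol.h>

/*
 * Illustrative wrapper (not a kernel function) showing the page_folio()
 * bridge used by 6.6 callers that still pass around a struct page:
 * page_folio() resolves the page to its containing folio, which is what
 * mem_cgroup_charge() takes after the folio conversion.
 */
static int charge_page_to_mm(struct page *page, struct mm_struct *mm)
{
	return mem_cgroup_charge(page_folio(page), mm, GFP_KERNEL);
}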
/kernel/linux/linux-5.10/kernel/events/ |
uprobes.c | 170 err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL); in __replace_page()
|
/kernel/linux/linux-6.6/kernel/events/ |
uprobes.c | 168 err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL); in __replace_page()
|