/kernel/linux/linux-5.10/mm/
userfaultfd.c
    22  struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,  (in find_dst_vma(), argument)
    32  dst_vma = find_vma(dst_mm, dst_start);  (in find_dst_vma())
    51  static int mcopy_atomic_pte(struct mm_struct *dst_mm,  (in mcopy_atomic_pte(), argument)
   101  if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))  (in mcopy_atomic_pte())
   112  dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);  (in mcopy_atomic_pte())
   126  inc_mm_counter(dst_mm, MM_ANONPAGES);  (in mcopy_atomic_pte())
   130  set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);  (in mcopy_atomic_pte())
   146  static int mfill_zeropage_pte(struct mm_struct *dst_mm,  (in mfill_zeropage_pte(), argument)
   159  dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);  (in mfill_zeropage_pte())
   172  set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);  (in mfill_zeropage_pte())
   207  __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma, unsigned long dst_start, unsigned long src_start, unsigned long len, bool zeropage)  (in __mcopy_atomic_hugetlb(), argument)
   416  mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **page, bool zeropage, bool wp_copy)  (in mfill_atomic_pte(), argument)
   459  __mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool zeropage, bool *mmap_changing, __u64 mode)  (in __mcopy_atomic(), argument)
   628  mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, bool *mmap_changing, __u64 mode)  (in mcopy_atomic(), argument)
   636  mfill_zeropage(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool *mmap_changing)  (in mfill_zeropage(), argument)
   642  mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool enable_wp, bool *mmap_changing)  (in mwriteprotect_range(), argument)
        [additional matches not shown]
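For context on the functions indexed above: in 5.10, mcopy_atomic() and __mcopy_atomic() are the kernel-side implementation of the UFFDIO_COPY ioctl, which copies user-supplied bytes into a freshly allocated page and maps it at the faulting address in the registered range. The following is a minimal userspace sketch of that path, not taken from the kernel tree; it assumes a kernel with userfaultfd enabled and permission to create a userfaultfd (some systems require vm.unprivileged_userfaultfd=1 or CAP_SYS_PTRACE). Build with -pthread.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static size_t page_size;

/* Resolver thread: wait for one missing-page fault, then fill the faulting
 * page with UFFDIO_COPY (served in the 5.10 kernel by mcopy_atomic()). */
static void *resolver(void *arg)
{
	int uffd = *(int *)arg;
	struct uffd_msg msg;
	struct pollfd pfd = { .fd = uffd, .events = POLLIN };

	char *src = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(src, 'A', page_size);

	poll(&pfd, 1, -1);
	if (read(uffd, &msg, sizeof(msg)) != sizeof(msg) ||
	    msg.event != UFFD_EVENT_PAGEFAULT)
		return NULL;

	struct uffdio_copy copy = {
		.dst = msg.arg.pagefault.address & ~(page_size - 1),
		.src = (unsigned long)src,
		.len = page_size,
		.mode = 0,
	};
	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		perror("UFFDIO_COPY");
	return NULL;
}

int main(void)
{
	page_size = sysconf(_SC_PAGESIZE);

	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd == -1) { perror("userfaultfd"); return 1; }

	struct uffdio_api api = { .api = UFFD_API };
	ioctl(uffd, UFFDIO_API, &api);

	char *area = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)area, .len = page_size },
		.mode = UFFDIO_REGISTER_MODE_MISSING,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	pthread_t thr;
	pthread_create(&thr, NULL, resolver, &uffd);

	/* First touch faults into the kernel; the resolver thread installs
	 * the page, and the read below returns 'A'. */
	printf("first byte after fault: %c\n", area[0]);

	pthread_join(thr, NULL);
	return 0;
}

The same userspace sequence is unchanged on 6.6; only the kernel-internal helpers were renamed (mcopy_atomic() became mfill_atomic_copy(), mfill_zeropage() became mfill_atomic_zeropage()), as the linux-6.6 listing further below shows.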
huge_memory.c
  1013  int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,  (in copy_huge_pmd(), argument)
  1027  pgtable = pte_alloc_one(dst_mm);  (in copy_huge_pmd())
  1031  dst_ptl = pmd_lock(dst_mm, dst_pmd);  (in copy_huge_pmd())
  1052  add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);  (in copy_huge_pmd())
  1053  mm_inc_nr_ptes(dst_mm);  (in copy_huge_pmd())
  1054  pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);  (in copy_huge_pmd())
  1057  set_pmd_at(dst_mm, addr, dst_pmd, pmd);  (in copy_huge_pmd())
  1064  pte_free(dst_mm, pgtable);  (in copy_huge_pmd())
  1078  mm_get_huge_zero_page(dst_mm);  (in copy_huge_pmd())
  1095  pte_free(dst_mm, pgtable);  (in copy_huge_pmd())
  1181  copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, struct vm_area_struct *vma)  (in copy_huge_pud(), argument)
        [additional matches not shown]
memory.c
   700  copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,  (in copy_nonpresent_pte(), argument)
   713  /* make sure dst_mm is on swapoff's mmlist. */  (in copy_nonpresent_pte())
   714  if (unlikely(list_empty(&dst_mm->mmlist))) {  (in copy_nonpresent_pte())
   716  if (list_empty(&dst_mm->mmlist))  (in copy_nonpresent_pte())
   717  list_add(&dst_mm->mmlist,  (in copy_nonpresent_pte())
   775  set_pte_at(dst_mm, addr, dst_pte, pte);  (in copy_nonpresent_pte())
   940  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_pte_range(), local)
   954  dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);  (in copy_pte_range())
   982  entry.val = copy_nonpresent_pte(dst_mm, src_mm,  (in copy_pte_range())
  1016  add_mm_rss_vec(dst_mm, rss);  (in copy_pte_range())
  1047  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_pmd_range(), local)
  1084  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_pud_range(), local)
  1121  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_p4d_range(), local)
  1147  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_page_range(), local)
        [additional matches not shown]
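The copy_page_range() / copy_pte_range() call chain indexed above runs at fork() time and duplicates the parent's page tables into the child's dst_mm, write-protecting private anonymous pages so that actual copying is deferred to copy-on-write faults. A small userspace demonstration of the behavior this sets up (an illustration only, not kernel code):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t len = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) { perror("mmap"); return 1; }
	memset(buf, 'P', len);		/* populate the page before forking */

	/* fork() -> dup_mmap() -> copy_page_range() -> copy_p4d/pud/pmd/pte_range():
	 * the child gets its own page tables pointing at the same physical page,
	 * with both mappings write-protected. */
	pid_t pid = fork();
	if (pid == 0) {
		buf[0] = 'C';		/* child's write triggers a copy-on-write fault */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent still sees '%c'\n", buf[0]);	/* prints 'P' */
	return 0;
}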
shmem.c
  2351  static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,  (in shmem_mfill_atomic_pte(), argument)
  2424  gfp & GFP_RECLAIM_MASK, dst_mm);  (in shmem_mfill_atomic_pte())
  2442  dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);  (in shmem_mfill_atomic_pte())
  2461  inc_mm_counter(dst_mm, mm_counter_file(page));  (in shmem_mfill_atomic_pte())
  2463  set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);  (in shmem_mfill_atomic_pte())
  2484  int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,  (in shmem_mcopy_atomic_pte(), argument)
  2491  return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,  (in shmem_mcopy_atomic_pte())
  2495  int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,  (in shmem_mfill_zeropage_pte(), argument)
  2502  return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,  (in shmem_mfill_zeropage_pte())
hugetlb.c
  4715  int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,  (in hugetlb_mcopy_atomic_pte(), argument)
  4794  ptl = huge_pte_lockptr(h, dst_mm, dst_pte);  (in hugetlb_mcopy_atomic_pte())
  4827  set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);  (in hugetlb_mcopy_atomic_pte())
  4831  hugetlb_count_add(pages_per_huge_page(h), dst_mm);  (in hugetlb_mcopy_atomic_pte())
/kernel/linux/linux-6.6/mm/
userfaultfd.c
    23  struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,  (in find_dst_vma(), argument)
    33  dst_vma = find_vma(dst_mm, dst_start);  (in find_dst_vma())
    76  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in mfill_atomic_install_pte(), local)
    94  dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);  (in mfill_atomic_install_pte())
   127  inc_mm_counter(dst_mm, mm_counter(page));  (in mfill_atomic_install_pte())
   129  set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);  (in mfill_atomic_install_pte())
   298  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in mfill_atomic_pte_poison(), local)
   304  dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);  (in mfill_atomic_pte_poison())
   318  set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);  (in mfill_atomic_pte_poison())
   363  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in mfill_atomic_hugetlb(), local)
   567  mfill_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, atomic_t *mmap_changing, uffd_flags_t flags)  (in mfill_atomic(), argument)
   736  mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, atomic_t *mmap_changing, uffd_flags_t flags)  (in mfill_atomic_copy(), argument)
   744  mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start, unsigned long len, atomic_t *mmap_changing)  (in mfill_atomic_zeropage(), argument)
   751  mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start, unsigned long len, atomic_t *mmap_changing, uffd_flags_t flags)  (in mfill_atomic_continue(), argument)
   759  mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start, unsigned long len, atomic_t *mmap_changing, uffd_flags_t flags)  (in mfill_atomic_poison(), argument)
   796  mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, unsigned long len, bool enable_wp, atomic_t *mmap_changing)  (in mwriteprotect_range(), argument)
        [additional matches not shown]
huge_memory.c
  1062  int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,  (in copy_huge_pmd(), argument)
  1076  pgtable = pte_alloc_one(dst_mm);  (in copy_huge_pmd())
  1080  dst_ptl = pmd_lock(dst_mm, dst_pmd);  (in copy_huge_pmd())
  1102  add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);  (in copy_huge_pmd())
  1103  mm_inc_nr_ptes(dst_mm);  (in copy_huge_pmd())
  1104  pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);  (in copy_huge_pmd())
  1107  set_pmd_at(dst_mm, addr, dst_pmd, pmd);  (in copy_huge_pmd())
  1114  pte_free(dst_mm, pgtable);  (in copy_huge_pmd())
  1128  mm_get_huge_zero_page(dst_mm);  (in copy_huge_pmd())
  1139  pte_free(dst_mm, pgtable);  (in copy_huge_pmd())
  1220  copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, struct vm_area_struct *vma)  (in copy_huge_pud(), argument)
        [additional matches not shown]
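copy_huge_pmd() is the PMD-sized counterpart of copy_pte_range(): when the parent's anonymous memory is mapped with transparent huge pages, fork() duplicates it one PMD entry at a time (copy_huge_pud() does the same for PUD-sized mappings on architectures that support them). A hedged userspace illustration of the conditions that steer fork() onto this path, assuming THP is enabled on the system:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t len = 32UL << 20;	/* 32 MiB of anonymous memory */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) { perror("mmap"); return 1; }

	/* Hint the kernel to back this range with transparent huge pages.
	 * If the range ends up PMD-mapped, the fork() below goes through
	 * copy_huge_pmd() instead of copying the 512 PTEs a 2 MiB region
	 * would otherwise need (x86-64 numbers). */
	madvise(buf, len, MADV_HUGEPAGE);
	memset(buf, 0x5a, len);

	pid_t pid = fork();
	if (pid == 0) {
		buf[0] = 0;		/* child write triggers copy-on-write */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent byte: 0x%02x\n", (unsigned char)buf[0]);	/* still 0x5a */
	return 0;
}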
memory.c
   768  copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,  (in copy_nonpresent_pte(), argument)
   782  /* make sure dst_mm is on swapoff's mmlist. */  (in copy_nonpresent_pte())
   783  if (unlikely(list_empty(&dst_mm->mmlist))) {  (in copy_nonpresent_pte())
   785  if (list_empty(&dst_mm->mmlist))  (in copy_nonpresent_pte())
   786  list_add(&dst_mm->mmlist,  (in copy_nonpresent_pte())
   865  set_pte_at(dst_mm, addr, dst_pte,  (in copy_nonpresent_pte())
   871  set_pte_at(dst_mm, addr, dst_pte, pte);  (in copy_nonpresent_pte())
  1007  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_pte_range(), local)
  1030  dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);  (in copy_pte_range())
  1063  ret = copy_nonpresent_pte(dst_mm, src_mm,  (in copy_pte_range())
  1144  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_pmd_range(), local)
  1181  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_pud_range(), local)
  1218  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_p4d_range(), local)
  1276  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in copy_page_range(), local)
        [additional matches not shown]
hugetlb.c
  6291  struct mm_struct *dst_mm = dst_vma->vm_mm;  (in hugetlb_mfill_atomic_pte(), local)
  6307  ptl = huge_pte_lock(h, dst_mm, dst_pte);  (in hugetlb_mfill_atomic_pte())
  6316  set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,  (in hugetlb_mfill_atomic_pte())
  6426  ptl = huge_pte_lock(h, dst_mm, dst_pte);  (in hugetlb_mfill_atomic_pte())
  6468  set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));  (in hugetlb_mfill_atomic_pte())
  6470  hugetlb_count_add(pages_per_huge_page(h), dst_mm);  (in hugetlb_mfill_atomic_pte())
/kernel/linux/linux-5.10/include/linux/
shmem_fs.h
   122  extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
   127  extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
   132  #define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
   134  #define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
userfaultfd_k.h
    37  extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
    40  extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
    44  extern int mwriteprotect_range(struct mm_struct *dst_mm,
huge_mm.h
    11  int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    15  int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
hugetlb.h
   138  int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
   312  static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,  (in hugetlb_mcopy_atomic_pte(), argument)
/kernel/linux/linux-6.6/include/linux/
userfaultfd_k.h
    77  extern ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
    80  extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
    84  extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
    87  extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
    90  extern int mwriteprotect_range(struct mm_struct *dst_mm,
huge_mm.h
    11  int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    15  int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c
   361  struct drm_mm_node *src_mm, *dst_mm;  (in amdgpu_ttm_copy_mem_to_mem(), local)
   381  dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);  (in amdgpu_ttm_copy_mem_to_mem())
   382  dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;  (in amdgpu_ttm_copy_mem_to_mem())
   384  dst_mm = NULL;  (in amdgpu_ttm_copy_mem_to_mem())
   411  r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,  (in amdgpu_ttm_copy_mem_to_mem())
   440  ++dst_mm;  (in amdgpu_ttm_copy_mem_to_mem())
   441  dst_node_size = dst_mm->size << PAGE_SHIFT;  (in amdgpu_ttm_copy_mem_to_mem())
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c
   300  struct amdgpu_res_cursor src_mm, dst_mm;  (in amdgpu_ttm_copy_mem_to_mem(), local)
   310  amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);  (in amdgpu_ttm_copy_mem_to_mem())
   318  cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);  (in amdgpu_ttm_copy_mem_to_mem())
   326  r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,  (in amdgpu_ttm_copy_mem_to_mem())
   340  amdgpu_res_next(&dst_mm, cur_size);  (in amdgpu_ttm_copy_mem_to_mem())
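Both amdgpu_ttm_copy_mem_to_mem() variants indexed here (the 5.10 one walks an array of drm_mm_node chunks by hand via ++dst_mm, the 6.6 one wraps the same idea in amdgpu_res_cursor / amdgpu_res_next()) follow one pattern: source and destination each live in physically scattered chunks, and every pass copies only as much as remains contiguous in both, capped at a fixed window (256 MiB in the driver). The sketch below is not amdgpu code; struct chunk, cursor_first() and cursor_next() are invented stand-ins, used only to show the chunked-cursor copy pattern in self-contained C:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins: a buffer made of scattered chunks, walked with a cursor. */
struct chunk { uint8_t *base; size_t size; };

struct cursor {
	const struct chunk *chunks;
	size_t nchunks;
	size_t idx;		/* current chunk */
	size_t off;		/* offset inside the current chunk */
	size_t remaining;	/* bytes left in the current chunk */
};

static void cursor_first(struct cursor *c, const struct chunk *chunks, size_t n)
{
	c->chunks = chunks;
	c->nchunks = n;
	c->idx = 0;
	c->off = 0;
	c->remaining = chunks[0].size;
}

/* Advance by n bytes, hopping to the next chunk when the current one is used
 * up; this mirrors amdgpu_res_next() (6.6) or the manual ++dst_mm plus
 * dst_node_size reload (5.10). */
static void cursor_next(struct cursor *c, size_t n)
{
	c->off += n;
	c->remaining -= n;
	if (c->remaining == 0 && c->idx + 1 < c->nchunks) {
		c->idx++;
		c->off = 0;
		c->remaining = c->chunks[c->idx].size;
	}
}

static size_t min3(size_t a, size_t b, size_t c)
{
	size_t m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	static uint8_t s0[100], s1[60], d0[40], d1[120];
	const struct chunk src_chunks[] = { { s0, sizeof(s0) }, { s1, sizeof(s1) } };
	const struct chunk dst_chunks[] = { { d0, sizeof(d0) }, { d1, sizeof(d1) } };
	struct cursor src, dst;
	size_t total = 160;	/* bytes to move overall */
	size_t cap = 64;	/* plays the role of the 256 MiB per-pass limit */

	memset(s0, 'x', sizeof(s0));
	memset(s1, 'y', sizeof(s1));

	cursor_first(&src, src_chunks, 2);
	cursor_first(&dst, dst_chunks, 2);
	while (total) {
		/* analogous to: cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20); */
		size_t cur = min3(src.remaining, dst.remaining, cap);
		if (cur > total)
			cur = total;
		memcpy(dst.chunks[dst.idx].base + dst.off,
		       src.chunks[src.idx].base + src.off, cur);
		printf("copied %zu bytes (src chunk %zu, dst chunk %zu)\n",
		       cur, src.idx, dst.idx);
		total -= cur;
		cursor_next(&src, cur);
		cursor_next(&dst, cur);
	}
	return 0;
}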