Lines matching defs:src_vma (mm/memory.c, the fork-time page-table copying path)

770 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
832 BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
857 VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
858 if (try_restore_exclusive_pte(src_pte, src_vma, addr))
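The matches at 770-858 fall inside copy_nonpresent_pte(), which copies PTEs that hold swap, migration, device-private, or device-exclusive entries rather than normal present pages. For orientation, a condensed paraphrase of the two branches matched above, based on mm/memory.c in recent kernels (not verbatim; details vary by version):

    /* Condensed paraphrase of two copy_nonpresent_pte() branches. */
    if (is_device_private_entry(entry)) {
            page = pfn_swap_entry_to_page(entry);
            get_page(page);
            rss[mm_counter(page)]++;
            /* Cannot fail: device-private pages cannot be pinned. */
            BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));
    } else if (is_device_exclusive_entry(entry)) {
            /* Device-exclusive entries occur only in CoW mappings. */
            VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
            /* Restore the original present pte; -ENOENT tells the
             * caller to retry this entry as a normal present pte. */
            if (try_restore_exclusive_pte(src_pte, src_vma, addr))
                    return -EBUSY;
            return -ENOENT;
    }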
888 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
904 copy_user_highpage(&new_folio->page, page, addr, src_vma);
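copy_present_page() (888-904) is the fallback for an anonymous page that cannot simply be shared with the child, typically because the parent may have pinned it: the data is copied into a pre-allocated folio that is then mapped in the child. A condensed sketch, assuming the folio-based code of recent kernels:

    /* Condensed sketch of copy_present_page(); uffd-wp handling omitted. */
    new_folio = *prealloc;
    if (!new_folio)
            return -EAGAIN;         /* caller preallocates and retries */
    *prealloc = NULL;
    copy_user_highpage(&new_folio->page, page, addr, src_vma);
    __folio_mark_uptodate(new_folio);
    folio_add_new_anon_rmap(new_folio, dst_vma, addr);
    folio_add_lru_vma(new_folio, dst_vma);
    rss[MM_ANONPAGES]++;
    /* Insert the private copy into the child's page table. */
    pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
    pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
    set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
    return 0;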
925 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
929 struct mm_struct *src_mm = src_vma->vm_mm;
930 unsigned long vm_flags = src_vma->vm_flags;
935 page = vm_normal_page(src_vma, addr, pte);
946 if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
949 return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
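copy_present_pte() (925-949) handles the common present-page case. For anonymous pages it tries to share the page by duplicating the rmap; if page_try_dup_anon_rmap() fails, the page may be pinned and the child gets its own copy via copy_present_page(). Otherwise, a CoW mapping write-protects the PTE on both sides so the first write faults. A condensed sketch of that core logic:

    /* Condensed sketch of copy_present_pte()'s core. */
    page = vm_normal_page(src_vma, addr, pte);
    if (page && PageAnon(page)) {
            get_page(page);
            if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
                    /* Page may be pinned: copy it for the child. */
                    put_page(page);
                    return copy_present_page(dst_vma, src_vma, dst_pte,
                                             src_pte, addr, rss,
                                             prealloc, page);
            }
            rss[MM_ANONPAGES]++;
    }
    /* CoW: write-protect in both parent and child. */
    if (is_cow_mapping(vm_flags) && pte_write(pte)) {
            ptep_set_wrprotect(src_mm, addr, src_pte);
            pte = pte_wrprotect(pte);
    }
    if (vm_flags & VM_SHARED)
            pte = pte_mkclean(pte);  /* shared: start clean in child */
    set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
    return 0;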
1003 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1008 struct mm_struct *src_mm = src_vma->vm_mm;
1065 dst_vma, src_vma,
1084 ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
1121 prealloc = page_copy_prealloc(src_mm, src_vma, addr);
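copy_pte_range() (1003-1121) copies one PTE table's worth of entries under the page-table locks. The match at 1121 is the retry pattern: a needed folio cannot be allocated while the locks are held, so -EAGAIN breaks out of the loop, page_copy_prealloc() runs unlocked, and the walk resumes at the same address. The control flow, heavily condensed:

    /* Condensed control flow of copy_pte_range(); locking and the
     * nonpresent-entry path are elided into comments. */
    again:
            /* map and lock the source and destination pte tables */
            do {
                    ret = copy_present_pte(dst_vma, src_vma, dst_pte,
                                           src_pte, addr, rss, &prealloc);
                    if (unlikely(ret == -EAGAIN))
                            break;  /* need a preallocated folio */
            } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
            /* unlock both tables */
            if (ret == -EAGAIN) {
                    prealloc = page_copy_prealloc(src_mm, src_vma, addr);
                    if (!prealloc)
                            return -ENOMEM;
            }
            if (addr != end)
                    goto again;     /* retake locks, continue the walk */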
1140 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1145 struct mm_struct *src_mm = src_vma->vm_mm;
1158 VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1160 addr, dst_vma, src_vma);
1169 if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1177 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1182 struct mm_struct *src_mm = src_vma->vm_mm;
1195 VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1197 dst_pud, src_pud, addr, src_vma);
1206 if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1214 copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1230 if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
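copy_pmd_range(), copy_pud_range(), and copy_p4d_range() (1140-1230) repeat one walk pattern per page-table level: allocate this level's table in the child, copy a huge mapping at this level in one go if there is one, otherwise descend. The PMD level, lightly condensed from the source (the PUD and P4D levels are analogous):

    /* Condensed paraphrase of copy_pmd_range(). */
    dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
    if (!dst_pmd)
            return -ENOMEM;
    src_pmd = pmd_offset(src_pud, addr);
    do {
            next = pmd_addr_end(addr, end);
            if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)) {
                    /* THP: copy the whole huge entry at once. */
                    VM_BUG_ON_VMA(next - addr != HPAGE_PMD_SIZE, src_vma);
                    err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
                                        addr, dst_vma, src_vma);
                    if (err == -ENOMEM)
                            return -ENOMEM;
                    if (!err)
                            continue;
                    /* err > 0: the huge pmd was split, fall through. */
            }
            if (pmd_none_or_clear_bad(src_pmd))
                    continue;
            if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
                               addr, next))
                    return -ENOMEM;
    } while (dst_pmd++, src_pmd++, addr = next, addr != end);
    return 0;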
1243 vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1254 if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
1257 if (src_vma->anon_vma)
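vma_needs_copy() (1243-1257) is the fast-path filter that lets fork skip page-table copying entirely when a later page fault can rebuild the entries. The function is short enough to quote nearly whole, paraphrased from recent mm/memory.c:

    static bool
    vma_needs_copy(struct vm_area_struct *dst_vma,
                   struct vm_area_struct *src_vma)
    {
            /* uffd-wp protection bits live only in the page tables,
             * so they must be copied even for file-backed vmas. */
            if (userfaultfd_wp(dst_vma))
                    return true;
            /* PFN and mixed mappings cannot be refilled by faulting. */
            if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                    return true;
            /* Anonymous pages exist only in the page tables. */
            if (src_vma->anon_vma)
                    return true;
            /* Everything else can be faulted back in from the page
             * cache, which makes fork much cheaper for large shared
             * or read-only file mappings. */
            return false;
    }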
1270 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1274 unsigned long addr = src_vma->vm_start;
1275 unsigned long end = src_vma->vm_end;
1277 struct mm_struct *src_mm = src_vma->vm_mm;
1282 if (!vma_needs_copy(dst_vma, src_vma))
1285 if (is_vm_hugetlb_page(src_vma))
1286 return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);
1288 if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1293 ret = track_pfn_copy(src_vma);
1304 is_cow = is_cow_mapping(src_vma->vm_flags);
1317 vma_assert_write_locked(src_vma);
1328 if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
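copy_page_range() (1270-1328) ties the levels together: skip VMAs that vma_needs_copy() rejects, hand hugetlb VMAs to copy_hugetlb_page_range(), register PFN-map tracking, and, for CoW mappings only, bracket the walk with mmu-notifier invalidation and the write_protect_seq seqcount (only CoW downgrades the parent's PTE permissions; the matched vma_assert_write_locked() asserts the source VMA is write-locked here). A condensed paraphrase of the body, assuming ~v6.5 era code:

    /* Condensed paraphrase of copy_page_range(). */
    if (!vma_needs_copy(dst_vma, src_vma))
            return 0;
    if (is_vm_hugetlb_page(src_vma))
            return copy_hugetlb_page_range(dst_mm, src_mm,
                                           dst_vma, src_vma);
    if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
            ret = track_pfn_copy(src_vma);
            if (ret)
                    return ret;
    }
    is_cow = is_cow_mapping(src_vma->vm_flags);
    if (is_cow) {
            mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
                                    0, src_mm, addr, end);
            mmu_notifier_invalidate_range_start(&range);
            vma_assert_write_locked(src_vma);
            raw_write_seqcount_begin(&src_mm->write_protect_seq);
    }
    ret = 0;
    dst_pgd = pgd_offset(dst_mm, addr);
    src_pgd = pgd_offset(src_mm, addr);
    do {
            next = pgd_addr_end(addr, end);
            if (pgd_none_or_clear_bad(src_pgd))
                    continue;
            if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd,
                                        src_pgd, addr, next))) {
                    ret = -ENOMEM;
                    break;
            }
    } while (dst_pgd++, src_pgd++, addr = next, addr != end);
    if (is_cow) {
            raw_write_seqcount_end(&src_mm->write_protect_seq);
            mmu_notifier_invalidate_range_end(&range);
    }
    return ret;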