/kernel/linux/linux-6.6/mm/
userfaultfd.c
      31  struct vm_area_struct *dst_vma;    in find_dst_vma() (local)
      33  dst_vma = find_vma(dst_mm, dst_start);    in find_dst_vma()
      34  if (!range_in_vma(dst_vma, dst_start, dst_start + len))    in find_dst_vma()
      42  if (!dst_vma->vm_userfaultfd_ctx.ctx)    in find_dst_vma()
      45  return dst_vma;    in find_dst_vma()
      49  static bool mfill_file_over_size(struct vm_area_struct *dst_vma,    in mfill_file_over_size() (argument)
      55  if (!dst_vma->vm_file)    in mfill_file_over_size()
      58  inode = dst_vma->vm_file->f_inode;    in mfill_file_over_size()
      59  offset = linear_page_index(dst_vma, dst_addr);    in mfill_file_over_size()
      65  * Install PTEs, to map dst_addr (within dst_vma) to page.
      70  mfill_atomic_install_pte(pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, struct page *page, bool newly_allocated, uffd_flags_t flags)    (argument)
     140  mfill_atomic_pte_copy(pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, uffd_flags_t flags, struct folio **foliop)    (argument)
     216  mfill_atomic_pte_zeropage(pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr)    (argument)
     248  mfill_atomic_pte_continue(pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, uffd_flags_t flags)    (argument)
     292  mfill_atomic_pte_poison(pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, uffd_flags_t flags)    (argument)
     355  mfill_atomic_hugetlb(struct vm_area_struct *dst_vma, unsigned long dst_start, unsigned long src_start, unsigned long len, atomic_t *mmap_changing, uffd_flags_t flags)    (argument)
     523  mfill_atomic_pte(pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, uffd_flags_t flags, struct folio **foliop)    (argument)
     574  struct vm_area_struct *dst_vma;    in mfill_atomic() (local)
     767  uffd_wp_range(struct vm_area_struct *dst_vma, unsigned long start, unsigned long len, bool enable_wp)    (argument)
     802  struct vm_area_struct *dst_vma;    in mwriteprotect_range() (local)
    [all...]
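The mfill_* helpers above are the kernel half of the userfaultfd fill ioctls: userspace hands the kernel a source buffer and a destination address, mfill_atomic() resolves the destination VMA via find_dst_vma(), and the per-page helpers install the PTE. For orientation, a minimal userspace sketch of the UFFDIO_COPY call that ends up in mfill_atomic_pte_copy(); the uffd descriptor, its registration over the faulting range, and the fault-message plumbing are assumed to exist already, resolve_with_copy() is a hypothetical helper name, and a 4 KiB page size is assumed for the staging buffer.

    /* Resolve a userfaultfd missing-page fault with UFFDIO_COPY.
     * Assumes `uffd` is registered over the range with
     * UFFDIO_REGISTER_MODE_MISSING and `fault_addr` came from a
     * struct uffd_msg read off that descriptor. */
    #include <linux/userfaultfd.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int resolve_with_copy(int uffd, unsigned long fault_addr,
                                 unsigned long page_size)
    {
        static char src_page[4096];        /* staging buffer; assumes 4 KiB pages */
        struct uffdio_copy copy;

        memset(src_page, 'A', sizeof(src_page));
        copy.dst = fault_addr & ~(page_size - 1);  /* dst must be page aligned */
        copy.src = (unsigned long)src_page;        /* src may be unaligned */
        copy.len = page_size;
        copy.mode = 0;                     /* or UFFDIO_COPY_MODE_WP */
        copy.copy = 0;                     /* out-param: bytes copied */

        /* Kernel side: find_dst_vma(), then mfill_atomic_pte_copy()
         * allocates a page, copies src into it, and installs the PTE. */
        return ioctl(uffd, UFFDIO_COPY, &copy);
    }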
memory.c
     768  copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long addr, int *rss)    (argument)
     769  pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,    in copy_nonpresent_pte()
     772  unsigned long vm_flags = dst_vma->vm_flags;    in copy_nonpresent_pte()
     862  pte_marker marker = copy_pte_marker(entry, dst_vma);    in copy_nonpresent_pte()
     869  if (!userfaultfd_wp(dst_vma))    in copy_nonpresent_pte()
     888  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,    in copy_present_page() (argument)
     906  folio_add_new_anon_rmap(new_folio, dst_vma, addr);    in copy_present_page()
     907  folio_add_lru_vma(new_folio, dst_vma);    in copy_present_page()
     911  pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);    in copy_present_page()
     912  pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);    in copy_present_page()
     913  if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))    in copy_present_page()
     925  copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, struct folio **prealloc)    (argument)
    1003  copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, unsigned long end)    (argument)
    1140  copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, unsigned long end)    (argument)
    1177  copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr, unsigned long end)    (argument)
    1214  copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr, unsigned long end)    (argument)
    1243  vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    (argument)
    1270  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    (argument)
    [all...]
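copy_page_range() and the copy_p4d/pud/pmd/pte_range() helpers behind it are the fork()-time walk that duplicates the parent's page tables level by level; vma_needs_copy() decides whether a VMA needs the walk at all, and for ordinary anonymous pages copy_present_pte() write-protects both sides instead of copying data, deferring the real copy to the first write fault. A small self-contained program showing the resulting copy-on-write behavior from userspace:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        strcpy(p, "parent data");

        pid_t pid = fork();          /* kernel runs copy_page_range() here */
        if (pid == 0) {
            strcpy(p, "child data"); /* write fault gives the child its own page */
            printf("child sees:  %s\n", p);
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        printf("parent sees: %s\n", p);  /* still "parent data" */
        munmap(p, 4096);
        return 0;
    }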
hugetlb.c
    5035  copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    (argument)
    5036  struct vm_area_struct *dst_vma,    in copy_hugetlb_page_range()
    5075  dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);    in copy_hugetlb_page_range()
    5105  if (!userfaultfd_wp(dst_vma))    in copy_hugetlb_page_range()
    5124  if (!userfaultfd_wp(dst_vma))    in copy_hugetlb_page_range()
    5129  pte_to_swp_entry(entry), dst_vma);    in copy_hugetlb_page_range()
    5159  new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);    in copy_hugetlb_page_range()
    5167  addr, dst_vma);    in copy_hugetlb_page_range()
    5180  restore_reserve_on_error(h, dst_vma, addr,    in copy_hugetlb_page_range()
    5186  hugetlb_install_folio(dst_vma, dst_pte, addr,    in copy_hugetlb_page_range()
    5205  if (!userfaultfd_wp(dst_vma))    in copy_hugetlb_page_range()
    6284  hugetlb_mfill_atomic_pte(pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, uffd_flags_t flags, struct folio **foliop)    (argument)
    [all...]
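copy_hugetlb_page_range() is the hugetlb counterpart of the fork()-time walk above, and hugetlb_mfill_atomic_pte() the hugetlb counterpart of the userfaultfd fill path; both operate on mappings like the one sketched below. A hedged example, assuming 2 MiB default huge pages and a pool reserved beforehand (e.g. via /proc/sys/vm/nr_hugepages); the mmap() fails with ENOMEM otherwise.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    #define HPAGE_SIZE (2UL * 1024 * 1024)   /* assumed default huge page size */

    int main(void)
    {
        void *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
        if (p == MAP_FAILED) { perror("mmap(MAP_HUGETLB)"); return 1; }
        ((char *)p)[0] = 1;    /* first touch allocates one huge page */
        munmap(p, HPAGE_SIZE);
        return 0;
    }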
huge_memory.c
    1062  copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    (argument)
    1064  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    in copy_huge_pmd()
    1073  if (!vma_is_anonymous(dst_vma))    in copy_huge_pmd()
    1105  if (!userfaultfd_wp(dst_vma))    in copy_huge_pmd()
    1150  if (!userfaultfd_wp(dst_vma))    in copy_huge_pmd()
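copy_huge_pmd() handles the fork()-time copy when a range is mapped by a single PMD-level transparent huge page; note the vma_is_anonymous(dst_vma) check, since only anonymous THP takes this path. A sketch of creating such a mapping from userspace, assuming THP is enabled ("madvise" or "always" in /sys/kernel/mm/transparent_hugepage/enabled) and that the region happens to be PMD-aligned:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #define LEN (2UL * 1024 * 1024)

    int main(void)
    {
        void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        if (madvise(p, LEN, MADV_HUGEPAGE))   /* request THP for the range */
            perror("madvise(MADV_HUGEPAGE)");
        memset(p, 0xaa, LEN);   /* fault in; may be mapped by one PMD */
        /* A fork() here would reach copy_huge_pmd() for this range. */
        munmap(p, LEN);
        return 0;
    }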
shmem.c
    2568  shmem_mfill_atomic_pte(pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, uffd_flags_t flags, struct folio **foliop)    (argument)
    2569  struct vm_area_struct *dst_vma,    in shmem_mfill_atomic_pte()
    2575  struct inode *inode = file_inode(dst_vma->vm_file);    in shmem_mfill_atomic_pte()
    2579  pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);    in shmem_mfill_atomic_pte()
    2658  gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);    in shmem_mfill_atomic_pte()
    2662  ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,    in shmem_mfill_atomic_pte()
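shmem_mfill_atomic_pte() allocates or looks up the page in the shmem page cache and then reuses mfill_atomic_install_pte() from userfaultfd.c to map it. Relatedly, mfill_atomic_pte_continue() in the 6.6 listing above backs the UFFDIO_CONTINUE ioctl for minor faults on shmem, where the page-cache page already exists and only the PTE is missing. A hedged sketch of that ioctl (not available in 5.10; assumes registration with UFFDIO_REGISTER_MODE_MINOR and a page cache already populated, e.g. through a second mapping of the same memfd):

    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>

    /* Resolve a minor fault: map the existing page-cache page at fault_addr. */
    static int resolve_with_continue(int uffd, unsigned long fault_addr,
                                     unsigned long page_size)
    {
        struct uffdio_continue cont = {
            .range = { .start = fault_addr & ~(page_size - 1),
                       .len   = page_size },
            .mode  = 0,
        };
        /* Kernel side in 6.6: mfill_atomic_pte_continue() looks the page
         * up in the cache and calls mfill_atomic_install_pte(). */
        return ioctl(uffd, UFFDIO_CONTINUE, &cont);
    }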
/kernel/linux/linux-5.10/mm/
userfaultfd.c
      30  struct vm_area_struct *dst_vma;    in find_dst_vma() (local)
      32  dst_vma = find_vma(dst_mm, dst_start);    in find_dst_vma()
      33  if (!dst_vma)    in find_dst_vma()
      36  if (dst_start < dst_vma->vm_start ||    in find_dst_vma()
      37  dst_start + len > dst_vma->vm_end)    in find_dst_vma()
      45  if (!dst_vma->vm_userfaultfd_ctx.ctx)    in find_dst_vma()
      48  return dst_vma;    in find_dst_vma()
      51  mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **pagep, bool wp_copy)    (argument)
      53  struct vm_area_struct *dst_vma,    in mcopy_atomic_pte()
      69  page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);    in mcopy_atomic_pte()
     104  _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));    in mcopy_atomic_pte()
     146  mfill_zeropage_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr)    (argument)
     207  __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma, unsigned long dst_start, unsigned long src_start, unsigned long len, bool zeropage)    (argument)
     416  mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **page, bool zeropage, bool wp_copy)    (argument)
     467  struct vm_area_struct *dst_vma;    in __mcopy_atomic() (local)
     645  struct vm_area_struct *dst_vma;    in mwriteprotect_range() (local)
    [all...]
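The 5.10 tree still uses the older mcopy_atomic_* / mfill_zeropage_* naming for what 6.6 calls mfill_atomic_*; the userspace API is unchanged. For context, a trimmed sketch of the setup whose missing-page fault is served by mfill_zeropage_pte() (error handling omitted; the faulting access is assumed to happen on another thread while read() blocks):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void serve_one_zeropage(void *area, unsigned long len,
                                   unsigned long page_size)
    {
        int uffd = syscall(SYS_userfaultfd, O_CLOEXEC);

        struct uffdio_api api = { .api = UFFD_API };
        ioctl(uffd, UFFDIO_API, &api);          /* feature handshake */

        struct uffdio_register reg = {
            .range = { .start = (unsigned long)area, .len = len },
            .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };
        ioctl(uffd, UFFDIO_REGISTER, &reg);     /* track missing faults */

        struct uffd_msg msg;
        if (read(uffd, &msg, sizeof(msg)) == (ssize_t)sizeof(msg) &&
            msg.event == UFFD_EVENT_PAGEFAULT) {
            struct uffdio_zeropage zp = {
                .range = { .start = msg.arg.pagefault.address
                                    & ~(page_size - 1),
                           .len   = page_size },
            };
            ioctl(uffd, UFFDIO_ZEROPAGE, &zp);  /* kernel: mfill_zeropage_pte() */
        }
        close(uffd);
    }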
memory.c
     700  copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long addr, int *rss)    (argument)
     701  pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,    in copy_nonpresent_pte()
     704  unsigned long vm_flags = dst_vma->vm_flags;    in copy_nonpresent_pte()
     773  if (!userfaultfd_wp(dst_vma))    in copy_nonpresent_pte()
     800  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,    in copy_present_page() (argument)
     850  page_add_new_anon_rmap(new_page, dst_vma, addr, false);    in copy_present_page()
     851  lru_cache_add_inactive_or_unevictable(new_page, dst_vma);    in copy_present_page()
     855  pte = mk_pte(new_page, dst_vma->vm_page_prot);    in copy_present_page()
     856  pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);    in copy_present_page()
     857  if (userfaultfd_pte_wp(dst_vma, *src_pte))    in copy_present_page()
     860  set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);    in copy_present_page()
     869  copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, struct page **prealloc)    (argument)
     936  copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, unsigned long end)    (argument)
    1043  copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, unsigned long end)    (argument)
    1080  copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr, unsigned long end)    (argument)
    1117  copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr, unsigned long end)    (argument)
    1141  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    (argument)
    [all...]
shmem.c
    2351  shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, bool zeropage, struct page **pagep)    (argument)
    2353  struct vm_area_struct *dst_vma,    in shmem_mfill_atomic_pte()
    2359  struct inode *inode = file_inode(dst_vma->vm_file);    in shmem_mfill_atomic_pte()
    2363  pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);    in shmem_mfill_atomic_pte()
    2418  offset = linear_page_index(dst_vma, dst_addr);    in shmem_mfill_atomic_pte()
    2428  _dst_pte = mk_pte(page, dst_vma->vm_page_prot);    in shmem_mfill_atomic_pte()
    2429  if (dst_vma->vm_flags & VM_WRITE)    in shmem_mfill_atomic_pte()
    2466  update_mmu_cache(dst_vma, dst_addr, dst_pte);    in shmem_mfill_atomic_pte()
    2484  shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **pagep)    (argument)
    2486  struct vm_area_struct *dst_vma,    in shmem_mcopy_atomic_pte()
    2491  return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,    in shmem_mcopy_atomic_pte()
    2495  shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr)    (argument)
    2497  struct vm_area_struct *dst_vma,    in shmem_mfill_zeropage_pte()
    [all...]
hugetlb.c
    4715  hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **pagep)    (argument)
    4717  struct vm_area_struct *dst_vma,    in hugetlb_mcopy_atomic_pte()
    4725  int vm_shared = dst_vma->vm_flags & VM_SHARED;    in hugetlb_mcopy_atomic_pte()
    4726  struct hstate *h = hstate_vma(dst_vma);    in hugetlb_mcopy_atomic_pte()
    4737  hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {    in hugetlb_mcopy_atomic_pte()
    4742  page = alloc_huge_page(dst_vma, dst_addr, 0);    in hugetlb_mcopy_atomic_pte()
    4771  mapping = dst_vma->vm_file->f_mapping;    in hugetlb_mcopy_atomic_pte()
    4772  idx = vma_hugecache_offset(h, dst_vma, dst_addr);    in hugetlb_mcopy_atomic_pte()
    4819  hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);    in hugetlb_mcopy_atomic_pte()
    4822  _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);    in hugetlb_mcopy_atomic_pte()
    [all...]
huge_memory.c
    1013  copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    (argument)
    1015  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    in copy_huge_pmd()
    1024  if (!vma_is_anonymous(dst_vma))    in copy_huge_pmd()
    1055  if (!userfaultfd_wp(dst_vma))    in copy_huge_pmd()
    1109  if (!userfaultfd_wp(dst_vma))    in copy_huge_pmd()
/kernel/linux/linux-5.10/include/linux/
shmem_fs.h
     123  struct vm_area_struct *dst_vma,
     129  struct vm_area_struct *dst_vma,
     132  #define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
     134  #define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
hugetlb.h
     139  struct vm_area_struct *dst_vma,
     312  hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **pagep)    (argument)
     314  struct vm_area_struct *dst_vma,    in hugetlb_mcopy_atomic_pte()
huge_mm.h
      13  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
mm.h
    1703  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
/kernel/linux/linux-6.6/include/linux/
shmem_fs.h
     180  struct vm_area_struct *dst_vma,
     186  #define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
mm_inline.h
     537  * Computes the pte marker to copy from the given source entry into dst_vma.
     541  copy_pte_marker(swp_entry_t entry, struct vm_area_struct *dst_vma)    (argument)
     542  swp_entry_t entry, struct vm_area_struct *dst_vma)    in copy_pte_marker()
     549  if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))    in copy_pte_marker()
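copy_pte_marker() decides whether the uffd-wp marker carried by a source entry survives into dst_vma at fork(): it is propagated only if dst_vma still has userfaultfd write-protection armed, the same condition behind the repeated !userfaultfd_wp(dst_vma) checks in the copy paths above. That state is armed from userspace roughly as follows (a sketch; assumes `uffd` was registered over the range with UFFDIO_REGISTER_MODE_WP):

    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>

    /* Toggle userfaultfd write-protection over [start, start + len). */
    static int wp_range(int uffd, unsigned long start, unsigned long len,
                        int enable)
    {
        struct uffdio_writeprotect wp = {
            .range = { .start = start, .len = len },
            .mode  = enable ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
        };
        /* Kernel side: mwriteprotect_range() -> uffd_wp_range(). */
        return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
    }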
hugetlb.h
     154  struct vm_area_struct *dst_vma,
     338  copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)    (argument)
     340  struct vm_area_struct *dst_vma,    in copy_hugetlb_page_range()
     425  hugetlb_mfill_atomic_pte(pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, uffd_flags_t flags, struct folio **foliop)    (argument)
     426  struct vm_area_struct *dst_vma,    in hugetlb_mfill_atomic_pte()
userfaultfd_k.h
      73  struct vm_area_struct *dst_vma,
huge_mm.h
      13  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
mm.h
    2377  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);