// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

/*
 * Look up the destination VMA for a userfaultfd fill operation.
 *
 * Returns the VMA covering [dst_start, dst_start + len), or NULL if the
 * range is not fully contained in a single existing VMA or the VMA is
 * not registered with a userfaultfd context.  Caller must hold the
 * mmap_lock (the lookup is only stable under it).
 */
static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!range_in_vma(dst_vma, dst_start, dst_start + len))
		return NULL;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

/* Check if dst_addr is outside of file's size. Must be called with ptl held. */
static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
				 unsigned long dst_addr)
{
	struct inode *inode;
	pgoff_t offset, max_off;

	/* Anonymous mappings have no backing file, hence no file size limit. */
	if (!dst_vma->vm_file)
		return false;

	inode = dst_vma->vm_file->f_inode;
	offset = linear_page_index(dst_vma, dst_addr);
	/* Round up so a partially-covered final page is still in-bounds. */
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	return offset >= max_off;
}

/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page_mapping(page);
	spinlock_t *ptl;
	struct folio *folio;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	/*
	 * A page-cache page mapped into a private VMA must stay read-only
	 * so that a later write fault can COW it.
	 */
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte, dst_vma);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);

	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow to overwrite a pte marker: consider when both MISSING|WP
	 * registered, we firstly wr-protect a none pte which has no page cache
	 * page backing it, then access the page.
	 */
	if (!pte_none_mostly(ptep_get(dst_pte)))
		goto out_unlock;

	folio = page_folio(page);
	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

/*
 * UFFDIO_COPY for one anonymous page: allocate a folio (unless one was
 * prepared earlier and passed back via *foliop), copy PAGE_SIZE bytes
 * from @src_addr into it, and install the PTE.  Returns -ENOENT with
 * *foliop set when the user copy must be retried without mmap_lock held.
 */
static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	if (!*foliop) {
		ret = -ENOMEM;
		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					dst_addr, false);
		if (!folio)
			goto out;

		kaddr = kmap_local_folio(folio, 0);
		/*
		 * The read mmap_lock is held here.  Despite the
		 * mmap_lock being read recursive a deadlock is still
		 * possible if a writer has taken a lock.
		 * For example:
		 *
		 * process A thread 1 takes read lock on own mmap_lock
		 * process A thread 2 calls mmap, blocks taking write lock
		 * process B thread 1 takes page fault, read lock on own mmap lock
		 * process B thread 2 calls mmap, blocks taking write lock
		 * process A thread 1 blocks taking read lock on process B
		 * process B thread 1 blocks taking read lock on process A
		 *
		 * Disable page faults to prevent potential deadlock
		 * and retry the copy outside the mmap_lock.
		 */
		pagefault_disable();
		ret = copy_from_user(kaddr, (const void __user *) src_addr,
				     PAGE_SIZE);
		pagefault_enable();
		kunmap_local(kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*foliop = folio;
			/* don't free the page */
			goto out;
		}

		flush_dcache_folio(folio);
	} else {
		/* Re-use the folio prepared by a previous -ENOENT round. */
		folio = *foliop;
		*foliop = NULL;
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	ret = -ENOMEM;
	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	folio_put(folio);
	goto out;
}

/*
 * UFFDIO_ZEROPAGE for one PTE: map the shared zero page read-only at
 * @dst_addr.  Fails with -EEXIST if any PTE is already present.
 */
static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;
	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;
	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was
	   non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	struct page *page;
	int ret;

	/* Look up the existing page-cache folio; never allocate a new one. */
	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
	/* Our caller expects us to return -EFAULT if we failed to find folio */
	if (ret == -ENOENT)
		ret = -EFAULT;
	if (ret)
		goto out;
	if (!folio) {
		ret = -EFAULT;
		goto out;
	}

	page = folio_file_page(folio, pgoff);
	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, flags);
	if (ret)
		goto out_release;

	/* On success the folio reference is kept for the new mapping. */
	folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}

/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
				   struct vm_area_struct *dst_vma,
				   unsigned long dst_addr,
				   uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;

	/* A poison marker makes later access fault with VM_FAULT_HWPOISON. */
	_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP).
*/ 31562306a36Sopenharmony_ci if (!pte_none(*dst_pte)) 31662306a36Sopenharmony_ci goto out_unlock; 31762306a36Sopenharmony_ci 31862306a36Sopenharmony_ci set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 31962306a36Sopenharmony_ci 32062306a36Sopenharmony_ci /* No need to invalidate - it was non-present before */ 32162306a36Sopenharmony_ci update_mmu_cache(dst_vma, dst_addr, dst_pte); 32262306a36Sopenharmony_ci ret = 0; 32362306a36Sopenharmony_ciout_unlock: 32462306a36Sopenharmony_ci pte_unmap_unlock(dst_pte, ptl); 32562306a36Sopenharmony_ciout: 32662306a36Sopenharmony_ci return ret; 32762306a36Sopenharmony_ci} 32862306a36Sopenharmony_ci 32962306a36Sopenharmony_cistatic pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) 33062306a36Sopenharmony_ci{ 33162306a36Sopenharmony_ci pgd_t *pgd; 33262306a36Sopenharmony_ci p4d_t *p4d; 33362306a36Sopenharmony_ci pud_t *pud; 33462306a36Sopenharmony_ci 33562306a36Sopenharmony_ci pgd = pgd_offset(mm, address); 33662306a36Sopenharmony_ci p4d = p4d_alloc(mm, pgd, address); 33762306a36Sopenharmony_ci if (!p4d) 33862306a36Sopenharmony_ci return NULL; 33962306a36Sopenharmony_ci pud = pud_alloc(mm, p4d, address); 34062306a36Sopenharmony_ci if (!pud) 34162306a36Sopenharmony_ci return NULL; 34262306a36Sopenharmony_ci /* 34362306a36Sopenharmony_ci * Note that we didn't run this because the pmd was 34462306a36Sopenharmony_ci * missing, the *pmd may be already established and in 34562306a36Sopenharmony_ci * turn it may also be a trans_huge_pmd. 34662306a36Sopenharmony_ci */ 34762306a36Sopenharmony_ci return pmd_alloc(mm, pud, address); 34862306a36Sopenharmony_ci} 34962306a36Sopenharmony_ci 35062306a36Sopenharmony_ci#ifdef CONFIG_HUGETLB_PAGE 35162306a36Sopenharmony_ci/* 35262306a36Sopenharmony_ci * mfill_atomic processing for HUGETLB vmas. Note that this routine is 35362306a36Sopenharmony_ci * called with mmap_lock held, it will release mmap_lock before returning. 
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      atomic_t *mmap_changing,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge pages may exist as used
	 * by THP.  Since we can not reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		/* The huge page size must not have changed across the retry. */
		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has a anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		/* CONTINUE may overwrite; all other modes require a none PTE. */
		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			/* Retry the user copy without mmap_lock held. */
			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);
			/*
			 * If memory mappings are changing because of non-cooperative
			 * operation (e.g. mremap) running in parallel, bail out and
			 * request the user to retry later
			 */
			if (mmap_changing && atomic_read(mmap_changing)) {
				err = -EAGAIN;
				break;
			}

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ?
		copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    atomic_t *mmap_changing,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Dispatch a single-PTE mfill operation to the handler matching the
 * uffd mode in @flags and the type of @dst_vma (anon, or shmem
 * shared/private).
 */
static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
		return mfill_atomic_pte_poison(dst_pmd, dst_vma,
					       dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill an hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
						 dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}

/*
 * Fill a range of PTEs for UFFDIO_COPY/ZEROPAGE/CONTINUE/POISON.
 * Returns the number of bytes handled, or a negative errno if nothing
 * was copied.
 */
static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    atomic_t *mmap_changing,
					    uffd_flags_t flags)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return  mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
					     len, mmap_changing, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has a anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If an huge pmd materialized from under us fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			/* Retry the user copy without mmap_lock held. */
			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap_local(kaddr);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_folio(folio);
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ?
copied : err; 73462306a36Sopenharmony_ci} 73562306a36Sopenharmony_ci 73662306a36Sopenharmony_cissize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start, 73762306a36Sopenharmony_ci unsigned long src_start, unsigned long len, 73862306a36Sopenharmony_ci atomic_t *mmap_changing, uffd_flags_t flags) 73962306a36Sopenharmony_ci{ 74062306a36Sopenharmony_ci return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing, 74162306a36Sopenharmony_ci uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY)); 74262306a36Sopenharmony_ci} 74362306a36Sopenharmony_ci 74462306a36Sopenharmony_cissize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start, 74562306a36Sopenharmony_ci unsigned long len, atomic_t *mmap_changing) 74662306a36Sopenharmony_ci{ 74762306a36Sopenharmony_ci return mfill_atomic(dst_mm, start, 0, len, mmap_changing, 74862306a36Sopenharmony_ci uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE)); 74962306a36Sopenharmony_ci} 75062306a36Sopenharmony_ci 75162306a36Sopenharmony_cissize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start, 75262306a36Sopenharmony_ci unsigned long len, atomic_t *mmap_changing, 75362306a36Sopenharmony_ci uffd_flags_t flags) 75462306a36Sopenharmony_ci{ 75562306a36Sopenharmony_ci return mfill_atomic(dst_mm, start, 0, len, mmap_changing, 75662306a36Sopenharmony_ci uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE)); 75762306a36Sopenharmony_ci} 75862306a36Sopenharmony_ci 75962306a36Sopenharmony_cissize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start, 76062306a36Sopenharmony_ci unsigned long len, atomic_t *mmap_changing, 76162306a36Sopenharmony_ci uffd_flags_t flags) 76262306a36Sopenharmony_ci{ 76362306a36Sopenharmony_ci return mfill_atomic(dst_mm, start, 0, len, mmap_changing, 76462306a36Sopenharmony_ci uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON)); 76562306a36Sopenharmony_ci} 76662306a36Sopenharmony_ci 76762306a36Sopenharmony_cilong uffd_wp_range(struct vm_area_struct *dst_vma, 
76862306a36Sopenharmony_ci unsigned long start, unsigned long len, bool enable_wp) 76962306a36Sopenharmony_ci{ 77062306a36Sopenharmony_ci unsigned int mm_cp_flags; 77162306a36Sopenharmony_ci struct mmu_gather tlb; 77262306a36Sopenharmony_ci long ret; 77362306a36Sopenharmony_ci 77462306a36Sopenharmony_ci VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end, 77562306a36Sopenharmony_ci "The address range exceeds VMA boundary.\n"); 77662306a36Sopenharmony_ci if (enable_wp) 77762306a36Sopenharmony_ci mm_cp_flags = MM_CP_UFFD_WP; 77862306a36Sopenharmony_ci else 77962306a36Sopenharmony_ci mm_cp_flags = MM_CP_UFFD_WP_RESOLVE; 78062306a36Sopenharmony_ci 78162306a36Sopenharmony_ci /* 78262306a36Sopenharmony_ci * vma->vm_page_prot already reflects that uffd-wp is enabled for this 78362306a36Sopenharmony_ci * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed 78462306a36Sopenharmony_ci * to be write-protected as default whenever protection changes. 78562306a36Sopenharmony_ci * Try upgrading write permissions manually. 
78662306a36Sopenharmony_ci */ 78762306a36Sopenharmony_ci if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma)) 78862306a36Sopenharmony_ci mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE; 78962306a36Sopenharmony_ci tlb_gather_mmu(&tlb, dst_vma->vm_mm); 79062306a36Sopenharmony_ci ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags); 79162306a36Sopenharmony_ci tlb_finish_mmu(&tlb); 79262306a36Sopenharmony_ci 79362306a36Sopenharmony_ci return ret; 79462306a36Sopenharmony_ci} 79562306a36Sopenharmony_ci 79662306a36Sopenharmony_ciint mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, 79762306a36Sopenharmony_ci unsigned long len, bool enable_wp, 79862306a36Sopenharmony_ci atomic_t *mmap_changing) 79962306a36Sopenharmony_ci{ 80062306a36Sopenharmony_ci unsigned long end = start + len; 80162306a36Sopenharmony_ci unsigned long _start, _end; 80262306a36Sopenharmony_ci struct vm_area_struct *dst_vma; 80362306a36Sopenharmony_ci unsigned long page_mask; 80462306a36Sopenharmony_ci long err; 80562306a36Sopenharmony_ci VMA_ITERATOR(vmi, dst_mm, start); 80662306a36Sopenharmony_ci 80762306a36Sopenharmony_ci /* 80862306a36Sopenharmony_ci * Sanitize the command parameters: 80962306a36Sopenharmony_ci */ 81062306a36Sopenharmony_ci BUG_ON(start & ~PAGE_MASK); 81162306a36Sopenharmony_ci BUG_ON(len & ~PAGE_MASK); 81262306a36Sopenharmony_ci 81362306a36Sopenharmony_ci /* Does the address range wrap, or is the span zero-sized? */ 81462306a36Sopenharmony_ci BUG_ON(start + len <= start); 81562306a36Sopenharmony_ci 81662306a36Sopenharmony_ci mmap_read_lock(dst_mm); 81762306a36Sopenharmony_ci 81862306a36Sopenharmony_ci /* 81962306a36Sopenharmony_ci * If memory mappings are changing because of non-cooperative 82062306a36Sopenharmony_ci * operation (e.g. 
mremap) running in parallel, bail out and 82162306a36Sopenharmony_ci * request the user to retry later 82262306a36Sopenharmony_ci */ 82362306a36Sopenharmony_ci err = -EAGAIN; 82462306a36Sopenharmony_ci if (mmap_changing && atomic_read(mmap_changing)) 82562306a36Sopenharmony_ci goto out_unlock; 82662306a36Sopenharmony_ci 82762306a36Sopenharmony_ci err = -ENOENT; 82862306a36Sopenharmony_ci for_each_vma_range(vmi, dst_vma, end) { 82962306a36Sopenharmony_ci 83062306a36Sopenharmony_ci if (!userfaultfd_wp(dst_vma)) { 83162306a36Sopenharmony_ci err = -ENOENT; 83262306a36Sopenharmony_ci break; 83362306a36Sopenharmony_ci } 83462306a36Sopenharmony_ci 83562306a36Sopenharmony_ci if (is_vm_hugetlb_page(dst_vma)) { 83662306a36Sopenharmony_ci err = -EINVAL; 83762306a36Sopenharmony_ci page_mask = vma_kernel_pagesize(dst_vma) - 1; 83862306a36Sopenharmony_ci if ((start & page_mask) || (len & page_mask)) 83962306a36Sopenharmony_ci break; 84062306a36Sopenharmony_ci } 84162306a36Sopenharmony_ci 84262306a36Sopenharmony_ci _start = max(dst_vma->vm_start, start); 84362306a36Sopenharmony_ci _end = min(dst_vma->vm_end, end); 84462306a36Sopenharmony_ci 84562306a36Sopenharmony_ci err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp); 84662306a36Sopenharmony_ci 84762306a36Sopenharmony_ci /* Return 0 on success, <0 on failures */ 84862306a36Sopenharmony_ci if (err < 0) 84962306a36Sopenharmony_ci break; 85062306a36Sopenharmony_ci err = 0; 85162306a36Sopenharmony_ci } 85262306a36Sopenharmony_ciout_unlock: 85362306a36Sopenharmony_ci mmap_read_unlock(dst_mm); 85462306a36Sopenharmony_ci return err; 85562306a36Sopenharmony_ci} 856