Lines matching references to vmf (identifier search over mm/filemap.c)

1678 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
1680 unsigned int flags = vmf->flags;
1690 release_fault_lock(vmf);
1702 release_fault_lock(vmf);
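
The matches at 1690 and 1702 are the two VM_FAULT_RETRY exits of
__folio_lock_or_retry(): when the folio lock cannot be taken without
sleeping, or the sleep is interrupted by a fatal signal, the mmap or
per-VMA lock is dropped via release_fault_lock() before returning.
A minimal caller-side sketch of that protocol, using the
folio_lock_or_retry() wrapper from linux/pagemap.h; the function name
here is hypothetical:

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Lock a folio during a fault, honouring the retry protocol.
     * The caller must already hold a reference on the folio. */
    static vm_fault_t example_lock_faulting_folio(struct folio *folio,
                                                  struct vm_fault *vmf)
    {
            vm_fault_t ret = folio_lock_or_retry(folio, vmf);

            if (ret) {
                    /* VM_FAULT_RETRY: release_fault_lock(vmf) already
                     * ran inside __folio_lock_or_retry(), so vmf->vma
                     * must not be touched again. */
                    folio_put(folio);
                    return ret;
            }
            /* Folio is locked; the fault can proceed. */
            return 0;
    }
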
3098 * @vmf - the vm_fault for this fault.
3108 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3119 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
3122 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
3123 if (vmf->flags & FAULT_FLAG_KILLABLE) {
3132 mmap_read_unlock(vmf->vma->vm_mm);
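
lock_folio_maybe_drop_mmap() trylocks first; if that fails it refuses
to sleep under FAULT_FLAG_RETRY_NOWAIT (3119), otherwise it pins the
file and drops mmap_lock before blocking (3122), sleeping killably
when FAULT_FLAG_KILLABLE is set (3123) and unlocking by hand on a
fatal signal (3132). A condensed sketch of that control flow:

    /* Returns 1 with the folio locked, 0 when the caller should give
     * up and return VM_FAULT_RETRY (sketch of the listed function). */
    if (folio_trylock(folio))
            return 1;
    if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
            return 0;                       /* may not sleep at all */

    *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); /* pin file, drop lock */
    if (vmf->flags & FAULT_FLAG_KILLABLE) {
            if (__folio_lock_killable(folio)) {
                    /* Fatal signal: drop mmap_lock ourselves, give up. */
                    mmap_read_unlock(vmf->vma->vm_mm);
                    return 0;
            }
    } else {
            __folio_lock(folio);
    }
    return 1;
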
3148 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3150 struct file *file = vmf->vma->vm_file;
3153 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3155 unsigned long vm_flags = vmf->vma->vm_flags;
3161 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3183 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3203 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3204 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
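
Line 3204 is the read-around heuristic: on a page-cache miss the
readahead window is centred on the faulting index rather than started
at it. A sketch of the window setup at the tail of
do_sync_mmap_readahead(), assuming the ractl defined at 3153:

    /* mmap read-around: centre an ra_pages-sized window on the fault. */
    fpin = maybe_unlock_mmap_for_io(vmf, fpin);
    ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
    ra->size = ra->ra_pages;
    ra->async_size = ra->ra_pages / 4;
    ractl._index = ra->start;
    page_cache_ra_order(&ractl, ra, 0);
    return fpin;
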
3217 static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3220 struct file *file = vmf->vma->vm_file;
3222 DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3227 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3235 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
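
The asynchronous side is cheaper: readahead is skipped outright for
VM_RAND_READ mappings or a zero-sized window (3227), and otherwise
only extended when the faulting folio carries the readahead marker.
Sketch of the core, assuming the ractl defined at 3222:

    if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
            return fpin;                    /* readahead disabled */
    if (folio_test_readahead(folio)) {
            /* Hit the PG_readahead marker: kick the next window,
             * dropping mmap_lock around the I/O submission. */
            fpin = maybe_unlock_mmap_for_io(vmf, fpin);
            page_cache_async_ra(&ractl, folio, ra->ra_pages);
    }
    return fpin;
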
3243 * @vmf: struct vm_fault containing details of the fault
3264 vm_fault_t filemap_fault(struct vm_fault *vmf)
3267 struct file *file = vmf->vma->vm_file;
3271 pgoff_t max_idx, index = vmf->pgoff;
3289 if (!(vmf->flags & FAULT_FLAG_TRIED))
3290 fpin = do_async_mmap_readahead(vmf, folio);
3298 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3300 fpin = do_sync_mmap_readahead(vmf);
3312 vmf->gfp_mask);
3321 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3374 vmf->page = folio_file_page(folio, index);
3384 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
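
filemap_fault() is the generic ->fault implementation: on a miss it
counts a major fault (3298), runs synchronous readahead (3300), locks
the folio with the drop-mmap helper (3321), and hands the page back
through vmf->page (3374); whenever fpin is non-NULL on exit it returns
VM_FAULT_RETRY, and the retried fault skips async readahead via
FAULT_FLAG_TRIED (3289). Filesystems consume it through their
vm_operations_struct, as generic_file_vm_ops does in this same file;
a minimal sketch with hypothetical myfs_* names:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Hook the generic fault paths listed above into a filesystem
     * (cf. generic_file_vm_ops / generic_file_mmap). */
    static const struct vm_operations_struct myfs_file_vm_ops = {
            .fault          = filemap_fault,
            .map_pages      = filemap_map_pages,
            .page_mkwrite   = filemap_page_mkwrite,
    };

    static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
            file_accessed(file);
            vma->vm_ops = &myfs_file_vm_ops;
            return 0;
    }
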
3412 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3415 struct mm_struct *mm = vmf->vma->vm_mm;
3418 if (pmd_trans_huge(*vmf->pmd)) {
3424 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3426 vm_fault_t ret = do_set_pmd(vmf, page);
3434 if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
3435 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
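
filemap_map_pmd() has three outcomes, all visible in the matches:
bail out if a huge mapping raced in under us (3418), map a whole
pmd-mappable folio with a single PMD entry (3424/3426), or fall back
to installing the preallocated PTE table so the folio can be mapped
page by page (3434/3435). Condensed sketch:

    if (pmd_trans_huge(*vmf->pmd)) {        /* lost the race: done */
            folio_unlock(folio);
            folio_put(folio);
            return true;
    }
    if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
            struct page *page = folio_file_page(folio, start_pgoff);
            vm_fault_t ret = do_set_pmd(vmf, page);
            if (!ret) {
                    /* Whole folio mapped by one PMD entry. */
                    folio_unlock(folio);
                    return true;
            }
    }
    if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
            pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
    return false;
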
3485 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
3493 pte_t *old_ptep = vmf->pte;
3506 if (!pte_none(vmf->pte[count]))
3513 set_pte_range(vmf, folio, page, count, addr);
3515 if (in_range(vmf->address, addr, count * PAGE_SIZE))
3521 vmf->pte += count;
3527 set_pte_range(vmf, folio, page, count, addr);
3529 if (in_range(vmf->address, addr, count * PAGE_SIZE))
3533 vmf->pte = old_ptep;
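
filemap_map_folio_range() is the large-folio fast path of fault-around:
runs of empty PTEs (3506) are batched into single set_pte_range() calls
(3513/3527), vmf->pte is advanced with each flushed batch (3521) and
restored at the end (3533), and VM_FAULT_NOPAGE is reported once a
batch covers the faulting address itself (3515/3529). A condensed
sketch of the loop, omitting the HWPoison, refcount, and mmap_miss
bookkeeping:

    do {
            if (!pte_none(vmf->pte[count])) {       /* occupied slot */
                    if (count) {                    /* flush the batch */
                            set_pte_range(vmf, folio, page, count, addr);
                            if (in_range(vmf->address, addr,
                                         count * PAGE_SIZE))
                                    ret = VM_FAULT_NOPAGE;
                    }
                    count++;        /* step past the occupied slot */
                    page += count;
                    vmf->pte += count;
                    addr += count * PAGE_SIZE;
                    count = 0;
                    continue;
            }
            count++;
    } while (--nr_pages > 0);

    if (count) {                            /* flush the final batch */
            set_pte_range(vmf, folio, page, count, addr);
            if (in_range(vmf->address, addr, count * PAGE_SIZE))
                    ret = VM_FAULT_NOPAGE;
    }
    vmf->pte = old_ptep;
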
3538 static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
3555 if (!pte_none(ptep_get(vmf->pte)))
3558 if (vmf->address == addr)
3561 set_pte_range(vmf, folio, page, 1, addr);
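
The order-0 variant installs exactly one PTE: bail out if the slot is
already populated (3555), record VM_FAULT_NOPAGE when this PTE is the
faulting address itself (3558), then map the page (3561). Condensed
sketch, again without the poison and mmap_miss bookkeeping:

    if (!pte_none(ptep_get(vmf->pte)))
            return ret;                     /* already populated */
    if (vmf->address == addr)
            ret = VM_FAULT_NOPAGE;          /* covers the fault itself */
    set_pte_range(vmf, folio, page, 1, addr);
    folio_ref_inc(folio);
    return ret;
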
3567 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3570 struct vm_area_struct *vma = vmf->vma;
3585 if (filemap_map_pmd(vmf, folio, start_pgoff)) {
3591 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3592 if (!vmf->pte) {
3601 vmf->pte += xas.xa_index - last_pgoff;
3607 ret |= filemap_map_order0_folio(vmf,
3610 ret |= filemap_map_folio_range(vmf, folio,
3617 pte_unmap_unlock(vmf->pte, vmf->ptl);
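
filemap_map_pages() ties the pieces together: try the PMD shortcut
(3585); otherwise take the PTE lock (3591), which can fail if the page
table vanished underneath us (3592), then walk the uptodate folios out
of the page cache, stepping vmf->pte across any holes between them
(3601), dispatching each folio to the order-0 or range mapper
(3607/3610), and finally dropping the lock (3617). A simplified
skeleton of that loop; the real code also clamps the mapped range to
end_pgoff and handles partially covered folios:

    if (filemap_map_pmd(vmf, folio, start_pgoff)) {
            ret = VM_FAULT_NOPAGE;          /* mapped by a single PMD */
            goto out;
    }

    addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
    if (!vmf->pte) {
            /* PTE table disappeared (e.g. THP collapse); let the
             * normal fault path deal with it. */
            folio_unlock(folio);
            folio_put(folio);
            goto out;
    }
    do {
            /* Step over the hole between the previous folio and this one. */
            addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
            vmf->pte += xas.xa_index - last_pgoff;
            last_pgoff = xas.xa_index;

            if (!folio_test_large(folio))
                    ret |= filemap_map_order0_folio(vmf, folio, addr,
                                                    &mmap_miss);
            else
                    ret |= filemap_map_folio_range(vmf, folio, 0, addr,
                                                   folio_nr_pages(folio),
                                                   &mmap_miss);
            folio_unlock(folio);
            folio_put(folio);
    } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)));
    pte_unmap_unlock(vmf->pte, vmf->ptl);
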
3631 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3633 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3634 struct folio *folio = page_folio(vmf->page);
3638 file_update_time(vmf->vma->vm_file);
3686 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
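
filemap_page_mkwrite() matches twice because mm/filemap.c carries both
the CONFIG_MMU implementation (3631) and, under !CONFIG_MMU, a stub
that simply returns VM_FAULT_SIGBUS (presumably the match at 3686).
The MMU version's job is small: bump the file times (3638), re-lock
the folio, check it was not truncated away, and dirty it under the
lock. Condensed sketch of that body:

    vm_fault_t ret = VM_FAULT_LOCKED;

    sb_start_pagefault(mapping->host->i_sb);
    file_update_time(vmf->vma->vm_file);
    folio_lock(folio);
    if (folio->mapping != mapping) {
            /* Truncated or invalidated while the fault was in flight. */
            folio_unlock(folio);
            ret = VM_FAULT_NOPAGE;
            goto out;
    }
    /* Dirty under the folio lock so a concurrent filesystem freeze
     * is guaranteed to see and write-protect the folio. */
    folio_mark_dirty(folio);
    folio_wait_stable(folio);
    out:
    sb_end_pagefault(mapping->host->i_sb);
    return ret;
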