Lines matching refs:vmf in fs/dax.c (the leading number on each line is the source line in that file)

745 struct address_space *mapping, struct vm_fault *vmf,
769 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
1055 struct vm_fault *vmf)
1058 unsigned long vaddr = vmf->address;
1062 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1065 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1066 trace_dax_load_hole(inode, vmf, ret);
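
Lines 1055-1066 above sit inside dax_load_hole(), which services a read fault over a hole by mapping the shared zero page instead of allocating media. A reconstruction of the whole helper, assuming a v5.x-era fs/dax.c (locals and trace points may differ slightly between versions):

static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	/* Record a zero-page entry for this index in the mapping's XArray. */
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	/* Map the shared zero pfn read-only; a later write faults again. */
	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}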
1260 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1263 struct vm_area_struct *vma = vmf->vma;
1265 XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1267 unsigned long vaddr = vmf->address;
1268 loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1273 bool write = vmf->flags & FAULT_FLAG_WRITE;
1279 trace_dax_pte_fault(inode, vmf, ret);
1290 if (write && !vmf->cow_page)
1305 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1327 if (vmf->cow_page) {
1333 clear_user_highpage(vmf->cow_page, vaddr);
1337 sector, vmf->cow_page, vaddr);
1348 __SetPageUptodate(vmf->cow_page);
1349 ret = finish_fault(vmf);
1368 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1386 trace_dax_insert_mapping(inode, vmf, entry);
1396 ret = dax_load_hole(&xas, mapping, &entry, vmf);
1425 trace_dax_pte_fault_done(inode, vmf, ret);
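
Lines 1327-1349 belong to the private-mapping (COW) branch of dax_iomap_pte_fault(): rather than mapping the DAX pfn directly, the block is copied into vmf->cow_page and the generic finish_fault() installs that page. A sketch of the branch, assuming the tree this listing comes from names the copy helper copy_cow_page_dax() (older trees call it copy_user_dax() and pass an explicit PAGE_SIZE); the error_finish_iomap and finish_iomap labels belong to the surrounding function:

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			/* Nothing on media yet: hand userspace a zeroed page. */
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			/* Copy the persistent-memory block into the COW page. */
			error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
						  sector, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}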
1430 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1433 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1434 unsigned long pmd_addr = vmf->address & PMD_MASK;
1435 struct vm_area_struct *vma = vmf->vma;
1443 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1449 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1458 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1459 if (!pmd_none(*(vmf->pmd))) {
1465 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1468 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1470 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1472 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1478 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
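
Lines 1430-1478 form dax_pmd_load_hole(), the PMD-sized analogue of dax_load_hole(): it maps the huge zero page, falling back to PTEs if none is available or if the PMD was populated concurrently. A reconstruction, assuming a v5.x-era tree (the pgtable deposit is only taken on architectures where arch_needs_pgtable_deposit() is true):

static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		/* Someone else populated the PMD first: retreat to PTEs. */
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}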
1482 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1485 struct vm_area_struct *vma = vmf->vma;
1487 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1488 unsigned long pmd_addr = vmf->address & PMD_MASK;
1489 bool write = vmf->flags & FAULT_FLAG_WRITE;
1509 trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1517 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1518 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1558 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1559 !pmd_devmap(*vmf->pmd)) {
1586 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1603 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1604 result = vmf_insert_pfn_pmd(vmf, pfn, write);
1610 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
1636 split_huge_pmd(vma, vmf->pmd, vmf->address);
1640 trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
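
The colour check at 1517-1518 rejects a PMD mapping when the file offset and the virtual address disagree about their position inside a PMD-sized region. PG_PMD_COLOUR is defined near the top of fs/dax.c; a sketch of the constant and the check in context:

/* The low pgoff bits that must match the vaddr for a PMD-sized mapping. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

	/*
	 * E.g. with 4 KiB pages and 2 MiB PMDs: a fault at a vaddr that is
	 * 512 KiB into a 2 MiB region can only use a PMD if the faulting
	 * file offset is also 512 KiB into a 2 MiB-aligned extent.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;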
1644 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
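
This second dax_iomap_pmd_fault() at 1644 is the CONFIG_FS_DAX_PMD=n stub: without huge-page support it simply punts every PMD fault back to the PTE path:

#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */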
1653 * @vmf: The description of the fault
1664 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1669 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1671 return dax_iomap_pmd_fault(vmf, pfnp, ops);
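
dax_iomap_fault() at 1653-1671 is the exported entry point filesystems call from their fault handlers; it only dispatches on the requested mapping size. The body is short and has been stable across recent kernels; reconstructed:

vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);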
1680 * @vmf: The description of the fault
1688 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1690 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1691 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1702 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1710 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1713 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1718 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
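
Lines 1680-1718 are dax_insert_pfn_mkwrite(), the second half of the synchronous-fault protocol: once the filesystem has made its allocation durable, the already-present entry is dirtied and the pfn is remapped writable. A sketch assuming a v5.x tree (the get_unlocked_entry()/put_unlocked_entry() signatures and the exact race checks vary between versions):

static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting the entry or so? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	/* Mark the entry dirty so a later fsync/msync writes it back. */
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}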
1724 * @vmf: The description of the fault
1732 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1736 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1740 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1743 return dax_insert_pfn_mkwrite(vmf, pfn, order);
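
Putting the two exports together: a filesystem's fault handler calls dax_iomap_fault() and, for a MAP_SYNC mapping, gets VM_FAULT_NEEDDSYNC back instead of an installed PTE/PMD; it must then call dax_finish_sync_fault(), which (per lines 1736-1743) fsyncs the faulted range and re-inserts the pfn writable. A simplified, hypothetical wrapper, my_dax_huge_fault(), modelled on ext4's ext4_dax_huge_fault(); locking, journalling and sb_{start,end}_pagefault() are elided, and ext4_iomap_ops stands in for the filesystem's iomap provider:

static vm_fault_t my_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		     (vmf->vma->vm_flags & VM_SHARED);
	pfn_t pfn;
	vm_fault_t result;

	/* iomap_errp is optional; pass NULL if the raw errno isn't needed. */
	result = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &ext4_iomap_ops);

	/*
	 * Synchronous (MAP_SYNC) write fault: the allocation must be made
	 * durable before userspace is allowed to write through the mapping.
	 */
	if (write && (result & VM_FAULT_NEEDDSYNC))
		result = dax_finish_sync_fault(vmf, pe_size, pfn);

	return result;
}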