/kernel/linux/linux-6.6/mm/

memory.c - core page-fault handling; every helper below takes a struct vm_fault *vmf:
    do_fault (108, 4734), do_anonymous_page (109, 4103), vmf_pte_changed (110, 4423),
    vmf_orig_pte_uffd_wp (116), pte_unmap_same (2793), __wp_page_copy_user (2814),
    do_page_mkwrite (2932), fault_dirty_shared_page (2965), wp_page_copy (3062),
    finish_mkwrite_fault (3245), wp_pfn_shared (3269), wp_page_reuse (calls at 3323, 3447),
    remove_device_exclusive_entry (3602), pte_marker_clear (3663), do_pte_missing (3683),
    pte_marker_handle_uffd_wp (3695), handle_pte_marker (3707), do_swap_page (3738),
    __do_fault (4237), deposit_prealloc_pte (4294), do_set_pmd (4307, 4372),
    set_pte_range (4386), finish_fault (4446), do_fault_around (4560),
    should_fault_around (4593), do_read_fault (4606), do_cow_fault (4639),
    do_shared_fault (4683), do_numa_page (4795), create_huge_pmd (4912),
    wp_huge_pmd (4923), create_huge_pud (4950), wp_huge_pud (4964),
    handle_pte_fault (5003), __handle_mm_fault (local struct vm_fault vmf, 5090), ...

huge_memory.c - transparent-huge-page (PMD/PUD) fault paths:
    __do_huge_pmd_anonymous_page (646), do_huge_pmd_anonymous_page (774),
    vmf_insert_pfn_pmd (892), vmf_insert_pfn_pud (981), huge_pud_set_accessed (1261),
    huge_pmd_set_accessed (1275), do_huge_pmd_wp_page (1289),
    do_huge_pmd_numa_page (1488), ...

filemap.c - page-cache fault paths:
    __folio_lock_or_retry (1678), lock_folio_maybe_drop_mmap (3108),
    do_sync_mmap_readahead (3148), do_async_mmap_readahead (3217), filemap_fault (3264),
    filemap_map_pmd (3412), filemap_map_folio_range (3485),
    filemap_map_order0_folio (3538), filemap_map_pages (3567),
    filemap_page_mkwrite (3631, 3686), ...
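The filemap.c helpers above are the reusable half of this machinery: a filesystem that backs mmap() with the page cache typically just wires them into its vm_operations_struct. A minimal sketch, assuming a hypothetical filesystem myfs (all three helpers are real exports in the tree indexed above):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical: back mmap() entirely with the generic page-cache paths. */
static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,	/* fills vmf->page from the page cache */
	.map_pages	= filemap_map_pages,	/* fault-around: map cached neighbours */
	.page_mkwrite	= filemap_page_mkwrite,	/* dirty pages on shared writable maps */
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &myfs_file_vm_ops;
	return 0;
}

do_fault_around() in memory.c is what calls .map_pages, so a single hard fault can populate a whole window of already-cached pages without taking further faults.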
/kernel/linux/linux-5.10/mm/

memory.c - core page-fault handling (the 5.10 COW copy helper is cow_user_page, which
    rechecks the PTE under vmf->ptl when arch_faults_on_old_pte() applies):
    cow_user_page (2628), do_page_mkwrite (2743), fault_dirty_shared_page (2777),
    wp_page_copy (2869), finish_mkwrite_fault (3038), wp_pfn_shared (3067),
    wp_page_reuse (calls at 3117, 3208), do_swap_page (3358), do_anonymous_page (3605),
    __do_fault (3744), pte_alloc_one_map (3812), deposit_prealloc_pte (3862),
    do_set_pmd (3875, 3933), alloc_set_pte (3955), finish_fault (4023),
    do_fault_around (4108), do_read_fault (4163), do_cow_fault (4190),
    do_shared_fault (4228), do_fault (4271), do_numa_page (4336),
    create_huge_pmd (4426), wp_huge_pmd (4436), create_huge_pud (4456),
    wp_huge_pud (4469), handle_pte_fault (4504),
    __handle_mm_fault (local struct vm_fault vmf, 4600), ...

huge_memory.c - transparent-huge-page fault paths (5.10 still passes orig_pmd/orig_pud
    explicitly to several handlers):
    __do_huge_pmd_anonymous_page (581), do_huge_pmd_anonymous_page (711),
    vmf_insert_pfn_pmd_prot (832), vmf_insert_pfn_pud_prot (923),
    huge_pud_set_accessed (1228), huge_pmd_set_accessed (1250),
    do_huge_pmd_wp_page (1271), do_huge_pmd_numa_page (1411), ...
/kernel/linux/linux-6.6/drivers/dax/

device.c - device-DAX fault handlers; each translates vmf->pgoff to a physical address
    (dax_pgoff_to_phys) and inserts a raw PFN mapping, with dax_set_mapping (76)
    associating the mapped pages with the file mapping:
    __dev_dax_pte_fault (103), __dev_dax_pmd_fault (136),
    __dev_dax_pud_fault (180, 224), dev_dax_huge_fault (231, takes unsigned int order),
    dev_dax_fault (257), ...
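Stripped of the size cases, each handler above is a bounds check plus one PFN insertion. A PTE-level sketch under assumed names: my_dev_base and MY_DEV_SIZE stand in for the device range that dax_pgoff_to_phys() consults in the real driver.

#include <linux/mm.h>
#include <linux/pfn_t.h>

#define MY_DEV_SIZE	(16UL << 20)	/* hypothetical 16 MiB device window */

static phys_addr_t my_dev_base;		/* hypothetical, set at probe time */

/* Sketch of a __dev_dax_pte_fault()-style handler: translate the file
 * offset to device physical memory and insert a raw PFN mapping. */
static vm_fault_t my_pte_fault(struct vm_fault *vmf)
{
	phys_addr_t off = (phys_addr_t)vmf->pgoff << PAGE_SHIFT;

	if (off + PAGE_SIZE > MY_DEV_SIZE)
		return VM_FAULT_SIGBUS;

	/* PFN_DEV | PFN_MAP: device memory backed by struct pages */
	return vmf_insert_mixed(vmf->vma, vmf->address,
				phys_to_pfn_t(my_dev_base + off,
					      PFN_DEV | PFN_MAP));
}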
/kernel/linux/linux-5.10/include/trace/events/

fs_dax.h - DAX fault tracepoints: an event class with
    TP_PROTO(inode, vmf, max_pgoff, result) whose fast-assign records
    vmf->vma->vm_start, vm_end and vm_flags plus vmf->address, vmf->flags and
    vmf->pgoff, with DEFINE_EVENT stamping out the per-fault-path events, ...
/kernel/linux/linux-6.6/include/trace/events/

fs_dax.h - the same DAX fault tracepoint definitions as the 5.10 copy above; the
    excerpted event class and DEFINE_EVENT lines are identical.
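For comparison, a self-contained single event in the same shape, copying vm_fault fields in TP_fast_assign. Everything here is hypothetical (trace system myfs); note that define_trace.h expects the header's file name to match TRACE_SYSTEM unless TRACE_INCLUDE_FILE is overridden.

/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM myfs

#if !defined(_TRACE_MYFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MYFS_H

#include <linux/mm.h>
#include <linux/tracepoint.h>

TRACE_EVENT(myfs_fault,
	TP_PROTO(struct vm_fault *vmf, int result),
	TP_ARGS(vmf, result),
	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(unsigned long, pgoff)
		__field(unsigned long, vm_start)
		__field(unsigned long, vm_end)
		__field(unsigned int, flags)
		__field(int, result)
	),
	TP_fast_assign(
		__entry->address  = vmf->address;
		__entry->pgoff    = vmf->pgoff;
		__entry->vm_start = vmf->vma->vm_start;
		__entry->vm_end   = vmf->vma->vm_end;
		__entry->flags    = vmf->flags;
		__entry->result   = result;
	),
	TP_printk("address %#lx pgoff %#lx vma %#lx-%#lx flags %#x result %d",
		  __entry->address, __entry->pgoff,
		  __entry->vm_start, __entry->vm_end,
		  __entry->flags, __entry->result)
);

#endif /* _TRACE_MYFS_H */

/* Include once from a .c file with CREATE_TRACE_POINTS defined. */
#include <trace/define_trace.h>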
/kernel/linux/linux-5.10/drivers/dax/

device.c - device-DAX fault handlers, 5.10 variant: the PFN is returned through a
    pfn_t *pfn out-parameter rather than recorded via dax_set_mapping():
    __dev_dax_pte_fault (76), __dev_dax_pmd_fault (106), __dev_dax_pud_fault (147, 188),
    dev_dax_huge_fault (195, takes enum page_entry_size pe_size), dev_dax_fault (254), ...
/kernel/linux/linux-5.10/fs/

dax.c - file-DAX fault paths:
    dax_insert_entry (744), dax_load_hole (1053), dax_iomap_pte_fault (1260),
    dax_pmd_load_hole (1430), dax_iomap_pmd_fault (1482, 1644), dax_iomap_fault (1664),
    dax_insert_pfn_mkwrite (1688), dax_finish_sync_fault (1732), ...
/kernel/linux/linux-6.6/fs/

dax.c - file-DAX fault paths, restructured around struct iomap_iter in 6.6:
    copy_cow_page_dax (830), dax_insert_entry (869), dax_load_hole (1186),
    dax_pmd_load_hole (1202, 1254), dax_fault_cow_page (1603), dax_fault_iter (1642),
    dax_iomap_pte_fault (1697), dax_fault_check_fallback (1774),
    dax_iomap_pmd_fault (1807, 1887), dax_iomap_fault (1907),
    dax_insert_pfn_mkwrite (1929), dax_finish_sync_fault (1973), ...
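From a filesystem's perspective all of this sits behind dax_iomap_fault(). A sketch of the 6.6-style call, assuming a hypothetical myfs_iomap_ops that maps file offsets to device extents:

#include <linux/dax.h>
#include <linux/iomap.h>
#include <linux/mm.h>

extern const struct iomap_ops myfs_iomap_ops;	/* assumed to exist */

static vm_fault_t myfs_dax_fault(struct vm_fault *vmf)
{
	/* order 0 = PTE-sized fault. The pfn_t/errp out-parameters may be
	 * NULL unless the caller implements synchronous faults and needs
	 * dax_finish_sync_fault() afterwards. */
	return dax_iomap_fault(vmf, 0, NULL, NULL, &myfs_iomap_ops);
}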
/kernel/linux/linux-6.6/drivers/gpu/drm/ttm/

ttm_bo_vm.c - TTM buffer-object fault handling; ttm_bo_vm_fault_idle (41) and
    ttm_bo_vm_reserve (116) both honour fault_flag_allow_retry_first(): unless
    FAULT_FLAG_RETRY_NOWAIT is set they drop mmap_lock before sleeping and return
    VM_FAULT_RETRY. Also: ttm_bo_vm_fault_reserved (181), ttm_bo_vm_dummy_page (291),
    ttm_bo_vm_fault (321), ...
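The reserve step above encodes the general fault-retry etiquette: never sleep on a contended resource while mmap_lock is held. A sketch with a hypothetical refcounted object in place of the TTM BO and its dma_resv lock:

#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical refcounted object guarded by a sleeping lock. */
struct my_obj {
	struct kref ref;
	struct mutex lock;
};

static void my_obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_obj, ref));
}

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	struct my_obj *obj = vmf->vma->vm_private_data;

	if (!mutex_trylock(&obj->lock)) {
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* pin obj: the VMA may vanish once
				 * mmap_lock is dropped */
				kref_get(&obj->ref);
				mmap_read_unlock(vmf->vma->vm_mm);
				mutex_lock(&obj->lock);	/* block here */
				mutex_unlock(&obj->lock);
				kref_put(&obj->ref, my_obj_release);
			}
			return VM_FAULT_RETRY;	/* core re-runs the fault */
		}
		/* retries exhausted or forbidden: block with mmap_lock held */
		mutex_lock(&obj->lock);
	}

	/* ... obj is locked: insert PTEs here (vmf_insert_pfn() etc.) ... */
	mutex_unlock(&obj->lock);
	return VM_FAULT_NOPAGE;
}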
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/

ttm_bo_vm.c - TTM buffer-object fault handling, 5.10 variant with a huge-page insert
    path:
    ttm_bo_vm_fault_idle (45), ttm_bo_vm_reserve (128), ttm_bo_vm_insert_huge (178, 237),
    ttm_bo_vm_fault_reserved (266, takes a fault_page_size), ttm_bo_vm_fault (405), ...
/kernel/linux/linux-5.10/fs/ocfs2/

mmap.c - ocfs2_fault (33) wraps filemap_fault() and logs vmf->page/vmf->pgoff;
    ocfs2_page_mkwrite (115) takes vmf->page and the inode from vmf->vma->vm_file and
    hands them to __ocfs2_page_mkwrite (146).
/kernel/linux/linux-6.6/fs/ocfs2/

mmap.c - the same pair in 6.6: ocfs2_fault (31) wraps filemap_fault();
    ocfs2_page_mkwrite (113) hands vmf->page to __ocfs2_page_mkwrite (144).
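This pair is the canonical filesystem shape for .page_mkwrite: freeze protection, lock the page, recheck it against truncation, dirty it, and return VM_FAULT_LOCKED so the core leaves the page locked. A hedged sketch with hypothetical myfs naming (ocfs2 itself additionally takes its cluster locks around __ocfs2_page_mkwrite()):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);	/* hold off fs freezing */
	file_update_time(vmf->vma->vm_file);

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		/* raced with truncate: have the core retry the fault */
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	set_page_dirty(page);	/* page stays locked: VM_FAULT_LOCKED */
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}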
/kernel/linux/linux-6.6/drivers/video/fbdev/core/

fb_defio.c - framebuffer deferred I/O: fb_deferred_io_fault (94) resolves
    vmf->pgoff << PAGE_SHIFT into the framebuffer, sets page->mapping and page->index
    (the latter for page_mkclean()) and returns the page through vmf->page;
    fb_deferred_io_page_mkwrite (197) and fb_deferred_io_mkwrite (208) handle the
    write-protect side, ...
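The fault side is a page-lookup contract: resolve vmf->pgoff into backing memory, take a reference, and hand the page back in vmf->page. A reduced sketch assuming a vmalloc()ed buffer stashed in vm_private_data (fb_defio additionally sets page->mapping/index, which this omits):

#include <linux/mm.h>
#include <linux/vmalloc.h>

#define MY_BUF_SIZE	(4UL << 20)	/* hypothetical 4 MiB buffer */

static vm_fault_t my_buf_fault(struct vm_fault *vmf)
{
	void *buf = vmf->vma->vm_private_data;	/* set at mmap() time */
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *page;

	if (offset >= MY_BUF_SIZE)
		return VM_FAULT_SIGBUS;

	page = vmalloc_to_page(buf + offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);		/* the core drops this reference */
	vmf->page = page;
	return 0;
}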
/kernel/linux/linux-5.10/drivers/gpu/drm/vmwgfx/

vmwgfx_page_dirty.c - dirty tracking for vmwgfx buffer objects:
    vmw_bo_vm_mkwrite (393) saves vmf->flags, clears FAULT_FLAG_ALLOW_RETRY around
    ttm_bo_vm_reserve() and restores the flags, then derives the page offset from
    vmf->pgoff - drm_vma_node_start(); vmw_bo_vm_fault (435),
    vmw_bo_vm_huge_fault (491), ...
/kernel/linux/linux-5.10/drivers/video/fbdev/core/

fb_defio.c - framebuffer deferred I/O, 5.10 variant: fb_deferred_io_fault (40) fills
    vmf->page as above; fb_deferred_io_mkwrite (89), ...
/kernel/linux/linux-5.10/drivers/gpu/drm/

drm_vm.c - legacy DRM fault handlers; the offset is computed as
    vmf->address - vma->vm_start and the resolved page returned via vmf->page:
    drm_vm_fault (115, 188), drm_vm_shm_fault (204), drm_vm_dma_fault (304),
    drm_vm_sg_fault (339), ...
/kernel/linux/linux-6.6/drivers/gpu/drm/

drm_vm.c - the same legacy handlers in 6.6:
    drm_vm_fault (110, 183), drm_vm_shm_fault (199), drm_vm_dma_fault (299),
    drm_vm_sg_fault (334), ...
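One detail worth noting: these legacy handlers index the backing store by vmf->address - vma->vm_start rather than by vmf->pgoff, because a legacy DRM mmap offset is a map-lookup token, not a buffer offset. A sketch of that variant (hypothetical vmalloc()ed store, as in drm_vm_shm_fault()):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static vm_fault_t my_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	void *base = vma->vm_private_data;	/* vmalloc()ed backing store */
	unsigned long offset = vmf->address - vma->vm_start;
	struct page *page = vmalloc_to_page(base + offset);

	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;
	return 0;
}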
/kernel/linux/linux-5.10/arch/x86/entry/vdso/

vma.c - vDSO special-mapping faults: vdso_fault (59) bounds-checks
    vmf->pgoff << PAGE_SHIFT against the image size and returns the page via
    virt_to_page()/get_page(); vvar_fault (166) resolves the symbol offset (including
    the time-namespace page) and installs it with
    vmf_insert_pfn()/vmf_insert_pfn_prot(), ...
/kernel/linux/linux-6.6/arch/x86/entry/vdso/

vma.c - the same pair in 6.6: vdso_fault (62), vvar_fault (128), ...
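vvar_fault() is the template for any special mapping that installs raw PFNs. A one-page sketch with a hypothetical kernel page; the mapping must be installed with VM_PFNMAP (via the flags passed to _install_special_mapping()) for vmf_insert_pfn() to be legal:

#include <linux/mm.h>
#include <linux/mm_types.h>

static struct page *my_shared_page;	/* hypothetical, allocated elsewhere */

static vm_fault_t my_special_fault(const struct vm_special_mapping *sm,
				   struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	if (vmf->pgoff)				/* single-page mapping */
		return VM_FAULT_SIGBUS;

	return vmf_insert_pfn(vma, vmf->address,
			      page_to_pfn(my_shared_page));
}

static const struct vm_special_mapping my_mapping = {
	.name	= "[my_shared]",
	.fault	= my_special_fault,
};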
/kernel/linux/linux-5.10/include/linux/

huge_mm.h - THP fault entry points and the PMD/PUD PFN-insertion API:
    do_huge_pmd_anonymous_page (10), huge_pmd_set_accessed (14),
    huge_pud_set_accessed (20; empty inline at 22 without THP-PUD support),
    do_huge_pmd_wp_page (27), vmf_insert_pfn_pmd_prot (41) with the
    vmf_insert_pfn_pmd() wrapper (55) passing the VMA's vm_page_prot,
    vmf_insert_pfn_pud (74), do_huge_pmd_numa_page (447), ...
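These declarations are the driver-facing side: a vm_ops->huge_fault handler can map a whole PMD in one go with vmf_insert_pfn_pmd(). A 5.10-style sketch (hypothetical device PFNs; a real handler also checks pgoff alignment against the hardware):

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

#define MY_DEV_BASE_PFN	0x100000UL	/* hypothetical device base PFN */

static pfn_t my_dev_pfn(pgoff_t pgoff)
{
	return phys_to_pfn_t(PFN_PHYS(MY_DEV_BASE_PFN + pgoff),
			     PFN_DEV | PFN_MAP);
}

/* 5.10 signature: the second argument carries the requested entry size. */
static vm_fault_t my_huge_fault(struct vm_fault *vmf,
				enum page_entry_size pe_size)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;

	if (pe_size != PE_SIZE_PMD)
		return VM_FAULT_FALLBACK;

	/* the whole 2 MiB range must fit inside the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
	    pmd_addr + PMD_SIZE > vmf->vma->vm_end)
		return VM_FAULT_FALLBACK;

	return vmf_insert_pfn_pmd(vmf, my_dev_pfn(vmf->pgoff),
				  vmf->flags & FAULT_FLAG_WRITE);
}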
/kernel/linux/linux-6.6/drivers/gpu/drm/vmwgfx/

vmwgfx_page_dirty.c - dirty tracking in 6.6, with the same save/clear/restore of
    FAULT_FLAG_ALLOW_RETRY around ttm_bo_vm_reserve():
    vmw_bo_vm_mkwrite (375), vmw_bo_vm_fault (416), ...
/kernel/linux/linux-5.10/drivers/xen/

privcmd-buf.c - privcmd_buf_vma_fault (117) only dev_dbg()-logs the faulting VMA
    range, vmf->pgoff and vmf->address.
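This is the degenerate end of the spectrum: when every page is inserted at mmap() time, the fault handler exists only to log and refuse anything that still reaches it. A sketch of that pattern (the SIGBUS return is my assumption of the usual convention; the indexed snippet shows only the dev_dbg()):

#include <linux/mm.h>
#include <linux/printk.h>

static vm_fault_t my_prepopulated_fault(struct vm_fault *vmf)
{
	pr_debug("fault: vma %#lx-%#lx, pgoff %#lx, addr %#lx\n",
		 vmf->vma->vm_start, vmf->vma->vm_end,
		 vmf->pgoff, vmf->address);

	return VM_FAULT_SIGBUS;	/* nothing should fault here */
}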