Lines Matching refs:vma
217 * per vma. In ELF, the number of sections is represented in an unsigned short.
303 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
321 #define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
322 #define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
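
These VM_* bits live in vma->vm_flags. A driver's ->mmap handler commonly ORs such flags in before the mapping goes live; a minimal sketch (the mydev_ name is hypothetical, the flag write is the usual pattern for kernels of this era that still modify vm_flags directly):

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Hypothetical driver ->mmap: keep this mapping out of forked children. */
    static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vma->vm_flags |= VM_DONTCOPY;   /* do not copy this vma on fork */
            return 0;
    }
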
532 * vm_fault is filled by the pagefault handler and passed to the vma's
533 * ->fault function. The vma's ->fault is responsible for returning a bitmask
542 struct vm_area_struct *vma; /* Target VMA */
545 pgoff_t pgoff; /* Logical page offset based on vma */
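
struct vm_fault already carries the target vma and the faulting offset, so a typical ->fault implementation only has to resolve vmf->pgoff to a page, take a reference, and hand it back through vmf->page. A minimal sketch, assuming the driver keeps a pre-allocated page array in vm_private_data (struct my_priv and its fields are made-up names):

    #include <linux/mm.h>

    struct my_priv {
            struct page **pages;
            unsigned long nr_pages;
    };

    static vm_fault_t my_vm_fault(struct vm_fault *vmf)
    {
            struct my_priv *priv = vmf->vma->vm_private_data;

            if (vmf->pgoff >= priv->nr_pages)
                    return VM_FAULT_SIGBUS;

            get_page(priv->pages[vmf->pgoff]);      /* reference handed to the core */
            vmf->page = priv->pages[vmf->pgoff];
            return 0;
    }
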
610 int (*access)(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write);
612 /* Called by the /proc/PID/maps code to ask the vma whether it
614 * vma to be dumped unconditionally. */
615 const char *(*name)(struct vm_area_struct *vma);
625 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
629 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
631 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
632 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
633 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
637 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, unsigned long addr);
644 struct page *(*find_special_page)(struct vm_area_struct *vma, unsigned long addr);
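
These callbacks are collected in a vm_operations_struct that an ->mmap handler installs via vma->vm_ops. A sketch wiring up the fault handler from the previous snippet plus a name shown in /proc/PID/maps (all my_* identifiers are hypothetical; ops left unset simply fall back to the defaults):

    #include <linux/mm.h>

    static const char *my_vm_name(struct vm_area_struct *vma)
    {
            return "[mydev]";               /* appears in /proc/PID/maps */
    }

    static const struct vm_operations_struct my_vm_ops = {
            .fault = my_vm_fault,           /* from the sketch above */
            .name  = my_vm_name,
            /* .access, .set_policy, .get_policy left NULL: defaults apply */
    };

    /* in ->mmap:  vma->vm_ops = &my_vm_ops; */
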
647 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
651 memset(vma, 0, sizeof(*vma));
652 vma->vm_mm = mm;
653 vma->vm_ops = &dummy_vm_ops;
654 INIT_LIST_HEAD(&vma->anon_vma_chain);
657 static inline void vma_set_anonymous(struct vm_area_struct *vma)
659 vma->vm_ops = NULL;
662 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
664 return !vma->vm_ops;
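
vma_init() produces a zeroed vma tied to an mm with dummy vm_ops, and vma_set_anonymous() clears vm_ops so that vma_is_anonymous() reports true; core code uses this to build short-lived pseudo-VMAs. A sketch of that pattern, not tied to any particular caller (the helper name is made up):

    #include <linux/mm.h>

    static void init_pseudo_vma(struct mm_struct *mm, struct vm_area_struct *pvma)
    {
            vma_init(pvma, mm);             /* zeroed, vm_mm = mm, dummy vm_ops */
            vma_set_anonymous(pvma);        /* vm_ops = NULL */
            pvma->vm_flags = VM_READ | VM_WRITE;

            /* vma_is_anonymous(pvma) is now true: there is no vm_ops */
    }
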
667 static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
669 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
675 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == VM_STACK_INCOMPLETE_SETUP) {
682 static inline bool vma_is_foreign(struct vm_area_struct *vma)
688 if (current->mm != vma->vm_mm) {
695 static inline bool vma_is_accessible(struct vm_area_struct *vma)
697 return vma->vm_flags & VM_ACCESS_FLAGS;
705 bool vma_is_shmem(struct vm_area_struct *vma);
707 static inline bool vma_is_shmem(struct vm_area_struct *vma)
713 int vma_is_stack_for_current(struct vm_area_struct *vma);
715 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
1003 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
1008 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1010 if (likely(vma->vm_flags & VM_WRITE)) {
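
maybe_mkwrite() shows up when a fault path builds a PTE: the entry is made dirty and then writable, but only if the vma itself carries VM_WRITE. A condensed sketch of that pattern (roughly what the anonymous-fault code does; the helper name is made up):

    #include <linux/mm.h>

    static pte_t build_fault_pte(struct page *page, struct vm_area_struct *vma)
    {
            pte_t entry = mk_pte(page, vma->vm_page_prot);

            /* dirty first, then writable only if the vma allows writes */
            return maybe_mkwrite(pte_mkdirty(entry), vma);
    }
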
1702 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
1703 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd);
1705 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size);
1706 void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size);
1717 int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn);
1718 int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot,
1720 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write);
1731 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags,
1738 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags,
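
handle_mm_fault() is what arch page-fault handlers call once they have located and validated the vma under mmap_lock. A heavily simplified sketch of that flow; real handlers add access-type checks, VM_FAULT_RETRY handling and signal delivery, and the helper name here is made up:

    #include <linux/mm.h>
    #include <linux/ptrace.h>

    static vm_fault_t fault_one_address(struct mm_struct *mm, unsigned long address,
                                        struct pt_regs *regs)
    {
            struct vm_area_struct *vma;
            vm_fault_t fault = VM_FAULT_SIGSEGV;

            mmap_read_lock(mm);
            vma = find_vma(mm, address);
            if (!vma)
                    goto out;
            if (vma->vm_start > address &&
                (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address)))
                    goto out;
            if (!vma_is_accessible(vma))
                    goto out;

            fault = handle_mm_fault(vma, address, FAULT_FLAG_DEFAULT, regs);
    out:
            mmap_read_unlock(mm);
            return fault;
    }
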
1867 extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr,
1886 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end,
1888 extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start,
2041 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
2539 #define vma_interval_tree_foreach(vma, root, start, last) \
2540 for (vma = vma_interval_tree_iter_first(root, start, last); vma; \
2541 vma = vma_interval_tree_iter_next(vma, start, last))
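
vma_interval_tree_foreach() walks a file's reverse-mapping interval tree (mapping->i_mmap), yielding every vma whose file-offset range overlaps [start, last]. A sketch of the usual pattern under the i_mmap lock (the helper name is made up):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static void walk_file_mappers(struct address_space *mapping,
                                  pgoff_t first, pgoff_t last)
    {
            struct vm_area_struct *vma;

            i_mmap_lock_read(mapping);
            vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
                    /* vma maps some page offsets in [first, last] of this file */
            }
            i_mmap_unlock_read(mapping);
    }
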
2559 extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff,
2561 static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff,
2564 return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2601 extern bool vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm);
2669 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2672 extern int expand_downwards(struct vm_area_struct *vma, unsigned long address);
2674 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2676 #define expand_upwards(vma, address) (0)
2688 struct vm_area_struct *vma = find_vma(mm, start_addr);
2690 if (vma && end_addr <= vma->vm_start) {
2691 vma = NULL;
2693 return vma;
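
find_vma_intersection() returns the first vma overlapping [start_addr, end_addr), or NULL if the range is unmapped; the caller must hold mmap_lock at least for reading. A small sketch (the helper name is made up):

    #include <linux/mm.h>

    static bool range_is_unmapped(struct mm_struct *mm,
                                  unsigned long start, unsigned long end)
    {
            bool unmapped;

            mmap_read_lock(mm);
            unmapped = !find_vma_intersection(mm, start, end);
            mmap_read_unlock(mm);
            return unmapped;
    }
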
2696 static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2698 unsigned long vm_start = vma->vm_start;
2700 if (vma->vm_flags & VM_GROWSDOWN) {
2702 if (vm_start > vma->vm_start) {
2709 static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2711 unsigned long vm_end = vma->vm_end;
2713 if (vma->vm_flags & VM_GROWSUP) {
2715 if (vm_end < vma->vm_end) {
2722 static inline unsigned long vma_pages(struct vm_area_struct *vma)
2724 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
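
vma_pages() is simply the mapping size in pages; an ->mmap handler often uses it to reject requests larger than what the device can back. A sketch (the limit and helper name are hypothetical):

    #include <linux/mm.h>

    #define MYDEV_NR_PAGES  16              /* hypothetical device limit */

    static int mydev_check_size(struct vm_area_struct *vma)
    {
            if (vma_pages(vma) > MYDEV_NR_PAGES)
                    return -EINVAL;
            return 0;
    }
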
2730 struct vm_area_struct *vma = find_vma(mm, vm_start);
2732 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) {
2733 vma = NULL;
2736 return vma;
2739 static inline bool range_in_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end)
2741 return (vma && vma->vm_start <= start && end <= vma->vm_end);
2746 void vma_set_page_prot(struct vm_area_struct *vma);
2752 static inline void vma_set_page_prot(struct vm_area_struct *vma)
2754 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
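
vma_set_page_prot() recomputes vma->vm_page_prot from vm_flags (the MMU version also accounts for write-notify), so code that edits vm_flags on a live vma refreshes the protection afterwards, much as mprotect_fixup() does. A sketch, ignoring the PTE updates a real caller would also perform (helper name made up; mmap_lock is assumed held for writing):

    #include <linux/mm.h>

    static void drop_write_permission(struct vm_area_struct *vma)
    {
            vma->vm_flags &= ~VM_WRITE;
            vma_set_page_prot(vma);         /* keep vm_page_prot in sync with vm_flags */
    }
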
2759 unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long start, unsigned long end);
2765 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num);
2766 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num);
2767 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, unsigned long num);
2768 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
2769 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot);
2770 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn);
2771 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn, pgprot_t pgprot);
2772 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn);
2773 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2775 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
2777 int err = vm_insert_page(vma, addr, page);
2788 static inline int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn,
2791 return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
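
remap_pfn_range()/io_remap_pfn_range() are the usual way a driver's ->mmap maps device memory into user space. A common sketch; the physical base address and helper name are made up, and uncached protection is typical for MMIO:

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int mydev_mmap_io(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long size = vma->vm_end - vma->vm_start;
            phys_addr_t base = 0xfe000000;                  /* hypothetical BAR */

            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            return io_remap_pfn_range(vma, vma->vm_start, base >> PAGE_SHIFT,
                                      size, vma->vm_page_prot);
    }
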
2803 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags);
3036 const char *arch_vma_name(struct vm_area_struct *vma);
3118 extern void copy_user_huge_page(struct page *dst, struct page *src, unsigned long addr_hint, struct vm_area_struct *vma,
3125 * @vma: Pointer to the struct vm_area_struct to consider
3133 static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
3135 return vma_is_dax(vma) || (vma->vm_file && (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
3205 * @vma: the vma to operate on
3208 * the vma flags. Return 0 if the check passes, or <0 for errors.
3210 static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
3217 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) {
3228 if (vma->vm_flags & VM_SHARED) {
3229 vma->vm_flags &= ~(VM_MAYWRITE);
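
seal_check_future_write() is intended for ->mmap handlers of files that support memfd-style seals, called before the mapping is finalized. A sketch of such a call site; the seals value would come from the backing inode (as shmem stores it), and the helper name is made up:

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int my_mmap_check_seals(struct file *file, struct vm_area_struct *vma,
                                   int seals)
    {
            int ret = seal_check_future_write(seals, vma);

            if (ret)
                    return ret;     /* -EPERM: F_SEAL_FUTURE_WRITE forbids MAP_SHARED + PROT_WRITE */
            file_accessed(file);
            return 0;
    }
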