Lines matching 'address'; the leading number on each line is the line number in the source file.
131 * support CFI can define this macro to return the actual function address
248 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
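
For context, the comment above documents PAGE_ALIGNED(), which mm.h defines as IS_ALIGNED((unsigned long)(addr), PAGE_SIZE). A minimal sketch of the usual caller-side check; the function name check_user_range() is invented for illustration:

#include <linux/errno.h>
#include <linux/mm.h>

/* Reject ranges that do not start and end on a page boundary. */
static int check_user_range(unsigned long addr, unsigned long len)
{
        if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len))
                return -EINVAL;
        return 0;
}
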
546 unsigned long address; /* Faulting virtual address */
548 * the 'address' */
550 * the 'address'
562 * the 'address'. NULL if the page
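
The fragments above come from the field comments of struct vm_fault: 'address' is the page-aligned faulting virtual address, and the pmd/pud/pte pointers reference the page-table entries matching it. A hedged sketch of the consumer side, a ->fault handler reading vmf->address; the handler name my_fault() is invented and it deliberately provides no page:

#include <linux/mm.h>
#include <linux/printk.h>

static vm_fault_t my_fault(struct vm_fault *vmf)
{
        /* Page-aligned user virtual address that triggered the fault. */
        pr_debug("fault at %#lx\n", vmf->address);
        return VM_FAULT_SIGBUS;
}
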
774 * Determine if an address is within the vmalloc range
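
This comment belongs to is_vmalloc_addr(). A hedged sketch of the classic use, picking the matching free routine for a buffer that may come from either allocator (free_buf() is an invented name; the kernel's kvfree() implements this very pattern):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Choose the right free routine based on where the buffer lives. */
static void free_buf(void *buf)
{
        if (is_vmalloc_addr(buf))
                vfree(buf);
        else
                kfree(buf);
}
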
1051 * page's address_space. Usually, this is the address of a circular list of
1579 static inline void set_page_address(struct page *page, void *address)
1581 page->virtual = address;
1596 #define set_page_address(page, address) \
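
set_page_address() above only stores into page->virtual on configurations that define WANT_PAGE_VIRTUAL; its usual consumer is page_address(). A hedged sketch of the caller side, assuming a possibly-highmem page; the helper name kernel_va_of() is invented:

#include <linux/highmem.h>
#include <linux/mm.h>

static void *kernel_va_of(struct page *page)
{
        /* NULL means a highmem page with no permanent kernel mapping. */
        void *va = page_address(page);

        if (!va)
                va = kmap(page);        /* temporary; caller must kunmap() */
        return va;
}
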
1705 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size);
1706 void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size);
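
zap_vma_ptes() above is the driver-facing helper for tearing down the PTEs of a VMA the driver manages, for example when revoking access to a device buffer. A hedged sketch; revoke_device_mapping() is an invented name, and callers are expected to hold the mmap lock:

#include <linux/mm.h>

static void revoke_device_mapping(struct vm_area_struct *vma)
{
        /* Drop every PTE in the VMA; later accesses fault into ->fault. */
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
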
1714 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1716 int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp);
1717 int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn);
1718 int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys);
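
follow_pte() above looks up the PTE mapping 'address' and, on success, returns with the PTE mapped and its page-table lock held; the caller must release both via pte_unmap_unlock(). A hedged sketch that reads the backing PFN, roughly what follow_pfn() does internally minus the VMA checks; pfn_of_addr() is an invented name:

#include <linux/mm.h>

static int pfn_of_addr(struct mm_struct *mm, unsigned long addr,
                       unsigned long *pfn)
{
        spinlock_t *ptl;
        pte_t *ptep;
        int ret;

        ret = follow_pte(mm, addr, &ptep, &ptl);
        if (ret)
                return ret;             /* nothing mapped at addr */
        *pfn = pte_pfn(*ptep);
        pte_unmap_unlock(ptep, ptl);    /* drop PTL and temporary mapping */
        return 0;
}
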
1731 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs);
1733 extern int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked);
1738 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs)
1745 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked)
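
fixup_user_fault() above faults in a single user address, futex-style: the caller enters with the mmap lock held for read, and *unlocked reports whether the helper had to drop and retake it, invalidating any cached VMA pointers. A hedged sketch; fault_in_writable() is an invented wrapper:

#include <linux/mm.h>

static int fault_in_writable(struct mm_struct *mm, unsigned long addr)
{
        bool unlocked = false;
        int ret;

        mmap_read_lock(mm);
        ret = fixup_user_fault(mm, addr, FAULT_FLAG_WRITE, &unlocked);
        /* If 'unlocked' is true, any VMA looked up earlier is stale. */
        mmap_read_unlock(mm);
        return ret;
}
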
2052 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2057 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2061 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
2073 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2093 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2106 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2168 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2170 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? NULL : p4d_offset(pgd, address);
2173 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
2175 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? NULL : pud_offset(p4d, address);
2178 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2180 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ? NULL : pmd_offset(pud, address);
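
p4d_alloc(), pud_alloc() and pmd_alloc() above each reuse a populated entry and fall back to the __pXd_alloc() slow path otherwise, returning NULL only on allocation failure. Chained together they give the canonical walk-and-allocate descent (the same shape as walk_to_pmd() in mm/memory.c); the function name here is illustrative:

#include <linux/mm.h>

static pmd_t *descend_to_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);
        pud_t *pud;

        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;
        return pmd_alloc(mm, pud, addr);        /* NULL only on -ENOMEM */
}
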
2279 #define pte_offset_map_lock(mm, pmd, address, ptlp) \
2282 pte_t *__pte = pte_offset_map(pmd, address); \
2296 #define pte_alloc_map(mm, pmd, address) (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2298 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \
2299 (pte_alloc(mm, pmd) ? NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
2301 #define pte_alloc_kernel(pmd, address) \
2302 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ? NULL : pte_offset_kernel(pmd, address))
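
pte_alloc_map_lock() above completes that descent: it allocates the PTE page if needed, maps the PTE and takes its page-table lock, and the caller releases both with pte_unmap_unlock(). A hedged sketch that installs a PTE into an empty slot; install_pte() and 'pteval' are illustrative:

#include <linux/errno.h>
#include <linux/mm.h>

static int install_pte(struct mm_struct *mm, pmd_t *pmd,
                       unsigned long addr, pte_t pteval)
{
        spinlock_t *ptl;
        pte_t *pte;

        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
        if (pte_none(*pte))             /* never overwrite a live PTE */
                set_pte_at(mm, addr, pte, pteval);
        pte_unmap_unlock(pte, ptl);
        return 0;
}
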
2669 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2672 extern int expand_downwards(struct vm_area_struct *vma, unsigned long address);
2674 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2676 #define expand_upwards(vma, address) (0)
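
expand_stack() above grows a VM_GROWSDOWN VMA downwards so that it covers 'address'; expand_upwards() is the CONFIG_STACK_GROWSUP counterpart, stubbed to (0) elsewhere. A hedged sketch of the classic fault-path pattern, with the mmap lock held; vma_for_fault() is an invented name:

#include <linux/mm.h>

static struct vm_area_struct *vma_for_fault(struct mm_struct *mm,
                                            unsigned long address)
{
        struct vm_area_struct *vma = find_vma(mm, address);

        if (!vma)
                return NULL;
        if (vma->vm_start <= address)
                return vma;             /* address already inside the VMA */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
        if (expand_stack(vma, address))
                return NULL;            /* stack could not grow that far */
        return vma;
}
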
2803 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags);
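
follow_page() above translates one user address in a VMA to its struct page under the given FOLL_* flags; with FOLL_GET the returned page carries a reference the caller must drop. A hedged sketch (mmap lock held by the caller; addr_backed_by_page() is an invented name, and the return value may be NULL or an ERR_PTR):

#include <linux/err.h>
#include <linux/mm.h>

static bool addr_backed_by_page(struct vm_area_struct *vma,
                                unsigned long addr)
{
        struct page *page = follow_page(vma, addr, FOLL_GET);

        if (IS_ERR_OR_NULL(page))
                return false;
        put_page(page);                 /* drop the FOLL_GET reference */
        return true;
}
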
2899 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data);
2901 extern int apply_to_existing_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data);
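
apply_to_page_range() above walks, allocating page tables as needed, every PTE slot in [address, address + size) and invokes the pte_fn_t callback on each; the _existing_ variant skips allocation and only visits tables that are already there. A hedged sketch that counts present PTEs with the non-allocating variant; both function names are invented:

#include <linux/mm.h>

/* pte_fn_t callback: returning non-zero aborts the walk. */
static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
{
        unsigned long *count = data;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static unsigned long count_present(struct mm_struct *mm,
                                   unsigned long addr, unsigned long size)
{
        unsigned long count = 0;

        apply_to_existing_page_range(mm, addr, size, count_present_pte,
                                     &count);
        return count;
}
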