Lines matching refs:mm in include/linux/mm.h (source line numbers preserved)
213 * per mm struct. Users can overwrite this number by sysctl but there is a
255 * The idea being to have a "virtual" mm in the same way
257 * mm details, and allowing different kinds of memory mappings
438 /* This mask defines which mm->def_flags a process can inherit from its parent */
466 * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
630 * in mm/mempolicy.c will do this automatically.
647 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
652 vma->vm_mm = mm;
684 if (!current->mm) {
688 if (current->mm != vma->vm_mm) {
715 /* flush_tlb_range() takes a vma, not an mm, and can care about flags */
716 #define TLB_FLUSH_VMA(mm, flags) \
718 .vm_mm = (mm), .vm_flags = (flags) \
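If the elided middle line of the macro makes it a compound-literal initializer, usage might look like the sketch below; mm, start and end are assumed to be in scope, and the zero flags value is illustrative:

        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

        /* the flush path only consults vm_mm and vm_flags of the stub VMA */
        flush_tlb_range(&vma, start, end);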
915 /* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
1034 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
1714 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1716 int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp);
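A minimal sketch of the usual follow_pte() calling pattern; mm, addr and pfn are illustrative names, and the caller is assumed to hold the mmap lock:

        pte_t *ptep;
        spinlock_t *ptl;

        /* caller is assumed to hold mmap_read_lock(mm) */
        if (follow_pte(mm, addr, &ptep, &ptl))
                return -EFAULT;                /* nothing mapped at addr */
        pfn = pte_pfn(*ptep);                  /* read while the PTE lock is held */
        pte_unmap_unlock(ptep, ptl);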
1733 extern int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked);
1745 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked)
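fixup_user_fault() is entered with the mmap read lock held; it may drop and retake the lock, which it reports through *unlocked. A hedged sketch, with mm and addr assumed in scope:

        bool unlocked = false;
        int ret;

        mmap_read_lock(mm);
        ret = fixup_user_fault(mm, addr, FAULT_FLAG_WRITE, &unlocked);
        mmap_read_unlock(mm);   /* the lock is held again here even if unlocked was set */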
1771 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags);
1772 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags);
1775 long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked);
1777 long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked);
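Every successful pin from pin_user_pages_remote() must later be undone. A sketch for a single page, with mm and addr assumed in scope and error handling abbreviated:

        struct page *page;
        long npinned;

        mmap_read_lock(mm);
        npinned = pin_user_pages_remote(mm, addr, 1, FOLL_WRITE,
                                        &page, NULL, NULL);
        mmap_read_unlock(mm);
        if (npinned != 1)
                return -EFAULT;
        /* ... use the page contents ... */
        unpin_user_page(page);                 /* undo the pin */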
1793 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
1794 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, struct task_struct *task, bool bypass_rlim);
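account_locked_vm() charges pages against RLIMIT_MEMLOCK and is used in charge/uncharge pairs; a sketch, with npages an illustrative count:

        int ret;

        ret = account_locked_vm(mm, npages, true);    /* charge */
        if (ret)
                return ret;                           /* would exceed RLIMIT_MEMLOCK */
        /* ... pages remain locked/pinned ... */
        account_locked_vm(mm, npages, false);         /* uncharge on teardown */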
1904 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1906 long val = atomic_long_read(&mm->rss_stat.count[member]);
1919 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
1922 void listen_rss_threshold(struct mm_struct *mm);
1925 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1927 long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
1930 listen_rss_threshold(mm);
1933 mm_trace_rss_stat(mm, member, count);
1936 static inline void inc_mm_counter(struct mm_struct *mm, int member)
1938 long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
1941 listen_rss_threshold(mm);
1944 mm_trace_rss_stat(mm, member, count);
1947 static inline void dec_mm_counter(struct mm_struct *mm, int member)
1949 long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
1951 mm_trace_rss_stat(mm, member, count);
1971 static inline unsigned long get_mm_rss(struct mm_struct *mm)
1973 return get_mm_counter(mm, MM_FILEPAGES) + get_mm_counter(mm, MM_ANONPAGES) + get_mm_counter(mm, MM_SHMEMPAGES);
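Illustrative use of the counter helpers above, e.g. after mapping a new anonymous page (rss is an illustrative variable):

        inc_mm_counter(mm, MM_ANONPAGES);       /* charge one anon page */
        rss = get_mm_rss(mm);                   /* file + anon + shmem pages */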
1976 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1978 return max(mm->hiwater_rss, get_mm_rss(mm));
1981 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1983 return max(mm->hiwater_vm, mm->total_vm);
1986 static inline void update_hiwater_rss(struct mm_struct *mm)
1988 unsigned long _rss = get_mm_rss(mm);
1989 if ((mm)->hiwater_rss < _rss) {
1990 (mm)->hiwater_rss = _rss;
1994 static inline void update_hiwater_vm(struct mm_struct *mm)
1996 if (mm->hiwater_vm < mm->total_vm) {
1997 mm->hiwater_vm = mm->total_vm;
2001 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2003 mm->hiwater_rss = get_mm_rss(mm);
2006 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, struct mm_struct *mm)
2008 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
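The high-water helpers are typically combined in a getrusage()-style pattern; maxrss here is an illustrative accumulator:

        unsigned long maxrss = 0;

        update_hiwater_rss(mm);                 /* fold the current RSS into hiwater_rss */
        setmax_mm_hiwater_rss(&maxrss, mm);     /* maxrss = max(maxrss, hiwater_rss) */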
2015 void sync_mm_rss(struct mm_struct *mm);
2017 static inline void sync_mm_rss(struct mm_struct *mm)
2043 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
2044 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
2047 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
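get_locked_pte() returns the PTE mapped and locked, allocating intermediate tables as needed, and pairs with pte_unmap_unlock(). A sketch, with mm and addr assumed in scope:

        spinlock_t *ptl;
        pte_t *pte;

        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
                return -ENOMEM;
        /* ... inspect or install *pte while ptl is held ... */
        pte_unmap_unlock(pte, ptl);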
2052 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2057 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2061 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
2065 static inline void mm_inc_nr_puds(struct mm_struct *mm)
2068 static inline void mm_dec_nr_puds(struct mm_struct *mm)
2073 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2075 static inline void mm_inc_nr_puds(struct mm_struct *mm)
2077 if (mm_pud_folded(mm)) {
2080 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2083 static inline void mm_dec_nr_puds(struct mm_struct *mm)
2085 if (mm_pud_folded(mm)) {
2088 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2093 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2098 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2101 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2106 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2108 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2110 if (mm_pmd_folded(mm)) {
2113 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2116 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2118 if (mm_pmd_folded(mm)) {
2121 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2126 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2128 atomic_long_set(&mm->pgtables_bytes, 0);
2131 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2133 return atomic_long_read(&mm->pgtables_bytes);
2136 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2138 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2141 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2143 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2147 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2150 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2155 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2158 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2163 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
2168 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2170 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? NULL : p4d_offset(pgd, address);
2173 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
2175 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? NULL : pud_offset(p4d, address);
2178 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2180 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ? NULL : pmd_offset(pud, address);
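These three helpers compose into the standard top-down allocate-as-you-walk pattern (compare __get_locked_pte() in mm/memory.c); a sketch that resolves the PMD for addr, returning NULL only on allocation failure:

        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;
        pmd = pmd_alloc(mm, pud, addr);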
2214 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2238 * We use mm->page_table_lock to guard all pagetable pages of the mm.
2240 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2242 return &mm->page_table_lock;
2279 #define pte_offset_map_lock(mm, pmd, address, ptlp) \
2281 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
2294 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
2296 #define pte_alloc_map(mm, pmd, address) (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2298 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \
2299 (pte_alloc(mm, pmd) ? NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
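pte_alloc_map_lock() takes the walk one level further: it allocates the PTE page if needed and returns the entry mapped and locked; every success must be matched by pte_unmap_unlock(). Sketch, assuming a pmd obtained as in the walk above:

        spinlock_t *ptl;
        pte_t *pte;

        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
        /* ... operate on *pte while ptl is held ... */
        pte_unmap_unlock(pte, ptl);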
2312 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2333 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
2337 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2339 return &mm->page_table_lock;
2350 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
2354 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
2356 spinlock_t *ptl = pmd_lockptr(mm, pmd);
2384 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
2386 return &mm->page_table_lock;
2389 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
2391 spinlock_t *ptl = pud_lockptr(mm, pud);
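pmd_lock() and pud_lock() acquire the lock and return it, so the caller releases it with spin_unlock(); a minimal sketch:

        spinlock_t *ptl = pmd_lock(mm, pmd);

        /* *pmd cannot change underneath us here (e.g. THP split/collapse) */
        spin_unlock(ptl);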
2494 /* please see mm/page_alloc.c */
2558 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2591 extern int mm_take_all_locks(struct mm_struct *mm);
2592 extern void mm_drop_all_locks(struct mm_struct *mm);
2594 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2595 extern struct file *get_mm_exe_file(struct mm_struct *mm);
2602 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, const struct vm_special_mapping *spec);
2605 extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, struct page **pages);
2619 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
2634 /* These take the mm semaphore themselves */
2663 /* mm/page-writeback.c */
2680 extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
2681 extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev);
2685 static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, unsigned long start_addr, unsigned long end_addr)
2688 struct vm_area_struct *vma = find_vma(mm, start_addr);
2728 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, unsigned long vm_start, unsigned long vm_end)
2730 struct vm_area_struct *vma = find_vma(mm, vm_start);
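find_vma() returns the first VMA with vm_end > addr, which may lie entirely above addr, so callers must check vm_start themselves; the mmap lock must be held across the lookup and any use of the result. Sketch:

        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        vma = find_vma(mm, addr);
        if (vma && vma->vm_start <= addr) {
                /* addr really falls inside vma */
        }
        mmap_read_unlock(mm);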
2820 #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
2899 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data);
2901 extern int apply_to_existing_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data);
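Both helpers invoke a pte_fn_t callback once per PTE in the range; apply_to_page_range() allocates missing page tables, while the _existing_ variant skips holes instead. A hedged sketch, where touch_pte is a hypothetical callback:

        static int touch_pte(pte_t *pte, unsigned long addr, void *data)
        {
                /* invoked once per PTE; the PTE lock is held for user mms */
                return 0;
        }

        /* later, e.g. inside a driver: */
        err = apply_to_page_range(mm, addr, size, touch_pte, NULL);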
3002 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3004 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3006 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3014 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3020 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
3237 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, unsigned long len_in, const char *name);
3240 static inline int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, unsigned long len_in, const char *name)