13d0407baSopenharmony_ci/* SPDX-License-Identifier: GPL-2.0 */ 23d0407baSopenharmony_ci#ifndef _LINUX_MM_H 33d0407baSopenharmony_ci#define _LINUX_MM_H 43d0407baSopenharmony_ci 53d0407baSopenharmony_ci#include <linux/errno.h> 63d0407baSopenharmony_ci 73d0407baSopenharmony_ci#ifdef __KERNEL__ 83d0407baSopenharmony_ci 93d0407baSopenharmony_ci#include <linux/mmdebug.h> 103d0407baSopenharmony_ci#include <linux/gfp.h> 113d0407baSopenharmony_ci#include <linux/bug.h> 123d0407baSopenharmony_ci#include <linux/list.h> 133d0407baSopenharmony_ci#include <linux/mmzone.h> 143d0407baSopenharmony_ci#include <linux/rbtree.h> 153d0407baSopenharmony_ci#include <linux/atomic.h> 163d0407baSopenharmony_ci#include <linux/debug_locks.h> 173d0407baSopenharmony_ci#include <linux/mm_types.h> 183d0407baSopenharmony_ci#include <linux/mmap_lock.h> 193d0407baSopenharmony_ci#include <linux/range.h> 203d0407baSopenharmony_ci#include <linux/pfn.h> 213d0407baSopenharmony_ci#include <linux/percpu-refcount.h> 223d0407baSopenharmony_ci#include <linux/bit_spinlock.h> 233d0407baSopenharmony_ci#include <linux/shrinker.h> 243d0407baSopenharmony_ci#include <linux/resource.h> 253d0407baSopenharmony_ci#include <linux/page_ext.h> 263d0407baSopenharmony_ci#include <linux/err.h> 273d0407baSopenharmony_ci#include <linux/page-flags.h> 283d0407baSopenharmony_ci#include <linux/page_ref.h> 293d0407baSopenharmony_ci#include <linux/memremap.h> 303d0407baSopenharmony_ci#include <linux/overflow.h> 313d0407baSopenharmony_ci#include <linux/sizes.h> 323d0407baSopenharmony_ci#include <linux/sched.h> 333d0407baSopenharmony_ci#include <linux/pgtable.h> 343d0407baSopenharmony_ci 353d0407baSopenharmony_cistruct mempolicy; 363d0407baSopenharmony_cistruct anon_vma; 373d0407baSopenharmony_cistruct anon_vma_chain; 383d0407baSopenharmony_cistruct file_ra_state; 393d0407baSopenharmony_cistruct user_struct; 403d0407baSopenharmony_cistruct writeback_control; 413d0407baSopenharmony_cistruct bdi_writeback; 423d0407baSopenharmony_cistruct 
pt_regs; 433d0407baSopenharmony_ci 443d0407baSopenharmony_ciextern int sysctl_page_lock_unfairness; 453d0407baSopenharmony_ci 463d0407baSopenharmony_civoid init_mm_internals(void); 473d0407baSopenharmony_ci 483d0407baSopenharmony_ci#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ 493d0407baSopenharmony_ciextern unsigned long max_mapnr; 503d0407baSopenharmony_ci 513d0407baSopenharmony_cistatic inline void set_max_mapnr(unsigned long limit) 523d0407baSopenharmony_ci{ 533d0407baSopenharmony_ci max_mapnr = limit; 543d0407baSopenharmony_ci} 553d0407baSopenharmony_ci#else 563d0407baSopenharmony_cistatic inline void set_max_mapnr(unsigned long limit) 573d0407baSopenharmony_ci{ 583d0407baSopenharmony_ci} 593d0407baSopenharmony_ci#endif 603d0407baSopenharmony_ci 613d0407baSopenharmony_ciextern atomic_long_t _totalram_pages; 623d0407baSopenharmony_cistatic inline unsigned long totalram_pages(void) 633d0407baSopenharmony_ci{ 643d0407baSopenharmony_ci return (unsigned long)atomic_long_read(&_totalram_pages); 653d0407baSopenharmony_ci} 663d0407baSopenharmony_ci 673d0407baSopenharmony_cistatic inline void totalram_pages_inc(void) 683d0407baSopenharmony_ci{ 693d0407baSopenharmony_ci atomic_long_inc(&_totalram_pages); 703d0407baSopenharmony_ci} 713d0407baSopenharmony_ci 723d0407baSopenharmony_cistatic inline void totalram_pages_dec(void) 733d0407baSopenharmony_ci{ 743d0407baSopenharmony_ci atomic_long_dec(&_totalram_pages); 753d0407baSopenharmony_ci} 763d0407baSopenharmony_ci 773d0407baSopenharmony_cistatic inline void totalram_pages_add(long count) 783d0407baSopenharmony_ci{ 793d0407baSopenharmony_ci atomic_long_add(count, &_totalram_pages); 803d0407baSopenharmony_ci} 813d0407baSopenharmony_ci 823d0407baSopenharmony_ciextern void *high_memory; 833d0407baSopenharmony_ciextern int page_cluster; 843d0407baSopenharmony_ci 853d0407baSopenharmony_ci#ifdef CONFIG_SYSCTL 863d0407baSopenharmony_ciextern int sysctl_legacy_va_layout; 873d0407baSopenharmony_ci#else 
883d0407baSopenharmony_ci#define sysctl_legacy_va_layout 0 893d0407baSopenharmony_ci#endif 903d0407baSopenharmony_ci 913d0407baSopenharmony_ci#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS 923d0407baSopenharmony_ciextern const int mmap_rnd_bits_min; 933d0407baSopenharmony_ciextern const int mmap_rnd_bits_max; 943d0407baSopenharmony_ciextern int mmap_rnd_bits __read_mostly; 953d0407baSopenharmony_ci#endif 963d0407baSopenharmony_ci#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS 973d0407baSopenharmony_ciextern const int mmap_rnd_compat_bits_min; 983d0407baSopenharmony_ciextern const int mmap_rnd_compat_bits_max; 993d0407baSopenharmony_ciextern int mmap_rnd_compat_bits __read_mostly; 1003d0407baSopenharmony_ci#endif 1013d0407baSopenharmony_ci 1023d0407baSopenharmony_ci#include <asm/page.h> 1033d0407baSopenharmony_ci#include <asm/processor.h> 1043d0407baSopenharmony_ci 1053d0407baSopenharmony_ci/* 1063d0407baSopenharmony_ci * Architectures that support memory tagging (assigning tags to memory regions, 1073d0407baSopenharmony_ci * embedding these tags into addresses that point to these memory regions, and 1083d0407baSopenharmony_ci * checking that the memory and the pointer tags match on memory accesses) 1093d0407baSopenharmony_ci * redefine this macro to strip tags from pointers. 1103d0407baSopenharmony_ci * It's defined as noop for arcitectures that don't support memory tagging. 
1113d0407baSopenharmony_ci */ 1123d0407baSopenharmony_ci#ifndef untagged_addr 1133d0407baSopenharmony_ci#define untagged_addr(addr) (addr) 1143d0407baSopenharmony_ci#endif 1153d0407baSopenharmony_ci 1163d0407baSopenharmony_ci#ifndef __pa_symbol 1173d0407baSopenharmony_ci#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 1183d0407baSopenharmony_ci#endif 1193d0407baSopenharmony_ci 1203d0407baSopenharmony_ci#ifndef page_to_virt 1213d0407baSopenharmony_ci#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) 1223d0407baSopenharmony_ci#endif 1233d0407baSopenharmony_ci 1243d0407baSopenharmony_ci#ifndef lm_alias 1253d0407baSopenharmony_ci#define lm_alias(x) __va(__pa_symbol(x)) 1263d0407baSopenharmony_ci#endif 1273d0407baSopenharmony_ci 1283d0407baSopenharmony_ci/* 1293d0407baSopenharmony_ci * With CONFIG_CFI_CLANG, the compiler replaces function addresses in 1303d0407baSopenharmony_ci * instrumented C code with jump table addresses. Architectures that 1313d0407baSopenharmony_ci * support CFI can define this macro to return the actual function address 1323d0407baSopenharmony_ci * when needed. 
1333d0407baSopenharmony_ci */ 1343d0407baSopenharmony_ci#ifndef function_nocfi 1353d0407baSopenharmony_ci#define function_nocfi(x) (x) 1363d0407baSopenharmony_ci#endif 1373d0407baSopenharmony_ci 1383d0407baSopenharmony_ci#define MM_ZERO 0 1393d0407baSopenharmony_ci#define MM_ONE 1 1403d0407baSopenharmony_ci#define MM_TWO 2 1413d0407baSopenharmony_ci#define MM_THREE 3 1423d0407baSopenharmony_ci#define MM_FOUR 4 1433d0407baSopenharmony_ci#define MM_FIVE 5 1443d0407baSopenharmony_ci#define MM_SIX 6 1453d0407baSopenharmony_ci#define MM_SEVEN 7 1463d0407baSopenharmony_ci#define MM_EIGHT 8 1473d0407baSopenharmony_ci#define MM_NINE 9 1483d0407baSopenharmony_ci#define MM_FIFTYSIX 56 1493d0407baSopenharmony_ci#define MM_SIXTYFOUR 64 1503d0407baSopenharmony_ci#define MM_SEVENTYTWO 72 1513d0407baSopenharmony_ci#define MM_EIGHTY 80 1523d0407baSopenharmony_ci 1533d0407baSopenharmony_ci/* 1543d0407baSopenharmony_ci * To prevent common memory management code establishing 1553d0407baSopenharmony_ci * a zero page mapping on a read fault. 1563d0407baSopenharmony_ci * This macro should be defined within <asm/pgtable.h>. 1573d0407baSopenharmony_ci * s390 does this to prevent multiplexing of hardware bits 1583d0407baSopenharmony_ci * related to the physical page in case of virtualization. 1593d0407baSopenharmony_ci */ 1603d0407baSopenharmony_ci#ifndef mm_forbids_zeropage 1613d0407baSopenharmony_ci#define mm_forbids_zeropage(X) (0) 1623d0407baSopenharmony_ci#endif 1633d0407baSopenharmony_ci 1643d0407baSopenharmony_ci/* 1653d0407baSopenharmony_ci * On some architectures it is expensive to call memset() for small sizes. 
/*
 * If an architecture decides to implement their own version of
 * mm_zero_struct_page they should wrap the defines below in a #ifndef and
 * define their own version of this macro in <asm/pgtable.h>
 */
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 80
 * or reduces below 56. The idea is that the compiler optimizes out the
 * switch() statement, and only leaves move/store instructions. Also the
 * compiler can combine write statements if they are both assignments and can
 * be reordered, this can result in several of the writes here being dropped.
 */
#define mm_zero_struct_page(pp) _mm_zero_struct_page(pp)
static inline void _mm_zero_struct_page(struct page *page)
{
    unsigned long *_pp = (void *)page;

    /* Check that struct page is either 56, 64, 72, or 80 bytes */
    BUILD_BUG_ON(sizeof(struct page) & MM_SEVEN);
    BUILD_BUG_ON(sizeof(struct page) < MM_FIFTYSIX);
    BUILD_BUG_ON(sizeof(struct page) > MM_EIGHTY);

    /* Zero one unsigned long per 8 bytes; larger sizes fall through. */
    switch (sizeof(struct page)) {
    case MM_EIGHTY:
        _pp[MM_NINE] = 0;
        fallthrough;
    case MM_SEVENTYTWO:
        _pp[MM_EIGHT] = 0;
        fallthrough;
    case MM_SIXTYFOUR:
        _pp[MM_SEVEN] = 0;
        fallthrough;
    case MM_FIFTYSIX:
        _pp[MM_SIX] = 0;
        _pp[MM_FIVE] = 0;
        _pp[MM_FOUR] = 0;
        _pp[MM_THREE] = 0;
        _pp[MM_TWO] = 0;
        _pp[MM_ONE] = 0;
        _pp[MM_ZERO] = 0;
    }
}
#else
#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
#endif

/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated as ELF format, a section is created
 * per a vma. In ELF, the number of sections is represented in unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump.
 * Because the kernel adds some informative sections to an image of a program
 * at coredump generation, we need some margin. The number of extra sections
 * is 1-3 now and depends on arch. We use "5" as safe margin, here.
 *
 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
 * not a hard limit any more. Although some userspace tools can be surprised
 * by that.
 */
2263d0407baSopenharmony_ci */ 2273d0407baSopenharmony_ci#define MAPCOUNT_ELF_CORE_MARGIN (5) 2283d0407baSopenharmony_ci#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) 2293d0407baSopenharmony_ci 2303d0407baSopenharmony_ciextern int sysctl_max_map_count; 2313d0407baSopenharmony_ci 2323d0407baSopenharmony_ciextern unsigned long sysctl_user_reserve_kbytes; 2333d0407baSopenharmony_ciextern unsigned long sysctl_admin_reserve_kbytes; 2343d0407baSopenharmony_ci 2353d0407baSopenharmony_ciextern int sysctl_overcommit_memory; 2363d0407baSopenharmony_ciextern int sysctl_overcommit_ratio; 2373d0407baSopenharmony_ciextern unsigned long sysctl_overcommit_kbytes; 2383d0407baSopenharmony_ci 2393d0407baSopenharmony_ciint overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 2403d0407baSopenharmony_ciint overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 2413d0407baSopenharmony_ciint overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, loff_t *); 2423d0407baSopenharmony_ci 2433d0407baSopenharmony_ci#define nth_page(page, n) pfn_to_page(page_to_pfn((page)) + (n)) 2443d0407baSopenharmony_ci 2453d0407baSopenharmony_ci/* to align the pointer to the (next) page boundary */ 2463d0407baSopenharmony_ci#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) 2473d0407baSopenharmony_ci 2483d0407baSopenharmony_ci/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ 2493d0407baSopenharmony_ci#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) 2503d0407baSopenharmony_ci 2513d0407baSopenharmony_ci#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) 2523d0407baSopenharmony_ci 2533d0407baSopenharmony_ci/* 2543d0407baSopenharmony_ci * Linux kernel virtual memory manager primitives. 
2553d0407baSopenharmony_ci * The idea being to have a "virtual" mm in the same way 2563d0407baSopenharmony_ci * we have a virtual fs - giving a cleaner interface to the 2573d0407baSopenharmony_ci * mm details, and allowing different kinds of memory mappings 2583d0407baSopenharmony_ci * (from shared memory to executable loading to arbitrary 2593d0407baSopenharmony_ci * mmap() functions). 2603d0407baSopenharmony_ci */ 2613d0407baSopenharmony_ci 2623d0407baSopenharmony_cistruct vm_area_struct *vm_area_alloc(struct mm_struct *); 2633d0407baSopenharmony_cistruct vm_area_struct *vm_area_dup(struct vm_area_struct *); 2643d0407baSopenharmony_civoid vm_area_free(struct vm_area_struct *); 2653d0407baSopenharmony_ci 2663d0407baSopenharmony_ci#ifndef CONFIG_MMU 2673d0407baSopenharmony_ciextern struct rb_root nommu_region_tree; 2683d0407baSopenharmony_ciextern struct rw_semaphore nommu_region_sem; 2693d0407baSopenharmony_ci 2703d0407baSopenharmony_ciextern unsigned int kobjsize(const void *objp); 2713d0407baSopenharmony_ci#endif 2723d0407baSopenharmony_ci 2733d0407baSopenharmony_ci/* 2743d0407baSopenharmony_ci * vm_flags in vm_area_struct, see mm_types.h. 2753d0407baSopenharmony_ci * When changing, update also include/trace/events/mmflags.h 2763d0407baSopenharmony_ci */ 2773d0407baSopenharmony_ci#define VM_NONE 0x00000000 2783d0407baSopenharmony_ci 2793d0407baSopenharmony_ci#define VM_READ 0x00000001 /* currently active flags */ 2803d0407baSopenharmony_ci#define VM_WRITE 0x00000002 2813d0407baSopenharmony_ci#define VM_EXEC 0x00000004 2823d0407baSopenharmony_ci#define VM_SHARED 0x00000008 2833d0407baSopenharmony_ci 2843d0407baSopenharmony_ci/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. 
*/ 2853d0407baSopenharmony_ci#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ 2863d0407baSopenharmony_ci#define VM_MAYWRITE 0x00000020 2873d0407baSopenharmony_ci#define VM_MAYEXEC 0x00000040 2883d0407baSopenharmony_ci#define VM_MAYSHARE 0x00000080 2893d0407baSopenharmony_ci 2903d0407baSopenharmony_ci#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ 2913d0407baSopenharmony_ci#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ 2923d0407baSopenharmony_ci#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ 2933d0407baSopenharmony_ci#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ 2943d0407baSopenharmony_ci#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ 2953d0407baSopenharmony_ci 2963d0407baSopenharmony_ci#define VM_LOCKED 0x00002000 2973d0407baSopenharmony_ci#define VM_IO 0x00004000 /* Memory mapped I/O or similar */ 2983d0407baSopenharmony_ci 2993d0407baSopenharmony_ci/* Used by sys_madvise() */ 3003d0407baSopenharmony_ci#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */ 3013d0407baSopenharmony_ci#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */ 3023d0407baSopenharmony_ci 3033d0407baSopenharmony_ci#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ 3043d0407baSopenharmony_ci#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ 3053d0407baSopenharmony_ci#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ 3063d0407baSopenharmony_ci#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ 3073d0407baSopenharmony_ci#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ 3083d0407baSopenharmony_ci#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ 3093d0407baSopenharmony_ci#define VM_SYNC 0x00800000 /* Synchronous page faults */ 3103d0407baSopenharmony_ci#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ 
#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */
#else
#define VM_SOFTDIRTY 0
#endif

#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_6 38 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_7 39 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
#define VM_HIGH_ARCH_6 BIT(VM_HIGH_ARCH_BIT_6)
#define VM_HIGH_ARCH_7 BIT(VM_HIGH_ARCH_BIT_7)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_MEM_PURGEABLE
#define VM_PURGEABLE VM_HIGH_ARCH_5
#define VM_USEREXPTE VM_HIGH_ARCH_6
#else /* CONFIG_MEM_PURGEABLE */
#define VM_PURGEABLE 0
#define VM_USEREXPTE 0
#endif /* CONFIG_MEM_PURGEABLE */

#ifdef CONFIG_SECURITY_XPM
#define VM_XPM VM_HIGH_ARCH_7
#else /* CONFIG_SECURITY_XPM */
#define VM_XPM VM_NONE
#endif /* CONFIG_SECURITY_XPM */

#ifdef CONFIG_ARCH_HAS_PKEYS
#define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
#define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
#define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */
#define VM_PKEY_BIT2 VM_HIGH_ARCH_2
#define VM_PKEY_BIT3 VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
#define VM_PKEY_BIT4 VM_HIGH_ARCH_4
#else
#define VM_PKEY_BIT4 0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

#if defined(CONFIG_X86)
#define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
#define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
#define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_IA64)
#define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_SPARC64)
#define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
#define VM_ARCH_CLEAR VM_SPARC_ADI
#elif defined(CONFIG_ARM64)
#define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */
#define VM_ARCH_CLEAR VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
#define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_ARM64_MTE)
#define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */
#define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */
#else
#define VM_MTE VM_NONE
#define VM_MTE_ALLOWED VM_NONE
#endif

#ifndef VM_GROWSUP
#define VM_GROWSUP VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

/* Common data flag combinations */
#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_NON_EXEC (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_EXEC (VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifndef VM_DATA_DEFAULT_FLAGS /* arch can override this */
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_EXEC
#endif

#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK VM_GROWSUP
#else
#define VM_STACK VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
4323d0407baSopenharmony_ci */ 4333d0407baSopenharmony_ci#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) 4343d0407baSopenharmony_ci 4353d0407baSopenharmony_ci/* This mask prevents VMA from being scanned with khugepaged */ 4363d0407baSopenharmony_ci#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) 4373d0407baSopenharmony_ci 4383d0407baSopenharmony_ci/* This mask defines which mm->def_flags a process can inherit its parent */ 4393d0407baSopenharmony_ci#define VM_INIT_DEF_MASK VM_NOHUGEPAGE 4403d0407baSopenharmony_ci 4413d0407baSopenharmony_ci/* This mask is used to clear all the VMA flags used by mlock */ 4423d0407baSopenharmony_ci#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) 4433d0407baSopenharmony_ci 4443d0407baSopenharmony_ci/* Arch-specific flags to clear when updating VM flags on protection change */ 4453d0407baSopenharmony_ci#ifndef VM_ARCH_CLEAR 4463d0407baSopenharmony_ci#define VM_ARCH_CLEAR VM_NONE 4473d0407baSopenharmony_ci#endif 4483d0407baSopenharmony_ci#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR) 4493d0407baSopenharmony_ci 4503d0407baSopenharmony_ci/* 4513d0407baSopenharmony_ci * mapping from the currently active vm_flags protection bits (the 4523d0407baSopenharmony_ci * low four bits) to a page protection mask.. 4533d0407baSopenharmony_ci */ 4543d0407baSopenharmony_ciextern pgprot_t protection_map[16]; 4553d0407baSopenharmony_ci 4563d0407baSopenharmony_ci/** 4573d0407baSopenharmony_ci * Fault flag definitions. 4583d0407baSopenharmony_ci * 4593d0407baSopenharmony_ci * @FAULT_FLAG_WRITE: Fault was a write fault. 4603d0407baSopenharmony_ci * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE. 4613d0407baSopenharmony_ci * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked. 4623d0407baSopenharmony_ci * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying. 4633d0407baSopenharmony_ci * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region. 
4643d0407baSopenharmony_ci * @FAULT_FLAG_TRIED: The fault has been tried once. 4653d0407baSopenharmony_ci * @FAULT_FLAG_USER: The fault originated in userspace. 4663d0407baSopenharmony_ci * @FAULT_FLAG_REMOTE: The fault is not for current task/mm. 4673d0407baSopenharmony_ci * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch. 4683d0407baSopenharmony_ci * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals. 4693d0407baSopenharmony_ci * 4703d0407baSopenharmony_ci * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify 4713d0407baSopenharmony_ci * whether we would allow page faults to retry by specifying these two 4723d0407baSopenharmony_ci * fault flags correctly. Currently there can be three legal combinations: 4733d0407baSopenharmony_ci * 4743d0407baSopenharmony_ci * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and 4753d0407baSopenharmony_ci * this is the first try 4763d0407baSopenharmony_ci * 4773d0407baSopenharmony_ci * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and 4783d0407baSopenharmony_ci * we've already tried at least once 4793d0407baSopenharmony_ci * 4803d0407baSopenharmony_ci * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry 4813d0407baSopenharmony_ci * 4823d0407baSopenharmony_ci * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never 4833d0407baSopenharmony_ci * be used. Note that page faults can be allowed to retry for multiple times, 4843d0407baSopenharmony_ci * in which case we'll have an initial fault with flags (a) then later on 4853d0407baSopenharmony_ci * continuous faults with flags (b). We should always try to detect pending 4863d0407baSopenharmony_ci * signals before a retry to make sure the continuous page faults can still be 4873d0407baSopenharmony_ci * interrupted if necessary. 
/* Fault flag bit values; see the combination rules documented above. */
#define FAULT_FLAG_WRITE 0x01
#define FAULT_FLAG_MKWRITE 0x02
#define FAULT_FLAG_ALLOW_RETRY 0x04
#define FAULT_FLAG_RETRY_NOWAIT 0x08
#define FAULT_FLAG_KILLABLE 0x10
#define FAULT_FLAG_TRIED 0x20
#define FAULT_FLAG_USER 0x40
#define FAULT_FLAG_REMOTE 0x80
#define FAULT_FLAG_INSTRUCTION 0x100
#define FAULT_FLAG_INTERRUPTIBLE 0x200

/*
 * The default fault flags that should be used by most of the
 * arch-specific page fault handlers.
 */
#define FAULT_FLAG_DEFAULT (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | FAULT_FLAG_INTERRUPTIBLE)

/**
 * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
 *
 * This is mostly used for places where we want to try to avoid taking
 * the mmap_lock for too long a time when waiting for another condition
 * to change, in which case we can try to be polite to release the
 * mmap_lock in the first round to avoid potential starvation of other
 * processes that would also want the mmap_lock.
 *
 * Return: true if the page fault allows retry and this is the first
 * attempt of the fault handling; false otherwise.
 */
static inline bool fault_flag_allow_retry_first(unsigned int flags)
{
    /* Case (a) in the combination table: ALLOW_RETRY set, TRIED clear. */
    return (flags & FAULT_FLAG_ALLOW_RETRY) && (!(flags & FAULT_FLAG_TRIED));
}

/* Name/value pairs for tracing each fault flag bit. */
#define FAULT_FLAG_TRACE \
    {FAULT_FLAG_WRITE, "WRITE"}, {FAULT_FLAG_MKWRITE, "MKWRITE"}, {FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY"}, \
    {FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT"}, {FAULT_FLAG_KILLABLE, "KILLABLE"}, {FAULT_FLAG_TRIED, "TRIED"}, \
    {FAULT_FLAG_USER, "USER"}, {FAULT_FLAG_REMOTE, "REMOTE"}, {FAULT_FLAG_INSTRUCTION, "INSTRUCTION"}, \
    { \
        FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" \
    }

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
5403d0407baSopenharmony_ci */ 5413d0407baSopenharmony_cistruct vm_fault { 5423d0407baSopenharmony_ci struct vm_area_struct *vma; /* Target VMA */ 5433d0407baSopenharmony_ci unsigned int flags; /* FAULT_FLAG_xxx flags */ 5443d0407baSopenharmony_ci gfp_t gfp_mask; /* gfp mask to be used for allocations */ 5453d0407baSopenharmony_ci pgoff_t pgoff; /* Logical page offset based on vma */ 5463d0407baSopenharmony_ci unsigned long address; /* Faulting virtual address */ 5473d0407baSopenharmony_ci pmd_t *pmd; /* Pointer to pmd entry matching 5483d0407baSopenharmony_ci * the 'address' */ 5493d0407baSopenharmony_ci pud_t *pud; /* Pointer to pud entry matching 5503d0407baSopenharmony_ci * the 'address' 5513d0407baSopenharmony_ci */ 5523d0407baSopenharmony_ci pte_t orig_pte; /* Value of PTE at the time of fault */ 5533d0407baSopenharmony_ci 5543d0407baSopenharmony_ci struct page *cow_page; /* Page handler may use for COW fault */ 5553d0407baSopenharmony_ci struct page *page; /* ->fault handlers should return a 5563d0407baSopenharmony_ci * page here, unless VM_FAULT_NOPAGE 5573d0407baSopenharmony_ci * is set (which is also implied by 5583d0407baSopenharmony_ci * VM_FAULT_ERROR). 5593d0407baSopenharmony_ci */ 5603d0407baSopenharmony_ci /* These three entries are valid only while holding ptl lock */ 5613d0407baSopenharmony_ci pte_t *pte; /* Pointer to pte entry matching 5623d0407baSopenharmony_ci * the 'address'. NULL if the page 5633d0407baSopenharmony_ci * table hasn't been allocated. 5643d0407baSopenharmony_ci */ 5653d0407baSopenharmony_ci spinlock_t *ptl; /* Page table lock. 5663d0407baSopenharmony_ci * Protects pte page table if 'pte' 5673d0407baSopenharmony_ci * is not NULL, otherwise pmd. 5683d0407baSopenharmony_ci */ 5693d0407baSopenharmony_ci pgtable_t prealloc_pte; /* Pre-allocated pte page table. 5703d0407baSopenharmony_ci * vm_ops->map_pages() calls 5713d0407baSopenharmony_ci * alloc_set_pte() from atomic context. 
5723d0407baSopenharmony_ci * do_fault_around() pre-allocates 5733d0407baSopenharmony_ci * page table to avoid allocation from 5743d0407baSopenharmony_ci * atomic context. 5753d0407baSopenharmony_ci */ 5763d0407baSopenharmony_ci}; 5773d0407baSopenharmony_ci 5783d0407baSopenharmony_ci/* page entry size for vm->huge_fault() */ 5793d0407baSopenharmony_cienum page_entry_size { 5803d0407baSopenharmony_ci PE_SIZE_PTE = 0, 5813d0407baSopenharmony_ci PE_SIZE_PMD, 5823d0407baSopenharmony_ci PE_SIZE_PUD, 5833d0407baSopenharmony_ci}; 5843d0407baSopenharmony_ci 5853d0407baSopenharmony_ci/* 5863d0407baSopenharmony_ci * These are the virtual MM functions - opening of an area, closing and 5873d0407baSopenharmony_ci * unmapping it (needed to keep files on disk up-to-date etc), pointer 5883d0407baSopenharmony_ci * to the functions called when a no-page or a wp-page exception occurs. 5893d0407baSopenharmony_ci */ 5903d0407baSopenharmony_cistruct vm_operations_struct { 5913d0407baSopenharmony_ci void (*open)(struct vm_area_struct *area); 5923d0407baSopenharmony_ci void (*close)(struct vm_area_struct *area); 5933d0407baSopenharmony_ci int (*split)(struct vm_area_struct *area, unsigned long addr); 5943d0407baSopenharmony_ci int (*mremap)(struct vm_area_struct *area); 5953d0407baSopenharmony_ci vm_fault_t (*fault)(struct vm_fault *vmf); 5963d0407baSopenharmony_ci vm_fault_t (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); 5973d0407baSopenharmony_ci void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); 5983d0407baSopenharmony_ci unsigned long (*pagesize)(struct vm_area_struct *area); 5993d0407baSopenharmony_ci 6003d0407baSopenharmony_ci /* notification that a previously read-only page is about to become 6013d0407baSopenharmony_ci * writable, if an error is returned it will cause a SIGBUS */ 6023d0407baSopenharmony_ci vm_fault_t (*page_mkwrite)(struct vm_fault *vmf); 6033d0407baSopenharmony_ci 6043d0407baSopenharmony_ci /* same as page_mkwrite 
when using VM_PFNMAP|VM_MIXEDMAP */ 6053d0407baSopenharmony_ci vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); 6063d0407baSopenharmony_ci 6073d0407baSopenharmony_ci /* called by access_process_vm when get_user_pages() fails, typically 6083d0407baSopenharmony_ci * for use by special VMAs that can switch between memory and hardware 6093d0407baSopenharmony_ci */ 6103d0407baSopenharmony_ci int (*access)(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); 6113d0407baSopenharmony_ci 6123d0407baSopenharmony_ci /* Called by the /proc/PID/maps code to ask the vma whether it 6133d0407baSopenharmony_ci * has a special name. Returning non-NULL will also cause this 6143d0407baSopenharmony_ci * vma to be dumped unconditionally. */ 6153d0407baSopenharmony_ci const char *(*name)(struct vm_area_struct *vma); 6163d0407baSopenharmony_ci 6173d0407baSopenharmony_ci#ifdef CONFIG_NUMA 6183d0407baSopenharmony_ci /* 6193d0407baSopenharmony_ci * set_policy() op must add a reference to any non-NULL @new mempolicy 6203d0407baSopenharmony_ci * to hold the policy upon return. Caller should pass NULL @new to 6213d0407baSopenharmony_ci * remove a policy and fall back to surrounding context--i.e. do not 6223d0407baSopenharmony_ci * install a MPOL_DEFAULT policy, nor the task or system default 6233d0407baSopenharmony_ci * mempolicy. 6243d0407baSopenharmony_ci */ 6253d0407baSopenharmony_ci int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); 6263d0407baSopenharmony_ci 6273d0407baSopenharmony_ci /* 6283d0407baSopenharmony_ci * get_policy() op must add reference [mpol_get()] to any policy at 6293d0407baSopenharmony_ci * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure 6303d0407baSopenharmony_ci * in mm/mempolicy.c will do this automatically. 6313d0407baSopenharmony_ci * get_policy() must NOT add a ref if the policy at (vma,addr) is not 6323d0407baSopenharmony_ci * marked as MPOL_SHARED. vma policies are protected by the mmap_lock. 
6333d0407baSopenharmony_ci * If no [shared/vma] mempolicy exists at the addr, get_policy() op 6343d0407baSopenharmony_ci * must return NULL--i.e., do not "fallback" to task or system default 6353d0407baSopenharmony_ci * policy. 6363d0407baSopenharmony_ci */ 6373d0407baSopenharmony_ci struct mempolicy *(*get_policy)(struct vm_area_struct *vma, unsigned long addr); 6383d0407baSopenharmony_ci#endif 6393d0407baSopenharmony_ci /* 6403d0407baSopenharmony_ci * Called by vm_normal_page() for special PTEs to find the 6413d0407baSopenharmony_ci * page for @addr. This is useful if the default behavior 6423d0407baSopenharmony_ci * (using pte_page()) would not find the correct page. 6433d0407baSopenharmony_ci */ 6443d0407baSopenharmony_ci struct page *(*find_special_page)(struct vm_area_struct *vma, unsigned long addr); 6453d0407baSopenharmony_ci}; 6463d0407baSopenharmony_ci 6473d0407baSopenharmony_cistatic inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) 6483d0407baSopenharmony_ci{ 6493d0407baSopenharmony_ci static const struct vm_operations_struct dummy_vm_ops = {}; 6503d0407baSopenharmony_ci 6513d0407baSopenharmony_ci memset(vma, 0, sizeof(*vma)); 6523d0407baSopenharmony_ci vma->vm_mm = mm; 6533d0407baSopenharmony_ci vma->vm_ops = &dummy_vm_ops; 6543d0407baSopenharmony_ci INIT_LIST_HEAD(&vma->anon_vma_chain); 6553d0407baSopenharmony_ci} 6563d0407baSopenharmony_ci 6573d0407baSopenharmony_cistatic inline void vma_set_anonymous(struct vm_area_struct *vma) 6583d0407baSopenharmony_ci{ 6593d0407baSopenharmony_ci vma->vm_ops = NULL; 6603d0407baSopenharmony_ci} 6613d0407baSopenharmony_ci 6623d0407baSopenharmony_cistatic inline bool vma_is_anonymous(struct vm_area_struct *vma) 6633d0407baSopenharmony_ci{ 6643d0407baSopenharmony_ci return !vma->vm_ops; 6653d0407baSopenharmony_ci} 6663d0407baSopenharmony_ci 6673d0407baSopenharmony_cistatic inline bool vma_is_temporary_stack(struct vm_area_struct *vma) 6683d0407baSopenharmony_ci{ 6693d0407baSopenharmony_ci int 
maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); 6703d0407baSopenharmony_ci 6713d0407baSopenharmony_ci if (!maybe_stack) { 6723d0407baSopenharmony_ci return false; 6733d0407baSopenharmony_ci } 6743d0407baSopenharmony_ci 6753d0407baSopenharmony_ci if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == VM_STACK_INCOMPLETE_SETUP) { 6763d0407baSopenharmony_ci return true; 6773d0407baSopenharmony_ci } 6783d0407baSopenharmony_ci 6793d0407baSopenharmony_ci return false; 6803d0407baSopenharmony_ci} 6813d0407baSopenharmony_ci 6823d0407baSopenharmony_cistatic inline bool vma_is_foreign(struct vm_area_struct *vma) 6833d0407baSopenharmony_ci{ 6843d0407baSopenharmony_ci if (!current->mm) { 6853d0407baSopenharmony_ci return true; 6863d0407baSopenharmony_ci } 6873d0407baSopenharmony_ci 6883d0407baSopenharmony_ci if (current->mm != vma->vm_mm) { 6893d0407baSopenharmony_ci return true; 6903d0407baSopenharmony_ci } 6913d0407baSopenharmony_ci 6923d0407baSopenharmony_ci return false; 6933d0407baSopenharmony_ci} 6943d0407baSopenharmony_ci 6953d0407baSopenharmony_cistatic inline bool vma_is_accessible(struct vm_area_struct *vma) 6963d0407baSopenharmony_ci{ 6973d0407baSopenharmony_ci return vma->vm_flags & VM_ACCESS_FLAGS; 6983d0407baSopenharmony_ci} 6993d0407baSopenharmony_ci 7003d0407baSopenharmony_ci#ifdef CONFIG_SHMEM 7013d0407baSopenharmony_ci/* 7023d0407baSopenharmony_ci * The vma_is_shmem is not inline because it is used only by slow 7033d0407baSopenharmony_ci * paths in userfault. 
7043d0407baSopenharmony_ci */ 7053d0407baSopenharmony_cibool vma_is_shmem(struct vm_area_struct *vma); 7063d0407baSopenharmony_ci#else 7073d0407baSopenharmony_cistatic inline bool vma_is_shmem(struct vm_area_struct *vma) 7083d0407baSopenharmony_ci{ 7093d0407baSopenharmony_ci return false; 7103d0407baSopenharmony_ci} 7113d0407baSopenharmony_ci#endif 7123d0407baSopenharmony_ci 7133d0407baSopenharmony_ciint vma_is_stack_for_current(struct vm_area_struct *vma); 7143d0407baSopenharmony_ci 7153d0407baSopenharmony_ci/* flush_tlb_range() takes a vma, not a mm, and can care about flags */ 7163d0407baSopenharmony_ci#define TLB_FLUSH_VMA(mm, flags) \ 7173d0407baSopenharmony_ci { \ 7183d0407baSopenharmony_ci .vm_mm = (mm), .vm_flags = (flags) \ 7193d0407baSopenharmony_ci } 7203d0407baSopenharmony_ci 7213d0407baSopenharmony_cistruct mmu_gather; 7223d0407baSopenharmony_cistruct inode; 7233d0407baSopenharmony_ci 7243d0407baSopenharmony_ci#include <linux/huge_mm.h> 7253d0407baSopenharmony_ci 7263d0407baSopenharmony_ci/* 7273d0407baSopenharmony_ci * Methods to modify the page usage count. 7283d0407baSopenharmony_ci * 7293d0407baSopenharmony_ci * What counts for a page usage: 7303d0407baSopenharmony_ci * - cache mapping (page->mapping) 7313d0407baSopenharmony_ci * - private data (page->private) 7323d0407baSopenharmony_ci * - page mapped in a task's page tables, each mapping 7333d0407baSopenharmony_ci * is counted separately 7343d0407baSopenharmony_ci * 7353d0407baSopenharmony_ci * Also, many kernel routines increase the page count before a critical 7363d0407baSopenharmony_ci * routine so they can be sure the page doesn't go away from under them. 
7373d0407baSopenharmony_ci */ 7383d0407baSopenharmony_ci 7393d0407baSopenharmony_ci/* 7403d0407baSopenharmony_ci * Drop a ref, return true if the refcount fell to zero (the page has no users) 7413d0407baSopenharmony_ci */ 7423d0407baSopenharmony_cistatic inline int put_page_testzero(struct page *page) 7433d0407baSopenharmony_ci{ 7443d0407baSopenharmony_ci VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 7453d0407baSopenharmony_ci return page_ref_dec_and_test(page); 7463d0407baSopenharmony_ci} 7473d0407baSopenharmony_ci 7483d0407baSopenharmony_ci/* 7493d0407baSopenharmony_ci * Try to grab a ref unless the page has a refcount of zero, return false if 7503d0407baSopenharmony_ci * that is the case. 7513d0407baSopenharmony_ci * This can be called when MMU is off so it must not access 7523d0407baSopenharmony_ci * any of the virtual mappings. 7533d0407baSopenharmony_ci */ 7543d0407baSopenharmony_cistatic inline int get_page_unless_zero(struct page *page) 7553d0407baSopenharmony_ci{ 7563d0407baSopenharmony_ci return page_ref_add_unless(page, 1, 0); 7573d0407baSopenharmony_ci} 7583d0407baSopenharmony_ci 7593d0407baSopenharmony_ciextern int page_is_ram(unsigned long pfn); 7603d0407baSopenharmony_ci 7613d0407baSopenharmony_cienum { 7623d0407baSopenharmony_ci REGION_INTERSECTS, 7633d0407baSopenharmony_ci REGION_DISJOINT, 7643d0407baSopenharmony_ci REGION_MIXED, 7653d0407baSopenharmony_ci}; 7663d0407baSopenharmony_ci 7673d0407baSopenharmony_ciint region_intersects(resource_size_t offset, size_t size, unsigned long flags, unsigned long desc); 7683d0407baSopenharmony_ci 7693d0407baSopenharmony_ci/* Support for virtually mapped pages */ 7703d0407baSopenharmony_cistruct page *vmalloc_to_page(const void *addr); 7713d0407baSopenharmony_ciunsigned long vmalloc_to_pfn(const void *addr); 7723d0407baSopenharmony_ci 7733d0407baSopenharmony_ci/* 7743d0407baSopenharmony_ci * Determine if an address is within the vmalloc range 7753d0407baSopenharmony_ci * 7763d0407baSopenharmony_ci * On 
nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there 7773d0407baSopenharmony_ci * is no special casing required. 7783d0407baSopenharmony_ci */ 7793d0407baSopenharmony_ci 7803d0407baSopenharmony_ci#ifndef is_ioremap_addr 7813d0407baSopenharmony_ci#define is_ioremap_addr(x) is_vmalloc_addr(x) 7823d0407baSopenharmony_ci#endif 7833d0407baSopenharmony_ci 7843d0407baSopenharmony_ci#ifdef CONFIG_MMU 7853d0407baSopenharmony_ciextern bool is_vmalloc_addr(const void *x); 7863d0407baSopenharmony_ciextern int is_vmalloc_or_module_addr(const void *x); 7873d0407baSopenharmony_ci#else 7883d0407baSopenharmony_cistatic inline bool is_vmalloc_addr(const void *x) 7893d0407baSopenharmony_ci{ 7903d0407baSopenharmony_ci return false; 7913d0407baSopenharmony_ci} 7923d0407baSopenharmony_cistatic inline int is_vmalloc_or_module_addr(const void *x) 7933d0407baSopenharmony_ci{ 7943d0407baSopenharmony_ci return 0; 7953d0407baSopenharmony_ci} 7963d0407baSopenharmony_ci#endif 7973d0407baSopenharmony_ci 7983d0407baSopenharmony_ciextern void *kvmalloc_node(size_t size, gfp_t flags, int node); 7993d0407baSopenharmony_cistatic inline void *kvmalloc(size_t size, gfp_t flags) 8003d0407baSopenharmony_ci{ 8013d0407baSopenharmony_ci return kvmalloc_node(size, flags, NUMA_NO_NODE); 8023d0407baSopenharmony_ci} 8033d0407baSopenharmony_cistatic inline void *kvzalloc_node(size_t size, gfp_t flags, int node) 8043d0407baSopenharmony_ci{ 8053d0407baSopenharmony_ci return kvmalloc_node(size, flags | __GFP_ZERO, node); 8063d0407baSopenharmony_ci} 8073d0407baSopenharmony_cistatic inline void *kvzalloc(size_t size, gfp_t flags) 8083d0407baSopenharmony_ci{ 8093d0407baSopenharmony_ci return kvmalloc(size, flags | __GFP_ZERO); 8103d0407baSopenharmony_ci} 8113d0407baSopenharmony_ci 8123d0407baSopenharmony_cistatic inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) 8133d0407baSopenharmony_ci{ 8143d0407baSopenharmony_ci size_t bytes; 8153d0407baSopenharmony_ci 8163d0407baSopenharmony_ci if 
(unlikely(check_mul_overflow(n, size, &bytes))) { 8173d0407baSopenharmony_ci return NULL; 8183d0407baSopenharmony_ci } 8193d0407baSopenharmony_ci 8203d0407baSopenharmony_ci return kvmalloc(bytes, flags); 8213d0407baSopenharmony_ci} 8223d0407baSopenharmony_ci 8233d0407baSopenharmony_cistatic inline void *kvcalloc(size_t n, size_t size, gfp_t flags) 8243d0407baSopenharmony_ci{ 8253d0407baSopenharmony_ci return kvmalloc_array(n, size, flags | __GFP_ZERO); 8263d0407baSopenharmony_ci} 8273d0407baSopenharmony_ci 8283d0407baSopenharmony_ciextern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags); 8293d0407baSopenharmony_ciextern void kvfree(const void *addr); 8303d0407baSopenharmony_ciextern void kvfree_sensitive(const void *addr, size_t len); 8313d0407baSopenharmony_ci 8323d0407baSopenharmony_cistatic inline int head_compound_mapcount(struct page *head) 8333d0407baSopenharmony_ci{ 8343d0407baSopenharmony_ci return atomic_read(compound_mapcount_ptr(head)) + 1; 8353d0407baSopenharmony_ci} 8363d0407baSopenharmony_ci 8373d0407baSopenharmony_ci/* 8383d0407baSopenharmony_ci * Mapcount of compound page as a whole, does not include mapped sub-pages. 8393d0407baSopenharmony_ci * 8403d0407baSopenharmony_ci * Must be called only for compound pages or any their tail sub-pages. 8413d0407baSopenharmony_ci */ 8423d0407baSopenharmony_cistatic inline int compound_mapcount(struct page *page) 8433d0407baSopenharmony_ci{ 8443d0407baSopenharmony_ci VM_BUG_ON_PAGE(!PageCompound(page), page); 8453d0407baSopenharmony_ci page = compound_head(page); 8463d0407baSopenharmony_ci return head_compound_mapcount(page); 8473d0407baSopenharmony_ci} 8483d0407baSopenharmony_ci 8493d0407baSopenharmony_ci/* 8503d0407baSopenharmony_ci * The atomic page->_mapcount, starts from -1: so that transitions 8513d0407baSopenharmony_ci * both from it and to it can be tracked, using atomic_inc_and_test 8523d0407baSopenharmony_ci * and atomic_add_negative(-1). 
8533d0407baSopenharmony_ci */ 8543d0407baSopenharmony_cistatic inline void page_mapcount_reset(struct page *page) 8553d0407baSopenharmony_ci{ 8563d0407baSopenharmony_ci atomic_set(&(page)->_mapcount, -1); 8573d0407baSopenharmony_ci} 8583d0407baSopenharmony_ci 8593d0407baSopenharmony_ciint __page_mapcount(struct page *page); 8603d0407baSopenharmony_ci 8613d0407baSopenharmony_ci/* 8623d0407baSopenharmony_ci * Mapcount of 0-order page; when compound sub-page, includes 8633d0407baSopenharmony_ci * compound_mapcount(). 8643d0407baSopenharmony_ci * 8653d0407baSopenharmony_ci * Result is undefined for pages which cannot be mapped into userspace. 8663d0407baSopenharmony_ci * For example SLAB or special types of pages. See function page_has_type(). 8673d0407baSopenharmony_ci * They use this place in struct page differently. 8683d0407baSopenharmony_ci */ 8693d0407baSopenharmony_cistatic inline int page_mapcount(struct page *page) 8703d0407baSopenharmony_ci{ 8713d0407baSopenharmony_ci if (unlikely(PageCompound(page))) { 8723d0407baSopenharmony_ci return __page_mapcount(page); 8733d0407baSopenharmony_ci } 8743d0407baSopenharmony_ci return atomic_read(&page->_mapcount) + 1; 8753d0407baSopenharmony_ci} 8763d0407baSopenharmony_ci 8773d0407baSopenharmony_ci#ifdef CONFIG_TRANSPARENT_HUGEPAGE 8783d0407baSopenharmony_ciint total_mapcount(struct page *page); 8793d0407baSopenharmony_ciint page_trans_huge_mapcount(struct page *page, int *total_mapcount); 8803d0407baSopenharmony_ci#else 8813d0407baSopenharmony_cistatic inline int total_mapcount(struct page *page) 8823d0407baSopenharmony_ci{ 8833d0407baSopenharmony_ci return page_mapcount(page); 8843d0407baSopenharmony_ci} 8853d0407baSopenharmony_cistatic inline int page_trans_huge_mapcount(struct page *page, int *total_mapcount) 8863d0407baSopenharmony_ci{ 8873d0407baSopenharmony_ci int mapcount = page_mapcount(page); 8883d0407baSopenharmony_ci if (total_mapcount) { 8893d0407baSopenharmony_ci *total_mapcount = mapcount; 
8903d0407baSopenharmony_ci } 8913d0407baSopenharmony_ci return mapcount; 8923d0407baSopenharmony_ci} 8933d0407baSopenharmony_ci#endif 8943d0407baSopenharmony_ci 8953d0407baSopenharmony_cistatic inline struct page *virt_to_head_page(const void *x) 8963d0407baSopenharmony_ci{ 8973d0407baSopenharmony_ci struct page *page = virt_to_page(x); 8983d0407baSopenharmony_ci 8993d0407baSopenharmony_ci return compound_head(page); 9003d0407baSopenharmony_ci} 9013d0407baSopenharmony_ci 9023d0407baSopenharmony_civoid __put_page(struct page *page); 9033d0407baSopenharmony_ci 9043d0407baSopenharmony_civoid put_pages_list(struct list_head *pages); 9053d0407baSopenharmony_ci 9063d0407baSopenharmony_civoid split_page(struct page *page, unsigned int order); 9073d0407baSopenharmony_ci 9083d0407baSopenharmony_ci/* 9093d0407baSopenharmony_ci * Compound pages have a destructor function. Provide a 9103d0407baSopenharmony_ci * prototype for that function and accessor functions. 9113d0407baSopenharmony_ci * These are _only_ valid on the head of a compound page. 
9123d0407baSopenharmony_ci */ 9133d0407baSopenharmony_citypedef void compound_page_dtor(struct page *); 9143d0407baSopenharmony_ci 9153d0407baSopenharmony_ci/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */ 9163d0407baSopenharmony_cienum compound_dtor_id { 9173d0407baSopenharmony_ci NULL_COMPOUND_DTOR, 9183d0407baSopenharmony_ci COMPOUND_PAGE_DTOR, 9193d0407baSopenharmony_ci#ifdef CONFIG_HUGETLB_PAGE 9203d0407baSopenharmony_ci HUGETLB_PAGE_DTOR, 9213d0407baSopenharmony_ci#endif 9223d0407baSopenharmony_ci#ifdef CONFIG_TRANSPARENT_HUGEPAGE 9233d0407baSopenharmony_ci TRANSHUGE_PAGE_DTOR, 9243d0407baSopenharmony_ci#endif 9253d0407baSopenharmony_ci NR_COMPOUND_DTORS, 9263d0407baSopenharmony_ci}; 9273d0407baSopenharmony_ciextern compound_page_dtor *const compound_page_dtors[NR_COMPOUND_DTORS]; 9283d0407baSopenharmony_ci 9293d0407baSopenharmony_cistatic inline void set_compound_page_dtor(struct page *page, enum compound_dtor_id compound_dtor) 9303d0407baSopenharmony_ci{ 9313d0407baSopenharmony_ci VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page); 9323d0407baSopenharmony_ci page[1].compound_dtor = compound_dtor; 9333d0407baSopenharmony_ci} 9343d0407baSopenharmony_ci 9353d0407baSopenharmony_cistatic inline void destroy_compound_page(struct page *page) 9363d0407baSopenharmony_ci{ 9373d0407baSopenharmony_ci VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); 9383d0407baSopenharmony_ci compound_page_dtors[page[1].compound_dtor](page); 9393d0407baSopenharmony_ci} 9403d0407baSopenharmony_ci 9413d0407baSopenharmony_cistatic inline unsigned int compound_order(struct page *page) 9423d0407baSopenharmony_ci{ 9433d0407baSopenharmony_ci if (!PageHead(page)) { 9443d0407baSopenharmony_ci return 0; 9453d0407baSopenharmony_ci } 9463d0407baSopenharmony_ci return page[1].compound_order; 9473d0407baSopenharmony_ci} 9483d0407baSopenharmony_ci 9493d0407baSopenharmony_cistatic inline bool hpage_pincount_available(struct page *page) 
9503d0407baSopenharmony_ci{ 9513d0407baSopenharmony_ci /* 9523d0407baSopenharmony_ci * Can the page->hpage_pinned_refcount field be used? That field is in 9533d0407baSopenharmony_ci * the 3rd page of the compound page, so the smallest (2-page) compound 9543d0407baSopenharmony_ci * pages cannot support it. 9553d0407baSopenharmony_ci */ 9563d0407baSopenharmony_ci page = compound_head(page); 9573d0407baSopenharmony_ci return PageCompound(page) && compound_order(page) > 1; 9583d0407baSopenharmony_ci} 9593d0407baSopenharmony_ci 9603d0407baSopenharmony_cistatic inline int head_compound_pincount(struct page *head) 9613d0407baSopenharmony_ci{ 9623d0407baSopenharmony_ci return atomic_read(compound_pincount_ptr(head)); 9633d0407baSopenharmony_ci} 9643d0407baSopenharmony_ci 9653d0407baSopenharmony_cistatic inline int compound_pincount(struct page *page) 9663d0407baSopenharmony_ci{ 9673d0407baSopenharmony_ci VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); 9683d0407baSopenharmony_ci page = compound_head(page); 9693d0407baSopenharmony_ci return head_compound_pincount(page); 9703d0407baSopenharmony_ci} 9713d0407baSopenharmony_ci 9723d0407baSopenharmony_cistatic inline void set_compound_order(struct page *page, unsigned int order) 9733d0407baSopenharmony_ci{ 9743d0407baSopenharmony_ci page[1].compound_order = order; 9753d0407baSopenharmony_ci page[1].compound_nr = 1U << order; 9763d0407baSopenharmony_ci} 9773d0407baSopenharmony_ci 9783d0407baSopenharmony_ci/* Returns the number of pages in this potentially compound page. */ 9793d0407baSopenharmony_cistatic inline unsigned long compound_nr(struct page *page) 9803d0407baSopenharmony_ci{ 9813d0407baSopenharmony_ci if (!PageHead(page)) { 9823d0407baSopenharmony_ci return 1; 9833d0407baSopenharmony_ci } 9843d0407baSopenharmony_ci return page[1].compound_nr; 9853d0407baSopenharmony_ci} 9863d0407baSopenharmony_ci 9873d0407baSopenharmony_ci/* Returns the number of bytes in this potentially compound page. 
*/ 9883d0407baSopenharmony_cistatic inline unsigned long page_size(struct page *page) 9893d0407baSopenharmony_ci{ 9903d0407baSopenharmony_ci return PAGE_SIZE << compound_order(page); 9913d0407baSopenharmony_ci} 9923d0407baSopenharmony_ci 9933d0407baSopenharmony_ci/* Returns the number of bits needed for the number of bytes in a page */ 9943d0407baSopenharmony_cistatic inline unsigned int page_shift(struct page *page) 9953d0407baSopenharmony_ci{ 9963d0407baSopenharmony_ci return PAGE_SHIFT + compound_order(page); 9973d0407baSopenharmony_ci} 9983d0407baSopenharmony_ci 9993d0407baSopenharmony_civoid free_compound_page(struct page *page); 10003d0407baSopenharmony_ci 10013d0407baSopenharmony_ci#ifdef CONFIG_MMU 10023d0407baSopenharmony_ci/* 10033d0407baSopenharmony_ci * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when 10043d0407baSopenharmony_ci * servicing faults for write access. In the normal case, do always want 10053d0407baSopenharmony_ci * pte_mkwrite. But get_user_pages can cause write faults for mappings 10063d0407baSopenharmony_ci * that do not have writing enabled, when used by access_process_vm. 10073d0407baSopenharmony_ci */ 10083d0407baSopenharmony_cistatic inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) 10093d0407baSopenharmony_ci{ 10103d0407baSopenharmony_ci if (likely(vma->vm_flags & VM_WRITE)) { 10113d0407baSopenharmony_ci pte = pte_mkwrite(pte); 10123d0407baSopenharmony_ci } 10133d0407baSopenharmony_ci return pte; 10143d0407baSopenharmony_ci} 10153d0407baSopenharmony_ci 10163d0407baSopenharmony_civm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page); 10173d0407baSopenharmony_civm_fault_t finish_fault(struct vm_fault *vmf); 10183d0407baSopenharmony_civm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); 10193d0407baSopenharmony_ci#endif 10203d0407baSopenharmony_ci 10213d0407baSopenharmony_ci/* 10223d0407baSopenharmony_ci * Multiple processes may "see" the same page. E.g. 
for untouched 10233d0407baSopenharmony_ci * mappings of /dev/null, all processes see the same page full of 10243d0407baSopenharmony_ci * zeroes, and text pages of executables and shared libraries have 10253d0407baSopenharmony_ci * only one copy in memory, at most, normally. 10263d0407baSopenharmony_ci * 10273d0407baSopenharmony_ci * For the non-reserved pages, page_count(page) denotes a reference count. 10283d0407baSopenharmony_ci * page_count() == 0 means the page is free. page->lru is then used for 10293d0407baSopenharmony_ci * freelist management in the buddy allocator. 10303d0407baSopenharmony_ci * page_count() > 0 means the page has been allocated. 10313d0407baSopenharmony_ci * 10323d0407baSopenharmony_ci * Pages are allocated by the slab allocator in order to provide memory 10333d0407baSopenharmony_ci * to kmalloc and kmem_cache_alloc. In this case, the management of the 10343d0407baSopenharmony_ci * page, and the fields in 'struct page' are the responsibility of mm/slab.c 10353d0407baSopenharmony_ci * unless a particular usage is carefully commented. (the responsibility of 10363d0407baSopenharmony_ci * freeing the kmalloc memory is the caller's, of course). 10373d0407baSopenharmony_ci * 10383d0407baSopenharmony_ci * A page may be used by anyone else who does a __get_free_page(). 10393d0407baSopenharmony_ci * In this case, page_count still tracks the references, and should only 10403d0407baSopenharmony_ci * be used through the normal accessor functions. The top bits of page->flags 10413d0407baSopenharmony_ci * and page->virtual store page management information, but all other fields 10423d0407baSopenharmony_ci * are unused and could be used privately, carefully. The management of this 10433d0407baSopenharmony_ci * page is the responsibility of the one who allocated it, and those who have 10443d0407baSopenharmony_ci * subsequently been given references to it. 
10453d0407baSopenharmony_ci * 10463d0407baSopenharmony_ci * The other pages (we may call them "pagecache pages") are completely 10473d0407baSopenharmony_ci * managed by the Linux memory manager: I/O, buffers, swapping etc. 10483d0407baSopenharmony_ci * The following discussion applies only to them. 10493d0407baSopenharmony_ci * 10503d0407baSopenharmony_ci * A pagecache page contains an opaque `private' member, which belongs to the 10513d0407baSopenharmony_ci * page's address_space. Usually, this is the address of a circular list of 10523d0407baSopenharmony_ci * the page's disk buffers. PG_private must be set to tell the VM to call 10533d0407baSopenharmony_ci * into the filesystem to release these pages. 10543d0407baSopenharmony_ci * 10553d0407baSopenharmony_ci * A page may belong to an inode's memory mapping. In this case, page->mapping 10563d0407baSopenharmony_ci * is the pointer to the inode, and page->index is the file offset of the page, 10573d0407baSopenharmony_ci * in units of PAGE_SIZE. 10583d0407baSopenharmony_ci * 10593d0407baSopenharmony_ci * If pagecache pages are not associated with an inode, they are said to be 10603d0407baSopenharmony_ci * anonymous pages. These may become associated with the swapcache, and in that 10613d0407baSopenharmony_ci * case PG_swapcache is set, and page->private is an offset into the swapcache. 10623d0407baSopenharmony_ci * 10633d0407baSopenharmony_ci * In either case (swapcache or inode backed), the pagecache itself holds one 10643d0407baSopenharmony_ci * reference to the page. Setting PG_private should also increment the 10653d0407baSopenharmony_ci * refcount. The each user mapping also has a reference to the page. 10663d0407baSopenharmony_ci * 10673d0407baSopenharmony_ci * The pagecache pages are stored in a per-mapping radix tree, which is 10683d0407baSopenharmony_ci * rooted at mapping->i_pages, and indexed by offset. 
10693d0407baSopenharmony_ci * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space 10703d0407baSopenharmony_ci * lists, we instead now tag pages as dirty/writeback in the radix tree. 10713d0407baSopenharmony_ci * 10723d0407baSopenharmony_ci * All pagecache pages may be subject to I/O: 10733d0407baSopenharmony_ci * - inode pages may need to be read from disk, 10743d0407baSopenharmony_ci * - inode pages which have been modified and are MAP_SHARED may need 10753d0407baSopenharmony_ci * to be written back to the inode on disk, 10763d0407baSopenharmony_ci * - anonymous pages (including MAP_PRIVATE file mappings) which have been 10773d0407baSopenharmony_ci * modified may need to be swapped out to swap space and (later) to be read 10783d0407baSopenharmony_ci * back into memory. 10793d0407baSopenharmony_ci */ 10803d0407baSopenharmony_ci 10813d0407baSopenharmony_ci/* 10823d0407baSopenharmony_ci * The zone field is never updated after free_area_init_core() 10833d0407baSopenharmony_ci * sets it, so none of the operations on it need to be atomic. 10843d0407baSopenharmony_ci */ 10853d0407baSopenharmony_ci 10863d0407baSopenharmony_ci/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ 10873d0407baSopenharmony_ci#define SECTIONS_PGOFF ((sizeof(unsigned long) * 8) - SECTIONS_WIDTH) 10883d0407baSopenharmony_ci#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) 10893d0407baSopenharmony_ci#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) 10903d0407baSopenharmony_ci#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) 10913d0407baSopenharmony_ci#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) 10923d0407baSopenharmony_ci 10933d0407baSopenharmony_ci/* 10943d0407baSopenharmony_ci * Define the bit shifts to access each section. For non-existent 10953d0407baSopenharmony_ci * sections we define the shift as 0; that plus a 0 mask ensures 10963d0407baSopenharmony_ci * the compiler will optimise away reference to them. 
10973d0407baSopenharmony_ci */ 10983d0407baSopenharmony_ci#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) 10993d0407baSopenharmony_ci#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) 11003d0407baSopenharmony_ci#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) 11013d0407baSopenharmony_ci#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) 11023d0407baSopenharmony_ci#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) 11033d0407baSopenharmony_ci 11043d0407baSopenharmony_ci/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ 11053d0407baSopenharmony_ci#ifdef NODE_NOT_IN_PAGE_FLAGS 11063d0407baSopenharmony_ci#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) 11073d0407baSopenharmony_ci#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? SECTIONS_PGOFF : ZONES_PGOFF) 11083d0407baSopenharmony_ci#else 11093d0407baSopenharmony_ci#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) 11103d0407baSopenharmony_ci#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? 
NODES_PGOFF : ZONES_PGOFF) 11113d0407baSopenharmony_ci#endif 11123d0407baSopenharmony_ci 11133d0407baSopenharmony_ci#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) 11143d0407baSopenharmony_ci 11153d0407baSopenharmony_ci#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) 11163d0407baSopenharmony_ci#define NODES_MASK ((1UL << NODES_WIDTH) - 1) 11173d0407baSopenharmony_ci#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) 11183d0407baSopenharmony_ci#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) 11193d0407baSopenharmony_ci#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) 11203d0407baSopenharmony_ci#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) 11213d0407baSopenharmony_ci 11223d0407baSopenharmony_cistatic inline enum zone_type page_zonenum(const struct page *page) 11233d0407baSopenharmony_ci{ 11243d0407baSopenharmony_ci ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); 11253d0407baSopenharmony_ci return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; 11263d0407baSopenharmony_ci} 11273d0407baSopenharmony_ci 11283d0407baSopenharmony_ci#ifdef CONFIG_ZONE_DEVICE 11293d0407baSopenharmony_cistatic inline bool is_zone_device_page(const struct page *page) 11303d0407baSopenharmony_ci{ 11313d0407baSopenharmony_ci return page_zonenum(page) == ZONE_DEVICE; 11323d0407baSopenharmony_ci} 11333d0407baSopenharmony_ciextern void memmap_init_zone_device(struct zone *, unsigned long, unsigned long, struct dev_pagemap *); 11343d0407baSopenharmony_ci#else 11353d0407baSopenharmony_cistatic inline bool is_zone_device_page(const struct page *page) 11363d0407baSopenharmony_ci{ 11373d0407baSopenharmony_ci return false; 11383d0407baSopenharmony_ci} 11393d0407baSopenharmony_ci#endif 11403d0407baSopenharmony_ci 11413d0407baSopenharmony_ci#ifdef CONFIG_DEV_PAGEMAP_OPS 11423d0407baSopenharmony_civoid free_devmap_managed_page(struct page *page); 11433d0407baSopenharmony_ciDECLARE_STATIC_KEY_FALSE(devmap_managed_key); 11443d0407baSopenharmony_ci 
11453d0407baSopenharmony_cistatic inline bool page_is_devmap_managed(struct page *page) 11463d0407baSopenharmony_ci{ 11473d0407baSopenharmony_ci if (!static_branch_unlikely(&devmap_managed_key)) { 11483d0407baSopenharmony_ci return false; 11493d0407baSopenharmony_ci } 11503d0407baSopenharmony_ci if (!is_zone_device_page(page)) { 11513d0407baSopenharmony_ci return false; 11523d0407baSopenharmony_ci } 11533d0407baSopenharmony_ci switch (page->pgmap->type) { 11543d0407baSopenharmony_ci case MEMORY_DEVICE_PRIVATE: 11553d0407baSopenharmony_ci case MEMORY_DEVICE_FS_DAX: 11563d0407baSopenharmony_ci return true; 11573d0407baSopenharmony_ci default: 11583d0407baSopenharmony_ci break; 11593d0407baSopenharmony_ci } 11603d0407baSopenharmony_ci return false; 11613d0407baSopenharmony_ci} 11623d0407baSopenharmony_ci 11633d0407baSopenharmony_civoid put_devmap_managed_page(struct page *page); 11643d0407baSopenharmony_ci 11653d0407baSopenharmony_ci#else /* CONFIG_DEV_PAGEMAP_OPS */ 11663d0407baSopenharmony_cistatic inline bool page_is_devmap_managed(struct page *page) 11673d0407baSopenharmony_ci{ 11683d0407baSopenharmony_ci return false; 11693d0407baSopenharmony_ci} 11703d0407baSopenharmony_ci 11713d0407baSopenharmony_cistatic inline void put_devmap_managed_page(struct page *page) 11723d0407baSopenharmony_ci{ 11733d0407baSopenharmony_ci} 11743d0407baSopenharmony_ci#endif /* CONFIG_DEV_PAGEMAP_OPS */ 11753d0407baSopenharmony_ci 11763d0407baSopenharmony_cistatic inline bool is_device_private_page(const struct page *page) 11773d0407baSopenharmony_ci{ 11783d0407baSopenharmony_ci return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && IS_ENABLED(CONFIG_DEVICE_PRIVATE) && is_zone_device_page(page) && 11793d0407baSopenharmony_ci page->pgmap->type == MEMORY_DEVICE_PRIVATE; 11803d0407baSopenharmony_ci} 11813d0407baSopenharmony_ci 11823d0407baSopenharmony_cistatic inline bool is_pci_p2pdma_page(const struct page *page) 11833d0407baSopenharmony_ci{ 11843d0407baSopenharmony_ci return 
IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && IS_ENABLED(CONFIG_PCI_P2PDMA) && is_zone_device_page(page) && 11853d0407baSopenharmony_ci page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; 11863d0407baSopenharmony_ci} 11873d0407baSopenharmony_ci 11883d0407baSopenharmony_ci/* 127: arbitrary random number, small enough to assemble well */ 11893d0407baSopenharmony_ci#define page_ref_zero_or_close_to_overflow(page) ((unsigned int)page_ref_count(page) + 127u <= 127u) 11903d0407baSopenharmony_ci 11913d0407baSopenharmony_cistatic inline void get_page(struct page *page) 11923d0407baSopenharmony_ci{ 11933d0407baSopenharmony_ci page = compound_head(page); 11943d0407baSopenharmony_ci /* 11953d0407baSopenharmony_ci * Getting a normal page or the head of a compound page 11963d0407baSopenharmony_ci * requires to already have an elevated page->_refcount. 11973d0407baSopenharmony_ci */ 11983d0407baSopenharmony_ci VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page); 11993d0407baSopenharmony_ci page_ref_inc(page); 12003d0407baSopenharmony_ci} 12013d0407baSopenharmony_ci 12023d0407baSopenharmony_cibool __must_check try_grab_page(struct page *page, unsigned int flags); 12033d0407baSopenharmony_ci 12043d0407baSopenharmony_cistatic inline __must_check bool try_get_page(struct page *page) 12053d0407baSopenharmony_ci{ 12063d0407baSopenharmony_ci page = compound_head(page); 12073d0407baSopenharmony_ci if (WARN_ON_ONCE(page_ref_count(page) <= 0)) { 12083d0407baSopenharmony_ci return false; 12093d0407baSopenharmony_ci } 12103d0407baSopenharmony_ci page_ref_inc(page); 12113d0407baSopenharmony_ci return true; 12123d0407baSopenharmony_ci} 12133d0407baSopenharmony_ci 12143d0407baSopenharmony_cistatic inline void put_page(struct page *page) 12153d0407baSopenharmony_ci{ 12163d0407baSopenharmony_ci page = compound_head(page); 12173d0407baSopenharmony_ci /* 12183d0407baSopenharmony_ci * For devmap managed pages we need to catch refcount transition from 12193d0407baSopenharmony_ci * 2 to 1, when 
refcount reach one it means the page is free and we 12203d0407baSopenharmony_ci * need to inform the device driver through callback. See 12213d0407baSopenharmony_ci * include/linux/memremap.h and HMM for details. 12223d0407baSopenharmony_ci */ 12233d0407baSopenharmony_ci if (page_is_devmap_managed(page)) { 12243d0407baSopenharmony_ci put_devmap_managed_page(page); 12253d0407baSopenharmony_ci return; 12263d0407baSopenharmony_ci } 12273d0407baSopenharmony_ci 12283d0407baSopenharmony_ci if (put_page_testzero(page)) { 12293d0407baSopenharmony_ci __put_page(page); 12303d0407baSopenharmony_ci } 12313d0407baSopenharmony_ci} 12323d0407baSopenharmony_ci 12333d0407baSopenharmony_ci/* 12343d0407baSopenharmony_ci * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload 12353d0407baSopenharmony_ci * the page's refcount so that two separate items are tracked: the original page 12363d0407baSopenharmony_ci * reference count, and also a new count of how many pin_user_pages() calls were 12373d0407baSopenharmony_ci * made against the page. ("gup-pinned" is another term for the latter). 12383d0407baSopenharmony_ci * 12393d0407baSopenharmony_ci * With this scheme, pin_user_pages() becomes special: such pages are marked as 12403d0407baSopenharmony_ci * distinct from normal pages. As such, the unpin_user_page() call (and its 12413d0407baSopenharmony_ci * variants) must be used in order to release gup-pinned pages. 
12423d0407baSopenharmony_ci * 12433d0407baSopenharmony_ci * Choice of value: 12443d0407baSopenharmony_ci * 12453d0407baSopenharmony_ci * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference 12463d0407baSopenharmony_ci * counts with respect to pin_user_pages() and unpin_user_page() becomes 12473d0407baSopenharmony_ci * simpler, due to the fact that adding an even power of two to the page 12483d0407baSopenharmony_ci * refcount has the effect of using only the upper N bits, for the code that 12493d0407baSopenharmony_ci * counts up using the bias value. This means that the lower bits are left for 12503d0407baSopenharmony_ci * the exclusive use of the original code that increments and decrements by one 12513d0407baSopenharmony_ci * (or at least, by much smaller values than the bias value). 12523d0407baSopenharmony_ci * 12533d0407baSopenharmony_ci * Of course, once the lower bits overflow into the upper bits (and this is 12543d0407baSopenharmony_ci * OK, because subtraction recovers the original values), then visual inspection 12553d0407baSopenharmony_ci * no longer suffices to directly view the separate counts. However, for normal 12563d0407baSopenharmony_ci * applications that don't have huge page reference counts, this won't be an 12573d0407baSopenharmony_ci * issue. 12583d0407baSopenharmony_ci * 12593d0407baSopenharmony_ci * Locking: the lockless algorithm described in page_cache_get_speculative() 12603d0407baSopenharmony_ci * and page_cache_gup_pin_speculative() provides safe operation for 12613d0407baSopenharmony_ci * get_user_pages and page_mkclean and other calls that race to set up page 12623d0407baSopenharmony_ci * table entries. 
12633d0407baSopenharmony_ci */ 12643d0407baSopenharmony_ci#define GUP_PIN_COUNTING_BIAS (1U << 10) 12653d0407baSopenharmony_ci 12663d0407baSopenharmony_civoid unpin_user_page(struct page *page); 12673d0407baSopenharmony_civoid unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty); 12683d0407baSopenharmony_civoid unpin_user_pages(struct page **pages, unsigned long npages); 12693d0407baSopenharmony_ci 12703d0407baSopenharmony_ci/** 12713d0407baSopenharmony_ci * page_maybe_dma_pinned() - report if a page is pinned for DMA. 12723d0407baSopenharmony_ci * 12733d0407baSopenharmony_ci * This function checks if a page has been pinned via a call to 12743d0407baSopenharmony_ci * pin_user_pages*(). 12753d0407baSopenharmony_ci * 12763d0407baSopenharmony_ci * For non-huge pages, the return value is partially fuzzy: false is not fuzzy, 12773d0407baSopenharmony_ci * because it means "definitely not pinned for DMA", but true means "probably 12783d0407baSopenharmony_ci * pinned for DMA, but possibly a false positive due to having at least 12793d0407baSopenharmony_ci * GUP_PIN_COUNTING_BIAS worth of normal page references". 12803d0407baSopenharmony_ci * 12813d0407baSopenharmony_ci * False positives are OK, because: a) it's unlikely for a page to get that many 12823d0407baSopenharmony_ci * refcounts, and b) all the callers of this routine are expected to be able to 12833d0407baSopenharmony_ci * deal gracefully with a false positive. 12843d0407baSopenharmony_ci * 12853d0407baSopenharmony_ci * For huge pages, the result will be exactly correct. That's because we have 12863d0407baSopenharmony_ci * more tracking data available: the 3rd struct page in the compound page is 12873d0407baSopenharmony_ci * used to track the pincount (instead using of the GUP_PIN_COUNTING_BIAS 12883d0407baSopenharmony_ci * scheme). 12893d0407baSopenharmony_ci * 12903d0407baSopenharmony_ci * For more information, please see Documentation/core-api/pin_user_pages.rst. 
12913d0407baSopenharmony_ci * 12923d0407baSopenharmony_ci * @page: pointer to page to be queried. 12933d0407baSopenharmony_ci * @Return: True, if it is likely that the page has been "dma-pinned". 12943d0407baSopenharmony_ci * False, if the page is definitely not dma-pinned. 12953d0407baSopenharmony_ci */ 12963d0407baSopenharmony_cistatic inline bool page_maybe_dma_pinned(struct page *page) 12973d0407baSopenharmony_ci{ 12983d0407baSopenharmony_ci if (hpage_pincount_available(page)) { 12993d0407baSopenharmony_ci return compound_pincount(page) > 0; 13003d0407baSopenharmony_ci } 13013d0407baSopenharmony_ci 13023d0407baSopenharmony_ci /* 13033d0407baSopenharmony_ci * page_ref_count() is signed. If that refcount overflows, then 13043d0407baSopenharmony_ci * page_ref_count() returns a negative value, and callers will avoid 13053d0407baSopenharmony_ci * further incrementing the refcount. 13063d0407baSopenharmony_ci * 13073d0407baSopenharmony_ci * Here, for that overflow case, use the signed bit to count a little 13083d0407baSopenharmony_ci * bit higher via unsigned math, and thus still get an accurate result. 13093d0407baSopenharmony_ci */ 13103d0407baSopenharmony_ci return ((unsigned int)page_ref_count(compound_head(page))) >= GUP_PIN_COUNTING_BIAS; 13113d0407baSopenharmony_ci} 13123d0407baSopenharmony_ci 13133d0407baSopenharmony_ci#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) 13143d0407baSopenharmony_ci#define SECTION_IN_PAGE_FLAGS 13153d0407baSopenharmony_ci#endif 13163d0407baSopenharmony_ci 13173d0407baSopenharmony_ci/* 13183d0407baSopenharmony_ci * The identification function is mainly used by the buddy allocator for 13193d0407baSopenharmony_ci * determining if two pages could be buddies. We are not really identifying 13203d0407baSopenharmony_ci * the zone since we could be using the section number id if we do not have 13213d0407baSopenharmony_ci * node id available in page flags. 
13223d0407baSopenharmony_ci * We only guarantee that it will return the same value for two combinable 13233d0407baSopenharmony_ci * pages in a zone. 13243d0407baSopenharmony_ci */ 13253d0407baSopenharmony_cistatic inline int page_zone_id(struct page *page) 13263d0407baSopenharmony_ci{ 13273d0407baSopenharmony_ci return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; 13283d0407baSopenharmony_ci} 13293d0407baSopenharmony_ci 13303d0407baSopenharmony_ci#ifdef NODE_NOT_IN_PAGE_FLAGS 13313d0407baSopenharmony_ciextern int page_to_nid(const struct page *page); 13323d0407baSopenharmony_ci#else 13333d0407baSopenharmony_cistatic inline int page_to_nid(const struct page *page) 13343d0407baSopenharmony_ci{ 13353d0407baSopenharmony_ci struct page *p = (struct page *)page; 13363d0407baSopenharmony_ci 13373d0407baSopenharmony_ci return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK; 13383d0407baSopenharmony_ci} 13393d0407baSopenharmony_ci#endif 13403d0407baSopenharmony_ci 13413d0407baSopenharmony_ci#ifdef CONFIG_NUMA_BALANCING 13423d0407baSopenharmony_cistatic inline int cpu_pid_to_cpupid(int cpu, int pid) 13433d0407baSopenharmony_ci{ 13443d0407baSopenharmony_ci return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK); 13453d0407baSopenharmony_ci} 13463d0407baSopenharmony_ci 13473d0407baSopenharmony_cistatic inline int cpupid_to_pid(int cpupid) 13483d0407baSopenharmony_ci{ 13493d0407baSopenharmony_ci return cpupid & LAST__PID_MASK; 13503d0407baSopenharmony_ci} 13513d0407baSopenharmony_ci 13523d0407baSopenharmony_cistatic inline int cpupid_to_cpu(int cpupid) 13533d0407baSopenharmony_ci{ 13543d0407baSopenharmony_ci return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK; 13553d0407baSopenharmony_ci} 13563d0407baSopenharmony_ci 13573d0407baSopenharmony_cistatic inline int cpupid_to_nid(int cpupid) 13583d0407baSopenharmony_ci{ 13593d0407baSopenharmony_ci return cpu_to_node(cpupid_to_cpu(cpupid)); 13603d0407baSopenharmony_ci} 13613d0407baSopenharmony_ci 
13623d0407baSopenharmony_cistatic inline bool cpupid_pid_unset(int cpupid) 13633d0407baSopenharmony_ci{ 13643d0407baSopenharmony_ci return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK); 13653d0407baSopenharmony_ci} 13663d0407baSopenharmony_ci 13673d0407baSopenharmony_cistatic inline bool cpupid_cpu_unset(int cpupid) 13683d0407baSopenharmony_ci{ 13693d0407baSopenharmony_ci return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK); 13703d0407baSopenharmony_ci} 13713d0407baSopenharmony_ci 13723d0407baSopenharmony_cistatic inline bool _cpupid_match_pid(pid_t task_pid, int cpupid) 13733d0407baSopenharmony_ci{ 13743d0407baSopenharmony_ci return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid); 13753d0407baSopenharmony_ci} 13763d0407baSopenharmony_ci 13773d0407baSopenharmony_ci#define cpupid_match_pid(task, cpupid) _cpupid_match_pid(task->pid, cpupid) 13783d0407baSopenharmony_ci#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 13793d0407baSopenharmony_cistatic inline int page_cpupid_xchg_last(struct page *page, int cpupid) 13803d0407baSopenharmony_ci{ 13813d0407baSopenharmony_ci return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); 13823d0407baSopenharmony_ci} 13833d0407baSopenharmony_ci 13843d0407baSopenharmony_cistatic inline int page_cpupid_last(struct page *page) 13853d0407baSopenharmony_ci{ 13863d0407baSopenharmony_ci return page->_last_cpupid; 13873d0407baSopenharmony_ci} 13883d0407baSopenharmony_cistatic inline void page_cpupid_reset_last(struct page *page) 13893d0407baSopenharmony_ci{ 13903d0407baSopenharmony_ci page->_last_cpupid = -1 & LAST_CPUPID_MASK; 13913d0407baSopenharmony_ci} 13923d0407baSopenharmony_ci#else 13933d0407baSopenharmony_cistatic inline int page_cpupid_last(struct page *page) 13943d0407baSopenharmony_ci{ 13953d0407baSopenharmony_ci return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; 13963d0407baSopenharmony_ci} 13973d0407baSopenharmony_ci 13983d0407baSopenharmony_ciextern int page_cpupid_xchg_last(struct page *page, int cpupid); 
13993d0407baSopenharmony_ci 14003d0407baSopenharmony_cistatic inline void page_cpupid_reset_last(struct page *page) 14013d0407baSopenharmony_ci{ 14023d0407baSopenharmony_ci page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; 14033d0407baSopenharmony_ci} 14043d0407baSopenharmony_ci#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ 14053d0407baSopenharmony_ci#else /* !CONFIG_NUMA_BALANCING */ 14063d0407baSopenharmony_cistatic inline int page_cpupid_xchg_last(struct page *page, int cpupid) 14073d0407baSopenharmony_ci{ 14083d0407baSopenharmony_ci return page_to_nid(page); /* XXX */ 14093d0407baSopenharmony_ci} 14103d0407baSopenharmony_ci 14113d0407baSopenharmony_cistatic inline int page_cpupid_last(struct page *page) 14123d0407baSopenharmony_ci{ 14133d0407baSopenharmony_ci return page_to_nid(page); /* XXX */ 14143d0407baSopenharmony_ci} 14153d0407baSopenharmony_ci 14163d0407baSopenharmony_cistatic inline int cpupid_to_nid(int cpupid) 14173d0407baSopenharmony_ci{ 14183d0407baSopenharmony_ci return -1; 14193d0407baSopenharmony_ci} 14203d0407baSopenharmony_ci 14213d0407baSopenharmony_cistatic inline int cpupid_to_pid(int cpupid) 14223d0407baSopenharmony_ci{ 14233d0407baSopenharmony_ci return -1; 14243d0407baSopenharmony_ci} 14253d0407baSopenharmony_ci 14263d0407baSopenharmony_cistatic inline int cpupid_to_cpu(int cpupid) 14273d0407baSopenharmony_ci{ 14283d0407baSopenharmony_ci return -1; 14293d0407baSopenharmony_ci} 14303d0407baSopenharmony_ci 14313d0407baSopenharmony_cistatic inline int cpu_pid_to_cpupid(int nid, int pid) 14323d0407baSopenharmony_ci{ 14333d0407baSopenharmony_ci return -1; 14343d0407baSopenharmony_ci} 14353d0407baSopenharmony_ci 14363d0407baSopenharmony_cistatic inline bool cpupid_pid_unset(int cpupid) 14373d0407baSopenharmony_ci{ 14383d0407baSopenharmony_ci return true; 14393d0407baSopenharmony_ci} 14403d0407baSopenharmony_ci 14413d0407baSopenharmony_cistatic inline void page_cpupid_reset_last(struct page *page) 14423d0407baSopenharmony_ci{ 
14433d0407baSopenharmony_ci} 14443d0407baSopenharmony_ci 14453d0407baSopenharmony_cistatic inline bool cpupid_match_pid(struct task_struct *task, int cpupid) 14463d0407baSopenharmony_ci{ 14473d0407baSopenharmony_ci return false; 14483d0407baSopenharmony_ci} 14493d0407baSopenharmony_ci#endif /* CONFIG_NUMA_BALANCING */ 14503d0407baSopenharmony_ci 14513d0407baSopenharmony_ci#ifdef CONFIG_KASAN_SW_TAGS 14523d0407baSopenharmony_ci 14533d0407baSopenharmony_ci/* 14543d0407baSopenharmony_ci * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid 14553d0407baSopenharmony_ci * setting tags for all pages to native kernel tag value 0xff, as the default 14563d0407baSopenharmony_ci * value 0x00 maps to 0xff. 14573d0407baSopenharmony_ci */ 14583d0407baSopenharmony_ci 14593d0407baSopenharmony_cistatic inline u8 page_kasan_tag(const struct page *page) 14603d0407baSopenharmony_ci{ 14613d0407baSopenharmony_ci u8 tag; 14623d0407baSopenharmony_ci 14633d0407baSopenharmony_ci tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; 14643d0407baSopenharmony_ci tag ^= 0xff; 14653d0407baSopenharmony_ci 14663d0407baSopenharmony_ci return tag; 14673d0407baSopenharmony_ci} 14683d0407baSopenharmony_ci 14693d0407baSopenharmony_cistatic inline void page_kasan_tag_set(struct page *page, u8 tag) 14703d0407baSopenharmony_ci{ 14713d0407baSopenharmony_ci tag ^= 0xff; 14723d0407baSopenharmony_ci page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); 14733d0407baSopenharmony_ci page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; 14743d0407baSopenharmony_ci} 14753d0407baSopenharmony_ci 14763d0407baSopenharmony_cistatic inline void page_kasan_tag_reset(struct page *page) 14773d0407baSopenharmony_ci{ 14783d0407baSopenharmony_ci page_kasan_tag_set(page, 0xff); 14793d0407baSopenharmony_ci} 14803d0407baSopenharmony_ci#else 14813d0407baSopenharmony_cistatic inline u8 page_kasan_tag(const struct page *page) 14823d0407baSopenharmony_ci{ 14833d0407baSopenharmony_ci return 0xff; 
14843d0407baSopenharmony_ci} 14853d0407baSopenharmony_ci 14863d0407baSopenharmony_cistatic inline void page_kasan_tag_set(struct page *page, u8 tag) 14873d0407baSopenharmony_ci{ 14883d0407baSopenharmony_ci} 14893d0407baSopenharmony_cistatic inline void page_kasan_tag_reset(struct page *page) 14903d0407baSopenharmony_ci{ 14913d0407baSopenharmony_ci} 14923d0407baSopenharmony_ci#endif 14933d0407baSopenharmony_ci 14943d0407baSopenharmony_cistatic inline struct zone *page_zone(const struct page *page) 14953d0407baSopenharmony_ci{ 14963d0407baSopenharmony_ci return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; 14973d0407baSopenharmony_ci} 14983d0407baSopenharmony_ci 14993d0407baSopenharmony_cistatic inline pg_data_t *page_pgdat(const struct page *page) 15003d0407baSopenharmony_ci{ 15013d0407baSopenharmony_ci return NODE_DATA(page_to_nid(page)); 15023d0407baSopenharmony_ci} 15033d0407baSopenharmony_ci 15043d0407baSopenharmony_ci#ifdef SECTION_IN_PAGE_FLAGS 15053d0407baSopenharmony_cistatic inline void set_page_section(struct page *page, unsigned long section) 15063d0407baSopenharmony_ci{ 15073d0407baSopenharmony_ci page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); 15083d0407baSopenharmony_ci page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; 15093d0407baSopenharmony_ci} 15103d0407baSopenharmony_ci 15113d0407baSopenharmony_cistatic inline unsigned long page_to_section(const struct page *page) 15123d0407baSopenharmony_ci{ 15133d0407baSopenharmony_ci return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 15143d0407baSopenharmony_ci} 15153d0407baSopenharmony_ci#endif 15163d0407baSopenharmony_ci 15173d0407baSopenharmony_cistatic inline void set_page_zone(struct page *page, enum zone_type zone) 15183d0407baSopenharmony_ci{ 15193d0407baSopenharmony_ci page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); 15203d0407baSopenharmony_ci page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; 15213d0407baSopenharmony_ci} 15223d0407baSopenharmony_ci 
15233d0407baSopenharmony_cistatic inline void set_page_node(struct page *page, unsigned long node) 15243d0407baSopenharmony_ci{ 15253d0407baSopenharmony_ci page->flags &= ~(NODES_MASK << NODES_PGSHIFT); 15263d0407baSopenharmony_ci page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; 15273d0407baSopenharmony_ci} 15283d0407baSopenharmony_ci 15293d0407baSopenharmony_cistatic inline void set_page_links(struct page *page, enum zone_type zone, unsigned long node, unsigned long pfn) 15303d0407baSopenharmony_ci{ 15313d0407baSopenharmony_ci set_page_zone(page, zone); 15323d0407baSopenharmony_ci set_page_node(page, node); 15333d0407baSopenharmony_ci#ifdef SECTION_IN_PAGE_FLAGS 15343d0407baSopenharmony_ci set_page_section(page, pfn_to_section_nr(pfn)); 15353d0407baSopenharmony_ci#endif 15363d0407baSopenharmony_ci} 15373d0407baSopenharmony_ci 15383d0407baSopenharmony_ci#ifdef CONFIG_MEMCG 15393d0407baSopenharmony_cistatic inline struct mem_cgroup *page_memcg(struct page *page) 15403d0407baSopenharmony_ci{ 15413d0407baSopenharmony_ci return page->mem_cgroup; 15423d0407baSopenharmony_ci} 15433d0407baSopenharmony_cistatic inline struct mem_cgroup *page_memcg_rcu(struct page *page) 15443d0407baSopenharmony_ci{ 15453d0407baSopenharmony_ci WARN_ON_ONCE(!rcu_read_lock_held()); 15463d0407baSopenharmony_ci return READ_ONCE(page->mem_cgroup); 15473d0407baSopenharmony_ci} 15483d0407baSopenharmony_ci#else 15493d0407baSopenharmony_cistatic inline struct mem_cgroup *page_memcg(struct page *page) 15503d0407baSopenharmony_ci{ 15513d0407baSopenharmony_ci return NULL; 15523d0407baSopenharmony_ci} 15533d0407baSopenharmony_cistatic inline struct mem_cgroup *page_memcg_rcu(struct page *page) 15543d0407baSopenharmony_ci{ 15553d0407baSopenharmony_ci WARN_ON_ONCE(!rcu_read_lock_held()); 15563d0407baSopenharmony_ci return NULL; 15573d0407baSopenharmony_ci} 15583d0407baSopenharmony_ci#endif 15593d0407baSopenharmony_ci 15603d0407baSopenharmony_ci/* 15613d0407baSopenharmony_ci * Some inline functions in 
vmstat.h depend on page_zone() 15623d0407baSopenharmony_ci */ 15633d0407baSopenharmony_ci#include <linux/vmstat.h> 15643d0407baSopenharmony_ci 15653d0407baSopenharmony_cistatic __always_inline void *lowmem_page_address(const struct page *page) 15663d0407baSopenharmony_ci{ 15673d0407baSopenharmony_ci return page_to_virt(page); 15683d0407baSopenharmony_ci} 15693d0407baSopenharmony_ci 15703d0407baSopenharmony_ci#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) 15713d0407baSopenharmony_ci#define HASHED_PAGE_VIRTUAL 15723d0407baSopenharmony_ci#endif 15733d0407baSopenharmony_ci 15743d0407baSopenharmony_ci#if defined(WANT_PAGE_VIRTUAL) 15753d0407baSopenharmony_cistatic inline void *page_address(const struct page *page) 15763d0407baSopenharmony_ci{ 15773d0407baSopenharmony_ci return page->virtual; 15783d0407baSopenharmony_ci} 15793d0407baSopenharmony_cistatic inline void set_page_address(struct page *page, void *address) 15803d0407baSopenharmony_ci{ 15813d0407baSopenharmony_ci page->virtual = address; 15823d0407baSopenharmony_ci} 15833d0407baSopenharmony_ci#define page_address_init() \ 15843d0407baSopenharmony_ci do { \ 15853d0407baSopenharmony_ci } while (0) 15863d0407baSopenharmony_ci#endif 15873d0407baSopenharmony_ci 15883d0407baSopenharmony_ci#if defined(HASHED_PAGE_VIRTUAL) 15893d0407baSopenharmony_civoid *page_address(const struct page *page); 15903d0407baSopenharmony_civoid set_page_address(struct page *page, void *virtual); 15913d0407baSopenharmony_civoid page_address_init(void); 15923d0407baSopenharmony_ci#endif 15933d0407baSopenharmony_ci 15943d0407baSopenharmony_ci#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL) 15953d0407baSopenharmony_ci#define page_address(page) lowmem_page_address(page) 15963d0407baSopenharmony_ci#define set_page_address(page, address) \ 15973d0407baSopenharmony_ci do { \ 15983d0407baSopenharmony_ci } while (0) 15993d0407baSopenharmony_ci#define page_address_init() \ 16003d0407baSopenharmony_ci do { \ 
16013d0407baSopenharmony_ci } while (0) 16023d0407baSopenharmony_ci#endif 16033d0407baSopenharmony_ci 16043d0407baSopenharmony_ciextern void *page_rmapping(struct page *page); 16053d0407baSopenharmony_ciextern struct anon_vma *page_anon_vma(struct page *page); 16063d0407baSopenharmony_ciextern struct address_space *page_mapping(struct page *page); 16073d0407baSopenharmony_ci 16083d0407baSopenharmony_ciextern struct address_space *__page_file_mapping(struct page *); 16093d0407baSopenharmony_ci 16103d0407baSopenharmony_cistatic inline struct address_space *page_file_mapping(struct page *page) 16113d0407baSopenharmony_ci{ 16123d0407baSopenharmony_ci if (unlikely(PageSwapCache(page))) { 16133d0407baSopenharmony_ci return __page_file_mapping(page); 16143d0407baSopenharmony_ci } 16153d0407baSopenharmony_ci 16163d0407baSopenharmony_ci return page->mapping; 16173d0407baSopenharmony_ci} 16183d0407baSopenharmony_ci 16193d0407baSopenharmony_ciextern pgoff_t __page_file_index(struct page *page); 16203d0407baSopenharmony_ci 16213d0407baSopenharmony_ci/* 16223d0407baSopenharmony_ci * Return the pagecache index of the passed page. 
Regular pagecache pages 16233d0407baSopenharmony_ci * use ->index whereas swapcache pages use swp_offset(->private) 16243d0407baSopenharmony_ci */ 16253d0407baSopenharmony_cistatic inline pgoff_t page_index(struct page *page) 16263d0407baSopenharmony_ci{ 16273d0407baSopenharmony_ci if (unlikely(PageSwapCache(page))) { 16283d0407baSopenharmony_ci return __page_file_index(page); 16293d0407baSopenharmony_ci } 16303d0407baSopenharmony_ci return page->index; 16313d0407baSopenharmony_ci} 16323d0407baSopenharmony_ci 16333d0407baSopenharmony_cibool page_mapped(struct page *page); 16343d0407baSopenharmony_cistruct address_space *page_mapping(struct page *page); 16353d0407baSopenharmony_cistruct address_space *page_mapping_file(struct page *page); 16363d0407baSopenharmony_ci 16373d0407baSopenharmony_ci/* 16383d0407baSopenharmony_ci * Return true only if the page has been allocated with 16393d0407baSopenharmony_ci * ALLOC_NO_WATERMARKS and the low watermark was not 16403d0407baSopenharmony_ci * met implying that the system is under some pressure. 16413d0407baSopenharmony_ci */ 16423d0407baSopenharmony_cistatic inline bool page_is_pfmemalloc(struct page *page) 16433d0407baSopenharmony_ci{ 16443d0407baSopenharmony_ci /* 16453d0407baSopenharmony_ci * Page index cannot be this large so this must be 16463d0407baSopenharmony_ci * a pfmemalloc page. 16473d0407baSopenharmony_ci */ 16483d0407baSopenharmony_ci return page->index == -1UL; 16493d0407baSopenharmony_ci} 16503d0407baSopenharmony_ci 16513d0407baSopenharmony_ci/* 16523d0407baSopenharmony_ci * Only to be called by the page allocator on a freshly allocated 16533d0407baSopenharmony_ci * page. 
 */
/*
 * Tag this page as having been allocated from the pfmemalloc reserves.
 * The marker is stored by reusing page->index (-1UL = pfmemalloc, 0 = not),
 * so it is only meaningful on pages whose ->index is not otherwise in use
 * (see page_is_pfmemalloc(); the explanatory comment starts above this chunk).
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

/* Clear the pfmemalloc marker stored in page->index. */
static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

/* Byte offset of a kernel virtual address within its page / huge page. */
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
/* Without an MMU there is no mlock; report it as unavailable. */
static inline bool can_do_mlock(void)
{
	return false;
}
#endif
/* Charge/uncharge SysV SHM_LOCK memory against the user's RLIMIT_MEMLOCK. */
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping; /* Check page->mapping if set */
	pgoff_t first_index; /* Lowest page->index to unmap */
	pgoff_t last_index; /* Highest page->index to unmap */
	struct page *single_page; /* Locked page to be unmapped */
};

/* Translate a pte/pmd into its "normal" struct page, or NULL for special mappings. */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd);

/* Tear down user mappings in the given address range. */
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long start, unsigned long end);

struct mmu_notifier_range;

/* Free page-table pages covering [addr, end), bounded by floor/ceiling. */
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor,
		    unsigned long ceiling);
/* Copy page tables from src_vma to dst_vma (fork path). */
int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
/*
 * Walk to the pte/pmd mapping @address, returning it locked via *ptlp.
 * follow_invalidate_pte() additionally initializes an mmu_notifier range.
 */
int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, struct mmu_notifier_range *range, pte_t **ptepp,
			  pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot,
		resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write);

/* Page-cache truncation helpers used by filesystems on i_size changes. */
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
/* Resolve a user page fault on @vma at @address; returns VM_FAULT_* bits. */
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags,
				  struct pt_regs *regs);
extern int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked);
void unmap_mapping_page(struct page *page);
void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows);
#else
/* !CONFIG_MMU stubs: faulting into an mm cannot happen without an MMU. */
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags,
					 struct pt_regs *regs)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags,
				   bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_page(struct page *page)
{
}
static inline void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows)
{
}
static inline void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen,
				       int even_cows)
{
}
#endif

/* As unmap_mapping_range(), but never touches private COW copies (even_cows=0). */
static inline void unmap_shared_mapping_range(struct address_space *mapping, loff_t const holebegin,
					      loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

/* Read/write another task's address space (ptrace-style access). */
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len,
			      unsigned int gup_flags);

/*
 * get_user_pages*() take a reference on each page; pin_user_pages*() are the
 * FOLL_PIN variants for long-term DMA-style pinning. *_remote operate on a
 * foreign mm; *_locked/*_unlocked differ in mmap_lock handling.
 */
long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
			   struct page **pages, struct vm_area_struct **vmas, int *locked);
long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags,
			   struct page **pages, struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas);
long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages,
			   int *locked);
long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages,
			   int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags);

int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages);

/* Account locked_vm pages against RLIMIT_MEMLOCK (inc=true to charge). */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, struct task_struct *task,
			bool bypass_rlim);

/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated; /* Number of frames we have space for */
	unsigned int nr_frames; /* Number of frames stored in ptrs array */
	bool got_ref; /* Did we pin pages by getting page ref? */
	bool is_pfns; /* Does array contain pages or pfns? */
	void *ptrs[]; /* Array of pinned pfns / pages. Use
		       * pfns_vector_pages() or pfns_vector_pfns()
		       * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns, unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

/* Number of frames currently stored in @vec. */
static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

/*
 * View @vec as an array of struct page pointers, converting from pfns first
 * if needed. Returns ERR_PTR() when the pfn->page conversion fails.
 */
static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);
		if (err) {
			return ERR_PTR(err);
		}
	}
	return (struct page **)(vec->ptrs);
}

/* View @vec as an array of pfns, converting from pages first if needed. */
static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns) {
		frame_vector_to_pfns(vec);
	}
	return (unsigned long *)(vec->ptrs);
}

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page *page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset, unsigned int length);

/* Page dirtying / writeback accounting API. */
void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping, struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
/* Drop a page's dirty state without writing it back. */
static inline void cancel_dirty_page(struct page *page)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (PageDirty(page)) {
		__cancel_dirty_page(page);
	}
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

/* Move page tables when a VMA is relocated (mremap). */
extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr,
				      struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len,
				      bool need_rmap_locks);

/*
 * Flags used by change_protection(). For now we make it a bitmap so
 * that we can pass in multiple flags just like parameters. However
 * for now all the callers are only use one of the flags at the same
 * time.
 */
/* Whether we should allow dirty bit accounting */
#define MM_CP_DIRTY_ACCT (1UL << 0)
/* Whether this protection change is for NUMA hints */
#define MM_CP_PROT_NUMA (1UL << 1)
/* Whether this change is for write protecting */
#define MM_CP_UFFD_WP (1UL << 2) /* do wp */
#define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | MM_CP_UFFD_WP_RESOLVE)

extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end,
				       pgprot_t newprot, unsigned long cp_flags);
extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int get_user_pages_fast_only(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast_only(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages);

/* Single-page convenience wrapper: true iff exactly one page was grabbed. */
static inline bool get_user_page_fast_only(unsigned long addr, unsigned int gup_flags, struct page **pagep)
{
	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}
/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);
#ifdef SPLIT_RSS_COUNTING
	/*
	 * counter is updated in asynchronous manner and may go to minus.
	 * But it's never be expected number for users.
	 */
	if (val < 0) {
		val = 0;
	}
#endif
	return (unsigned long)val;
}

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);

#ifdef CONFIG_RSS_THRESHOLD
/* OpenHarmony extension: notify listeners when an mm's RSS crosses a threshold. */
void listen_rss_threshold(struct mm_struct *mm);
#endif

/* Add @value to an RSS counter and emit the trace/threshold notifications. */
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);

#ifdef CONFIG_RSS_THRESHOLD
	listen_rss_threshold(mm);
#endif

	mm_trace_rss_stat(mm, member, count);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	long count = atomic_long_inc_return(&mm->rss_stat.count[member]);

#ifdef CONFIG_RSS_THRESHOLD
	listen_rss_threshold(mm);
#endif

	mm_trace_rss_stat(mm, member, count);
}

/*
 * NOTE(review): unlike the increment paths, no listen_rss_threshold() call
 * here — presumably a decrement can't newly exceed a threshold; confirm.
 */
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	long count = atomic_long_dec_return(&mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page)) {
		return MM_SHMEMPAGES;
	}
	return MM_FILEPAGES;
}

/* Which RSS counter (anon/shmem/file) accounts for @page. */
static inline int mm_counter(struct page *page)
{
	if (PageAnon(page)) {
		return MM_ANONPAGES;
	}
	return mm_counter_file(page);
}

/* Total resident set: file + anon + shmem pages. */
static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) + get_mm_counter(mm, MM_ANONPAGES) + get_mm_counter(mm, MM_SHMEMPAGES);
}

/* High-water marks: the recorded value may lag, so take max with current. */
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);
	if ((mm)->hiwater_rss < _rss) {
		(mm)->hiwater_rss = _rss;
	}
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm) {
		mm->hiwater_vm = mm->total_vm;
	}
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

/* Fold an mm's RSS high-water mark into *maxrss (used by getrusage paths). */
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
	if (*maxrss < hiwater_rss) {
		*maxrss = hiwater_rss;
	}
}

#if defined(SPLIT_RSS_COUNTING)
/* Flush this task's per-thread RSS deltas into mm->rss_stat. */
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
20233d0407baSopenharmony_cistatic inline int pte_special(pte_t pte) 20243d0407baSopenharmony_ci{ 20253d0407baSopenharmony_ci return 0; 20263d0407baSopenharmony_ci} 20273d0407baSopenharmony_ci 20283d0407baSopenharmony_cistatic inline pte_t pte_mkspecial(pte_t pte) 20293d0407baSopenharmony_ci{ 20303d0407baSopenharmony_ci return pte; 20313d0407baSopenharmony_ci} 20323d0407baSopenharmony_ci#endif 20333d0407baSopenharmony_ci 20343d0407baSopenharmony_ci#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP 20353d0407baSopenharmony_cistatic inline int pte_devmap(pte_t pte) 20363d0407baSopenharmony_ci{ 20373d0407baSopenharmony_ci return 0; 20383d0407baSopenharmony_ci} 20393d0407baSopenharmony_ci#endif 20403d0407baSopenharmony_ci 20413d0407baSopenharmony_ciint vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); 20423d0407baSopenharmony_ci 20433d0407baSopenharmony_ciextern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); 20443d0407baSopenharmony_cistatic inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) 20453d0407baSopenharmony_ci{ 20463d0407baSopenharmony_ci pte_t *ptep; 20473d0407baSopenharmony_ci __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); 20483d0407baSopenharmony_ci return ptep; 20493d0407baSopenharmony_ci} 20503d0407baSopenharmony_ci 20513d0407baSopenharmony_ci#ifdef __PAGETABLE_P4D_FOLDED 20523d0407baSopenharmony_cistatic inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 20533d0407baSopenharmony_ci{ 20543d0407baSopenharmony_ci return 0; 20553d0407baSopenharmony_ci} 20563d0407baSopenharmony_ci#else 20573d0407baSopenharmony_ciint __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); 20583d0407baSopenharmony_ci#endif 20593d0407baSopenharmony_ci 20603d0407baSopenharmony_ci#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU) 20613d0407baSopenharmony_cistatic inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long 
address) 20623d0407baSopenharmony_ci{ 20633d0407baSopenharmony_ci return 0; 20643d0407baSopenharmony_ci} 20653d0407baSopenharmony_cistatic inline void mm_inc_nr_puds(struct mm_struct *mm) 20663d0407baSopenharmony_ci{ 20673d0407baSopenharmony_ci} 20683d0407baSopenharmony_cistatic inline void mm_dec_nr_puds(struct mm_struct *mm) 20693d0407baSopenharmony_ci{ 20703d0407baSopenharmony_ci} 20713d0407baSopenharmony_ci 20723d0407baSopenharmony_ci#else 20733d0407baSopenharmony_ciint __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); 20743d0407baSopenharmony_ci 20753d0407baSopenharmony_cistatic inline void mm_inc_nr_puds(struct mm_struct *mm) 20763d0407baSopenharmony_ci{ 20773d0407baSopenharmony_ci if (mm_pud_folded(mm)) { 20783d0407baSopenharmony_ci return; 20793d0407baSopenharmony_ci } 20803d0407baSopenharmony_ci atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); 20813d0407baSopenharmony_ci} 20823d0407baSopenharmony_ci 20833d0407baSopenharmony_cistatic inline void mm_dec_nr_puds(struct mm_struct *mm) 20843d0407baSopenharmony_ci{ 20853d0407baSopenharmony_ci if (mm_pud_folded(mm)) { 20863d0407baSopenharmony_ci return; 20873d0407baSopenharmony_ci } 20883d0407baSopenharmony_ci atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); 20893d0407baSopenharmony_ci} 20903d0407baSopenharmony_ci#endif 20913d0407baSopenharmony_ci 20923d0407baSopenharmony_ci#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU) 20933d0407baSopenharmony_cistatic inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 20943d0407baSopenharmony_ci{ 20953d0407baSopenharmony_ci return 0; 20963d0407baSopenharmony_ci} 20973d0407baSopenharmony_ci 20983d0407baSopenharmony_cistatic inline void mm_inc_nr_pmds(struct mm_struct *mm) 20993d0407baSopenharmony_ci{ 21003d0407baSopenharmony_ci} 21013d0407baSopenharmony_cistatic inline void mm_dec_nr_pmds(struct mm_struct *mm) 21023d0407baSopenharmony_ci{ 21033d0407baSopenharmony_ci} 
21043d0407baSopenharmony_ci 21053d0407baSopenharmony_ci#else 21063d0407baSopenharmony_ciint __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); 21073d0407baSopenharmony_ci 21083d0407baSopenharmony_cistatic inline void mm_inc_nr_pmds(struct mm_struct *mm) 21093d0407baSopenharmony_ci{ 21103d0407baSopenharmony_ci if (mm_pmd_folded(mm)) { 21113d0407baSopenharmony_ci return; 21123d0407baSopenharmony_ci } 21133d0407baSopenharmony_ci atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); 21143d0407baSopenharmony_ci} 21153d0407baSopenharmony_ci 21163d0407baSopenharmony_cistatic inline void mm_dec_nr_pmds(struct mm_struct *mm) 21173d0407baSopenharmony_ci{ 21183d0407baSopenharmony_ci if (mm_pmd_folded(mm)) { 21193d0407baSopenharmony_ci return; 21203d0407baSopenharmony_ci } 21213d0407baSopenharmony_ci atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); 21223d0407baSopenharmony_ci} 21233d0407baSopenharmony_ci#endif 21243d0407baSopenharmony_ci 21253d0407baSopenharmony_ci#ifdef CONFIG_MMU 21263d0407baSopenharmony_cistatic inline void mm_pgtables_bytes_init(struct mm_struct *mm) 21273d0407baSopenharmony_ci{ 21283d0407baSopenharmony_ci atomic_long_set(&mm->pgtables_bytes, 0); 21293d0407baSopenharmony_ci} 21303d0407baSopenharmony_ci 21313d0407baSopenharmony_cistatic inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) 21323d0407baSopenharmony_ci{ 21333d0407baSopenharmony_ci return atomic_long_read(&mm->pgtables_bytes); 21343d0407baSopenharmony_ci} 21353d0407baSopenharmony_ci 21363d0407baSopenharmony_cistatic inline void mm_inc_nr_ptes(struct mm_struct *mm) 21373d0407baSopenharmony_ci{ 21383d0407baSopenharmony_ci atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); 21393d0407baSopenharmony_ci} 21403d0407baSopenharmony_ci 21413d0407baSopenharmony_cistatic inline void mm_dec_nr_ptes(struct mm_struct *mm) 21423d0407baSopenharmony_ci{ 21433d0407baSopenharmony_ci atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), 
&mm->pgtables_bytes); 21443d0407baSopenharmony_ci} 21453d0407baSopenharmony_ci#else 21463d0407baSopenharmony_ci 21473d0407baSopenharmony_cistatic inline void mm_pgtables_bytes_init(struct mm_struct *mm) 21483d0407baSopenharmony_ci{ 21493d0407baSopenharmony_ci} 21503d0407baSopenharmony_cistatic inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) 21513d0407baSopenharmony_ci{ 21523d0407baSopenharmony_ci return 0; 21533d0407baSopenharmony_ci} 21543d0407baSopenharmony_ci 21553d0407baSopenharmony_cistatic inline void mm_inc_nr_ptes(struct mm_struct *mm) 21563d0407baSopenharmony_ci{ 21573d0407baSopenharmony_ci} 21583d0407baSopenharmony_cistatic inline void mm_dec_nr_ptes(struct mm_struct *mm) 21593d0407baSopenharmony_ci{ 21603d0407baSopenharmony_ci} 21613d0407baSopenharmony_ci#endif 21623d0407baSopenharmony_ci 21633d0407baSopenharmony_ciint __pte_alloc(struct mm_struct *mm, pmd_t *pmd); 21643d0407baSopenharmony_ciint __pte_alloc_kernel(pmd_t *pmd); 21653d0407baSopenharmony_ci 21663d0407baSopenharmony_ci#if defined(CONFIG_MMU) 21673d0407baSopenharmony_ci 21683d0407baSopenharmony_cistatic inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 21693d0407baSopenharmony_ci{ 21703d0407baSopenharmony_ci return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? NULL : p4d_offset(pgd, address); 21713d0407baSopenharmony_ci} 21723d0407baSopenharmony_ci 21733d0407baSopenharmony_cistatic inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) 21743d0407baSopenharmony_ci{ 21753d0407baSopenharmony_ci return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? NULL : pud_offset(p4d, address); 21763d0407baSopenharmony_ci} 21773d0407baSopenharmony_ci 21783d0407baSopenharmony_cistatic inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 21793d0407baSopenharmony_ci{ 21803d0407baSopenharmony_ci return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ? 
NULL : pmd_offset(pud, address); 21813d0407baSopenharmony_ci} 21823d0407baSopenharmony_ci#endif /* CONFIG_MMU */ 21833d0407baSopenharmony_ci 21843d0407baSopenharmony_ci#if USE_SPLIT_PTE_PTLOCKS 21853d0407baSopenharmony_ci#if ALLOC_SPLIT_PTLOCKS 21863d0407baSopenharmony_civoid __init ptlock_cache_init(void); 21873d0407baSopenharmony_ciextern bool ptlock_alloc(struct page *page); 21883d0407baSopenharmony_ciextern void ptlock_free(struct page *page); 21893d0407baSopenharmony_ci 21903d0407baSopenharmony_cistatic inline spinlock_t *ptlock_ptr(struct page *page) 21913d0407baSopenharmony_ci{ 21923d0407baSopenharmony_ci return page->ptl; 21933d0407baSopenharmony_ci} 21943d0407baSopenharmony_ci#else /* ALLOC_SPLIT_PTLOCKS */ 21953d0407baSopenharmony_cistatic inline void ptlock_cache_init(void) 21963d0407baSopenharmony_ci{ 21973d0407baSopenharmony_ci} 21983d0407baSopenharmony_ci 21993d0407baSopenharmony_cistatic inline bool ptlock_alloc(struct page *page) 22003d0407baSopenharmony_ci{ 22013d0407baSopenharmony_ci return true; 22023d0407baSopenharmony_ci} 22033d0407baSopenharmony_ci 22043d0407baSopenharmony_cistatic inline void ptlock_free(struct page *page) 22053d0407baSopenharmony_ci{ 22063d0407baSopenharmony_ci} 22073d0407baSopenharmony_ci 22083d0407baSopenharmony_cistatic inline spinlock_t *ptlock_ptr(struct page *page) 22093d0407baSopenharmony_ci{ 22103d0407baSopenharmony_ci return &page->ptl; 22113d0407baSopenharmony_ci} 22123d0407baSopenharmony_ci#endif /* ALLOC_SPLIT_PTLOCKS */ 22133d0407baSopenharmony_ci 22143d0407baSopenharmony_cistatic inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 22153d0407baSopenharmony_ci{ 22163d0407baSopenharmony_ci return ptlock_ptr(pmd_page(*pmd)); 22173d0407baSopenharmony_ci} 22183d0407baSopenharmony_ci 22193d0407baSopenharmony_cistatic inline bool ptlock_init(struct page *page) 22203d0407baSopenharmony_ci{ 22213d0407baSopenharmony_ci /* 22223d0407baSopenharmony_ci * prep_new_page() initialize page->private (and therefore 
page->ptl) 22233d0407baSopenharmony_ci * with 0. Make sure nobody took it in use in between. 22243d0407baSopenharmony_ci * 22253d0407baSopenharmony_ci * It can happen if arch try to use slab for page table allocation: 22263d0407baSopenharmony_ci * slab code uses page->slab_cache, which share storage with page->ptl. 22273d0407baSopenharmony_ci */ 22283d0407baSopenharmony_ci VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); 22293d0407baSopenharmony_ci if (!ptlock_alloc(page)) { 22303d0407baSopenharmony_ci return false; 22313d0407baSopenharmony_ci } 22323d0407baSopenharmony_ci spin_lock_init(ptlock_ptr(page)); 22333d0407baSopenharmony_ci return true; 22343d0407baSopenharmony_ci} 22353d0407baSopenharmony_ci 22363d0407baSopenharmony_ci#else /* !USE_SPLIT_PTE_PTLOCKS */ 22373d0407baSopenharmony_ci/* 22383d0407baSopenharmony_ci * We use mm->page_table_lock to guard all pagetable pages of the mm. 22393d0407baSopenharmony_ci */ 22403d0407baSopenharmony_cistatic inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 22413d0407baSopenharmony_ci{ 22423d0407baSopenharmony_ci return &mm->page_table_lock; 22433d0407baSopenharmony_ci} 22443d0407baSopenharmony_cistatic inline void ptlock_cache_init(void) 22453d0407baSopenharmony_ci{ 22463d0407baSopenharmony_ci} 22473d0407baSopenharmony_cistatic inline bool ptlock_init(struct page *page) 22483d0407baSopenharmony_ci{ 22493d0407baSopenharmony_ci return true; 22503d0407baSopenharmony_ci} 22513d0407baSopenharmony_cistatic inline void ptlock_free(struct page *page) 22523d0407baSopenharmony_ci{ 22533d0407baSopenharmony_ci} 22543d0407baSopenharmony_ci#endif /* USE_SPLIT_PTE_PTLOCKS */ 22553d0407baSopenharmony_ci 22563d0407baSopenharmony_cistatic inline void pgtable_init(void) 22573d0407baSopenharmony_ci{ 22583d0407baSopenharmony_ci ptlock_cache_init(); 22593d0407baSopenharmony_ci pgtable_cache_init(); 22603d0407baSopenharmony_ci} 22613d0407baSopenharmony_ci 22623d0407baSopenharmony_cistatic inline bool 
pgtable_pte_page_ctor(struct page *page)
{
	if (!ptlock_init(page)) {
		return false;
	}
	__SetPageTable(page);
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

/* Undo pgtable_pte_page_ctor() before freeing a PTE-table page. */
static inline void pgtable_pte_page_dtor(struct page *page)
{
	ptlock_free(page);
	__ClearPageTable(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

/*
 * Map a PTE and take its lock: stores the lock in *(ptlp) and evaluates
 * to the mapped pte_t *.  Pair with pte_unmap_unlock().
 */
#define pte_offset_map_lock(mm, pmd, address, ptlp) \
	( { \
		spinlock_t *__ptl = pte_lockptr(mm, pmd); \
		pte_t *__pte = pte_offset_map(pmd, address); \
		*(ptlp) = __ptl; \
		spin_lock(__ptl); \
		__pte; \
	})

#define pte_unmap_unlock(pte, ptl) \
	do { \
		spin_unlock(ptl); \
		pte_unmap(pte); \
	} while (0)

/* Evaluates non-zero only when allocating the missing PTE table failed. */
#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))

#define pte_alloc_map(mm, pmd, address) (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
	(pte_alloc(mm, pmd) ? NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address) \
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ? NULL : pte_offset_kernel(pmd, address))

#if USE_SPLIT_PMD_PTLOCKS

/* struct page backing the PMD table that contains @pmd. */
static struct page *pmd_to_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long)pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pmd_ptlock_init(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pmd_ptlock_free(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* deposited pgtable must have been withdrawn before the free */
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pmd_ptlock_init(struct page *page)
{
	return true;
}
static inline void pmd_ptlock_free(struct page *page)
{
}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

/* Lock and return the spinlock guarding the PMD table containing @pmd. */
static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

/* Mark and account a page used as a PMD table; false if ptl init failed. */
static inline bool pgtable_pmd_page_ctor(struct page *page)
{
	if (!pmd_ptlock_init(page)) {
		return false;
	}
	__SetPageTable(page);
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

/* Undo pgtable_pmd_page_ctor() before freeing a PMD-table page. */
static inline void pgtable_pmd_page_dtor(struct page *page)
{
	pmd_ptlock_free(page);
	__ClearPageTable(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as the PMD locks to make it easier
if we decide to. The VM should not be
 * considered ready to switch to split PUD locks yet; there may be places
 * which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

/* Lock and return the (per-mm) spinlock guarding PUD entries. */
static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}

extern void __init pagecache_init(void);
extern void __init free_area_init_memoryless_node(int nid);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it's within range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end, int poison, const char *s);

#ifdef CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

/* As __free_reserved_page(), but also credit it to the managed-page count. */
static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

/* Take a page out of buddy management and mark it reserved. */
static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
24473d0407baSopenharmony_ci */ 24483d0407baSopenharmony_cistatic inline unsigned long free_initmem_default(int poison) 24493d0407baSopenharmony_ci{ 24503d0407baSopenharmony_ci extern char __init_begin[], __init_end[]; 24513d0407baSopenharmony_ci 24523d0407baSopenharmony_ci return free_reserved_area(&__init_begin, &__init_end, poison, "unused kernel"); 24533d0407baSopenharmony_ci} 24543d0407baSopenharmony_ci 24553d0407baSopenharmony_cistatic inline unsigned long get_num_physpages(void) 24563d0407baSopenharmony_ci{ 24573d0407baSopenharmony_ci int nid; 24583d0407baSopenharmony_ci unsigned long phys_pages = 0; 24593d0407baSopenharmony_ci 24603d0407baSopenharmony_ci for_each_online_node(nid) phys_pages += node_present_pages(nid); 24613d0407baSopenharmony_ci 24623d0407baSopenharmony_ci return phys_pages; 24633d0407baSopenharmony_ci} 24643d0407baSopenharmony_ci 24653d0407baSopenharmony_ci/* 24663d0407baSopenharmony_ci * Using memblock node mappings, an architecture may initialise its 24673d0407baSopenharmony_ci * zones, allocate the backing mem_map and account for memory holes in an 24683d0407baSopenharmony_ci * architecture independent manner. 24693d0407baSopenharmony_ci * 24703d0407baSopenharmony_ci * An architecture is expected to register range of page frames backed by 24713d0407baSopenharmony_ci * physical memory with memblock_add[_node]() before calling 24723d0407baSopenharmony_ci * free_area_init() passing in the PFN each zone ends at. 
At a basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * memblock_add_node(base, size, nid)
 * free_area_init(max_zone_pfns);
 */
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* Single-node configuration: every pfn trivially belongs to node 0. */
static inline int early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function.
 */
extern int __meminit __early_pfn_to_nid(unsigned long pfn, struct mminit_pfnnid_cache *state);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context,
			     struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo *val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif

extern __printf(3, 4) void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;
extern bool arch_has_descending_max_zone_pfns(void);

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node, struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node, struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node, struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
						    unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, unsigned long start,
						   unsigned long last);

/* Iterate all VMAs in @root overlapping the pgoff interval [start, last]. */
#define vma_interval_tree_foreach(vma, root, start, last) \
	for (vma = vma_interval_tree_iter_first(root, start, last); vma; \
	     vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node, struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node, struct rb_root_cached *root);
struct anon_vma_chain *anon_vma_interval_tree_iter_first(struct rb_root_cached *root, unsigned long start,
							 unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(struct anon_vma_chain *node, unsigned long start,
							unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last) \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); avc; \
	     avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff,
			struct vm_area_struct *insert, struct vm_area_struct *expand);
/* Convenience wrapper around __vma_adjust() with no "expand" VMA. */
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff,
			     struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
					unsigned long end, unsigned long vm_flags, struct anon_vma *, struct file *,
					pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *, unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *,
struct vm_area_struct *, struct rb_node **, struct rb_node *); 25743d0407baSopenharmony_ciextern void unlink_file_vma(struct vm_area_struct *); 25753d0407baSopenharmony_ciextern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff, 25763d0407baSopenharmony_ci bool *need_rmap_locks); 25773d0407baSopenharmony_ciextern void exit_mmap(struct mm_struct *); 25783d0407baSopenharmony_ci 25793d0407baSopenharmony_cistatic inline int check_data_rlimit(unsigned long rlim, unsigned long new, unsigned long start, unsigned long end_data, 25803d0407baSopenharmony_ci unsigned long start_data) 25813d0407baSopenharmony_ci{ 25823d0407baSopenharmony_ci if (rlim < RLIM_INFINITY) { 25833d0407baSopenharmony_ci if (((new - start) + (end_data - start_data)) > rlim) { 25843d0407baSopenharmony_ci return -ENOSPC; 25853d0407baSopenharmony_ci } 25863d0407baSopenharmony_ci } 25873d0407baSopenharmony_ci 25883d0407baSopenharmony_ci return 0; 25893d0407baSopenharmony_ci} 25903d0407baSopenharmony_ci 25913d0407baSopenharmony_ciextern int mm_take_all_locks(struct mm_struct *mm); 25923d0407baSopenharmony_ciextern void mm_drop_all_locks(struct mm_struct *mm); 25933d0407baSopenharmony_ci 25943d0407baSopenharmony_ciextern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); 25953d0407baSopenharmony_ciextern struct file *get_mm_exe_file(struct mm_struct *mm); 25963d0407baSopenharmony_ciextern struct file *get_task_exe_file(struct task_struct *task); 25973d0407baSopenharmony_ci 25983d0407baSopenharmony_ciextern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); 25993d0407baSopenharmony_ciextern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); 26003d0407baSopenharmony_ci 26013d0407baSopenharmony_ciextern bool vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm); 26023d0407baSopenharmony_ciextern struct vm_area_struct *_install_special_mapping(struct 
mm_struct *mm, unsigned long addr, unsigned long len,
						       unsigned long flags, const struct vm_special_mapping *spec);
/* This is an obsolete alternative to _install_special_mapping. */
extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags,
				   struct page **pages);

unsigned long randomize_stack_top(unsigned long stack_top);
unsigned long randomize_page(unsigned long start, unsigned long range);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags,
				 unsigned long pgoff, struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot,
			     unsigned long flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int __do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len, int ignore_errors);
/* Best-effort pre-fault of [addr, addr+len); failures are deliberately ignored. */
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void)__mm_populate(addr, len, 1);
}
#else
/* !CONFIG_MMU: nothing to populate. */
static inline void mm_populate(unsigned long addr, unsigned long len)
{
}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long, unsigned long, unsigned long, unsigned long,
					  unsigned long);

/* Search parameters for vm_unmapped_area(); see mm/mmap.c. */
struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;		/* VM_UNMAPPED_AREA_TOPDOWN or 0 */
	unsigned long length;		/* size of the gap to find */
	unsigned long low_limit;	/* lowest acceptable address */
	unsigned long high_limit;	/* highest acceptable end address */
	unsigned long align_mask;	/* required alignment mask */
	unsigned long align_offset;	/* offset within the alignment */
};

extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *, loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops
exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

/* Size of the gap kept between a stack VMA and its neighbour. */
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma, unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none. Assume start_addr < end_addr.
*/ 26853d0407baSopenharmony_cistatic inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, unsigned long start_addr, 26863d0407baSopenharmony_ci unsigned long end_addr) 26873d0407baSopenharmony_ci{ 26883d0407baSopenharmony_ci struct vm_area_struct *vma = find_vma(mm, start_addr); 26893d0407baSopenharmony_ci 26903d0407baSopenharmony_ci if (vma && end_addr <= vma->vm_start) { 26913d0407baSopenharmony_ci vma = NULL; 26923d0407baSopenharmony_ci } 26933d0407baSopenharmony_ci return vma; 26943d0407baSopenharmony_ci} 26953d0407baSopenharmony_ci 26963d0407baSopenharmony_cistatic inline unsigned long vm_start_gap(struct vm_area_struct *vma) 26973d0407baSopenharmony_ci{ 26983d0407baSopenharmony_ci unsigned long vm_start = vma->vm_start; 26993d0407baSopenharmony_ci 27003d0407baSopenharmony_ci if (vma->vm_flags & VM_GROWSDOWN) { 27013d0407baSopenharmony_ci vm_start -= stack_guard_gap; 27023d0407baSopenharmony_ci if (vm_start > vma->vm_start) { 27033d0407baSopenharmony_ci vm_start = 0; 27043d0407baSopenharmony_ci } 27053d0407baSopenharmony_ci } 27063d0407baSopenharmony_ci return vm_start; 27073d0407baSopenharmony_ci} 27083d0407baSopenharmony_ci 27093d0407baSopenharmony_cistatic inline unsigned long vm_end_gap(struct vm_area_struct *vma) 27103d0407baSopenharmony_ci{ 27113d0407baSopenharmony_ci unsigned long vm_end = vma->vm_end; 27123d0407baSopenharmony_ci 27133d0407baSopenharmony_ci if (vma->vm_flags & VM_GROWSUP) { 27143d0407baSopenharmony_ci vm_end += stack_guard_gap; 27153d0407baSopenharmony_ci if (vm_end < vma->vm_end) { 27163d0407baSopenharmony_ci vm_end = -PAGE_SIZE; 27173d0407baSopenharmony_ci } 27183d0407baSopenharmony_ci } 27193d0407baSopenharmony_ci return vm_end; 27203d0407baSopenharmony_ci} 27213d0407baSopenharmony_ci 27223d0407baSopenharmony_cistatic inline unsigned long vma_pages(struct vm_area_struct *vma) 27233d0407baSopenharmony_ci{ 27243d0407baSopenharmony_ci return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 27253d0407baSopenharmony_ci} 
27263d0407baSopenharmony_ci 27273d0407baSopenharmony_ci/* Look up the first VMA which exactly match the interval vm_start ... vm_end */ 27283d0407baSopenharmony_cistatic inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, unsigned long vm_start, unsigned long vm_end) 27293d0407baSopenharmony_ci{ 27303d0407baSopenharmony_ci struct vm_area_struct *vma = find_vma(mm, vm_start); 27313d0407baSopenharmony_ci 27323d0407baSopenharmony_ci if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) { 27333d0407baSopenharmony_ci vma = NULL; 27343d0407baSopenharmony_ci } 27353d0407baSopenharmony_ci 27363d0407baSopenharmony_ci return vma; 27373d0407baSopenharmony_ci} 27383d0407baSopenharmony_ci 27393d0407baSopenharmony_cistatic inline bool range_in_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end) 27403d0407baSopenharmony_ci{ 27413d0407baSopenharmony_ci return (vma && vma->vm_start <= start && end <= vma->vm_end); 27423d0407baSopenharmony_ci} 27433d0407baSopenharmony_ci 27443d0407baSopenharmony_ci#ifdef CONFIG_MMU 27453d0407baSopenharmony_cipgprot_t vm_get_page_prot(unsigned long vm_flags); 27463d0407baSopenharmony_civoid vma_set_page_prot(struct vm_area_struct *vma); 27473d0407baSopenharmony_ci#else 27483d0407baSopenharmony_cistatic inline pgprot_t vm_get_page_prot(unsigned long vm_flags) 27493d0407baSopenharmony_ci{ 27503d0407baSopenharmony_ci return __pgprot(0); 27513d0407baSopenharmony_ci} 27523d0407baSopenharmony_cistatic inline void vma_set_page_prot(struct vm_area_struct *vma) 27533d0407baSopenharmony_ci{ 27543d0407baSopenharmony_ci vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 27553d0407baSopenharmony_ci} 27563d0407baSopenharmony_ci#endif 27573d0407baSopenharmony_ci 27583d0407baSopenharmony_ci#ifdef CONFIG_NUMA_BALANCING 27593d0407baSopenharmony_ciunsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long start, unsigned long end); 27603d0407baSopenharmony_ci#endif 27613d0407baSopenharmony_ci 
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
/* Helpers for mapping pages/pfns into user VMAs (see mm/memory.c). */
int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn);
vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

/*
 * vm_insert_page() with the errno translated to a vm_fault_t, for use from
 * fault handlers: -ENOMEM -> OOM, other errors (except -EBUSY) -> SIGBUS.
 */
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);
	if (err == -ENOMEM) {
		return VM_FAULT_OOM;
	}
	if (err < 0 && err != -EBUSY) {
		return VM_FAULT_SIGBUS;
27833d0407baSopenharmony_ci } 27843d0407baSopenharmony_ci return VM_FAULT_NOPAGE; 27853d0407baSopenharmony_ci} 27863d0407baSopenharmony_ci 27873d0407baSopenharmony_ci#ifndef io_remap_pfn_range 27883d0407baSopenharmony_cistatic inline int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, 27893d0407baSopenharmony_ci unsigned long size, pgprot_t prot) 27903d0407baSopenharmony_ci{ 27913d0407baSopenharmony_ci return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); 27923d0407baSopenharmony_ci} 27933d0407baSopenharmony_ci#endif 27943d0407baSopenharmony_ci 27953d0407baSopenharmony_cistatic inline vm_fault_t vmf_error(int err) 27963d0407baSopenharmony_ci{ 27973d0407baSopenharmony_ci if (err == -ENOMEM) { 27983d0407baSopenharmony_ci return VM_FAULT_OOM; 27993d0407baSopenharmony_ci } 28003d0407baSopenharmony_ci return VM_FAULT_SIGBUS; 28013d0407baSopenharmony_ci} 28023d0407baSopenharmony_ci 28033d0407baSopenharmony_cistruct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags); 28043d0407baSopenharmony_ci 28053d0407baSopenharmony_ci#define FOLL_WRITE 0x01 /* check pte is writable */ 28063d0407baSopenharmony_ci#define FOLL_TOUCH 0x02 /* mark page accessed */ 28073d0407baSopenharmony_ci#define FOLL_GET 0x04 /* do get_page on page */ 28083d0407baSopenharmony_ci#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ 28093d0407baSopenharmony_ci#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ 28103d0407baSopenharmony_ci#define FOLL_NOWAIT \ 28113d0407baSopenharmony_ci 0x20 /* if a disk transfer is needed, start the IO \ 28123d0407baSopenharmony_ci * and return without waiting upon it */ 28133d0407baSopenharmony_ci#define FOLL_POPULATE 0x40 /* fault in page */ 28143d0407baSopenharmony_ci#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ 28153d0407baSopenharmony_ci#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ 
28163d0407baSopenharmony_ci#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ 28173d0407baSopenharmony_ci#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ 28183d0407baSopenharmony_ci#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ 28193d0407baSopenharmony_ci#define FOLL_MLOCK 0x1000 /* lock present pages */ 28203d0407baSopenharmony_ci#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 28213d0407baSopenharmony_ci#define FOLL_COW 0x4000 /* internal GUP flag */ 28223d0407baSopenharmony_ci#define FOLL_ANON 0x8000 /* don't do file mappings */ 28233d0407baSopenharmony_ci#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */ 28243d0407baSopenharmony_ci#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */ 28253d0407baSopenharmony_ci#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */ 28263d0407baSopenharmony_ci#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */ 28273d0407baSopenharmony_ci 28283d0407baSopenharmony_ci/* 28293d0407baSopenharmony_ci * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each 28303d0407baSopenharmony_ci * other. Here is what they mean, and how to use them: 28313d0407baSopenharmony_ci * 28323d0407baSopenharmony_ci * FOLL_LONGTERM indicates that the page will be held for an indefinite time 28333d0407baSopenharmony_ci * period _often_ under userspace control. This is in contrast to 28343d0407baSopenharmony_ci * iov_iter_get_pages(), whose usages are transient. 28353d0407baSopenharmony_ci * 28363d0407baSopenharmony_ci * For pages which are part of a filesystem, mappings are subject to the 28373d0407baSopenharmony_ci * lifetime enforced by the filesystem and we need guarantees that longterm 28383d0407baSopenharmony_ci * users like RDMA and V4L2 only establish mappings which coordinate usage with 28393d0407baSopenharmony_ci * the filesystem. 
Ideas for this coordination include revoking the longterm 28403d0407baSopenharmony_ci * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was 28413d0407baSopenharmony_ci * added after the problem with filesystems was found FS DAX VMAs are 28423d0407baSopenharmony_ci * specifically failed. Filesystem pages are still subject to bugs and use of 28433d0407baSopenharmony_ci * FOLL_LONGTERM should be avoided on those pages. 28443d0407baSopenharmony_ci * 28453d0407baSopenharmony_ci * Also NOTE that FOLL_LONGTERM is not supported in every GUP call. 28463d0407baSopenharmony_ci * Currently only get_user_pages() and get_user_pages_fast() support this flag 28473d0407baSopenharmony_ci * and calls to get_user_pages_[un]locked are specifically not allowed. This 28483d0407baSopenharmony_ci * is due to an incompatibility with the FS DAX check and 28493d0407baSopenharmony_ci * FAULT_FLAG_ALLOW_RETRY. 28503d0407baSopenharmony_ci * 28513d0407baSopenharmony_ci * In the CMA case: long term pins in a CMA region would unnecessarily fragment 28523d0407baSopenharmony_ci * that region. And so, CMA attempts to migrate the page before pinning, when 28533d0407baSopenharmony_ci * FOLL_LONGTERM is specified. 28543d0407baSopenharmony_ci * 28553d0407baSopenharmony_ci * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount, 28563d0407baSopenharmony_ci * but an additional pin counting system) will be invoked. This is intended for 28573d0407baSopenharmony_ci * anything that gets a page reference and then touches page data (for example, 28583d0407baSopenharmony_ci * Direct IO). This lets the filesystem know that some non-file-system entity is 28593d0407baSopenharmony_ci * potentially changing the pages' data. In contrast to FOLL_GET (whose pages 28603d0407baSopenharmony_ci * are released via put_page()), FOLL_PIN pages must be released, ultimately, by 28613d0407baSopenharmony_ci * a call to unpin_user_page(). 
28623d0407baSopenharmony_ci * 28633d0407baSopenharmony_ci * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different 28643d0407baSopenharmony_ci * and separate refcounting mechanisms, however, and that means that each has 28653d0407baSopenharmony_ci * its own acquire and release mechanisms: 28663d0407baSopenharmony_ci * 28673d0407baSopenharmony_ci * FOLL_GET: get_user_pages*() to acquire, and put_page() to release. 28683d0407baSopenharmony_ci * 28693d0407baSopenharmony_ci * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release. 28703d0407baSopenharmony_ci * 28713d0407baSopenharmony_ci * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call. 28723d0407baSopenharmony_ci * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based 28733d0407baSopenharmony_ci * calls applied to them, and that's perfectly OK. This is a constraint on the 28743d0407baSopenharmony_ci * callers, not on the pages.) 28753d0407baSopenharmony_ci * 28763d0407baSopenharmony_ci * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never 28773d0407baSopenharmony_ci * directly by the caller. That's in order to help avoid mismatches when 28783d0407baSopenharmony_ci * releasing pages: get_user_pages*() pages must be released via put_page(), 28793d0407baSopenharmony_ci * while pin_user_pages*() pages must be released via unpin_user_page(). 28803d0407baSopenharmony_ci * 28813d0407baSopenharmony_ci * Please see Documentation/core-api/pin_user_pages.rst for more information. 
28823d0407baSopenharmony_ci */ 28833d0407baSopenharmony_ci 28843d0407baSopenharmony_cistatic inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) 28853d0407baSopenharmony_ci{ 28863d0407baSopenharmony_ci if (vm_fault & VM_FAULT_OOM) { 28873d0407baSopenharmony_ci return -ENOMEM; 28883d0407baSopenharmony_ci } 28893d0407baSopenharmony_ci if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) { 28903d0407baSopenharmony_ci return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT; 28913d0407baSopenharmony_ci } 28923d0407baSopenharmony_ci if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { 28933d0407baSopenharmony_ci return -EFAULT; 28943d0407baSopenharmony_ci } 28953d0407baSopenharmony_ci return 0; 28963d0407baSopenharmony_ci} 28973d0407baSopenharmony_ci 28983d0407baSopenharmony_citypedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data); 28993d0407baSopenharmony_ciextern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, 29003d0407baSopenharmony_ci void *data); 29013d0407baSopenharmony_ciextern int apply_to_existing_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, 29023d0407baSopenharmony_ci void *data); 29033d0407baSopenharmony_ci 29043d0407baSopenharmony_ci#ifdef CONFIG_PAGE_POISONING 29053d0407baSopenharmony_ciextern bool page_poisoning_enabled(void); 29063d0407baSopenharmony_ciextern void kernel_poison_pages(struct page *page, int numpages, int enable); 29073d0407baSopenharmony_ci#else 29083d0407baSopenharmony_cistatic inline bool page_poisoning_enabled(void) 29093d0407baSopenharmony_ci{ 29103d0407baSopenharmony_ci return false; 29113d0407baSopenharmony_ci} 29123d0407baSopenharmony_cistatic inline bool page_poisoning_enabled_static(void) 29133d0407baSopenharmony_ci{ 29143d0407baSopenharmony_ci return false; 29153d0407baSopenharmony_ci} 29163d0407baSopenharmony_cistatic inline void _kernel_poison_pages(struct page *page, int nunmpages) 
29173d0407baSopenharmony_ci{ 29183d0407baSopenharmony_ci} 29193d0407baSopenharmony_cistatic inline void kernel_poison_pages(struct page *page, int numpages, int enable) 29203d0407baSopenharmony_ci{ 29213d0407baSopenharmony_ci} 29223d0407baSopenharmony_ci#endif 29233d0407baSopenharmony_ci 29243d0407baSopenharmony_ci#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON 29253d0407baSopenharmony_ciDECLARE_STATIC_KEY_TRUE(init_on_alloc); 29263d0407baSopenharmony_ci#else 29273d0407baSopenharmony_ciDECLARE_STATIC_KEY_FALSE(init_on_alloc); 29283d0407baSopenharmony_ci#endif 29293d0407baSopenharmony_cistatic inline bool want_init_on_alloc(gfp_t flags) 29303d0407baSopenharmony_ci{ 29313d0407baSopenharmony_ci if (static_branch_unlikely(&init_on_alloc) && !page_poisoning_enabled()) { 29323d0407baSopenharmony_ci return true; 29333d0407baSopenharmony_ci } 29343d0407baSopenharmony_ci return flags & __GFP_ZERO; 29353d0407baSopenharmony_ci} 29363d0407baSopenharmony_ci 29373d0407baSopenharmony_ci#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON 29383d0407baSopenharmony_ciDECLARE_STATIC_KEY_TRUE(init_on_free); 29393d0407baSopenharmony_ci#else 29403d0407baSopenharmony_ciDECLARE_STATIC_KEY_FALSE(init_on_free); 29413d0407baSopenharmony_ci#endif 29423d0407baSopenharmony_cistatic inline bool want_init_on_free(void) 29433d0407baSopenharmony_ci{ 29443d0407baSopenharmony_ci return static_branch_unlikely(&init_on_free) && !page_poisoning_enabled(); 29453d0407baSopenharmony_ci} 29463d0407baSopenharmony_ci 29473d0407baSopenharmony_ci#ifdef CONFIG_DEBUG_PAGEALLOC 29483d0407baSopenharmony_ciextern void init_debug_pagealloc(void); 29493d0407baSopenharmony_ci#else 29503d0407baSopenharmony_cistatic inline void init_debug_pagealloc(void) 29513d0407baSopenharmony_ci{ 29523d0407baSopenharmony_ci} 29533d0407baSopenharmony_ci#endif 29543d0407baSopenharmony_ciextern bool _debug_pagealloc_enabled_early; 29553d0407baSopenharmony_ciDECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); 29563d0407baSopenharmony_ci 
/* Slow, early-boot-safe check: reads the boot-time flag directly. */
static inline bool debug_pagealloc_enabled(void)
{
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled_early;
}

/*
 * For use in fast paths after init_debug_pagealloc() has run, or when a
 * false negative result is not harmful when called too early.
 */
static inline bool debug_pagealloc_enabled_static(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
		return false;
	}

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

/*
 * When called in DEBUG_PAGEALLOC context, the call should most likely be
 * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
 */
static inline void kernel_map_pages(struct page *page, int numpages, int enable)
{
	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
/* Without a direct-map editing facility these become no-ops. */
static inline void kernel_map_pages(struct page *page, int numpages, int enable)
{
}
#ifdef CONFIG_HIBERNATION
/* The direct map is never altered here, so every page stays present. */
static inline bool kernel_page_present(struct page *page)
{
	return true;
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */

/* Architecture-provided "gate" VMA (e.g. vsyscall page); stubs otherwise. */
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif /* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
#endif
void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif

/* Sparse-memory / vmemmap population interfaces. */
void *sparse_buffer_alloc(unsigned long size);
struct page *__populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, struct vmem_altmap *altmap);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node, struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, unsigned long nr_pages);

/* memory_failure() flag bits */
enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED, /* Error: cannot be handled */
	MF_FAILED, /* Error: handling failed */
	MF_DELAYED, /* Will be handled later */
	MF_RECOVERED, /* Successfully recovered */
};

/* Page classification used in memory-failure action reporting. */
enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_DAX,
	MF_MSG_UNSPLIT_THP,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page, unsigned long addr_hint, unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src, unsigned long addr_hint,
                                struct vm_area_struct *vma,
                                unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page, const void __user *usr_src,
                                     unsigned int pages_per_huge_page, bool allow_pagefault);

/**
 * vma_is_special_huge - Are transhuge page-table entries considered special?
 * @vma: Pointer to the struct vm_area_struct to consider
 *
 * Whether transhuge page-table entries are considered "special" following
 * the definition in vm_normal_page().
 *
 * Return: true if transhuge page-table entries should be considered special,
 * false otherwise.
 */
static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
{
	return vma_is_dax(vma) || (vma->vm_file && (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
	if (!debug_guardpage_enabled()) {
		return false;
	}

	return PageGuard(page);
}
#else
/* Guard pages compiled out: constant stubs. */
static inline unsigned int debug_guardpage_minorder(void)
{
	return 0;
}
static inline bool debug_guardpage_enabled(void)
{
	return false;
}
static inline bool page_is_guard(struct page *page)
{
	return false;
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void)
{
}
#endif

extern int memcmp_pages(struct page *page1, struct page *page2);

/* Nonzero when both pages have byte-identical contents. */
static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping, pgoff_t first_index, pgoff_t nr,
                                                pgoff_t bitmap_pgoff, unsigned long *bitmap, pgoff_t *start,
                                                pgoff_t *end);

unsigned long wp_shared_mapping_range(struct address_space *mapping, pgoff_t first_index, pgoff_t nr);
#endif

extern int sysctl_nr_trim_pages;

/**
 * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
 * @seals: the seals to check
 * @vma: the vma to operate on
 *
 * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
 * the vma flags. Return 0 if check pass, or <0 for errors.
 */
static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
{
	if (seals & F_SEAL_FUTURE_WRITE) {
		/*
		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
		 * "future write" seal active.
		 */
		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) {
			return -EPERM;
		}

		/*
		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
		 * MAP_SHARED and read-only, take care to not allow mprotect to
		 * revert protections on such mappings. Do this only for shared
		 * mappings. For private mappings, don't need to mask
		 * VM_MAYWRITE as we still want them to be COW-writable.
		 */
		if (vma->vm_flags & VM_SHARED) {
			vma->vm_flags &= ~(VM_MAYWRITE);
		}
	}

	return 0;
}

#ifdef CONFIG_ANON_VMA_NAME
int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, unsigned long len_in,
                          struct anon_vma_name *anon_name);
#else
/* Anonymous-VMA naming compiled out: accept and ignore the request. */
static inline int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, unsigned long len_in,
                                        struct anon_vma_name *anon_name)
{
	return 0;
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */