// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

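/*
 * Deferred freeing of vmalloc areas: callers that cannot sleep queue
 * objects on this per-CPU llist, and the associated work item frees
 * them later from process context.
 */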
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(ptep_get(pte)));

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

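/*
 * Try to map [addr, end) with a single huge PMD entry. This is only done
 * when the architecture supports it, @max_page_shift permits PMD-sized
 * mappings, the range is exactly PMD_SIZE long and both the virtual and
 * the physical address are PMD-aligned.
 */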
static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

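/*
 * Map the physically contiguous range [addr, end) to @phys_addr in the
 * kernel page table, using the largest page sizes that @max_page_shift
 * and the architecture allow. Caches and TLBs are not flushed here;
 * that is the caller's responsibility.
 */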
static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

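/*
 * Map a physical range into kernel virtual address space for ioremap.
 * Executable permission is stripped via pgprot_nx() and huge mappings
 * are limited by ioremap_max_page_shift (see the "nohugeiomap" early
 * parameter above).
 */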
int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}

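/*
 * Clear every PTE in [addr, end). Warn if an entry is neither none nor
 * present, which would indicate an unexpected or corrupted mapping.
 */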
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vmap() before calling
 * this function, and flush_tlb_kernel_range() after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte))))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

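/*
 * Map the array of pages into [addr, end) using PAGE_SIZE ptes only.
 * This is the path taken when huge vmalloc mappings are disabled or the
 * caller asked for PAGE_SHIFT granularity.
 */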
static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

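/*
 * Same as __vmap_pages_range_noflush(), but first gives KMSAN a chance
 * to set up its metadata mappings for the range.
 */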
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

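/*
 * Vmap areas that were freed lazily are queued on this tree/list and are
 * drained in batches, so the TLB flushes for many areas can be coalesced
 * into a single one.
 */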
static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from the slab we reuse an object from this cache to
 * make things faster, especially in the "no edge" split of a
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and for merging when a vmap
 * object is released.
 *
 * Each vmap_area node stores the maximum available free block
 * size of its sub-tree, left or right. This makes it possible to
 * find the lowest match for a free area of a given size.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from atomic context, so that
 * more permissive allocation masks can be used.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

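/* Report the number of pages currently used by vmalloc mappings. */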
unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

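/*
 * Look up the vmap_area that contains @addr in the given rb-tree,
 * or return NULL if there is none.
 */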
static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the address of the rb_node link (left or right
 * child pointer) under which the new node should be attached, and stores
 * the parent node via @parent for further processing.
 *
 * NULL is returned if the new range overlaps an existing one. In that
 * case the insertion must be declined and is considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction;
	 * "link" is where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also sanity-check the ranges:
		 * warn and bail out if the new range partially or fully
		 * overlaps an existing one.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

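/*
 * Return the list node that will immediately follow the new vmap_area
 * in the address-sorted list, based on the parent and link found by
 * find_va_links(), or NULL if the tree is empty.
 */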
static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. there is no
		 * free vmap space. Normally this does not happen, but we
		 * handle the case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Perform a simple insertion into the tree. We do not set
		 * va->subtree_max_size to its current size before calling
		 * rb_insert_augmented(), because the tree is populated from
		 * the bottom up to the parent levels once the node _is_ in
		 * the tree.
		 *
		 * Therefore subtree_max_size is set to zero after insertion,
		 * letting augment_tree_propagate_from() put everything into
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Called when a node is removed from the tree and on rotations.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from the bottom towards the
 * upper levels, starting from the given VA. The propagation must be done
 * whenever a VA's size is modified by changing its va_start/va_end, or
 * when a new VA is inserted into the tree.
 *
 * It means that augment_tree_propagate_from() must be called:
 * - after a VA has been inserted into the tree (free path);
 * - after a VA has been shrunk (allocation path);
 * - after a VA has been enlarged (merging path).
 *
 * Please note that this does not mean that the upper parent nodes and
 * their subtree_max_size are recalculated all the way up to the root
 * node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example, if we modify node 4, shrinking it to 2, then no
 * modification is required at all. If we shrink node 2 to 1, only its
 * own subtree_max_size is updated, to 1. If we shrink node 8 to 6, then
 * its subtree_max_size is set to 6 and the parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

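/*
 * Insert @va into the rb-tree @root and the address-sorted list @head
 * without maintaining the augmented subtree size.
 */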
static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

110862306a36Sopenharmony_ci/*
110962306a36Sopenharmony_ci * Merge a de-allocated chunk of VA memory with the previous
111062306a36Sopenharmony_ci * and next free blocks. If no coalescing is done, a new
111162306a36Sopenharmony_ci * free area is inserted. If the VA has been merged, it is
111262306a36Sopenharmony_ci * freed.
111362306a36Sopenharmony_ci *
111462306a36Sopenharmony_ci * Please note, it can return NULL in case of overlapping
111562306a36Sopenharmony_ci * ranges, followed by a WARN() report. Despite being buggy
111662306a36Sopenharmony_ci * behaviour, the system can stay alive and keep
111762306a36Sopenharmony_ci * going.
111862306a36Sopenharmony_ci */
111962306a36Sopenharmony_cistatic __always_inline struct vmap_area *
112062306a36Sopenharmony_ci__merge_or_add_vmap_area(struct vmap_area *va,
112162306a36Sopenharmony_ci	struct rb_root *root, struct list_head *head, bool augment)
112262306a36Sopenharmony_ci{
112362306a36Sopenharmony_ci	struct vmap_area *sibling;
112462306a36Sopenharmony_ci	struct list_head *next;
112562306a36Sopenharmony_ci	struct rb_node **link;
112662306a36Sopenharmony_ci	struct rb_node *parent;
112762306a36Sopenharmony_ci	bool merged = false;
112862306a36Sopenharmony_ci
112962306a36Sopenharmony_ci	/*
113062306a36Sopenharmony_ci	 * Find a place in the tree where VA potentially will be
113162306a36Sopenharmony_ci	 * inserted, unless it is merged with its sibling/siblings.
113262306a36Sopenharmony_ci	 */
113362306a36Sopenharmony_ci	link = find_va_links(va, root, NULL, &parent);
113462306a36Sopenharmony_ci	if (!link)
113562306a36Sopenharmony_ci		return NULL;
113662306a36Sopenharmony_ci
113762306a36Sopenharmony_ci	/*
113862306a36Sopenharmony_ci	 * Get next node of VA to check if merging can be done.
113962306a36Sopenharmony_ci	 */
114062306a36Sopenharmony_ci	next = get_va_next_sibling(parent, link);
114162306a36Sopenharmony_ci	if (unlikely(next == NULL))
114262306a36Sopenharmony_ci		goto insert;
114362306a36Sopenharmony_ci
114462306a36Sopenharmony_ci	/*
114562306a36Sopenharmony_ci	 * start            end
114662306a36Sopenharmony_ci	 * |                |
114762306a36Sopenharmony_ci	 * |<------VA------>|<-----Next----->|
114862306a36Sopenharmony_ci	 *                  |                |
114962306a36Sopenharmony_ci	 *                  start            end
115062306a36Sopenharmony_ci	 */
115162306a36Sopenharmony_ci	if (next != head) {
115262306a36Sopenharmony_ci		sibling = list_entry(next, struct vmap_area, list);
115362306a36Sopenharmony_ci		if (sibling->va_start == va->va_end) {
115462306a36Sopenharmony_ci			sibling->va_start = va->va_start;
115562306a36Sopenharmony_ci
115662306a36Sopenharmony_ci			/* Free vmap_area object. */
115762306a36Sopenharmony_ci			kmem_cache_free(vmap_area_cachep, va);
115862306a36Sopenharmony_ci
115962306a36Sopenharmony_ci			/* Point to the new merged area. */
116062306a36Sopenharmony_ci			va = sibling;
116162306a36Sopenharmony_ci			merged = true;
116262306a36Sopenharmony_ci		}
116362306a36Sopenharmony_ci	}
116462306a36Sopenharmony_ci
116562306a36Sopenharmony_ci	/*
116662306a36Sopenharmony_ci	 * start            end
116762306a36Sopenharmony_ci	 * |                |
116862306a36Sopenharmony_ci	 * |<-----Prev----->|<------VA------>|
116962306a36Sopenharmony_ci	 *                  |                |
117062306a36Sopenharmony_ci	 *                  start            end
117162306a36Sopenharmony_ci	 */
117262306a36Sopenharmony_ci	if (next->prev != head) {
117362306a36Sopenharmony_ci		sibling = list_entry(next->prev, struct vmap_area, list);
117462306a36Sopenharmony_ci		if (sibling->va_end == va->va_start) {
117562306a36Sopenharmony_ci			/*
117662306a36Sopenharmony_ci			 * If both neighbors are coalesced, it is important
117762306a36Sopenharmony_ci			 * to unlink the "next" node first, followed by merging
117862306a36Sopenharmony_ci			 * with "previous" one. Otherwise the tree might not be
117962306a36Sopenharmony_ci			 * fully populated if a sibling's augmented value is
118062306a36Sopenharmony_ci			 * "normalized" because of rotation operations.
118162306a36Sopenharmony_ci			 */
118262306a36Sopenharmony_ci			if (merged)
118362306a36Sopenharmony_ci				__unlink_va(va, root, augment);
118462306a36Sopenharmony_ci
118562306a36Sopenharmony_ci			sibling->va_end = va->va_end;
118662306a36Sopenharmony_ci
118762306a36Sopenharmony_ci			/* Free vmap_area object. */
118862306a36Sopenharmony_ci			kmem_cache_free(vmap_area_cachep, va);
118962306a36Sopenharmony_ci
119062306a36Sopenharmony_ci			/* Point to the new merged area. */
119162306a36Sopenharmony_ci			va = sibling;
119262306a36Sopenharmony_ci			merged = true;
119362306a36Sopenharmony_ci		}
119462306a36Sopenharmony_ci	}
119562306a36Sopenharmony_ci
119662306a36Sopenharmony_ciinsert:
119762306a36Sopenharmony_ci	if (!merged)
119862306a36Sopenharmony_ci		__link_va(va, root, parent, link, head, augment);
119962306a36Sopenharmony_ci
120062306a36Sopenharmony_ci	return va;
120162306a36Sopenharmony_ci}
120262306a36Sopenharmony_ci
120362306a36Sopenharmony_cistatic __always_inline struct vmap_area *
120462306a36Sopenharmony_cimerge_or_add_vmap_area(struct vmap_area *va,
120562306a36Sopenharmony_ci	struct rb_root *root, struct list_head *head)
120662306a36Sopenharmony_ci{
120762306a36Sopenharmony_ci	return __merge_or_add_vmap_area(va, root, head, false);
120862306a36Sopenharmony_ci}
120962306a36Sopenharmony_ci
121062306a36Sopenharmony_cistatic __always_inline struct vmap_area *
121162306a36Sopenharmony_cimerge_or_add_vmap_area_augment(struct vmap_area *va,
121262306a36Sopenharmony_ci	struct rb_root *root, struct list_head *head)
121362306a36Sopenharmony_ci{
121462306a36Sopenharmony_ci	va = __merge_or_add_vmap_area(va, root, head, true);
121562306a36Sopenharmony_ci	if (va)
121662306a36Sopenharmony_ci		augment_tree_propagate_from(va);
121762306a36Sopenharmony_ci
121862306a36Sopenharmony_ci	return va;
121962306a36Sopenharmony_ci}
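
/*
 * Usage sketch (illustrative only, addresses are made up): suppose the
 * free list contains [0x1000..0x2000) and [0x3000..0x4000) and a
 * detached VA covering [0x2000..0x3000) is returned to the free tree:
 *
 *	va->va_start = 0x2000;
 *	va->va_end   = 0x3000;
 *	va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
 *					    &free_vmap_area_list);
 *
 * The area first coalesces with its next sibling into [0x2000..0x4000),
 * then with the previous one, leaving a single free block
 * [0x1000..0x4000). Redundant vmap_area objects go back to
 * vmap_area_cachep and "va" points to the surviving, merged area. The
 * caller is expected to hold free_vmap_area_lock, as free_vmap_area()
 * does below.
 */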
122062306a36Sopenharmony_ci
122162306a36Sopenharmony_cistatic __always_inline bool
122262306a36Sopenharmony_ciis_within_this_va(struct vmap_area *va, unsigned long size,
122362306a36Sopenharmony_ci	unsigned long align, unsigned long vstart)
122462306a36Sopenharmony_ci{
122562306a36Sopenharmony_ci	unsigned long nva_start_addr;
122662306a36Sopenharmony_ci
122762306a36Sopenharmony_ci	if (va->va_start > vstart)
122862306a36Sopenharmony_ci		nva_start_addr = ALIGN(va->va_start, align);
122962306a36Sopenharmony_ci	else
123062306a36Sopenharmony_ci		nva_start_addr = ALIGN(vstart, align);
123162306a36Sopenharmony_ci
123262306a36Sopenharmony_ci	/* Can be overflowed due to big size or alignment. */
123362306a36Sopenharmony_ci	if (nva_start_addr + size < nva_start_addr ||
123462306a36Sopenharmony_ci			nva_start_addr < vstart)
123562306a36Sopenharmony_ci		return false;
123662306a36Sopenharmony_ci
123762306a36Sopenharmony_ci	return (nva_start_addr + size <= va->va_end);
123862306a36Sopenharmony_ci}
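
/*
 * Worked example (illustrative only): for a free VA [0x10000..0x30000),
 * size = 0x8000, align = 0x10000 and vstart = 0x14000:
 *
 *	nva_start_addr = ALIGN(0x14000, 0x10000) = 0x20000;
 *	0x20000 + 0x8000 = 0x28000 <= va->va_end (0x30000)
 *
 * so the request fits within this VA. The overflow check above guards
 * against "size" or "align" values big enough to wrap
 * nva_start_addr + size around, which would otherwise be reported as a
 * bogus fit.
 */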
123962306a36Sopenharmony_ci
124062306a36Sopenharmony_ci/*
124162306a36Sopenharmony_ci * Find the first free block (lowest start address) in the tree
124262306a36Sopenharmony_ci * that satisfies the request described by the passed
124362306a36Sopenharmony_ci * parameters. Please note, with an alignment bigger than PAGE_SIZE,
124462306a36Sopenharmony_ci * the search length is adjusted to account for worst-case alignment
124562306a36Sopenharmony_ci * overhead.
124662306a36Sopenharmony_ci */
124762306a36Sopenharmony_cistatic __always_inline struct vmap_area *
124862306a36Sopenharmony_cifind_vmap_lowest_match(struct rb_root *root, unsigned long size,
124962306a36Sopenharmony_ci	unsigned long align, unsigned long vstart, bool adjust_search_size)
125062306a36Sopenharmony_ci{
125162306a36Sopenharmony_ci	struct vmap_area *va;
125262306a36Sopenharmony_ci	struct rb_node *node;
125362306a36Sopenharmony_ci	unsigned long length;
125462306a36Sopenharmony_ci
125562306a36Sopenharmony_ci	/* Start from the root. */
125662306a36Sopenharmony_ci	node = root->rb_node;
125762306a36Sopenharmony_ci
125862306a36Sopenharmony_ci	/* Adjust the search size for alignment overhead. */
125962306a36Sopenharmony_ci	length = adjust_search_size ? size + align - 1 : size;
126062306a36Sopenharmony_ci
126162306a36Sopenharmony_ci	while (node) {
126262306a36Sopenharmony_ci		va = rb_entry(node, struct vmap_area, rb_node);
126362306a36Sopenharmony_ci
126462306a36Sopenharmony_ci		if (get_subtree_max_size(node->rb_left) >= length &&
126562306a36Sopenharmony_ci				vstart < va->va_start) {
126662306a36Sopenharmony_ci			node = node->rb_left;
126762306a36Sopenharmony_ci		} else {
126862306a36Sopenharmony_ci			if (is_within_this_va(va, size, align, vstart))
126962306a36Sopenharmony_ci				return va;
127062306a36Sopenharmony_ci
127162306a36Sopenharmony_ci			/*
127262306a36Sopenharmony_ci			 * It does not make sense to go deeper towards the right
127362306a36Sopenharmony_ci			 * sub-tree if it does not have a free block that is
127462306a36Sopenharmony_ci			 * equal to or bigger than the requested search length.
127562306a36Sopenharmony_ci			 */
127662306a36Sopenharmony_ci			if (get_subtree_max_size(node->rb_right) >= length) {
127762306a36Sopenharmony_ci				node = node->rb_right;
127862306a36Sopenharmony_ci				continue;
127962306a36Sopenharmony_ci			}
128062306a36Sopenharmony_ci
128162306a36Sopenharmony_ci			/*
128262306a36Sopenharmony_ci			 * OK. We roll back and find the first right sub-tree
128362306a36Sopenharmony_ci			 * that will satisfy the search criteria. It can happen
128462306a36Sopenharmony_ci			 * due to the "vstart" restriction or an alignment overhead
128562306a36Sopenharmony_ci			 * that is bigger than PAGE_SIZE.
128662306a36Sopenharmony_ci			 */
128762306a36Sopenharmony_ci			while ((node = rb_parent(node))) {
128862306a36Sopenharmony_ci				va = rb_entry(node, struct vmap_area, rb_node);
128962306a36Sopenharmony_ci				if (is_within_this_va(va, size, align, vstart))
129062306a36Sopenharmony_ci					return va;
129162306a36Sopenharmony_ci
129262306a36Sopenharmony_ci				if (get_subtree_max_size(node->rb_right) >= length &&
129362306a36Sopenharmony_ci						vstart <= va->va_start) {
129462306a36Sopenharmony_ci					/*
129562306a36Sopenharmony_ci					 * Shift the vstart forward. Please note, we update it with
129662306a36Sopenharmony_ci					 * the parent's start address plus "1" because we do not want
129762306a36Sopenharmony_ci					 * to re-enter the same sub-tree after it has already been checked
129862306a36Sopenharmony_ci					 * and no suitable free block was found there.
129962306a36Sopenharmony_ci					 */
130062306a36Sopenharmony_ci					vstart = va->va_start + 1;
130162306a36Sopenharmony_ci					node = node->rb_right;
130262306a36Sopenharmony_ci					break;
130362306a36Sopenharmony_ci				}
130462306a36Sopenharmony_ci			}
130562306a36Sopenharmony_ci		}
130662306a36Sopenharmony_ci	}
130762306a36Sopenharmony_ci
130862306a36Sopenharmony_ci	return NULL;
130962306a36Sopenharmony_ci}
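
/*
 * Illustrative trace (made-up sizes): with the small tree from the
 * augment comment above (root 4--8, left child 2--2, right child 8--8)
 * and a request of size 6 with a low enough vstart, the search starts
 * at the root: get_subtree_max_size(node->rb_left) == 2 < 6, so the
 * left sub-tree is skipped; the root itself (size 4) does not fit;
 * get_subtree_max_size(node->rb_right) == 8 >= 6, so the walk descends
 * right and returns the 8--8 node.
 */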
131062306a36Sopenharmony_ci
131162306a36Sopenharmony_ci#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
131262306a36Sopenharmony_ci#include <linux/random.h>
131362306a36Sopenharmony_ci
131462306a36Sopenharmony_cistatic struct vmap_area *
131562306a36Sopenharmony_cifind_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
131662306a36Sopenharmony_ci	unsigned long align, unsigned long vstart)
131762306a36Sopenharmony_ci{
131862306a36Sopenharmony_ci	struct vmap_area *va;
131962306a36Sopenharmony_ci
132062306a36Sopenharmony_ci	list_for_each_entry(va, head, list) {
132162306a36Sopenharmony_ci		if (!is_within_this_va(va, size, align, vstart))
132262306a36Sopenharmony_ci			continue;
132362306a36Sopenharmony_ci
132462306a36Sopenharmony_ci		return va;
132562306a36Sopenharmony_ci	}
132662306a36Sopenharmony_ci
132762306a36Sopenharmony_ci	return NULL;
132862306a36Sopenharmony_ci}
132962306a36Sopenharmony_ci
133062306a36Sopenharmony_cistatic void
133162306a36Sopenharmony_cifind_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
133262306a36Sopenharmony_ci			     unsigned long size, unsigned long align)
133362306a36Sopenharmony_ci{
133462306a36Sopenharmony_ci	struct vmap_area *va_1, *va_2;
133562306a36Sopenharmony_ci	unsigned long vstart;
133662306a36Sopenharmony_ci	unsigned int rnd;
133762306a36Sopenharmony_ci
133862306a36Sopenharmony_ci	get_random_bytes(&rnd, sizeof(rnd));
133962306a36Sopenharmony_ci	vstart = VMALLOC_START + rnd;
134062306a36Sopenharmony_ci
134162306a36Sopenharmony_ci	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
134262306a36Sopenharmony_ci	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
134362306a36Sopenharmony_ci
134462306a36Sopenharmony_ci	if (va_1 != va_2)
134562306a36Sopenharmony_ci		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
134662306a36Sopenharmony_ci			va_1, va_2, vstart);
134762306a36Sopenharmony_ci}
134862306a36Sopenharmony_ci#endif
134962306a36Sopenharmony_ci
135062306a36Sopenharmony_cienum fit_type {
135162306a36Sopenharmony_ci	NOTHING_FIT = 0,
135262306a36Sopenharmony_ci	FL_FIT_TYPE = 1,	/* full fit */
135362306a36Sopenharmony_ci	LE_FIT_TYPE = 2,	/* left edge fit */
135462306a36Sopenharmony_ci	RE_FIT_TYPE = 3,	/* right edge fit */
135562306a36Sopenharmony_ci	NE_FIT_TYPE = 4		/* no edge fit */
135662306a36Sopenharmony_ci};
135762306a36Sopenharmony_ci
135862306a36Sopenharmony_cistatic __always_inline enum fit_type
135962306a36Sopenharmony_ciclassify_va_fit_type(struct vmap_area *va,
136062306a36Sopenharmony_ci	unsigned long nva_start_addr, unsigned long size)
136162306a36Sopenharmony_ci{
136262306a36Sopenharmony_ci	enum fit_type type;
136362306a36Sopenharmony_ci
136462306a36Sopenharmony_ci	/* Check if it is within VA. */
136562306a36Sopenharmony_ci	if (nva_start_addr < va->va_start ||
136662306a36Sopenharmony_ci			nva_start_addr + size > va->va_end)
136762306a36Sopenharmony_ci		return NOTHING_FIT;
136862306a36Sopenharmony_ci
136962306a36Sopenharmony_ci	/* Now classify. */
137062306a36Sopenharmony_ci	if (va->va_start == nva_start_addr) {
137162306a36Sopenharmony_ci		if (va->va_end == nva_start_addr + size)
137262306a36Sopenharmony_ci			type = FL_FIT_TYPE;
137362306a36Sopenharmony_ci		else
137462306a36Sopenharmony_ci			type = LE_FIT_TYPE;
137562306a36Sopenharmony_ci	} else if (va->va_end == nva_start_addr + size) {
137662306a36Sopenharmony_ci		type = RE_FIT_TYPE;
137762306a36Sopenharmony_ci	} else {
137862306a36Sopenharmony_ci		type = NE_FIT_TYPE;
137962306a36Sopenharmony_ci	}
138062306a36Sopenharmony_ci
138162306a36Sopenharmony_ci	return type;
138262306a36Sopenharmony_ci}
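
/*
 * Worked example (illustrative addresses): for a free VA spanning
 * [0x1000..0x5000):
 *
 *	classify_va_fit_type(va, 0x1000, 0x4000) == FL_FIT_TYPE
 *	classify_va_fit_type(va, 0x1000, 0x1000) == LE_FIT_TYPE
 *	classify_va_fit_type(va, 0x4000, 0x1000) == RE_FIT_TYPE
 *	classify_va_fit_type(va, 0x2000, 0x1000) == NE_FIT_TYPE
 *	classify_va_fit_type(va, 0x4000, 0x2000) == NOTHING_FIT
 */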
138362306a36Sopenharmony_ci
138462306a36Sopenharmony_cistatic __always_inline int
138562306a36Sopenharmony_ciadjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
138662306a36Sopenharmony_ci		      struct vmap_area *va, unsigned long nva_start_addr,
138762306a36Sopenharmony_ci		      unsigned long size)
138862306a36Sopenharmony_ci{
138962306a36Sopenharmony_ci	struct vmap_area *lva = NULL;
139062306a36Sopenharmony_ci	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
139162306a36Sopenharmony_ci
139262306a36Sopenharmony_ci	if (type == FL_FIT_TYPE) {
139362306a36Sopenharmony_ci		/*
139462306a36Sopenharmony_ci		 * No need to split VA, it fully fits.
139562306a36Sopenharmony_ci		 *
139662306a36Sopenharmony_ci		 * |               |
139762306a36Sopenharmony_ci		 * V      NVA      V
139862306a36Sopenharmony_ci		 * |---------------|
139962306a36Sopenharmony_ci		 */
140062306a36Sopenharmony_ci		unlink_va_augment(va, root);
140162306a36Sopenharmony_ci		kmem_cache_free(vmap_area_cachep, va);
140262306a36Sopenharmony_ci	} else if (type == LE_FIT_TYPE) {
140362306a36Sopenharmony_ci		/*
140462306a36Sopenharmony_ci		 * Split left edge of fit VA.
140562306a36Sopenharmony_ci		 *
140662306a36Sopenharmony_ci		 * |       |
140762306a36Sopenharmony_ci		 * V  NVA  V   R
140862306a36Sopenharmony_ci		 * |-------|-------|
140962306a36Sopenharmony_ci		 */
141062306a36Sopenharmony_ci		va->va_start += size;
141162306a36Sopenharmony_ci	} else if (type == RE_FIT_TYPE) {
141262306a36Sopenharmony_ci		/*
141362306a36Sopenharmony_ci		 * Split right edge of fit VA.
141462306a36Sopenharmony_ci		 *
141562306a36Sopenharmony_ci		 *         |       |
141662306a36Sopenharmony_ci		 *     L   V  NVA  V
141762306a36Sopenharmony_ci		 * |-------|-------|
141862306a36Sopenharmony_ci		 */
141962306a36Sopenharmony_ci		va->va_end = nva_start_addr;
142062306a36Sopenharmony_ci	} else if (type == NE_FIT_TYPE) {
142162306a36Sopenharmony_ci		/*
142262306a36Sopenharmony_ci		 * Split no edge of fit VA.
142362306a36Sopenharmony_ci		 *
142462306a36Sopenharmony_ci		 *     |       |
142562306a36Sopenharmony_ci		 *   L V  NVA  V R
142662306a36Sopenharmony_ci		 * |---|-------|---|
142762306a36Sopenharmony_ci		 */
142862306a36Sopenharmony_ci		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
142962306a36Sopenharmony_ci		if (unlikely(!lva)) {
143062306a36Sopenharmony_ci			/*
143162306a36Sopenharmony_ci			 * For the percpu allocator we do not do any pre-allocation
143262306a36Sopenharmony_ci			 * and leave things as they are. The reason is that it most
143362306a36Sopenharmony_ci			 * likely never ends up with NE_FIT_TYPE splitting. For
143462306a36Sopenharmony_ci			 * percpu allocations, offsets and sizes are aligned to a
143562306a36Sopenharmony_ci			 * fixed alignment request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
143662306a36Sopenharmony_ci			 * are its main fitting cases.
143762306a36Sopenharmony_ci			 *
143862306a36Sopenharmony_ci			 * There are a few exceptions though; one example is the
143962306a36Sopenharmony_ci			 * first allocation (early boot) when we have "one"
144062306a36Sopenharmony_ci			 * big free space that has to be split.
144162306a36Sopenharmony_ci			 *
144262306a36Sopenharmony_ci			 * We can also hit this path in case of regular "vmap"
144362306a36Sopenharmony_ci			 * allocations, if "this" current CPU was not preloaded.
144462306a36Sopenharmony_ci			 * See the comment in alloc_vmap_area() for why. If so,
144562306a36Sopenharmony_ci			 * GFP_NOWAIT is used instead to get an extra object for
144662306a36Sopenharmony_ci			 * splitting purposes. That is rare and most of the time
144762306a36Sopenharmony_ci			 * does not occur.
144862306a36Sopenharmony_ci			 *
144962306a36Sopenharmony_ci			 * What happens if an allocation fails? Basically,
145062306a36Sopenharmony_ci			 * an "overflow" path is triggered to purge lazily freed
145162306a36Sopenharmony_ci			 * areas in order to free some memory, then the "retry"
145262306a36Sopenharmony_ci			 * path is triggered to try one more time. See more details
145362306a36Sopenharmony_ci			 * in the alloc_vmap_area() function.
145462306a36Sopenharmony_ci			 */
145562306a36Sopenharmony_ci			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
145662306a36Sopenharmony_ci			if (!lva)
145762306a36Sopenharmony_ci				return -1;
145862306a36Sopenharmony_ci		}
145962306a36Sopenharmony_ci
146062306a36Sopenharmony_ci		/*
146162306a36Sopenharmony_ci		 * Build the remainder.
146262306a36Sopenharmony_ci		 */
146362306a36Sopenharmony_ci		lva->va_start = va->va_start;
146462306a36Sopenharmony_ci		lva->va_end = nva_start_addr;
146562306a36Sopenharmony_ci
146662306a36Sopenharmony_ci		/*
146762306a36Sopenharmony_ci		 * Shrink this VA to remaining size.
146862306a36Sopenharmony_ci		 */
146962306a36Sopenharmony_ci		va->va_start = nva_start_addr + size;
147062306a36Sopenharmony_ci	} else {
147162306a36Sopenharmony_ci		return -1;
147262306a36Sopenharmony_ci	}
147362306a36Sopenharmony_ci
147462306a36Sopenharmony_ci	if (type != FL_FIT_TYPE) {
147562306a36Sopenharmony_ci		augment_tree_propagate_from(va);
147662306a36Sopenharmony_ci
147762306a36Sopenharmony_ci		if (lva)	/* type == NE_FIT_TYPE */
147862306a36Sopenharmony_ci			insert_vmap_area_augment(lva, &va->rb_node, root, head);
147962306a36Sopenharmony_ci	}
148062306a36Sopenharmony_ci
148162306a36Sopenharmony_ci	return 0;
148262306a36Sopenharmony_ci}
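
/*
 * Illustrative sketch (made-up addresses): carving [0x2000..0x3000) out
 * of a free VA [0x1000..0x5000) is the NE_FIT_TYPE case. Assuming the
 * preloaded per-cpu object is available, the result is:
 *
 *	lva: [0x1000..0x2000)	(new remainder, inserted and propagated)
 *	va:  [0x3000..0x5000)	(original VA shrunk from the left)
 *
 * and the hole [0x2000..0x3000) is handed out to the caller. If neither
 * the preloaded object nor a GFP_NOWAIT allocation is available, -1 is
 * returned and the caller falls back to its overflow/retry path.
 */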
148362306a36Sopenharmony_ci
148462306a36Sopenharmony_ci/*
148562306a36Sopenharmony_ci * Returns the start address of the newly allocated area on success.
148662306a36Sopenharmony_ci * Otherwise "vend" is returned to indicate failure.
148762306a36Sopenharmony_ci */
148862306a36Sopenharmony_cistatic __always_inline unsigned long
148962306a36Sopenharmony_ci__alloc_vmap_area(struct rb_root *root, struct list_head *head,
149062306a36Sopenharmony_ci	unsigned long size, unsigned long align,
149162306a36Sopenharmony_ci	unsigned long vstart, unsigned long vend)
149262306a36Sopenharmony_ci{
149362306a36Sopenharmony_ci	bool adjust_search_size = true;
149462306a36Sopenharmony_ci	unsigned long nva_start_addr;
149562306a36Sopenharmony_ci	struct vmap_area *va;
149662306a36Sopenharmony_ci	int ret;
149762306a36Sopenharmony_ci
149862306a36Sopenharmony_ci	/*
149962306a36Sopenharmony_ci	 * Do not adjust when:
150062306a36Sopenharmony_ci	 *   a) align <= PAGE_SIZE, because it does not make any sense.
150162306a36Sopenharmony_ci	 *      All blocks (their start addresses) are at least PAGE_SIZE
150262306a36Sopenharmony_ci	 *      aligned anyway;
150362306a36Sopenharmony_ci	 *   b) a short range where the requested size exactly matches the
150462306a36Sopenharmony_ci	 *      specified [vstart:vend] interval and the alignment > PAGE_SIZE.
150562306a36Sopenharmony_ci	 *      With an adjusted search length the allocation would not succeed.
150662306a36Sopenharmony_ci	 */
150762306a36Sopenharmony_ci	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
150862306a36Sopenharmony_ci		adjust_search_size = false;
150962306a36Sopenharmony_ci
151062306a36Sopenharmony_ci	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
151162306a36Sopenharmony_ci	if (unlikely(!va))
151262306a36Sopenharmony_ci		return vend;
151362306a36Sopenharmony_ci
151462306a36Sopenharmony_ci	if (va->va_start > vstart)
151562306a36Sopenharmony_ci		nva_start_addr = ALIGN(va->va_start, align);
151662306a36Sopenharmony_ci	else
151762306a36Sopenharmony_ci		nva_start_addr = ALIGN(vstart, align);
151862306a36Sopenharmony_ci
151962306a36Sopenharmony_ci	/* Check the "vend" restriction. */
152062306a36Sopenharmony_ci	if (nva_start_addr + size > vend)
152162306a36Sopenharmony_ci		return vend;
152262306a36Sopenharmony_ci
152362306a36Sopenharmony_ci	/* Update the free vmap_area. */
152462306a36Sopenharmony_ci	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
152562306a36Sopenharmony_ci	if (WARN_ON_ONCE(ret))
152662306a36Sopenharmony_ci		return vend;
152762306a36Sopenharmony_ci
152862306a36Sopenharmony_ci#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
152962306a36Sopenharmony_ci	find_vmap_lowest_match_check(root, head, size, align);
153062306a36Sopenharmony_ci#endif
153162306a36Sopenharmony_ci
153262306a36Sopenharmony_ci	return nva_start_addr;
153362306a36Sopenharmony_ci}
153462306a36Sopenharmony_ci
153562306a36Sopenharmony_ci/*
153662306a36Sopenharmony_ci * Free a region of KVA allocated by alloc_vmap_area
153762306a36Sopenharmony_ci */
153862306a36Sopenharmony_cistatic void free_vmap_area(struct vmap_area *va)
153962306a36Sopenharmony_ci{
154062306a36Sopenharmony_ci	/*
154162306a36Sopenharmony_ci	 * Remove from the busy tree/list.
154262306a36Sopenharmony_ci	 */
154362306a36Sopenharmony_ci	spin_lock(&vmap_area_lock);
154462306a36Sopenharmony_ci	unlink_va(va, &vmap_area_root);
154562306a36Sopenharmony_ci	spin_unlock(&vmap_area_lock);
154662306a36Sopenharmony_ci
154762306a36Sopenharmony_ci	/*
154862306a36Sopenharmony_ci	 * Insert/Merge it back to the free tree/list.
154962306a36Sopenharmony_ci	 */
155062306a36Sopenharmony_ci	spin_lock(&free_vmap_area_lock);
155162306a36Sopenharmony_ci	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
155262306a36Sopenharmony_ci	spin_unlock(&free_vmap_area_lock);
155362306a36Sopenharmony_ci}
155462306a36Sopenharmony_ci
155562306a36Sopenharmony_cistatic inline void
155662306a36Sopenharmony_cipreload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
155762306a36Sopenharmony_ci{
155862306a36Sopenharmony_ci	struct vmap_area *va = NULL;
155962306a36Sopenharmony_ci
156062306a36Sopenharmony_ci	/*
156162306a36Sopenharmony_ci	 * Preload this CPU with one extra vmap_area object. It is used
156262306a36Sopenharmony_ci	 * when the fit type of a free area is NE_FIT_TYPE. It guarantees
156362306a36Sopenharmony_ci	 * that a CPU that does an allocation is preloaded.
156462306a36Sopenharmony_ci	 *
156562306a36Sopenharmony_ci	 * We do it in a non-atomic context, which allows us to use more
156662306a36Sopenharmony_ci	 * permissive allocation masks and thus be more stable under low
156762306a36Sopenharmony_ci	 * memory conditions and high memory pressure.
156862306a36Sopenharmony_ci	 */
156962306a36Sopenharmony_ci	if (!this_cpu_read(ne_fit_preload_node))
157062306a36Sopenharmony_ci		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
157162306a36Sopenharmony_ci
157262306a36Sopenharmony_ci	spin_lock(lock);
157362306a36Sopenharmony_ci
157462306a36Sopenharmony_ci	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
157562306a36Sopenharmony_ci		kmem_cache_free(vmap_area_cachep, va);
157662306a36Sopenharmony_ci}
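
/*
 * Usage sketch (illustrative only, mirrors the retry path in
 * alloc_vmap_area() below):
 *
 *	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
 *	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
 *			size, align, vstart, vend);
 *	spin_unlock(&free_vmap_area_lock);
 *
 * The preload happens before the lock is taken, so a sleeping GFP_KERNEL
 * allocation is still possible; once the lock is held,
 * adjust_va_to_fit_type() can consume the cached object via
 * __this_cpu_xchg() without allocating in atomic context.
 */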
157762306a36Sopenharmony_ci
157862306a36Sopenharmony_ci/*
157962306a36Sopenharmony_ci * Allocate a region of KVA of the specified size and alignment, within the
158062306a36Sopenharmony_ci * vstart and vend.
158162306a36Sopenharmony_ci */
158262306a36Sopenharmony_cistatic struct vmap_area *alloc_vmap_area(unsigned long size,
158362306a36Sopenharmony_ci				unsigned long align,
158462306a36Sopenharmony_ci				unsigned long vstart, unsigned long vend,
158562306a36Sopenharmony_ci				int node, gfp_t gfp_mask,
158662306a36Sopenharmony_ci				unsigned long va_flags)
158762306a36Sopenharmony_ci{
158862306a36Sopenharmony_ci	struct vmap_area *va;
158962306a36Sopenharmony_ci	unsigned long freed;
159062306a36Sopenharmony_ci	unsigned long addr;
159162306a36Sopenharmony_ci	int purged = 0;
159262306a36Sopenharmony_ci	int ret;
159362306a36Sopenharmony_ci
159462306a36Sopenharmony_ci	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
159562306a36Sopenharmony_ci		return ERR_PTR(-EINVAL);
159662306a36Sopenharmony_ci
159762306a36Sopenharmony_ci	if (unlikely(!vmap_initialized))
159862306a36Sopenharmony_ci		return ERR_PTR(-EBUSY);
159962306a36Sopenharmony_ci
160062306a36Sopenharmony_ci	might_sleep();
160162306a36Sopenharmony_ci	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
160262306a36Sopenharmony_ci
160362306a36Sopenharmony_ci	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
160462306a36Sopenharmony_ci	if (unlikely(!va))
160562306a36Sopenharmony_ci		return ERR_PTR(-ENOMEM);
160662306a36Sopenharmony_ci
160762306a36Sopenharmony_ci	/*
160862306a36Sopenharmony_ci	 * Only scan the relevant parts containing pointers to other objects
160962306a36Sopenharmony_ci	 * to avoid false negatives.
161062306a36Sopenharmony_ci	 */
161162306a36Sopenharmony_ci	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
161262306a36Sopenharmony_ci
161362306a36Sopenharmony_ciretry:
161462306a36Sopenharmony_ci	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
161562306a36Sopenharmony_ci	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
161662306a36Sopenharmony_ci		size, align, vstart, vend);
161762306a36Sopenharmony_ci	spin_unlock(&free_vmap_area_lock);
161862306a36Sopenharmony_ci
161962306a36Sopenharmony_ci	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
162062306a36Sopenharmony_ci
162162306a36Sopenharmony_ci	/*
162262306a36Sopenharmony_ci	 * If an allocation fails, the "vend" address is
162362306a36Sopenharmony_ci	 * returned. Therefore trigger the overflow path.
162462306a36Sopenharmony_ci	 */
162562306a36Sopenharmony_ci	if (unlikely(addr == vend))
162662306a36Sopenharmony_ci		goto overflow;
162762306a36Sopenharmony_ci
162862306a36Sopenharmony_ci	va->va_start = addr;
162962306a36Sopenharmony_ci	va->va_end = addr + size;
163062306a36Sopenharmony_ci	va->vm = NULL;
163162306a36Sopenharmony_ci	va->flags = va_flags;
163262306a36Sopenharmony_ci
163362306a36Sopenharmony_ci	spin_lock(&vmap_area_lock);
163462306a36Sopenharmony_ci	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
163562306a36Sopenharmony_ci	spin_unlock(&vmap_area_lock);
163662306a36Sopenharmony_ci
163762306a36Sopenharmony_ci	BUG_ON(!IS_ALIGNED(va->va_start, align));
163862306a36Sopenharmony_ci	BUG_ON(va->va_start < vstart);
163962306a36Sopenharmony_ci	BUG_ON(va->va_end > vend);
164062306a36Sopenharmony_ci
164162306a36Sopenharmony_ci	ret = kasan_populate_vmalloc(addr, size);
164262306a36Sopenharmony_ci	if (ret) {
164362306a36Sopenharmony_ci		free_vmap_area(va);
164462306a36Sopenharmony_ci		return ERR_PTR(ret);
164562306a36Sopenharmony_ci	}
164662306a36Sopenharmony_ci
164762306a36Sopenharmony_ci	return va;
164862306a36Sopenharmony_ci
164962306a36Sopenharmony_cioverflow:
165062306a36Sopenharmony_ci	if (!purged) {
165162306a36Sopenharmony_ci		reclaim_and_purge_vmap_areas();
165262306a36Sopenharmony_ci		purged = 1;
165362306a36Sopenharmony_ci		goto retry;
165462306a36Sopenharmony_ci	}
165562306a36Sopenharmony_ci
165662306a36Sopenharmony_ci	freed = 0;
165762306a36Sopenharmony_ci	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
165862306a36Sopenharmony_ci
165962306a36Sopenharmony_ci	if (freed > 0) {
166062306a36Sopenharmony_ci		purged = 0;
166162306a36Sopenharmony_ci		goto retry;
166262306a36Sopenharmony_ci	}
166362306a36Sopenharmony_ci
166462306a36Sopenharmony_ci	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
166562306a36Sopenharmony_ci		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
166662306a36Sopenharmony_ci			size);
166762306a36Sopenharmony_ci
166862306a36Sopenharmony_ci	kmem_cache_free(vmap_area_cachep, va);
166962306a36Sopenharmony_ci	return ERR_PTR(-EBUSY);
167062306a36Sopenharmony_ci}
167162306a36Sopenharmony_ci
167262306a36Sopenharmony_ciint register_vmap_purge_notifier(struct notifier_block *nb)
167362306a36Sopenharmony_ci{
167462306a36Sopenharmony_ci	return blocking_notifier_chain_register(&vmap_notify_list, nb);
167562306a36Sopenharmony_ci}
167662306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
167762306a36Sopenharmony_ci
167862306a36Sopenharmony_ciint unregister_vmap_purge_notifier(struct notifier_block *nb)
167962306a36Sopenharmony_ci{
168062306a36Sopenharmony_ci	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
168162306a36Sopenharmony_ci}
168262306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
168362306a36Sopenharmony_ci
168462306a36Sopenharmony_ci/*
168562306a36Sopenharmony_ci * lazy_max_pages is the maximum amount of virtual address space we gather up
168662306a36Sopenharmony_ci * before attempting to purge with a TLB flush.
168762306a36Sopenharmony_ci *
168862306a36Sopenharmony_ci * There is a tradeoff here: a larger number will cover more kernel page tables
168962306a36Sopenharmony_ci * and take slightly longer to purge, but it will linearly reduce the number of
169062306a36Sopenharmony_ci * global TLB flushes that must be performed. It would seem natural to scale
169162306a36Sopenharmony_ci * this number up linearly with the number of CPUs (because vmapping activity
169262306a36Sopenharmony_ci * could also scale linearly with the number of CPUs), however it is likely
169362306a36Sopenharmony_ci * that in practice, workloads might be constrained in other ways that mean
169462306a36Sopenharmony_ci * vmap activity will not scale linearly with CPUs. Also, I want to be
169562306a36Sopenharmony_ci * conservative and not introduce a big latency on huge systems, so go with
169662306a36Sopenharmony_ci * a less aggressive log scale. It will still be an improvement over the old
169762306a36Sopenharmony_ci * code, and it will be simple to change the scale factor if we find that it
169862306a36Sopenharmony_ci * becomes a problem on bigger systems.
169962306a36Sopenharmony_ci */
170062306a36Sopenharmony_cistatic unsigned long lazy_max_pages(void)
170162306a36Sopenharmony_ci{
170262306a36Sopenharmony_ci	unsigned int log;
170362306a36Sopenharmony_ci
170462306a36Sopenharmony_ci	log = fls(num_online_cpus());
170562306a36Sopenharmony_ci
170662306a36Sopenharmony_ci	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
170762306a36Sopenharmony_ci}
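
/*
 * Worked example (illustrative only): with 4 KiB pages,
 * 32UL * 1024 * 1024 / PAGE_SIZE is 8192 pages (32 MiB). On a machine
 * with 16 online CPUs, fls(16) == 5, so lazy_max_pages() returns
 * 5 * 8192 = 40960 pages, i.e. up to 160 MiB of lazily freed vmap
 * space may accumulate before purging and a TLB flush are triggered.
 */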
170862306a36Sopenharmony_ci
170962306a36Sopenharmony_cistatic atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
171062306a36Sopenharmony_ci
171162306a36Sopenharmony_ci/*
171262306a36Sopenharmony_ci * Serialize vmap purging.  There is no actual critical section protected
171362306a36Sopenharmony_ci * by this lock, but we want to avoid concurrent calls for performance
171462306a36Sopenharmony_ci * reasons and to make the pcpu_get_vm_areas more deterministic.
171562306a36Sopenharmony_ci */
171662306a36Sopenharmony_cistatic DEFINE_MUTEX(vmap_purge_lock);
171762306a36Sopenharmony_ci
171862306a36Sopenharmony_ci/* for per-CPU blocks */
171962306a36Sopenharmony_cistatic void purge_fragmented_blocks_allcpus(void);
172062306a36Sopenharmony_ci
172162306a36Sopenharmony_ci/*
172262306a36Sopenharmony_ci * Purges all lazily-freed vmap areas.
172362306a36Sopenharmony_ci */
172462306a36Sopenharmony_cistatic bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
172562306a36Sopenharmony_ci{
172662306a36Sopenharmony_ci	unsigned long resched_threshold;
172762306a36Sopenharmony_ci	unsigned int num_purged_areas = 0;
172862306a36Sopenharmony_ci	struct list_head local_purge_list;
172962306a36Sopenharmony_ci	struct vmap_area *va, *n_va;
173062306a36Sopenharmony_ci
173162306a36Sopenharmony_ci	lockdep_assert_held(&vmap_purge_lock);
173262306a36Sopenharmony_ci
173362306a36Sopenharmony_ci	spin_lock(&purge_vmap_area_lock);
173462306a36Sopenharmony_ci	purge_vmap_area_root = RB_ROOT;
173562306a36Sopenharmony_ci	list_replace_init(&purge_vmap_area_list, &local_purge_list);
173662306a36Sopenharmony_ci	spin_unlock(&purge_vmap_area_lock);
173762306a36Sopenharmony_ci
173862306a36Sopenharmony_ci	if (unlikely(list_empty(&local_purge_list)))
173962306a36Sopenharmony_ci		goto out;
174062306a36Sopenharmony_ci
174162306a36Sopenharmony_ci	start = min(start,
174262306a36Sopenharmony_ci		list_first_entry(&local_purge_list,
174362306a36Sopenharmony_ci			struct vmap_area, list)->va_start);
174462306a36Sopenharmony_ci
174562306a36Sopenharmony_ci	end = max(end,
174662306a36Sopenharmony_ci		list_last_entry(&local_purge_list,
174762306a36Sopenharmony_ci			struct vmap_area, list)->va_end);
174862306a36Sopenharmony_ci
174962306a36Sopenharmony_ci	flush_tlb_kernel_range(start, end);
175062306a36Sopenharmony_ci	resched_threshold = lazy_max_pages() << 1;
175162306a36Sopenharmony_ci
175262306a36Sopenharmony_ci	spin_lock(&free_vmap_area_lock);
175362306a36Sopenharmony_ci	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
175462306a36Sopenharmony_ci		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
175562306a36Sopenharmony_ci		unsigned long orig_start = va->va_start;
175662306a36Sopenharmony_ci		unsigned long orig_end = va->va_end;
175762306a36Sopenharmony_ci
175862306a36Sopenharmony_ci		/*
175962306a36Sopenharmony_ci		 * Finally insert or merge lazily-freed area. It is
176062306a36Sopenharmony_ci		 * detached and there is no need to "unlink" it from
176162306a36Sopenharmony_ci		 * anything.
176262306a36Sopenharmony_ci		 */
176362306a36Sopenharmony_ci		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
176462306a36Sopenharmony_ci				&free_vmap_area_list);
176562306a36Sopenharmony_ci
176662306a36Sopenharmony_ci		if (!va)
176762306a36Sopenharmony_ci			continue;
176862306a36Sopenharmony_ci
176962306a36Sopenharmony_ci		if (is_vmalloc_or_module_addr((void *)orig_start))
177062306a36Sopenharmony_ci			kasan_release_vmalloc(orig_start, orig_end,
177162306a36Sopenharmony_ci					      va->va_start, va->va_end);
177262306a36Sopenharmony_ci
177362306a36Sopenharmony_ci		atomic_long_sub(nr, &vmap_lazy_nr);
177462306a36Sopenharmony_ci		num_purged_areas++;
177562306a36Sopenharmony_ci
177662306a36Sopenharmony_ci		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
177762306a36Sopenharmony_ci			cond_resched_lock(&free_vmap_area_lock);
177862306a36Sopenharmony_ci	}
177962306a36Sopenharmony_ci	spin_unlock(&free_vmap_area_lock);
178062306a36Sopenharmony_ci
178162306a36Sopenharmony_ciout:
178262306a36Sopenharmony_ci	trace_purge_vmap_area_lazy(start, end, num_purged_areas);
178362306a36Sopenharmony_ci	return num_purged_areas > 0;
178462306a36Sopenharmony_ci}
178562306a36Sopenharmony_ci
178662306a36Sopenharmony_ci/*
178762306a36Sopenharmony_ci * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
178862306a36Sopenharmony_ci */
178962306a36Sopenharmony_cistatic void reclaim_and_purge_vmap_areas(void)
179162306a36Sopenharmony_ci{
179262306a36Sopenharmony_ci	mutex_lock(&vmap_purge_lock);
179362306a36Sopenharmony_ci	purge_fragmented_blocks_allcpus();
179462306a36Sopenharmony_ci	__purge_vmap_area_lazy(ULONG_MAX, 0);
179562306a36Sopenharmony_ci	mutex_unlock(&vmap_purge_lock);
179662306a36Sopenharmony_ci}
179762306a36Sopenharmony_ci
179862306a36Sopenharmony_cistatic void drain_vmap_area_work(struct work_struct *work)
179962306a36Sopenharmony_ci{
180062306a36Sopenharmony_ci	unsigned long nr_lazy;
180162306a36Sopenharmony_ci
180262306a36Sopenharmony_ci	do {
180362306a36Sopenharmony_ci		mutex_lock(&vmap_purge_lock);
180462306a36Sopenharmony_ci		__purge_vmap_area_lazy(ULONG_MAX, 0);
180562306a36Sopenharmony_ci		mutex_unlock(&vmap_purge_lock);
180662306a36Sopenharmony_ci
180762306a36Sopenharmony_ci		/* Recheck if further work is required. */
180862306a36Sopenharmony_ci		nr_lazy = atomic_long_read(&vmap_lazy_nr);
180962306a36Sopenharmony_ci	} while (nr_lazy > lazy_max_pages());
181062306a36Sopenharmony_ci}
181162306a36Sopenharmony_ci
181262306a36Sopenharmony_ci/*
181362306a36Sopenharmony_ci * Free a vmap area; the caller must ensure that the area has been
181462306a36Sopenharmony_ci * unmapped, unlinked and that flush_cache_vunmap() has been called
181562306a36Sopenharmony_ci * for the correct range previously.
181662306a36Sopenharmony_ci */
181762306a36Sopenharmony_cistatic void free_vmap_area_noflush(struct vmap_area *va)
181862306a36Sopenharmony_ci{
181962306a36Sopenharmony_ci	unsigned long nr_lazy_max = lazy_max_pages();
182062306a36Sopenharmony_ci	unsigned long va_start = va->va_start;
182162306a36Sopenharmony_ci	unsigned long nr_lazy;
182262306a36Sopenharmony_ci
182362306a36Sopenharmony_ci	if (WARN_ON_ONCE(!list_empty(&va->list)))
182462306a36Sopenharmony_ci		return;
182562306a36Sopenharmony_ci
182662306a36Sopenharmony_ci	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
182762306a36Sopenharmony_ci				PAGE_SHIFT, &vmap_lazy_nr);
182862306a36Sopenharmony_ci
182962306a36Sopenharmony_ci	/*
183062306a36Sopenharmony_ci	 * Merge or place it into the purge tree/list.
183162306a36Sopenharmony_ci	 */
183262306a36Sopenharmony_ci	spin_lock(&purge_vmap_area_lock);
183362306a36Sopenharmony_ci	merge_or_add_vmap_area(va,
183462306a36Sopenharmony_ci		&purge_vmap_area_root, &purge_vmap_area_list);
183562306a36Sopenharmony_ci	spin_unlock(&purge_vmap_area_lock);
183662306a36Sopenharmony_ci
183762306a36Sopenharmony_ci	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
183862306a36Sopenharmony_ci
183962306a36Sopenharmony_ci	/* After this point, we may free va at any time */
184062306a36Sopenharmony_ci	if (unlikely(nr_lazy > nr_lazy_max))
184162306a36Sopenharmony_ci		schedule_work(&drain_vmap_work);
184262306a36Sopenharmony_ci}
184362306a36Sopenharmony_ci
184462306a36Sopenharmony_ci/*
184562306a36Sopenharmony_ci * Free and unmap a vmap area
184662306a36Sopenharmony_ci */
184762306a36Sopenharmony_cistatic void free_unmap_vmap_area(struct vmap_area *va)
184862306a36Sopenharmony_ci{
184962306a36Sopenharmony_ci	flush_cache_vunmap(va->va_start, va->va_end);
185062306a36Sopenharmony_ci	vunmap_range_noflush(va->va_start, va->va_end);
185162306a36Sopenharmony_ci	if (debug_pagealloc_enabled_static())
185262306a36Sopenharmony_ci		flush_tlb_kernel_range(va->va_start, va->va_end);
185362306a36Sopenharmony_ci
185462306a36Sopenharmony_ci	free_vmap_area_noflush(va);
185562306a36Sopenharmony_ci}
185662306a36Sopenharmony_ci
185762306a36Sopenharmony_cistruct vmap_area *find_vmap_area(unsigned long addr)
185862306a36Sopenharmony_ci{
185962306a36Sopenharmony_ci	struct vmap_area *va;
186062306a36Sopenharmony_ci
186162306a36Sopenharmony_ci	spin_lock(&vmap_area_lock);
186262306a36Sopenharmony_ci	va = __find_vmap_area(addr, &vmap_area_root);
186362306a36Sopenharmony_ci	spin_unlock(&vmap_area_lock);
186462306a36Sopenharmony_ci
186562306a36Sopenharmony_ci	return va;
186662306a36Sopenharmony_ci}
186762306a36Sopenharmony_ci
186862306a36Sopenharmony_cistatic struct vmap_area *find_unlink_vmap_area(unsigned long addr)
186962306a36Sopenharmony_ci{
187062306a36Sopenharmony_ci	struct vmap_area *va;
187162306a36Sopenharmony_ci
187262306a36Sopenharmony_ci	spin_lock(&vmap_area_lock);
187362306a36Sopenharmony_ci	va = __find_vmap_area(addr, &vmap_area_root);
187462306a36Sopenharmony_ci	if (va)
187562306a36Sopenharmony_ci		unlink_va(va, &vmap_area_root);
187662306a36Sopenharmony_ci	spin_unlock(&vmap_area_lock);
187762306a36Sopenharmony_ci
187862306a36Sopenharmony_ci	return va;
187962306a36Sopenharmony_ci}
188062306a36Sopenharmony_ci
188162306a36Sopenharmony_ci/*** Per cpu kva allocator ***/
188262306a36Sopenharmony_ci
188362306a36Sopenharmony_ci/*
188462306a36Sopenharmony_ci * vmap space is limited especially on 32 bit architectures. Ensure there is
188562306a36Sopenharmony_ci * room for at least 16 percpu vmap blocks per CPU.
188662306a36Sopenharmony_ci */
188762306a36Sopenharmony_ci/*
188862306a36Sopenharmony_ci * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
188962306a36Sopenharmony_ci * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
189062306a36Sopenharmony_ci * instead (we just need a rough idea)
189162306a36Sopenharmony_ci */
189262306a36Sopenharmony_ci#if BITS_PER_LONG == 32
189362306a36Sopenharmony_ci#define VMALLOC_SPACE		(128UL*1024*1024)
189462306a36Sopenharmony_ci#else
189562306a36Sopenharmony_ci#define VMALLOC_SPACE		(128UL*1024*1024*1024)
189662306a36Sopenharmony_ci#endif
189762306a36Sopenharmony_ci
189862306a36Sopenharmony_ci#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
189962306a36Sopenharmony_ci#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
190062306a36Sopenharmony_ci#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
190162306a36Sopenharmony_ci#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
190262306a36Sopenharmony_ci#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
190362306a36Sopenharmony_ci#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
190462306a36Sopenharmony_ci#define VMAP_BBMAP_BITS		\
190562306a36Sopenharmony_ci		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
190662306a36Sopenharmony_ci		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
190762306a36Sopenharmony_ci			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
190862306a36Sopenharmony_ci
190962306a36Sopenharmony_ci#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
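
/*
 * Worked example (illustrative only): on a 64-bit kernel with 4 KiB
 * pages and NR_CPUS == 64, VMALLOC_PAGES is 128 GiB / 4 KiB = 33554432,
 * so VMALLOC_PAGES / 64 / 16 = 32768. That is clamped by
 * VMAP_BBMAP_BITS_MAX to 1024, giving VMAP_BLOCK_SIZE = 4 MiB per
 * block, while VMAP_BBMAP_BITS_MIN (128 with BITS_PER_LONG == 64)
 * keeps very small configurations from shrinking blocks too far.
 */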
191062306a36Sopenharmony_ci
191162306a36Sopenharmony_ci/*
191262306a36Sopenharmony_ci * Purge threshold to prevent overeager purging of fragmented blocks for
191362306a36Sopenharmony_ci * regular operations: Purge if vb->free is less than 1/4 of the capacity.
191462306a36Sopenharmony_ci */
191562306a36Sopenharmony_ci#define VMAP_PURGE_THRESHOLD	(VMAP_BBMAP_BITS / 4)
191662306a36Sopenharmony_ci
191762306a36Sopenharmony_ci#define VMAP_RAM		0x1 /* indicates vm_map_ram area*/
191862306a36Sopenharmony_ci#define VMAP_BLOCK		0x2 /* mark out the vmap_block sub-type*/
191962306a36Sopenharmony_ci#define VMAP_FLAGS_MASK		0x3
192062306a36Sopenharmony_ci
192162306a36Sopenharmony_cistruct vmap_block_queue {
192262306a36Sopenharmony_ci	spinlock_t lock;
192362306a36Sopenharmony_ci	struct list_head free;
192462306a36Sopenharmony_ci
192562306a36Sopenharmony_ci	/*
192662306a36Sopenharmony_ci	 * An xarray requires extra memory to be allocated
192762306a36Sopenharmony_ci	 * dynamically. If that is an issue, we can use an rb-tree
192862306a36Sopenharmony_ci	 * instead.
192962306a36Sopenharmony_ci	 */
193062306a36Sopenharmony_ci	struct xarray vmap_blocks;
193162306a36Sopenharmony_ci};
193262306a36Sopenharmony_ci
193362306a36Sopenharmony_cistruct vmap_block {
193462306a36Sopenharmony_ci	spinlock_t lock;
193562306a36Sopenharmony_ci	struct vmap_area *va;
193662306a36Sopenharmony_ci	unsigned long free, dirty;
193762306a36Sopenharmony_ci	DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
193862306a36Sopenharmony_ci	unsigned long dirty_min, dirty_max; /*< dirty range */
193962306a36Sopenharmony_ci	struct list_head free_list;
194062306a36Sopenharmony_ci	struct rcu_head rcu_head;
194162306a36Sopenharmony_ci	struct list_head purge;
194262306a36Sopenharmony_ci};
194362306a36Sopenharmony_ci
194462306a36Sopenharmony_ci/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
194562306a36Sopenharmony_cistatic DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
194662306a36Sopenharmony_ci
194762306a36Sopenharmony_ci/*
194862306a36Sopenharmony_ci * In order to get fast access to any "vmap_block" associated with a
194962306a36Sopenharmony_ci * specific address, we use a hash.
195062306a36Sopenharmony_ci *
195162306a36Sopenharmony_ci * A per-cpu vmap_block_queue is used in two ways: to serialize
195262306a36Sopenharmony_ci * access to the free block chains among CPUs (alloc path), and it
195362306a36Sopenharmony_ci * also acts as a vmap_block hash (alloc/free paths). This means we
195462306a36Sopenharmony_ci * overload it, since we already have the per-cpu array which is
195562306a36Sopenharmony_ci * used as a hash table. When used as a hash, the 'cpu' passed to
195662306a36Sopenharmony_ci * per_cpu() is not actually a CPU but rather a hash index.
195762306a36Sopenharmony_ci *
195862306a36Sopenharmony_ci * The hash function is addr_to_vb_xa(), which hashes any address
195962306a36Sopenharmony_ci * to the specific hash index it belongs to. The per_cpu() macro is
196062306a36Sopenharmony_ci * then used to access the array element at the generated index.
196162306a36Sopenharmony_ci *
196262306a36Sopenharmony_ci * An example:
196362306a36Sopenharmony_ci *
196462306a36Sopenharmony_ci *  CPU_1  CPU_2  CPU_0
196562306a36Sopenharmony_ci *    |      |      |
196662306a36Sopenharmony_ci *    V      V      V
196762306a36Sopenharmony_ci * 0     10     20     30     40     50     60
196862306a36Sopenharmony_ci * |------|------|------|------|------|------|...<vmap address space>
196962306a36Sopenharmony_ci *   CPU0   CPU1   CPU2   CPU0   CPU1   CPU2
197062306a36Sopenharmony_ci *
197162306a36Sopenharmony_ci * - CPU_1 invokes vm_unmap_ram(6); 6 belongs to the CPU0 zone, thus
197262306a36Sopenharmony_ci *   it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
197362306a36Sopenharmony_ci *
197462306a36Sopenharmony_ci * - CPU_2 invokes vm_unmap_ram(11); 11 belongs to the CPU1 zone, thus
197562306a36Sopenharmony_ci *   it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
197662306a36Sopenharmony_ci *
197762306a36Sopenharmony_ci * - CPU_0 invokes vm_unmap_ram(20); 20 belongs to the CPU2 zone, thus
197862306a36Sopenharmony_ci *   it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
197962306a36Sopenharmony_ci *
198062306a36Sopenharmony_ci * This technique almost always avoids lock contention on insert/remove,
198162306a36Sopenharmony_ci * however xarray spinlocks protect against any contention that remains.
198262306a36Sopenharmony_ci */
198362306a36Sopenharmony_cistatic struct xarray *
198462306a36Sopenharmony_ciaddr_to_vb_xa(unsigned long addr)
198562306a36Sopenharmony_ci{
198662306a36Sopenharmony_ci	int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
198762306a36Sopenharmony_ci
198862306a36Sopenharmony_ci	return &per_cpu(vmap_block_queue, index).vmap_blocks;
198962306a36Sopenharmony_ci}
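
/*
 * Worked example (illustrative only): with 4 possible CPUs, any address
 * in the 6th VMAP_BLOCK_SIZE-sized slot of the address space
 * (addr / VMAP_BLOCK_SIZE == 5) hashes to index = 5 % 4 = 1, so its
 * vmap_block is stored in and looked up from
 * per_cpu(vmap_block_queue, 1).vmap_blocks, no matter which CPU does
 * the insert or the later vm_unmap_ram().
 */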
199062306a36Sopenharmony_ci
199162306a36Sopenharmony_ci/*
199262306a36Sopenharmony_ci * We should probably have a fallback mechanism to allocate virtual memory
199362306a36Sopenharmony_ci * out of partially filled vmap blocks. However vmap block sizing should be
199462306a36Sopenharmony_ci * fairly reasonable according to the vmalloc size, so it shouldn't be a
199562306a36Sopenharmony_ci * big problem.
199662306a36Sopenharmony_ci */
199762306a36Sopenharmony_ci
199862306a36Sopenharmony_cistatic unsigned long addr_to_vb_idx(unsigned long addr)
199962306a36Sopenharmony_ci{
200062306a36Sopenharmony_ci	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
200162306a36Sopenharmony_ci	addr /= VMAP_BLOCK_SIZE;
200262306a36Sopenharmony_ci	return addr;
200362306a36Sopenharmony_ci}
200462306a36Sopenharmony_ci
200562306a36Sopenharmony_cistatic void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
200662306a36Sopenharmony_ci{
200762306a36Sopenharmony_ci	unsigned long addr;
200862306a36Sopenharmony_ci
200962306a36Sopenharmony_ci	addr = va_start + (pages_off << PAGE_SHIFT);
201062306a36Sopenharmony_ci	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
201162306a36Sopenharmony_ci	return (void *)addr;
201262306a36Sopenharmony_ci}
201362306a36Sopenharmony_ci
201462306a36Sopenharmony_ci/**
201562306a36Sopenharmony_ci * new_vmap_block - allocates a new vmap_block and occupies 2^order pages in
201662306a36Sopenharmony_ci *                  this block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
201762306a36Sopenharmony_ci * @order:    allocate 2^order pages from the newly allocated block
201862306a36Sopenharmony_ci * @gfp_mask: flags for the page level allocator
201962306a36Sopenharmony_ci *
202062306a36Sopenharmony_ci * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
202162306a36Sopenharmony_ci */
202262306a36Sopenharmony_cistatic void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
202362306a36Sopenharmony_ci{
202462306a36Sopenharmony_ci	struct vmap_block_queue *vbq;
202562306a36Sopenharmony_ci	struct vmap_block *vb;
202662306a36Sopenharmony_ci	struct vmap_area *va;
202762306a36Sopenharmony_ci	struct xarray *xa;
202862306a36Sopenharmony_ci	unsigned long vb_idx;
202962306a36Sopenharmony_ci	int node, err;
203062306a36Sopenharmony_ci	void *vaddr;
203162306a36Sopenharmony_ci
203262306a36Sopenharmony_ci	node = numa_node_id();
203362306a36Sopenharmony_ci
203462306a36Sopenharmony_ci	vb = kmalloc_node(sizeof(struct vmap_block),
203562306a36Sopenharmony_ci			gfp_mask & GFP_RECLAIM_MASK, node);
203662306a36Sopenharmony_ci	if (unlikely(!vb))
203762306a36Sopenharmony_ci		return ERR_PTR(-ENOMEM);
203862306a36Sopenharmony_ci
203962306a36Sopenharmony_ci	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
204062306a36Sopenharmony_ci					VMALLOC_START, VMALLOC_END,
204162306a36Sopenharmony_ci					node, gfp_mask,
204262306a36Sopenharmony_ci					VMAP_RAM|VMAP_BLOCK);
204362306a36Sopenharmony_ci	if (IS_ERR(va)) {
204462306a36Sopenharmony_ci		kfree(vb);
204562306a36Sopenharmony_ci		return ERR_CAST(va);
204662306a36Sopenharmony_ci	}
204762306a36Sopenharmony_ci
204862306a36Sopenharmony_ci	vaddr = vmap_block_vaddr(va->va_start, 0);
204962306a36Sopenharmony_ci	spin_lock_init(&vb->lock);
205062306a36Sopenharmony_ci	vb->va = va;
205162306a36Sopenharmony_ci	/* At least something should be left free */
205262306a36Sopenharmony_ci	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
205362306a36Sopenharmony_ci	bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
205462306a36Sopenharmony_ci	vb->free = VMAP_BBMAP_BITS - (1UL << order);
205562306a36Sopenharmony_ci	vb->dirty = 0;
205662306a36Sopenharmony_ci	vb->dirty_min = VMAP_BBMAP_BITS;
205762306a36Sopenharmony_ci	vb->dirty_max = 0;
205862306a36Sopenharmony_ci	bitmap_set(vb->used_map, 0, (1UL << order));
205962306a36Sopenharmony_ci	INIT_LIST_HEAD(&vb->free_list);
206062306a36Sopenharmony_ci
206162306a36Sopenharmony_ci	xa = addr_to_vb_xa(va->va_start);
206262306a36Sopenharmony_ci	vb_idx = addr_to_vb_idx(va->va_start);
206362306a36Sopenharmony_ci	err = xa_insert(xa, vb_idx, vb, gfp_mask);
206462306a36Sopenharmony_ci	if (err) {
206562306a36Sopenharmony_ci		kfree(vb);
206662306a36Sopenharmony_ci		free_vmap_area(va);
206762306a36Sopenharmony_ci		return ERR_PTR(err);
206862306a36Sopenharmony_ci	}
206962306a36Sopenharmony_ci
207062306a36Sopenharmony_ci	vbq = raw_cpu_ptr(&vmap_block_queue);
207162306a36Sopenharmony_ci	spin_lock(&vbq->lock);
207262306a36Sopenharmony_ci	list_add_tail_rcu(&vb->free_list, &vbq->free);
207362306a36Sopenharmony_ci	spin_unlock(&vbq->lock);
207462306a36Sopenharmony_ci
207562306a36Sopenharmony_ci	return vaddr;
207662306a36Sopenharmony_ci}
207762306a36Sopenharmony_ci
207862306a36Sopenharmony_cistatic void free_vmap_block(struct vmap_block *vb)
207962306a36Sopenharmony_ci{
208062306a36Sopenharmony_ci	struct vmap_block *tmp;
208162306a36Sopenharmony_ci	struct xarray *xa;
208262306a36Sopenharmony_ci
208362306a36Sopenharmony_ci	xa = addr_to_vb_xa(vb->va->va_start);
208462306a36Sopenharmony_ci	tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
208562306a36Sopenharmony_ci	BUG_ON(tmp != vb);
208662306a36Sopenharmony_ci
208762306a36Sopenharmony_ci	spin_lock(&vmap_area_lock);
208862306a36Sopenharmony_ci	unlink_va(vb->va, &vmap_area_root);
208962306a36Sopenharmony_ci	spin_unlock(&vmap_area_lock);
209062306a36Sopenharmony_ci
209162306a36Sopenharmony_ci	free_vmap_area_noflush(vb->va);
209262306a36Sopenharmony_ci	kfree_rcu(vb, rcu_head);
209362306a36Sopenharmony_ci}
209462306a36Sopenharmony_ci
209562306a36Sopenharmony_cistatic bool purge_fragmented_block(struct vmap_block *vb,
209662306a36Sopenharmony_ci		struct vmap_block_queue *vbq, struct list_head *purge_list,
209762306a36Sopenharmony_ci		bool force_purge)
209862306a36Sopenharmony_ci{
209962306a36Sopenharmony_ci	if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
210062306a36Sopenharmony_ci	    vb->dirty == VMAP_BBMAP_BITS)
210162306a36Sopenharmony_ci		return false;
210262306a36Sopenharmony_ci
210362306a36Sopenharmony_ci	/* Don't overeagerly purge usable blocks unless requested */
210462306a36Sopenharmony_ci	if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
210562306a36Sopenharmony_ci		return false;
210662306a36Sopenharmony_ci
210762306a36Sopenharmony_ci	/* prevent further allocs after releasing lock */
210862306a36Sopenharmony_ci	WRITE_ONCE(vb->free, 0);
210962306a36Sopenharmony_ci	/* prevent purging it again */
211062306a36Sopenharmony_ci	WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
211162306a36Sopenharmony_ci	vb->dirty_min = 0;
211262306a36Sopenharmony_ci	vb->dirty_max = VMAP_BBMAP_BITS;
211362306a36Sopenharmony_ci	spin_lock(&vbq->lock);
211462306a36Sopenharmony_ci	list_del_rcu(&vb->free_list);
211562306a36Sopenharmony_ci	spin_unlock(&vbq->lock);
211662306a36Sopenharmony_ci	list_add_tail(&vb->purge, purge_list);
211762306a36Sopenharmony_ci	return true;
211862306a36Sopenharmony_ci}
211962306a36Sopenharmony_ci
212062306a36Sopenharmony_cistatic void free_purged_blocks(struct list_head *purge_list)
212162306a36Sopenharmony_ci{
212262306a36Sopenharmony_ci	struct vmap_block *vb, *n_vb;
212362306a36Sopenharmony_ci
212462306a36Sopenharmony_ci	list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
212562306a36Sopenharmony_ci		list_del(&vb->purge);
212662306a36Sopenharmony_ci		free_vmap_block(vb);
212762306a36Sopenharmony_ci	}
212862306a36Sopenharmony_ci}
212962306a36Sopenharmony_ci
213062306a36Sopenharmony_cistatic void purge_fragmented_blocks(int cpu)
213162306a36Sopenharmony_ci{
213262306a36Sopenharmony_ci	LIST_HEAD(purge);
213362306a36Sopenharmony_ci	struct vmap_block *vb;
213462306a36Sopenharmony_ci	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
213562306a36Sopenharmony_ci
213662306a36Sopenharmony_ci	rcu_read_lock();
213762306a36Sopenharmony_ci	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
213862306a36Sopenharmony_ci		unsigned long free = READ_ONCE(vb->free);
213962306a36Sopenharmony_ci		unsigned long dirty = READ_ONCE(vb->dirty);
214062306a36Sopenharmony_ci
214162306a36Sopenharmony_ci		if (free + dirty != VMAP_BBMAP_BITS ||
214262306a36Sopenharmony_ci		    dirty == VMAP_BBMAP_BITS)
214362306a36Sopenharmony_ci			continue;
214462306a36Sopenharmony_ci
214562306a36Sopenharmony_ci		spin_lock(&vb->lock);
214662306a36Sopenharmony_ci		purge_fragmented_block(vb, vbq, &purge, true);
214762306a36Sopenharmony_ci		spin_unlock(&vb->lock);
214862306a36Sopenharmony_ci	}
214962306a36Sopenharmony_ci	rcu_read_unlock();
215062306a36Sopenharmony_ci	free_purged_blocks(&purge);
215162306a36Sopenharmony_ci}
215262306a36Sopenharmony_ci
215362306a36Sopenharmony_cistatic void purge_fragmented_blocks_allcpus(void)
215462306a36Sopenharmony_ci{
215562306a36Sopenharmony_ci	int cpu;
215662306a36Sopenharmony_ci
215762306a36Sopenharmony_ci	for_each_possible_cpu(cpu)
215862306a36Sopenharmony_ci		purge_fragmented_blocks(cpu);
215962306a36Sopenharmony_ci}
216062306a36Sopenharmony_ci
216162306a36Sopenharmony_cistatic void *vb_alloc(unsigned long size, gfp_t gfp_mask)
216262306a36Sopenharmony_ci{
216362306a36Sopenharmony_ci	struct vmap_block_queue *vbq;
216462306a36Sopenharmony_ci	struct vmap_block *vb;
216562306a36Sopenharmony_ci	void *vaddr = NULL;
216662306a36Sopenharmony_ci	unsigned int order;
216762306a36Sopenharmony_ci
216862306a36Sopenharmony_ci	BUG_ON(offset_in_page(size));
216962306a36Sopenharmony_ci	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
217062306a36Sopenharmony_ci	if (WARN_ON(size == 0)) {
217162306a36Sopenharmony_ci		/*
217262306a36Sopenharmony_ci		 * Allocating 0 bytes isn't what the caller wants, since
217362306a36Sopenharmony_ci		 * get_order(0) returns a nonsensical result. Just warn
217462306a36Sopenharmony_ci		 * and terminate early.
217562306a36Sopenharmony_ci		 */
217662306a36Sopenharmony_ci		return NULL;
217762306a36Sopenharmony_ci	}
217862306a36Sopenharmony_ci	order = get_order(size);
217962306a36Sopenharmony_ci
218062306a36Sopenharmony_ci	rcu_read_lock();
218162306a36Sopenharmony_ci	vbq = raw_cpu_ptr(&vmap_block_queue);
218262306a36Sopenharmony_ci	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
218362306a36Sopenharmony_ci		unsigned long pages_off;
218462306a36Sopenharmony_ci
218562306a36Sopenharmony_ci		if (READ_ONCE(vb->free) < (1UL << order))
218662306a36Sopenharmony_ci			continue;
218762306a36Sopenharmony_ci
218862306a36Sopenharmony_ci		spin_lock(&vb->lock);
218962306a36Sopenharmony_ci		if (vb->free < (1UL << order)) {
219062306a36Sopenharmony_ci			spin_unlock(&vb->lock);
219162306a36Sopenharmony_ci			continue;
219262306a36Sopenharmony_ci		}
219362306a36Sopenharmony_ci
219462306a36Sopenharmony_ci		pages_off = VMAP_BBMAP_BITS - vb->free;
219562306a36Sopenharmony_ci		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
219662306a36Sopenharmony_ci		WRITE_ONCE(vb->free, vb->free - (1UL << order));
219762306a36Sopenharmony_ci		bitmap_set(vb->used_map, pages_off, (1UL << order));
219862306a36Sopenharmony_ci		if (vb->free == 0) {
219962306a36Sopenharmony_ci			spin_lock(&vbq->lock);
220062306a36Sopenharmony_ci			list_del_rcu(&vb->free_list);
220162306a36Sopenharmony_ci			spin_unlock(&vbq->lock);
220262306a36Sopenharmony_ci		}
220362306a36Sopenharmony_ci
220462306a36Sopenharmony_ci		spin_unlock(&vb->lock);
220562306a36Sopenharmony_ci		break;
220662306a36Sopenharmony_ci	}
220762306a36Sopenharmony_ci
220862306a36Sopenharmony_ci	rcu_read_unlock();
220962306a36Sopenharmony_ci
221062306a36Sopenharmony_ci	/* Allocate new block if nothing was found */
221162306a36Sopenharmony_ci	if (!vaddr)
221262306a36Sopenharmony_ci		vaddr = new_vmap_block(order, gfp_mask);
221362306a36Sopenharmony_ci
221462306a36Sopenharmony_ci	return vaddr;
221562306a36Sopenharmony_ci}
221662306a36Sopenharmony_ci
221762306a36Sopenharmony_cistatic void vb_free(unsigned long addr, unsigned long size)
221862306a36Sopenharmony_ci{
221962306a36Sopenharmony_ci	unsigned long offset;
222062306a36Sopenharmony_ci	unsigned int order;
222162306a36Sopenharmony_ci	struct vmap_block *vb;
222262306a36Sopenharmony_ci	struct xarray *xa;
222362306a36Sopenharmony_ci
222462306a36Sopenharmony_ci	BUG_ON(offset_in_page(size));
222562306a36Sopenharmony_ci	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
222662306a36Sopenharmony_ci
222762306a36Sopenharmony_ci	flush_cache_vunmap(addr, addr + size);
222862306a36Sopenharmony_ci
222962306a36Sopenharmony_ci	order = get_order(size);
223062306a36Sopenharmony_ci	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
223162306a36Sopenharmony_ci
223262306a36Sopenharmony_ci	xa = addr_to_vb_xa(addr);
223362306a36Sopenharmony_ci	vb = xa_load(xa, addr_to_vb_idx(addr));
223462306a36Sopenharmony_ci
223562306a36Sopenharmony_ci	spin_lock(&vb->lock);
223662306a36Sopenharmony_ci	bitmap_clear(vb->used_map, offset, (1UL << order));
223762306a36Sopenharmony_ci	spin_unlock(&vb->lock);
223862306a36Sopenharmony_ci
223962306a36Sopenharmony_ci	vunmap_range_noflush(addr, addr + size);
224062306a36Sopenharmony_ci
224162306a36Sopenharmony_ci	if (debug_pagealloc_enabled_static())
224262306a36Sopenharmony_ci		flush_tlb_kernel_range(addr, addr + size);
224362306a36Sopenharmony_ci
224462306a36Sopenharmony_ci	spin_lock(&vb->lock);
224562306a36Sopenharmony_ci
224662306a36Sopenharmony_ci	/* Expand the dirty range that has not yet been TLB flushed */
224762306a36Sopenharmony_ci	vb->dirty_min = min(vb->dirty_min, offset);
224862306a36Sopenharmony_ci	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
224962306a36Sopenharmony_ci
225062306a36Sopenharmony_ci	WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
225162306a36Sopenharmony_ci	if (vb->dirty == VMAP_BBMAP_BITS) {
225262306a36Sopenharmony_ci		BUG_ON(vb->free);
225362306a36Sopenharmony_ci		spin_unlock(&vb->lock);
225462306a36Sopenharmony_ci		free_vmap_block(vb);
225562306a36Sopenharmony_ci	} else
225662306a36Sopenharmony_ci		spin_unlock(&vb->lock);
225762306a36Sopenharmony_ci}
225862306a36Sopenharmony_ci
225962306a36Sopenharmony_cistatic void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
226062306a36Sopenharmony_ci{
226162306a36Sopenharmony_ci	LIST_HEAD(purge_list);
226262306a36Sopenharmony_ci	int cpu;
226362306a36Sopenharmony_ci
226462306a36Sopenharmony_ci	if (unlikely(!vmap_initialized))
226562306a36Sopenharmony_ci		return;
226662306a36Sopenharmony_ci
226762306a36Sopenharmony_ci	mutex_lock(&vmap_purge_lock);
226862306a36Sopenharmony_ci
226962306a36Sopenharmony_ci	for_each_possible_cpu(cpu) {
227062306a36Sopenharmony_ci		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
227162306a36Sopenharmony_ci		struct vmap_block *vb;
227262306a36Sopenharmony_ci		unsigned long idx;
227362306a36Sopenharmony_ci
227462306a36Sopenharmony_ci		rcu_read_lock();
227562306a36Sopenharmony_ci		xa_for_each(&vbq->vmap_blocks, idx, vb) {
227662306a36Sopenharmony_ci			spin_lock(&vb->lock);
227762306a36Sopenharmony_ci
227862306a36Sopenharmony_ci			/*
227962306a36Sopenharmony_ci			 * Try to purge a fragmented block first. If it's
228062306a36Sopenharmony_ci			 * not purgeable, check whether there is dirty
228162306a36Sopenharmony_ci			 * space to be flushed.
228262306a36Sopenharmony_ci			 */
228362306a36Sopenharmony_ci			if (!purge_fragmented_block(vb, vbq, &purge_list, false) &&
228462306a36Sopenharmony_ci			    vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
228562306a36Sopenharmony_ci				unsigned long va_start = vb->va->va_start;
228662306a36Sopenharmony_ci				unsigned long s, e;
228762306a36Sopenharmony_ci
228862306a36Sopenharmony_ci				s = va_start + (vb->dirty_min << PAGE_SHIFT);
228962306a36Sopenharmony_ci				e = va_start + (vb->dirty_max << PAGE_SHIFT);
229062306a36Sopenharmony_ci
229162306a36Sopenharmony_ci				start = min(s, start);
229262306a36Sopenharmony_ci				end   = max(e, end);
229362306a36Sopenharmony_ci
229462306a36Sopenharmony_ci				/* Prevent this range from being flushed again */
229562306a36Sopenharmony_ci				vb->dirty_min = VMAP_BBMAP_BITS;
229662306a36Sopenharmony_ci				vb->dirty_max = 0;
229762306a36Sopenharmony_ci
229862306a36Sopenharmony_ci				flush = 1;
229962306a36Sopenharmony_ci			}
230062306a36Sopenharmony_ci			spin_unlock(&vb->lock);
230162306a36Sopenharmony_ci		}
230262306a36Sopenharmony_ci		rcu_read_unlock();
230362306a36Sopenharmony_ci	}
230462306a36Sopenharmony_ci	free_purged_blocks(&purge_list);
230562306a36Sopenharmony_ci
230662306a36Sopenharmony_ci	if (!__purge_vmap_area_lazy(start, end) && flush)
230762306a36Sopenharmony_ci		flush_tlb_kernel_range(start, end);
230862306a36Sopenharmony_ci	mutex_unlock(&vmap_purge_lock);
230962306a36Sopenharmony_ci}
231062306a36Sopenharmony_ci
231162306a36Sopenharmony_ci/**
231262306a36Sopenharmony_ci * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
231362306a36Sopenharmony_ci *
231462306a36Sopenharmony_ci * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
231562306a36Sopenharmony_ci * to amortize TLB flushing overheads. This means that any page you have
231662306a36Sopenharmony_ci * now may, in a former life, have been mapped into a kernel virtual
231762306a36Sopenharmony_ci * address by the vmap layer, so some CPUs may still hold TLB entries
231862306a36Sopenharmony_ci * referencing that page (in addition to the regular 1:1 kernel mapping).
231962306a36Sopenharmony_ci *
232062306a36Sopenharmony_ci * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
232162306a36Sopenharmony_ci * be sure that none of the pages we have control over will have any aliases
232262306a36Sopenharmony_ci * from the vmap layer.
232362306a36Sopenharmony_ci */
232462306a36Sopenharmony_civoid vm_unmap_aliases(void)
232562306a36Sopenharmony_ci{
232662306a36Sopenharmony_ci	unsigned long start = ULONG_MAX, end = 0;
232762306a36Sopenharmony_ci	int flush = 0;
232862306a36Sopenharmony_ci
232962306a36Sopenharmony_ci	_vm_unmap_aliases(start, end, flush);
233062306a36Sopenharmony_ci}
233162306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(vm_unmap_aliases);
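
/*
 * Illustrative sketch (hypothetical caller, not part of this file): code
 * that is about to change protections on pages it owns may want to flush
 * stale lazy vmap aliases first.  Whether this is actually required
 * depends on the architecture's set_memory_*() implementation, some of
 * which already take care of it internally.
 *
 *	static int example_protect_buffer(void *buf, int nr_pages)
 *	{
 *		vm_unmap_aliases();
 *		return set_memory_ro((unsigned long)buf, nr_pages);
 *	}
 */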
233262306a36Sopenharmony_ci
233362306a36Sopenharmony_ci/**
233462306a36Sopenharmony_ci * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
233562306a36Sopenharmony_ci * @mem: the pointer returned by vm_map_ram
233662306a36Sopenharmony_ci * @count: the count passed to that vm_map_ram call (cannot unmap partial)
233762306a36Sopenharmony_ci */
233862306a36Sopenharmony_civoid vm_unmap_ram(const void *mem, unsigned int count)
233962306a36Sopenharmony_ci{
234062306a36Sopenharmony_ci	unsigned long size = (unsigned long)count << PAGE_SHIFT;
234162306a36Sopenharmony_ci	unsigned long addr = (unsigned long)kasan_reset_tag(mem);
234262306a36Sopenharmony_ci	struct vmap_area *va;
234362306a36Sopenharmony_ci
234462306a36Sopenharmony_ci	might_sleep();
234562306a36Sopenharmony_ci	BUG_ON(!addr);
234662306a36Sopenharmony_ci	BUG_ON(addr < VMALLOC_START);
234762306a36Sopenharmony_ci	BUG_ON(addr > VMALLOC_END);
234862306a36Sopenharmony_ci	BUG_ON(!PAGE_ALIGNED(addr));
234962306a36Sopenharmony_ci
235062306a36Sopenharmony_ci	kasan_poison_vmalloc(mem, size);
235162306a36Sopenharmony_ci
235262306a36Sopenharmony_ci	if (likely(count <= VMAP_MAX_ALLOC)) {
235362306a36Sopenharmony_ci		debug_check_no_locks_freed(mem, size);
235462306a36Sopenharmony_ci		vb_free(addr, size);
235562306a36Sopenharmony_ci		return;
235662306a36Sopenharmony_ci	}
235762306a36Sopenharmony_ci
235862306a36Sopenharmony_ci	va = find_unlink_vmap_area(addr);
235962306a36Sopenharmony_ci	if (WARN_ON_ONCE(!va))
236062306a36Sopenharmony_ci		return;
236162306a36Sopenharmony_ci
236262306a36Sopenharmony_ci	debug_check_no_locks_freed((void *)va->va_start,
236362306a36Sopenharmony_ci				    (va->va_end - va->va_start));
236462306a36Sopenharmony_ci	free_unmap_vmap_area(va);
236562306a36Sopenharmony_ci}
236662306a36Sopenharmony_ciEXPORT_SYMBOL(vm_unmap_ram);
236762306a36Sopenharmony_ci
236862306a36Sopenharmony_ci/**
236962306a36Sopenharmony_ci * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
237062306a36Sopenharmony_ci * @pages: an array of pointers to the pages to be mapped
237162306a36Sopenharmony_ci * @count: number of pages
237262306a36Sopenharmony_ci * @node: prefer to allocate data structures on this node
237362306a36Sopenharmony_ci *
237462306a36Sopenharmony_ci * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
237562306a36Sopenharmony_ci * faster than vmap().  However, if you mix long-lived and short-lived
237662306a36Sopenharmony_ci * objects with vm_map_ram(), it can consume lots of address space through
237762306a36Sopenharmony_ci * fragmentation (especially on a 32-bit machine), eventually leading to
237862306a36Sopenharmony_ci * allocation failures.  Use this function only for short-lived objects.
237962306a36Sopenharmony_ci *
238062306a36Sopenharmony_ci * Returns: a pointer to the address that has been mapped, or %NULL on failure
238162306a36Sopenharmony_ci */
238262306a36Sopenharmony_civoid *vm_map_ram(struct page **pages, unsigned int count, int node)
238362306a36Sopenharmony_ci{
238462306a36Sopenharmony_ci	unsigned long size = (unsigned long)count << PAGE_SHIFT;
238562306a36Sopenharmony_ci	unsigned long addr;
238662306a36Sopenharmony_ci	void *mem;
238762306a36Sopenharmony_ci
238862306a36Sopenharmony_ci	if (likely(count <= VMAP_MAX_ALLOC)) {
238962306a36Sopenharmony_ci		mem = vb_alloc(size, GFP_KERNEL);
239062306a36Sopenharmony_ci		if (IS_ERR(mem))
239162306a36Sopenharmony_ci			return NULL;
239262306a36Sopenharmony_ci		addr = (unsigned long)mem;
239362306a36Sopenharmony_ci	} else {
239462306a36Sopenharmony_ci		struct vmap_area *va;
239562306a36Sopenharmony_ci		va = alloc_vmap_area(size, PAGE_SIZE,
239662306a36Sopenharmony_ci				VMALLOC_START, VMALLOC_END,
239762306a36Sopenharmony_ci				node, GFP_KERNEL, VMAP_RAM);
239862306a36Sopenharmony_ci		if (IS_ERR(va))
239962306a36Sopenharmony_ci			return NULL;
240062306a36Sopenharmony_ci
240162306a36Sopenharmony_ci		addr = va->va_start;
240262306a36Sopenharmony_ci		mem = (void *)addr;
240362306a36Sopenharmony_ci	}
240462306a36Sopenharmony_ci
240562306a36Sopenharmony_ci	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
240662306a36Sopenharmony_ci				pages, PAGE_SHIFT) < 0) {
240762306a36Sopenharmony_ci		vm_unmap_ram(mem, count);
240862306a36Sopenharmony_ci		return NULL;
240962306a36Sopenharmony_ci	}
241062306a36Sopenharmony_ci
241162306a36Sopenharmony_ci	/*
241262306a36Sopenharmony_ci	 * Mark the pages as accessible, now that they are mapped.
241362306a36Sopenharmony_ci	 * With hardware tag-based KASAN, marking is skipped for
241462306a36Sopenharmony_ci	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
241562306a36Sopenharmony_ci	 */
241662306a36Sopenharmony_ci	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
241762306a36Sopenharmony_ci
241862306a36Sopenharmony_ci	return mem;
241962306a36Sopenharmony_ci}
242062306a36Sopenharmony_ciEXPORT_SYMBOL(vm_map_ram);
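
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * intended pattern is a short-lived mapping of a small page array, torn
 * down with vm_unmap_ram() using the same page count.
 *
 *	static void example_zero_pages(struct page **pages, unsigned int count)
 *	{
 *		void *mem;
 *
 *		mem = vm_map_ram(pages, count, NUMA_NO_NODE);
 *		if (!mem)
 *			return;
 *
 *		memset(mem, 0, (unsigned long)count << PAGE_SHIFT);
 *		vm_unmap_ram(mem, count);
 *	}
 */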
242162306a36Sopenharmony_ci
242262306a36Sopenharmony_cistatic struct vm_struct *vmlist __initdata;
242362306a36Sopenharmony_ci
242462306a36Sopenharmony_cistatic inline unsigned int vm_area_page_order(struct vm_struct *vm)
242562306a36Sopenharmony_ci{
242662306a36Sopenharmony_ci#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
242762306a36Sopenharmony_ci	return vm->page_order;
242862306a36Sopenharmony_ci#else
242962306a36Sopenharmony_ci	return 0;
243062306a36Sopenharmony_ci#endif
243162306a36Sopenharmony_ci}
243262306a36Sopenharmony_ci
243362306a36Sopenharmony_cistatic inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
243462306a36Sopenharmony_ci{
243562306a36Sopenharmony_ci#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
243662306a36Sopenharmony_ci	vm->page_order = order;
243762306a36Sopenharmony_ci#else
243862306a36Sopenharmony_ci	BUG_ON(order != 0);
243962306a36Sopenharmony_ci#endif
244062306a36Sopenharmony_ci}
244162306a36Sopenharmony_ci
244262306a36Sopenharmony_ci/**
244362306a36Sopenharmony_ci * vm_area_add_early - add vmap area early during boot
244462306a36Sopenharmony_ci * @vm: vm_struct to add
244562306a36Sopenharmony_ci *
244662306a36Sopenharmony_ci * This function is used to add a fixed kernel vm area to vmlist before
244762306a36Sopenharmony_ci * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
244862306a36Sopenharmony_ci * should contain proper values and the other fields should be zero.
244962306a36Sopenharmony_ci *
245062306a36Sopenharmony_ci * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
245162306a36Sopenharmony_ci */
245262306a36Sopenharmony_civoid __init vm_area_add_early(struct vm_struct *vm)
245362306a36Sopenharmony_ci{
245462306a36Sopenharmony_ci	struct vm_struct *tmp, **p;
245562306a36Sopenharmony_ci
245662306a36Sopenharmony_ci	BUG_ON(vmap_initialized);
245762306a36Sopenharmony_ci	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
245862306a36Sopenharmony_ci		if (tmp->addr >= vm->addr) {
245962306a36Sopenharmony_ci			BUG_ON(tmp->addr < vm->addr + vm->size);
246062306a36Sopenharmony_ci			break;
246162306a36Sopenharmony_ci		} else
246262306a36Sopenharmony_ci			BUG_ON(tmp->addr + tmp->size > vm->addr);
246362306a36Sopenharmony_ci	}
246462306a36Sopenharmony_ci	vm->next = *p;
246562306a36Sopenharmony_ci	*p = vm;
246662306a36Sopenharmony_ci}
246762306a36Sopenharmony_ci
246862306a36Sopenharmony_ci/**
246962306a36Sopenharmony_ci * vm_area_register_early - register vmap area early during boot
247062306a36Sopenharmony_ci * @vm: vm_struct to register
247162306a36Sopenharmony_ci * @align: requested alignment
247262306a36Sopenharmony_ci *
247362306a36Sopenharmony_ci * This function is used to register a kernel vm area before
247462306a36Sopenharmony_ci * vmalloc_init() is called.  @vm->size and @vm->flags should contain
247562306a36Sopenharmony_ci * proper values on entry and other fields should be zero.  On return,
247662306a36Sopenharmony_ci * vm->addr contains the allocated address.
247762306a36Sopenharmony_ci *
247862306a36Sopenharmony_ci * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
247962306a36Sopenharmony_ci */
248062306a36Sopenharmony_civoid __init vm_area_register_early(struct vm_struct *vm, size_t align)
248162306a36Sopenharmony_ci{
248262306a36Sopenharmony_ci	unsigned long addr = ALIGN(VMALLOC_START, align);
248362306a36Sopenharmony_ci	struct vm_struct *cur, **p;
248462306a36Sopenharmony_ci
248562306a36Sopenharmony_ci	BUG_ON(vmap_initialized);
248662306a36Sopenharmony_ci
248762306a36Sopenharmony_ci	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
248862306a36Sopenharmony_ci		if ((unsigned long)cur->addr - addr >= vm->size)
248962306a36Sopenharmony_ci			break;
249062306a36Sopenharmony_ci		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
249162306a36Sopenharmony_ci	}
249262306a36Sopenharmony_ci
249362306a36Sopenharmony_ci	BUG_ON(addr > VMALLOC_END - vm->size);
249462306a36Sopenharmony_ci	vm->addr = (void *)addr;
249562306a36Sopenharmony_ci	vm->next = *p;
249662306a36Sopenharmony_ci	*p = vm;
249762306a36Sopenharmony_ci	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
249862306a36Sopenharmony_ci}
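
/*
 * Illustrative sketch (hypothetical boot-time caller; names and sizes are
 * made up): the caller fills in ->size and ->flags of a static vm_struct
 * and gets ->addr assigned from the vmalloc range.
 *
 *	static struct vm_struct example_early_vm;
 *
 *	void __init example_reserve_early_area(void)
 *	{
 *		example_early_vm.flags = VM_ALLOC;
 *		example_early_vm.size = 16 * PAGE_SIZE;
 *		vm_area_register_early(&example_early_vm, PAGE_SIZE);
 *	}
 *
 * After the call, example_early_vm.addr holds the reserved address.
 */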
249962306a36Sopenharmony_ci
250062306a36Sopenharmony_cistatic void vmap_init_free_space(void)
250162306a36Sopenharmony_ci{
250262306a36Sopenharmony_ci	unsigned long vmap_start = 1;
250362306a36Sopenharmony_ci	const unsigned long vmap_end = ULONG_MAX;
250462306a36Sopenharmony_ci	struct vmap_area *busy, *free;
250562306a36Sopenharmony_ci
250662306a36Sopenharmony_ci	/*
250762306a36Sopenharmony_ci	 *     B     F     B     B     B     F
250862306a36Sopenharmony_ci	 * -|-----|.....|-----|-----|-----|.....|-
250962306a36Sopenharmony_ci	 *  |           The KVA space           |
251062306a36Sopenharmony_ci	 *  |<--------------------------------->|
251162306a36Sopenharmony_ci	 */
251262306a36Sopenharmony_ci	list_for_each_entry(busy, &vmap_area_list, list) {
251362306a36Sopenharmony_ci		if (busy->va_start - vmap_start > 0) {
251462306a36Sopenharmony_ci			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
251562306a36Sopenharmony_ci			if (!WARN_ON_ONCE(!free)) {
251662306a36Sopenharmony_ci				free->va_start = vmap_start;
251762306a36Sopenharmony_ci				free->va_end = busy->va_start;
251862306a36Sopenharmony_ci
251962306a36Sopenharmony_ci				insert_vmap_area_augment(free, NULL,
252062306a36Sopenharmony_ci					&free_vmap_area_root,
252162306a36Sopenharmony_ci						&free_vmap_area_list);
252262306a36Sopenharmony_ci			}
252362306a36Sopenharmony_ci		}
252462306a36Sopenharmony_ci
252562306a36Sopenharmony_ci		vmap_start = busy->va_end;
252662306a36Sopenharmony_ci	}
252762306a36Sopenharmony_ci
252862306a36Sopenharmony_ci	if (vmap_end - vmap_start > 0) {
252962306a36Sopenharmony_ci		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
253062306a36Sopenharmony_ci		if (!WARN_ON_ONCE(!free)) {
253162306a36Sopenharmony_ci			free->va_start = vmap_start;
253262306a36Sopenharmony_ci			free->va_end = vmap_end;
253362306a36Sopenharmony_ci
253462306a36Sopenharmony_ci			insert_vmap_area_augment(free, NULL,
253562306a36Sopenharmony_ci				&free_vmap_area_root,
253662306a36Sopenharmony_ci					&free_vmap_area_list);
253762306a36Sopenharmony_ci		}
253862306a36Sopenharmony_ci	}
253962306a36Sopenharmony_ci}
254062306a36Sopenharmony_ci
254162306a36Sopenharmony_cistatic inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
254262306a36Sopenharmony_ci	struct vmap_area *va, unsigned long flags, const void *caller)
254362306a36Sopenharmony_ci{
254462306a36Sopenharmony_ci	vm->flags = flags;
254562306a36Sopenharmony_ci	vm->addr = (void *)va->va_start;
254662306a36Sopenharmony_ci	vm->size = va->va_end - va->va_start;
254762306a36Sopenharmony_ci	vm->caller = caller;
254862306a36Sopenharmony_ci	va->vm = vm;
254962306a36Sopenharmony_ci}
255062306a36Sopenharmony_ci
255162306a36Sopenharmony_cistatic void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
255262306a36Sopenharmony_ci			      unsigned long flags, const void *caller)
255362306a36Sopenharmony_ci{
255462306a36Sopenharmony_ci	spin_lock(&vmap_area_lock);
255562306a36Sopenharmony_ci	setup_vmalloc_vm_locked(vm, va, flags, caller);
255662306a36Sopenharmony_ci	spin_unlock(&vmap_area_lock);
255762306a36Sopenharmony_ci}
255862306a36Sopenharmony_ci
255962306a36Sopenharmony_cistatic void clear_vm_uninitialized_flag(struct vm_struct *vm)
256062306a36Sopenharmony_ci{
256162306a36Sopenharmony_ci	/*
256262306a36Sopenharmony_ci	 * Before removing VM_UNINITIALIZED,
256362306a36Sopenharmony_ci	 * we should make sure that vm has proper values.
256462306a36Sopenharmony_ci	 * Pair with smp_rmb() in show_numa_info().
256562306a36Sopenharmony_ci	 */
256662306a36Sopenharmony_ci	smp_wmb();
256762306a36Sopenharmony_ci	vm->flags &= ~VM_UNINITIALIZED;
256862306a36Sopenharmony_ci}
256962306a36Sopenharmony_ci
257062306a36Sopenharmony_cistatic struct vm_struct *__get_vm_area_node(unsigned long size,
257162306a36Sopenharmony_ci		unsigned long align, unsigned long shift, unsigned long flags,
257262306a36Sopenharmony_ci		unsigned long start, unsigned long end, int node,
257362306a36Sopenharmony_ci		gfp_t gfp_mask, const void *caller)
257462306a36Sopenharmony_ci{
257562306a36Sopenharmony_ci	struct vmap_area *va;
257662306a36Sopenharmony_ci	struct vm_struct *area;
257762306a36Sopenharmony_ci	unsigned long requested_size = size;
257862306a36Sopenharmony_ci
257962306a36Sopenharmony_ci	BUG_ON(in_interrupt());
258062306a36Sopenharmony_ci	size = ALIGN(size, 1ul << shift);
258162306a36Sopenharmony_ci	if (unlikely(!size))
258262306a36Sopenharmony_ci		return NULL;
258362306a36Sopenharmony_ci
258462306a36Sopenharmony_ci	if (flags & VM_IOREMAP)
258562306a36Sopenharmony_ci		align = 1ul << clamp_t(int, get_count_order_long(size),
258662306a36Sopenharmony_ci				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
258762306a36Sopenharmony_ci
258862306a36Sopenharmony_ci	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
258962306a36Sopenharmony_ci	if (unlikely(!area))
259062306a36Sopenharmony_ci		return NULL;
259162306a36Sopenharmony_ci
259262306a36Sopenharmony_ci	if (!(flags & VM_NO_GUARD))
259362306a36Sopenharmony_ci		size += PAGE_SIZE;
259462306a36Sopenharmony_ci
259562306a36Sopenharmony_ci	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
259662306a36Sopenharmony_ci	if (IS_ERR(va)) {
259762306a36Sopenharmony_ci		kfree(area);
259862306a36Sopenharmony_ci		return NULL;
259962306a36Sopenharmony_ci	}
260062306a36Sopenharmony_ci
260162306a36Sopenharmony_ci	setup_vmalloc_vm(area, va, flags, caller);
260262306a36Sopenharmony_ci
260362306a36Sopenharmony_ci	/*
260462306a36Sopenharmony_ci	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
260562306a36Sopenharmony_ci	 * best-effort approach, as they can be mapped outside of vmalloc code.
260662306a36Sopenharmony_ci	 * For VM_ALLOC mappings, the pages are marked as accessible after
260762306a36Sopenharmony_ci	 * getting mapped in __vmalloc_node_range().
260862306a36Sopenharmony_ci	 * With hardware tag-based KASAN, marking is skipped for
260962306a36Sopenharmony_ci	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
261062306a36Sopenharmony_ci	 */
261162306a36Sopenharmony_ci	if (!(flags & VM_ALLOC))
261262306a36Sopenharmony_ci		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
261362306a36Sopenharmony_ci						    KASAN_VMALLOC_PROT_NORMAL);
261462306a36Sopenharmony_ci
261562306a36Sopenharmony_ci	return area;
261662306a36Sopenharmony_ci}
261762306a36Sopenharmony_ci
261862306a36Sopenharmony_cistruct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
261962306a36Sopenharmony_ci				       unsigned long start, unsigned long end,
262062306a36Sopenharmony_ci				       const void *caller)
262162306a36Sopenharmony_ci{
262262306a36Sopenharmony_ci	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
262362306a36Sopenharmony_ci				  NUMA_NO_NODE, GFP_KERNEL, caller);
262462306a36Sopenharmony_ci}
262562306a36Sopenharmony_ci
262662306a36Sopenharmony_ci/**
262762306a36Sopenharmony_ci * get_vm_area - reserve a contiguous kernel virtual area
262862306a36Sopenharmony_ci * @size:	 size of the area
262962306a36Sopenharmony_ci * @flags:	 %VM_IOREMAP for I/O mappings or %VM_ALLOC
263062306a36Sopenharmony_ci *
263162306a36Sopenharmony_ci * Search for an area of @size in the kernel virtual mapping area,
263262306a36Sopenharmony_ci * and reserve it for our purposes.  Returns the area descriptor
263362306a36Sopenharmony_ci * on success or %NULL on failure.
263462306a36Sopenharmony_ci *
263562306a36Sopenharmony_ci * Return: the area descriptor on success or %NULL on failure.
263662306a36Sopenharmony_ci */
263762306a36Sopenharmony_cistruct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
263862306a36Sopenharmony_ci{
263962306a36Sopenharmony_ci	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
264062306a36Sopenharmony_ci				  VMALLOC_START, VMALLOC_END,
264162306a36Sopenharmony_ci				  NUMA_NO_NODE, GFP_KERNEL,
264262306a36Sopenharmony_ci				  __builtin_return_address(0));
264362306a36Sopenharmony_ci}
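
/*
 * Illustrative sketch (hypothetical helper; assumes @size is page aligned
 * and uses a simplified protection): get_vm_area() only reserves kernel
 * virtual space, so the caller maps something into it afterwards, for
 * example with ioremap_page_range(), and drops the reservation with
 * free_vm_area() on the error path.
 *
 *	static void *example_reserve_and_map(phys_addr_t phys, unsigned long size)
 *	{
 *		struct vm_struct *area;
 *		unsigned long vaddr;
 *
 *		area = get_vm_area(size, VM_IOREMAP);
 *		if (!area)
 *			return NULL;
 *
 *		vaddr = (unsigned long)area->addr;
 *		if (ioremap_page_range(vaddr, vaddr + size, phys, PAGE_KERNEL)) {
 *			free_vm_area(area);
 *			return NULL;
 *		}
 *		return area->addr;
 *	}
 */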
264462306a36Sopenharmony_ci
264562306a36Sopenharmony_cistruct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
264662306a36Sopenharmony_ci				const void *caller)
264762306a36Sopenharmony_ci{
264862306a36Sopenharmony_ci	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
264962306a36Sopenharmony_ci				  VMALLOC_START, VMALLOC_END,
265062306a36Sopenharmony_ci				  NUMA_NO_NODE, GFP_KERNEL, caller);
265162306a36Sopenharmony_ci}
265262306a36Sopenharmony_ci
265362306a36Sopenharmony_ci/**
265462306a36Sopenharmony_ci * find_vm_area - find a contiguous kernel virtual area
265562306a36Sopenharmony_ci * @addr:	  base address
265662306a36Sopenharmony_ci *
265762306a36Sopenharmony_ci * Search for the kernel VM area starting at @addr, and return it.
265862306a36Sopenharmony_ci * It is up to the caller to do all required locking to keep the returned
265962306a36Sopenharmony_ci * pointer valid.
266062306a36Sopenharmony_ci *
266162306a36Sopenharmony_ci * Return: the area descriptor on success or %NULL on failure.
266262306a36Sopenharmony_ci */
266362306a36Sopenharmony_cistruct vm_struct *find_vm_area(const void *addr)
266462306a36Sopenharmony_ci{
266562306a36Sopenharmony_ci	struct vmap_area *va;
266662306a36Sopenharmony_ci
266762306a36Sopenharmony_ci	va = find_vmap_area((unsigned long)addr);
266862306a36Sopenharmony_ci	if (!va)
266962306a36Sopenharmony_ci		return NULL;
267062306a36Sopenharmony_ci
267162306a36Sopenharmony_ci	return va->vm;
267262306a36Sopenharmony_ci}
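
/*
 * Illustrative sketch (hypothetical helper): a caller that owns @addr can
 * look up the backing descriptor, e.g. to query the mapped size.  It must
 * guarantee the area is not freed concurrently, as noted above.
 *
 *	static unsigned long example_vm_size(const void *addr)
 *	{
 *		struct vm_struct *vm = find_vm_area(addr);
 *
 *		return vm ? vm->size : 0;
 *	}
 */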
267362306a36Sopenharmony_ci
267462306a36Sopenharmony_ci/**
267562306a36Sopenharmony_ci * remove_vm_area - find and remove a contiguous kernel virtual area
267662306a36Sopenharmony_ci * @addr:	    base address
267762306a36Sopenharmony_ci *
267862306a36Sopenharmony_ci * Search for the kernel VM area starting at @addr, and remove it.
267962306a36Sopenharmony_ci * This function returns the found VM area, but using it is NOT safe
268062306a36Sopenharmony_ci * on SMP machines, except for its size or flags.
268162306a36Sopenharmony_ci *
268262306a36Sopenharmony_ci * Return: the area descriptor on success or %NULL on failure.
268362306a36Sopenharmony_ci */
268462306a36Sopenharmony_cistruct vm_struct *remove_vm_area(const void *addr)
268562306a36Sopenharmony_ci{
268662306a36Sopenharmony_ci	struct vmap_area *va;
268762306a36Sopenharmony_ci	struct vm_struct *vm;
268862306a36Sopenharmony_ci
268962306a36Sopenharmony_ci	might_sleep();
269062306a36Sopenharmony_ci
269162306a36Sopenharmony_ci	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
269262306a36Sopenharmony_ci			addr))
269362306a36Sopenharmony_ci		return NULL;
269462306a36Sopenharmony_ci
269562306a36Sopenharmony_ci	va = find_unlink_vmap_area((unsigned long)addr);
269662306a36Sopenharmony_ci	if (!va || !va->vm)
269762306a36Sopenharmony_ci		return NULL;
269862306a36Sopenharmony_ci	vm = va->vm;
269962306a36Sopenharmony_ci
270062306a36Sopenharmony_ci	debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
270162306a36Sopenharmony_ci	debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
270262306a36Sopenharmony_ci	kasan_free_module_shadow(vm);
270362306a36Sopenharmony_ci	kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
270462306a36Sopenharmony_ci
270562306a36Sopenharmony_ci	free_unmap_vmap_area(va);
270662306a36Sopenharmony_ci	return vm;
270762306a36Sopenharmony_ci}
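
/*
 * Illustrative sketch (hypothetical caller): code that set up its own
 * mapping inside an area can detach and release the descriptor itself.
 * Per the comment above, only ->size and ->flags are safe to inspect
 * after the area has been removed.
 *
 *	static void example_teardown(void *addr)
 *	{
 *		struct vm_struct *vm = remove_vm_area(addr);
 *
 *		if (!vm)
 *			return;
 *		pr_debug("released %lu bytes at %p\n", vm->size, addr);
 *		kfree(vm);
 *	}
 */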
270862306a36Sopenharmony_ci
270962306a36Sopenharmony_cistatic inline void set_area_direct_map(const struct vm_struct *area,
271062306a36Sopenharmony_ci				       int (*set_direct_map)(struct page *page))
271162306a36Sopenharmony_ci{
271262306a36Sopenharmony_ci	int i;
271362306a36Sopenharmony_ci
271462306a36Sopenharmony_ci	/* HUGE_VMALLOC passes small pages to set_direct_map */
271562306a36Sopenharmony_ci	for (i = 0; i < area->nr_pages; i++)
271662306a36Sopenharmony_ci		if (page_address(area->pages[i]))
271762306a36Sopenharmony_ci			set_direct_map(area->pages[i]);
271862306a36Sopenharmony_ci}
271962306a36Sopenharmony_ci
272062306a36Sopenharmony_ci/*
272162306a36Sopenharmony_ci * Flush the vm mapping and reset the direct map.
272262306a36Sopenharmony_ci */
272362306a36Sopenharmony_cistatic void vm_reset_perms(struct vm_struct *area)
272462306a36Sopenharmony_ci{
272562306a36Sopenharmony_ci	unsigned long start = ULONG_MAX, end = 0;
272662306a36Sopenharmony_ci	unsigned int page_order = vm_area_page_order(area);
272762306a36Sopenharmony_ci	int flush_dmap = 0;
272862306a36Sopenharmony_ci	int i;
272962306a36Sopenharmony_ci
273062306a36Sopenharmony_ci	/*
273162306a36Sopenharmony_ci	 * Find the start and end range of the direct mappings to make sure that
273262306a36Sopenharmony_ci	 * the vm_unmap_aliases() flush includes the direct map.
273362306a36Sopenharmony_ci	 */
273462306a36Sopenharmony_ci	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
273562306a36Sopenharmony_ci		unsigned long addr = (unsigned long)page_address(area->pages[i]);
273662306a36Sopenharmony_ci
273762306a36Sopenharmony_ci		if (addr) {
273862306a36Sopenharmony_ci			unsigned long page_size;
273962306a36Sopenharmony_ci
274062306a36Sopenharmony_ci			page_size = PAGE_SIZE << page_order;
274162306a36Sopenharmony_ci			start = min(addr, start);
274262306a36Sopenharmony_ci			end = max(addr + page_size, end);
274362306a36Sopenharmony_ci			flush_dmap = 1;
274462306a36Sopenharmony_ci		}
274562306a36Sopenharmony_ci	}
274662306a36Sopenharmony_ci
274762306a36Sopenharmony_ci	/*
274862306a36Sopenharmony_ci	 * Set direct map to something invalid so that it won't be cached if
274962306a36Sopenharmony_ci	 * there are any accesses after the TLB flush, then flush the TLB and
275062306a36Sopenharmony_ci	 * reset the direct map permissions to the default.
275162306a36Sopenharmony_ci	 */
275262306a36Sopenharmony_ci	set_area_direct_map(area, set_direct_map_invalid_noflush);
275362306a36Sopenharmony_ci	_vm_unmap_aliases(start, end, flush_dmap);
275462306a36Sopenharmony_ci	set_area_direct_map(area, set_direct_map_default_noflush);
275562306a36Sopenharmony_ci}
275662306a36Sopenharmony_ci
275762306a36Sopenharmony_cistatic void delayed_vfree_work(struct work_struct *w)
275862306a36Sopenharmony_ci{
275962306a36Sopenharmony_ci	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
276062306a36Sopenharmony_ci	struct llist_node *t, *llnode;
276162306a36Sopenharmony_ci
276262306a36Sopenharmony_ci	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
276362306a36Sopenharmony_ci		vfree(llnode);
276462306a36Sopenharmony_ci}
276562306a36Sopenharmony_ci
276662306a36Sopenharmony_ci/**
276762306a36Sopenharmony_ci * vfree_atomic - release memory allocated by vmalloc()
276862306a36Sopenharmony_ci * @addr:	  memory base address
276962306a36Sopenharmony_ci *
277062306a36Sopenharmony_ci * This one is just like vfree() but can be called in any atomic context
277162306a36Sopenharmony_ci * except NMIs.
277262306a36Sopenharmony_ci */
277362306a36Sopenharmony_civoid vfree_atomic(const void *addr)
277462306a36Sopenharmony_ci{
277562306a36Sopenharmony_ci	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
277662306a36Sopenharmony_ci
277762306a36Sopenharmony_ci	BUG_ON(in_nmi());
277862306a36Sopenharmony_ci	kmemleak_free(addr);
277962306a36Sopenharmony_ci
278062306a36Sopenharmony_ci	/*
278162306a36Sopenharmony_ci	 * Use raw_cpu_ptr() because this can be called from preemptible
278262306a36Sopenharmony_ci	 * context. Preemption is absolutely fine here, because the llist_add()
278362306a36Sopenharmony_ci	 * implementation is lockless, so it works even if we are adding to
278462306a36Sopenharmony_ci	 * another cpu's list. schedule_work() should be fine with this too.
278562306a36Sopenharmony_ci	 */
278662306a36Sopenharmony_ci	if (addr && llist_add((struct llist_node *)addr, &p->list))
278762306a36Sopenharmony_ci		schedule_work(&p->wq);
278862306a36Sopenharmony_ci}
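
/*
 * Illustrative sketch (hypothetical helper): a path that must not sleep,
 * e.g. softirq context, can hand the buffer to vfree_atomic() and let the
 * deferred work perform the actual vfree().
 *
 *	static void example_free_from_softirq(void *vmalloc_buf)
 *	{
 *		vfree_atomic(vmalloc_buf);
 *	}
 */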
278962306a36Sopenharmony_ci
279062306a36Sopenharmony_ci/**
279162306a36Sopenharmony_ci * vfree - Release memory allocated by vmalloc()
279262306a36Sopenharmony_ci * @addr:  Memory base address
279362306a36Sopenharmony_ci *
279462306a36Sopenharmony_ci * Free the virtually contiguous memory area starting at @addr, as obtained
279562306a36Sopenharmony_ci * from one of the vmalloc() family of APIs.  This will usually also free the
279662306a36Sopenharmony_ci * physical memory underlying the virtual allocation, but that memory is
279762306a36Sopenharmony_ci * reference counted, so it will not be freed until the last user goes away.
279862306a36Sopenharmony_ci *
279962306a36Sopenharmony_ci * If @addr is NULL, no operation is performed.
280062306a36Sopenharmony_ci *
280162306a36Sopenharmony_ci * Context:
280262306a36Sopenharmony_ci * May sleep if called *not* from interrupt context.
280362306a36Sopenharmony_ci * Must not be called in NMI context (strictly speaking, it could be
280462306a36Sopenharmony_ci * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
280562306a36Sopenharmony_ci * conventions for vfree() arch-dependent would be a really bad idea).
280662306a36Sopenharmony_ci */
280762306a36Sopenharmony_civoid vfree(const void *addr)
280862306a36Sopenharmony_ci{
280962306a36Sopenharmony_ci	struct vm_struct *vm;
281062306a36Sopenharmony_ci	int i;
281162306a36Sopenharmony_ci
281262306a36Sopenharmony_ci	if (unlikely(in_interrupt())) {
281362306a36Sopenharmony_ci		vfree_atomic(addr);
281462306a36Sopenharmony_ci		return;
281562306a36Sopenharmony_ci	}
281662306a36Sopenharmony_ci
281762306a36Sopenharmony_ci	BUG_ON(in_nmi());
281862306a36Sopenharmony_ci	kmemleak_free(addr);
281962306a36Sopenharmony_ci	might_sleep();
282062306a36Sopenharmony_ci
282162306a36Sopenharmony_ci	if (!addr)
282262306a36Sopenharmony_ci		return;
282362306a36Sopenharmony_ci
282462306a36Sopenharmony_ci	vm = remove_vm_area(addr);
282562306a36Sopenharmony_ci	if (unlikely(!vm)) {
282662306a36Sopenharmony_ci		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
282762306a36Sopenharmony_ci				addr);
282862306a36Sopenharmony_ci		return;
282962306a36Sopenharmony_ci	}
283062306a36Sopenharmony_ci
283162306a36Sopenharmony_ci	if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
283262306a36Sopenharmony_ci		vm_reset_perms(vm);
283362306a36Sopenharmony_ci	for (i = 0; i < vm->nr_pages; i++) {
283462306a36Sopenharmony_ci		struct page *page = vm->pages[i];
283562306a36Sopenharmony_ci
283662306a36Sopenharmony_ci		BUG_ON(!page);
283762306a36Sopenharmony_ci		mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
283862306a36Sopenharmony_ci		/*
283962306a36Sopenharmony_ci		 * High-order allocs for huge vmallocs are split, so they
284062306a36Sopenharmony_ci		 * can be freed as an array of order-0 allocations.
284162306a36Sopenharmony_ci		 */
284262306a36Sopenharmony_ci		__free_page(page);
284362306a36Sopenharmony_ci		cond_resched();
284462306a36Sopenharmony_ci	}
284562306a36Sopenharmony_ci	atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
284662306a36Sopenharmony_ci	kvfree(vm->pages);
284762306a36Sopenharmony_ci	kfree(vm);
284862306a36Sopenharmony_ci}
284962306a36Sopenharmony_ciEXPORT_SYMBOL(vfree);
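
/*
 * Illustrative sketch (hypothetical helper): the usual pairing with one of
 * the vmalloc() family allocators; passing NULL to vfree() is a no-op.
 *
 *	static int example_with_scratch(unsigned long size)
 *	{
 *		void *scratch = vmalloc(size);
 *
 *		if (!scratch)
 *			return -ENOMEM;
 *
 *		memset(scratch, 0, size);
 *		vfree(scratch);
 *		return 0;
 *	}
 */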
285062306a36Sopenharmony_ci
285162306a36Sopenharmony_ci/**
285262306a36Sopenharmony_ci * vunmap - release virtual mapping obtained by vmap()
285362306a36Sopenharmony_ci * @addr:   memory base address
285462306a36Sopenharmony_ci *
285562306a36Sopenharmony_ci * Free the virtually contiguous memory area starting at @addr,
285662306a36Sopenharmony_ci * which was created from the page array passed to vmap().
285762306a36Sopenharmony_ci *
285862306a36Sopenharmony_ci * Must not be called in interrupt context.
285962306a36Sopenharmony_ci */
286062306a36Sopenharmony_civoid vunmap(const void *addr)
286162306a36Sopenharmony_ci{
286262306a36Sopenharmony_ci	struct vm_struct *vm;
286362306a36Sopenharmony_ci
286462306a36Sopenharmony_ci	BUG_ON(in_interrupt());
286562306a36Sopenharmony_ci	might_sleep();
286662306a36Sopenharmony_ci
286762306a36Sopenharmony_ci	if (!addr)
286862306a36Sopenharmony_ci		return;
286962306a36Sopenharmony_ci	vm = remove_vm_area(addr);
287062306a36Sopenharmony_ci	if (unlikely(!vm)) {
287162306a36Sopenharmony_ci		WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
287262306a36Sopenharmony_ci				addr);
287362306a36Sopenharmony_ci		return;
287462306a36Sopenharmony_ci	}
287562306a36Sopenharmony_ci	kfree(vm);
287662306a36Sopenharmony_ci}
287762306a36Sopenharmony_ciEXPORT_SYMBOL(vunmap);
287862306a36Sopenharmony_ci
287962306a36Sopenharmony_ci/**
288062306a36Sopenharmony_ci * vmap - map an array of pages into virtually contiguous space
288162306a36Sopenharmony_ci * @pages: array of page pointers
288262306a36Sopenharmony_ci * @count: number of pages to map
288362306a36Sopenharmony_ci * @flags: vm_area->flags
288462306a36Sopenharmony_ci * @prot: page protection for the mapping
288562306a36Sopenharmony_ci *
288662306a36Sopenharmony_ci * Maps @count pages from @pages into contiguous kernel virtual space.
288762306a36Sopenharmony_ci * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
288862306a36Sopenharmony_ci * (which must be kmalloc or vmalloc memory) and one reference per page in it
288962306a36Sopenharmony_ci * are transferred from the caller to vmap(), and will be freed / dropped when
289062306a36Sopenharmony_ci * vfree() is called on the return value.
289162306a36Sopenharmony_ci *
289262306a36Sopenharmony_ci * Return: the address of the area or %NULL on failure
289362306a36Sopenharmony_ci */
289462306a36Sopenharmony_civoid *vmap(struct page **pages, unsigned int count,
289562306a36Sopenharmony_ci	   unsigned long flags, pgprot_t prot)
289662306a36Sopenharmony_ci{
289762306a36Sopenharmony_ci	struct vm_struct *area;
289862306a36Sopenharmony_ci	unsigned long addr;
289962306a36Sopenharmony_ci	unsigned long size;		/* In bytes */
290062306a36Sopenharmony_ci
290162306a36Sopenharmony_ci	might_sleep();
290262306a36Sopenharmony_ci
290362306a36Sopenharmony_ci	if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
290462306a36Sopenharmony_ci		return NULL;
290562306a36Sopenharmony_ci
290662306a36Sopenharmony_ci	/*
290762306a36Sopenharmony_ci	 * Your top guard is someone else's bottom guard. Not having a top
290862306a36Sopenharmony_ci	 * guard compromises someone else's mappings too.
290962306a36Sopenharmony_ci	 */
291062306a36Sopenharmony_ci	if (WARN_ON_ONCE(flags & VM_NO_GUARD))
291162306a36Sopenharmony_ci		flags &= ~VM_NO_GUARD;
291262306a36Sopenharmony_ci
291362306a36Sopenharmony_ci	if (count > totalram_pages())
291462306a36Sopenharmony_ci		return NULL;
291562306a36Sopenharmony_ci
291662306a36Sopenharmony_ci	size = (unsigned long)count << PAGE_SHIFT;
291762306a36Sopenharmony_ci	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
291862306a36Sopenharmony_ci	if (!area)
291962306a36Sopenharmony_ci		return NULL;
292062306a36Sopenharmony_ci
292162306a36Sopenharmony_ci	addr = (unsigned long)area->addr;
292262306a36Sopenharmony_ci	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
292362306a36Sopenharmony_ci				pages, PAGE_SHIFT) < 0) {
292462306a36Sopenharmony_ci		vunmap(area->addr);
292562306a36Sopenharmony_ci		return NULL;
292662306a36Sopenharmony_ci	}
292762306a36Sopenharmony_ci
292862306a36Sopenharmony_ci	if (flags & VM_MAP_PUT_PAGES) {
292962306a36Sopenharmony_ci		area->pages = pages;
293062306a36Sopenharmony_ci		area->nr_pages = count;
293162306a36Sopenharmony_ci	}
293262306a36Sopenharmony_ci	return area->addr;
293362306a36Sopenharmony_ci}
293462306a36Sopenharmony_ciEXPORT_SYMBOL(vmap);
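
/*
 * Illustrative sketch (hypothetical helper): building a contiguous kernel
 * view of individually allocated pages.  Without VM_MAP_PUT_PAGES the
 * caller keeps ownership of the pages and of the array, so it tears the
 * mapping down with vunmap() and frees the pages itself.
 *
 *	static void *example_map_two_pages(struct page *pages[2])
 *	{
 *		return vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *	}
 */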
293562306a36Sopenharmony_ci
293662306a36Sopenharmony_ci#ifdef CONFIG_VMAP_PFN
293762306a36Sopenharmony_cistruct vmap_pfn_data {
293862306a36Sopenharmony_ci	unsigned long	*pfns;
293962306a36Sopenharmony_ci	pgprot_t	prot;
294062306a36Sopenharmony_ci	unsigned int	idx;
294162306a36Sopenharmony_ci};
294262306a36Sopenharmony_ci
294362306a36Sopenharmony_cistatic int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
294462306a36Sopenharmony_ci{
294562306a36Sopenharmony_ci	struct vmap_pfn_data *data = private;
294662306a36Sopenharmony_ci	unsigned long pfn = data->pfns[data->idx];
294762306a36Sopenharmony_ci	pte_t ptent;
294862306a36Sopenharmony_ci
294962306a36Sopenharmony_ci	if (WARN_ON_ONCE(pfn_valid(pfn)))
295062306a36Sopenharmony_ci		return -EINVAL;
295162306a36Sopenharmony_ci
295262306a36Sopenharmony_ci	ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
295362306a36Sopenharmony_ci	set_pte_at(&init_mm, addr, pte, ptent);
295462306a36Sopenharmony_ci
295562306a36Sopenharmony_ci	data->idx++;
295662306a36Sopenharmony_ci	return 0;
295762306a36Sopenharmony_ci}
295862306a36Sopenharmony_ci
295962306a36Sopenharmony_ci/**
296062306a36Sopenharmony_ci * vmap_pfn - map an array of PFNs into virtually contiguous space
296162306a36Sopenharmony_ci * @pfns: array of PFNs
296262306a36Sopenharmony_ci * @count: number of pages to map
296362306a36Sopenharmony_ci * @prot: page protection for the mapping
296462306a36Sopenharmony_ci *
296562306a36Sopenharmony_ci * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
296662306a36Sopenharmony_ci * the start address of the mapping.
296762306a36Sopenharmony_ci */
296862306a36Sopenharmony_civoid *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
296962306a36Sopenharmony_ci{
297062306a36Sopenharmony_ci	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
297162306a36Sopenharmony_ci	struct vm_struct *area;
297262306a36Sopenharmony_ci
297362306a36Sopenharmony_ci	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
297462306a36Sopenharmony_ci			__builtin_return_address(0));
297562306a36Sopenharmony_ci	if (!area)
297662306a36Sopenharmony_ci		return NULL;
297762306a36Sopenharmony_ci	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
297862306a36Sopenharmony_ci			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
297962306a36Sopenharmony_ci		free_vm_area(area);
298062306a36Sopenharmony_ci		return NULL;
298162306a36Sopenharmony_ci	}
298262306a36Sopenharmony_ci
298362306a36Sopenharmony_ci	flush_cache_vmap((unsigned long)area->addr,
298462306a36Sopenharmony_ci			 (unsigned long)area->addr + count * PAGE_SIZE);
298562306a36Sopenharmony_ci
298662306a36Sopenharmony_ci	return area->addr;
298762306a36Sopenharmony_ci}
298862306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(vmap_pfn);
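
/*
 * Illustrative sketch (hypothetical driver helper; the PFN range is made
 * up): vmap_pfn() is meant for PFNs without struct pages (note the
 * pfn_valid() check above), e.g. device memory behind a BAR.  The mapping
 * can later be torn down with vunmap().
 *
 *	static void *example_map_device_pfns(unsigned long first_pfn, unsigned int count)
 *	{
 *		unsigned long *pfns;
 *		unsigned int i;
 *		void *vaddr;
 *
 *		pfns = kmalloc_array(count, sizeof(*pfns), GFP_KERNEL);
 *		if (!pfns)
 *			return NULL;
 *
 *		for (i = 0; i < count; i++)
 *			pfns[i] = first_pfn + i;
 *
 *		vaddr = vmap_pfn(pfns, count, PAGE_KERNEL);
 *		kfree(pfns);
 *		return vaddr;
 *	}
 */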
298962306a36Sopenharmony_ci#endif /* CONFIG_VMAP_PFN */
299062306a36Sopenharmony_ci
299162306a36Sopenharmony_cistatic inline unsigned int
299262306a36Sopenharmony_civm_area_alloc_pages(gfp_t gfp, int nid,
299362306a36Sopenharmony_ci		unsigned int order, unsigned int nr_pages, struct page **pages)
299462306a36Sopenharmony_ci{
299562306a36Sopenharmony_ci	unsigned int nr_allocated = 0;
299662306a36Sopenharmony_ci	gfp_t alloc_gfp = gfp;
299762306a36Sopenharmony_ci	bool nofail = false;
299862306a36Sopenharmony_ci	struct page *page;
299962306a36Sopenharmony_ci	int i;
300062306a36Sopenharmony_ci
300162306a36Sopenharmony_ci	/*
300262306a36Sopenharmony_ci	 * For order-0 pages we make use of the bulk allocator. If the
300362306a36Sopenharmony_ci	 * page array ends up only partly populated (or not at all)
300462306a36Sopenharmony_ci	 * due to failures, fall back to the single page allocator,
300562306a36Sopenharmony_ci	 * which is more permissive.
300662306a36Sopenharmony_ci	 */
300762306a36Sopenharmony_ci	if (!order) {
300862306a36Sopenharmony_ci		/* bulk allocator doesn't support nofail req. officially */
300962306a36Sopenharmony_ci		gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
301062306a36Sopenharmony_ci
301162306a36Sopenharmony_ci		while (nr_allocated < nr_pages) {
301262306a36Sopenharmony_ci			unsigned int nr, nr_pages_request;
301362306a36Sopenharmony_ci
301462306a36Sopenharmony_ci			/*
301562306a36Sopenharmony_ci			 * The maximum allowed request is hard-coded to 100
301662306a36Sopenharmony_ci			 * pages per call, in order to prevent long
301762306a36Sopenharmony_ci			 * preemption-off sections in the bulk allocator,
301862306a36Sopenharmony_ci			 * so the request range is [1:100].
301962306a36Sopenharmony_ci			 */
302062306a36Sopenharmony_ci			nr_pages_request = min(100U, nr_pages - nr_allocated);
302162306a36Sopenharmony_ci
302262306a36Sopenharmony_ci			/* Memory allocation should consider mempolicy: we
302362306a36Sopenharmony_ci			 * must not blindly use the nearest node when nid ==
302462306a36Sopenharmony_ci			 * NUMA_NO_NODE, otherwise memory may end up on only
302562306a36Sopenharmony_ci			 * one node although mempolicy asks for interleaving.
302662306a36Sopenharmony_ci			 */
302762306a36Sopenharmony_ci			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
302862306a36Sopenharmony_ci				nr = alloc_pages_bulk_array_mempolicy(bulk_gfp,
302962306a36Sopenharmony_ci							nr_pages_request,
303062306a36Sopenharmony_ci							pages + nr_allocated);
303162306a36Sopenharmony_ci
303262306a36Sopenharmony_ci			else
303362306a36Sopenharmony_ci				nr = alloc_pages_bulk_array_node(bulk_gfp, nid,
303462306a36Sopenharmony_ci							nr_pages_request,
303562306a36Sopenharmony_ci							pages + nr_allocated);
303662306a36Sopenharmony_ci
303762306a36Sopenharmony_ci			nr_allocated += nr;
303862306a36Sopenharmony_ci			cond_resched();
303962306a36Sopenharmony_ci
304062306a36Sopenharmony_ci			/*
304162306a36Sopenharmony_ci			 * If no pages or only some of them were obtained,
304262306a36Sopenharmony_ci			 * fall back to the single page allocator.
304362306a36Sopenharmony_ci			 */
304462306a36Sopenharmony_ci			if (nr != nr_pages_request)
304562306a36Sopenharmony_ci				break;
304662306a36Sopenharmony_ci		}
304762306a36Sopenharmony_ci	} else if (gfp & __GFP_NOFAIL) {
304862306a36Sopenharmony_ci		/*
304962306a36Sopenharmony_ci		 * Higher order nofail allocations are really expensive and
305062306a36Sopenharmony_ci		 * potentially dangerous (premature OOM, disruptive reclaim,
305162306a36Sopenharmony_ci		 * compaction etc.), so fall back to nofail order-0 below.
305262306a36Sopenharmony_ci		 */
305362306a36Sopenharmony_ci		alloc_gfp &= ~__GFP_NOFAIL;
305462306a36Sopenharmony_ci		nofail = true;
305562306a36Sopenharmony_ci	}
305662306a36Sopenharmony_ci
305762306a36Sopenharmony_ci	/* High-order pages or fallback path if "bulk" fails. */
305862306a36Sopenharmony_ci	while (nr_allocated < nr_pages) {
305962306a36Sopenharmony_ci		if (fatal_signal_pending(current))
306062306a36Sopenharmony_ci			break;
306162306a36Sopenharmony_ci
306262306a36Sopenharmony_ci		if (nid == NUMA_NO_NODE)
306362306a36Sopenharmony_ci			page = alloc_pages(alloc_gfp, order);
306462306a36Sopenharmony_ci		else
306562306a36Sopenharmony_ci			page = alloc_pages_node(nid, alloc_gfp, order);
306662306a36Sopenharmony_ci		if (unlikely(!page)) {
306762306a36Sopenharmony_ci			if (!nofail)
306862306a36Sopenharmony_ci				break;
306962306a36Sopenharmony_ci
307062306a36Sopenharmony_ci			/* fall back to zero-order allocations */
307162306a36Sopenharmony_ci			alloc_gfp |= __GFP_NOFAIL;
307262306a36Sopenharmony_ci			order = 0;
307362306a36Sopenharmony_ci			continue;
307462306a36Sopenharmony_ci		}
307562306a36Sopenharmony_ci
307662306a36Sopenharmony_ci		/*
307762306a36Sopenharmony_ci		 * Higher order allocations must be able to be treated as
307862306a36Sopenharmony_ci		 * independent small pages by callers (as they can with
307962306a36Sopenharmony_ci		 * small-page vmallocs). Some drivers do their own refcounting
308062306a36Sopenharmony_ci		 * on vmalloc_to_page() pages, some use page->mapping,
308162306a36Sopenharmony_ci		 * page->lru, etc.
308262306a36Sopenharmony_ci		 */
308362306a36Sopenharmony_ci		if (order)
308462306a36Sopenharmony_ci			split_page(page, order);
308562306a36Sopenharmony_ci
308662306a36Sopenharmony_ci		/*
308762306a36Sopenharmony_ci		 * Careful, we allocate and map page-order pages, but
308862306a36Sopenharmony_ci		 * tracking is done per PAGE_SIZE page so as to keep the
308962306a36Sopenharmony_ci		 * vm_struct APIs independent of the physical/mapped size.
309062306a36Sopenharmony_ci		 */
309162306a36Sopenharmony_ci		for (i = 0; i < (1U << order); i++)
309262306a36Sopenharmony_ci			pages[nr_allocated + i] = page + i;
309362306a36Sopenharmony_ci
309462306a36Sopenharmony_ci		cond_resched();
309562306a36Sopenharmony_ci		nr_allocated += 1U << order;
309662306a36Sopenharmony_ci	}
309762306a36Sopenharmony_ci
309862306a36Sopenharmony_ci	return nr_allocated;
309962306a36Sopenharmony_ci}
310062306a36Sopenharmony_ci
310162306a36Sopenharmony_cistatic void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
310262306a36Sopenharmony_ci				 pgprot_t prot, unsigned int page_shift,
310362306a36Sopenharmony_ci				 int node)
310462306a36Sopenharmony_ci{
310562306a36Sopenharmony_ci	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
310662306a36Sopenharmony_ci	bool nofail = gfp_mask & __GFP_NOFAIL;
310762306a36Sopenharmony_ci	unsigned long addr = (unsigned long)area->addr;
310862306a36Sopenharmony_ci	unsigned long size = get_vm_area_size(area);
310962306a36Sopenharmony_ci	unsigned long array_size;
311062306a36Sopenharmony_ci	unsigned int nr_small_pages = size >> PAGE_SHIFT;
311162306a36Sopenharmony_ci	unsigned int page_order;
311262306a36Sopenharmony_ci	unsigned int flags;
311362306a36Sopenharmony_ci	int ret;
311462306a36Sopenharmony_ci
311562306a36Sopenharmony_ci	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
311662306a36Sopenharmony_ci
311762306a36Sopenharmony_ci	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
311862306a36Sopenharmony_ci		gfp_mask |= __GFP_HIGHMEM;
311962306a36Sopenharmony_ci
312062306a36Sopenharmony_ci	/* Please note that the recursion is strictly bounded. */
312162306a36Sopenharmony_ci	if (array_size > PAGE_SIZE) {
312262306a36Sopenharmony_ci		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
312362306a36Sopenharmony_ci					area->caller);
312462306a36Sopenharmony_ci	} else {
312562306a36Sopenharmony_ci		area->pages = kmalloc_node(array_size, nested_gfp, node);
312662306a36Sopenharmony_ci	}
312762306a36Sopenharmony_ci
312862306a36Sopenharmony_ci	if (!area->pages) {
312962306a36Sopenharmony_ci		warn_alloc(gfp_mask, NULL,
313062306a36Sopenharmony_ci			"vmalloc error: size %lu, failed to allocate page array of size %lu",
313162306a36Sopenharmony_ci			nr_small_pages * PAGE_SIZE, array_size);
313262306a36Sopenharmony_ci		free_vm_area(area);
313362306a36Sopenharmony_ci		return NULL;
313462306a36Sopenharmony_ci	}
313562306a36Sopenharmony_ci
313662306a36Sopenharmony_ci	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
313762306a36Sopenharmony_ci	page_order = vm_area_page_order(area);
313862306a36Sopenharmony_ci
313962306a36Sopenharmony_ci	area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
314062306a36Sopenharmony_ci		node, page_order, nr_small_pages, area->pages);
314162306a36Sopenharmony_ci
314262306a36Sopenharmony_ci	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
314362306a36Sopenharmony_ci	if (gfp_mask & __GFP_ACCOUNT) {
314462306a36Sopenharmony_ci		int i;
314562306a36Sopenharmony_ci
314662306a36Sopenharmony_ci		for (i = 0; i < area->nr_pages; i++)
314762306a36Sopenharmony_ci			mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
314862306a36Sopenharmony_ci	}
314962306a36Sopenharmony_ci
315062306a36Sopenharmony_ci	/*
315162306a36Sopenharmony_ci	 * If not enough pages were obtained to accomplish an
315262306a36Sopenharmony_ci	 * allocation request, free them via vfree() if any.
315362306a36Sopenharmony_ci	 */
315462306a36Sopenharmony_ci	if (area->nr_pages != nr_small_pages) {
315562306a36Sopenharmony_ci		/*
315662306a36Sopenharmony_ci		 * vm_area_alloc_pages() can fail due to insufficient memory,
315762306a36Sopenharmony_ci		 * but also due to:
315862306a36Sopenharmony_ci		 *
315962306a36Sopenharmony_ci		 * - a pending fatal signal
316062306a36Sopenharmony_ci		 * - insufficient huge page-order pages
316162306a36Sopenharmony_ci		 *
316262306a36Sopenharmony_ci		 * Since we always retry allocations at order-0 in the huge page
316362306a36Sopenharmony_ci		 * case a warning for either is spurious.
316462306a36Sopenharmony_ci		 */
316562306a36Sopenharmony_ci		if (!fatal_signal_pending(current) && page_order == 0)
316662306a36Sopenharmony_ci			warn_alloc(gfp_mask, NULL,
316762306a36Sopenharmony_ci				"vmalloc error: size %lu, failed to allocate pages",
316862306a36Sopenharmony_ci				area->nr_pages * PAGE_SIZE);
316962306a36Sopenharmony_ci		goto fail;
317062306a36Sopenharmony_ci	}
317162306a36Sopenharmony_ci
317262306a36Sopenharmony_ci	/*
317362306a36Sopenharmony_ci	 * Page table allocations ignore the external gfp mask; enforce it
317462306a36Sopenharmony_ci	 * via the scope API.
317562306a36Sopenharmony_ci	 */
317662306a36Sopenharmony_ci	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
317762306a36Sopenharmony_ci		flags = memalloc_nofs_save();
317862306a36Sopenharmony_ci	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
317962306a36Sopenharmony_ci		flags = memalloc_noio_save();
318062306a36Sopenharmony_ci
318162306a36Sopenharmony_ci	do {
318262306a36Sopenharmony_ci		ret = vmap_pages_range(addr, addr + size, prot, area->pages,
318362306a36Sopenharmony_ci			page_shift);
318462306a36Sopenharmony_ci		if (nofail && (ret < 0))
318562306a36Sopenharmony_ci			schedule_timeout_uninterruptible(1);
318662306a36Sopenharmony_ci	} while (nofail && (ret < 0));
318762306a36Sopenharmony_ci
318862306a36Sopenharmony_ci	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
318962306a36Sopenharmony_ci		memalloc_nofs_restore(flags);
319062306a36Sopenharmony_ci	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
319162306a36Sopenharmony_ci		memalloc_noio_restore(flags);
319262306a36Sopenharmony_ci
319362306a36Sopenharmony_ci	if (ret < 0) {
319462306a36Sopenharmony_ci		warn_alloc(gfp_mask, NULL,
319562306a36Sopenharmony_ci			"vmalloc error: size %lu, failed to map pages",
319662306a36Sopenharmony_ci			area->nr_pages * PAGE_SIZE);
319762306a36Sopenharmony_ci		goto fail;
319862306a36Sopenharmony_ci	}
319962306a36Sopenharmony_ci
320062306a36Sopenharmony_ci	return area->addr;
320162306a36Sopenharmony_ci
320262306a36Sopenharmony_cifail:
320362306a36Sopenharmony_ci	vfree(area->addr);
320462306a36Sopenharmony_ci	return NULL;
320562306a36Sopenharmony_ci}
320662306a36Sopenharmony_ci
320762306a36Sopenharmony_ci/**
320862306a36Sopenharmony_ci * __vmalloc_node_range - allocate virtually contiguous memory
320962306a36Sopenharmony_ci * @size:		  allocation size
321062306a36Sopenharmony_ci * @align:		  desired alignment
321162306a36Sopenharmony_ci * @start:		  vm area range start
321262306a36Sopenharmony_ci * @end:		  vm area range end
321362306a36Sopenharmony_ci * @gfp_mask:		  flags for the page level allocator
321462306a36Sopenharmony_ci * @prot:		  protection mask for the allocated pages
321562306a36Sopenharmony_ci * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
321662306a36Sopenharmony_ci * @node:		  node to use for allocation or NUMA_NO_NODE
321762306a36Sopenharmony_ci * @caller:		  caller's return address
321862306a36Sopenharmony_ci *
321962306a36Sopenharmony_ci * Allocate enough pages to cover @size from the page level
322062306a36Sopenharmony_ci * allocator with @gfp_mask flags. Please note that the full set of gfp
322162306a36Sopenharmony_ci * flags is not supported; only GFP_KERNEL, GFP_NOFS and GFP_NOIO are
322262306a36Sopenharmony_ci * supported.
322362306a36Sopenharmony_ci * Zone modifiers are not supported. From the reclaim modifiers
322462306a36Sopenharmony_ci * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
322562306a36Sopenharmony_ci * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
322662306a36Sopenharmony_ci * __GFP_RETRY_MAYFAIL are not supported).
322762306a36Sopenharmony_ci *
322862306a36Sopenharmony_ci * __GFP_NOWARN can be used to suppress failure messages.
322962306a36Sopenharmony_ci *
323062306a36Sopenharmony_ci * Map them into contiguous kernel virtual space, using a pagetable
323162306a36Sopenharmony_ci * protection of @prot.
323262306a36Sopenharmony_ci *
323362306a36Sopenharmony_ci * Return: the address of the area or %NULL on failure
323462306a36Sopenharmony_ci */
323562306a36Sopenharmony_civoid *__vmalloc_node_range(unsigned long size, unsigned long align,
323662306a36Sopenharmony_ci			unsigned long start, unsigned long end, gfp_t gfp_mask,
323762306a36Sopenharmony_ci			pgprot_t prot, unsigned long vm_flags, int node,
323862306a36Sopenharmony_ci			const void *caller)
323962306a36Sopenharmony_ci{
324062306a36Sopenharmony_ci	struct vm_struct *area;
324162306a36Sopenharmony_ci	void *ret;
324262306a36Sopenharmony_ci	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
324362306a36Sopenharmony_ci	unsigned long real_size = size;
324462306a36Sopenharmony_ci	unsigned long real_align = align;
324562306a36Sopenharmony_ci	unsigned int shift = PAGE_SHIFT;
324662306a36Sopenharmony_ci
324762306a36Sopenharmony_ci	if (WARN_ON_ONCE(!size))
324862306a36Sopenharmony_ci		return NULL;
324962306a36Sopenharmony_ci
325062306a36Sopenharmony_ci	if ((size >> PAGE_SHIFT) > totalram_pages()) {
325162306a36Sopenharmony_ci		warn_alloc(gfp_mask, NULL,
325262306a36Sopenharmony_ci			"vmalloc error: size %lu, exceeds total pages",
325362306a36Sopenharmony_ci			real_size);
325462306a36Sopenharmony_ci		return NULL;
325562306a36Sopenharmony_ci	}
325662306a36Sopenharmony_ci
325762306a36Sopenharmony_ci	if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
325862306a36Sopenharmony_ci		unsigned long size_per_node;
325962306a36Sopenharmony_ci
326062306a36Sopenharmony_ci		/*
326162306a36Sopenharmony_ci		 * Try huge pages. Only try for PAGE_KERNEL allocations,
326262306a36Sopenharmony_ci		 * others like modules don't yet expect huge pages in
326362306a36Sopenharmony_ci		 * their allocations due to apply_to_page_range not
326462306a36Sopenharmony_ci		 * supporting them.
326562306a36Sopenharmony_ci		 */
326662306a36Sopenharmony_ci
326762306a36Sopenharmony_ci		size_per_node = size;
326862306a36Sopenharmony_ci		if (node == NUMA_NO_NODE)
326962306a36Sopenharmony_ci			size_per_node /= num_online_nodes();
327062306a36Sopenharmony_ci		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
327162306a36Sopenharmony_ci			shift = PMD_SHIFT;
327262306a36Sopenharmony_ci		else
327362306a36Sopenharmony_ci			shift = arch_vmap_pte_supported_shift(size_per_node);
327462306a36Sopenharmony_ci
327562306a36Sopenharmony_ci		align = max(real_align, 1UL << shift);
327662306a36Sopenharmony_ci		size = ALIGN(real_size, 1UL << shift);
327762306a36Sopenharmony_ci	}
327862306a36Sopenharmony_ci
327962306a36Sopenharmony_ciagain:
328062306a36Sopenharmony_ci	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
328162306a36Sopenharmony_ci				  VM_UNINITIALIZED | vm_flags, start, end, node,
328262306a36Sopenharmony_ci				  gfp_mask, caller);
328362306a36Sopenharmony_ci	if (!area) {
328462306a36Sopenharmony_ci		bool nofail = gfp_mask & __GFP_NOFAIL;
328562306a36Sopenharmony_ci		warn_alloc(gfp_mask, NULL,
328662306a36Sopenharmony_ci			"vmalloc error: size %lu, vm_struct allocation failed%s",
328762306a36Sopenharmony_ci			real_size, (nofail) ? ". Retrying." : "");
328862306a36Sopenharmony_ci		if (nofail) {
328962306a36Sopenharmony_ci			schedule_timeout_uninterruptible(1);
329062306a36Sopenharmony_ci			goto again;
329162306a36Sopenharmony_ci		}
329262306a36Sopenharmony_ci		goto fail;
329362306a36Sopenharmony_ci	}
329462306a36Sopenharmony_ci
329562306a36Sopenharmony_ci	/*
329662306a36Sopenharmony_ci	 * Prepare arguments for __vmalloc_area_node() and
329762306a36Sopenharmony_ci	 * kasan_unpoison_vmalloc().
329862306a36Sopenharmony_ci	 */
329962306a36Sopenharmony_ci	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
330062306a36Sopenharmony_ci		if (kasan_hw_tags_enabled()) {
330162306a36Sopenharmony_ci			/*
330262306a36Sopenharmony_ci			 * Modify protection bits to allow tagging.
330362306a36Sopenharmony_ci			 * This must be done before mapping.
330462306a36Sopenharmony_ci			 */
330562306a36Sopenharmony_ci			prot = arch_vmap_pgprot_tagged(prot);
330662306a36Sopenharmony_ci
330762306a36Sopenharmony_ci			/*
330862306a36Sopenharmony_ci			 * Skip page_alloc poisoning and zeroing for physical
330962306a36Sopenharmony_ci			 * pages backing VM_ALLOC mapping. Memory is instead
331062306a36Sopenharmony_ci			 * poisoned and zeroed by kasan_unpoison_vmalloc().
331162306a36Sopenharmony_ci			 */
331262306a36Sopenharmony_ci			gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
331362306a36Sopenharmony_ci		}
331462306a36Sopenharmony_ci
331562306a36Sopenharmony_ci		/* Take note that the mapping is PAGE_KERNEL. */
331662306a36Sopenharmony_ci		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
331762306a36Sopenharmony_ci	}
331862306a36Sopenharmony_ci
331962306a36Sopenharmony_ci	/* Allocate physical pages and map them into vmalloc space. */
332062306a36Sopenharmony_ci	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
332162306a36Sopenharmony_ci	if (!ret)
332262306a36Sopenharmony_ci		goto fail;
332362306a36Sopenharmony_ci
332462306a36Sopenharmony_ci	/*
332562306a36Sopenharmony_ci	 * Mark the pages as accessible, now that they are mapped.
332662306a36Sopenharmony_ci	 * The condition for setting KASAN_VMALLOC_INIT should complement the
332762306a36Sopenharmony_ci	 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
332862306a36Sopenharmony_ci	 * to make sure that memory is initialized under the same conditions.
332962306a36Sopenharmony_ci	 * Tag-based KASAN modes only assign tags to normal non-executable
333062306a36Sopenharmony_ci	 * allocations, see __kasan_unpoison_vmalloc().
333162306a36Sopenharmony_ci	 */
333262306a36Sopenharmony_ci	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
333362306a36Sopenharmony_ci	if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
333462306a36Sopenharmony_ci	    (gfp_mask & __GFP_SKIP_ZERO))
333562306a36Sopenharmony_ci		kasan_flags |= KASAN_VMALLOC_INIT;
333662306a36Sopenharmony_ci	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
333762306a36Sopenharmony_ci	area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
333862306a36Sopenharmony_ci
333962306a36Sopenharmony_ci	/*
334062306a36Sopenharmony_ci	 * In this function, the newly allocated vm_struct carries the
334162306a36Sopenharmony_ci	 * VM_UNINITIALIZED flag, meaning it is not yet fully initialized.
334262306a36Sopenharmony_ci	 * It is fully initialized now, so clear the flag here.
334362306a36Sopenharmony_ci	 */
334462306a36Sopenharmony_ci	clear_vm_uninitialized_flag(area);
334562306a36Sopenharmony_ci
334662306a36Sopenharmony_ci	size = PAGE_ALIGN(size);
334762306a36Sopenharmony_ci	if (!(vm_flags & VM_DEFER_KMEMLEAK))
334862306a36Sopenharmony_ci		kmemleak_vmalloc(area, size, gfp_mask);
334962306a36Sopenharmony_ci
335062306a36Sopenharmony_ci	return area->addr;
335162306a36Sopenharmony_ci
335262306a36Sopenharmony_cifail:
335362306a36Sopenharmony_ci	if (shift > PAGE_SHIFT) {
335462306a36Sopenharmony_ci		shift = PAGE_SHIFT;
335562306a36Sopenharmony_ci		align = real_align;
335662306a36Sopenharmony_ci		size = real_size;
335762306a36Sopenharmony_ci		goto again;
335862306a36Sopenharmony_ci	}
335962306a36Sopenharmony_ci
336062306a36Sopenharmony_ci	return NULL;
336162306a36Sopenharmony_ci}
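
/*
 * Illustrative sketch (hypothetical caller): an allocation that must not
 * fail can pass __GFP_NOFAIL, in which case the nofail paths above keep
 * retrying instead of returning NULL.
 *
 *	void *p = __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 *				       GFP_KERNEL | __GFP_NOFAIL, PAGE_KERNEL,
 *				       0, NUMA_NO_NODE,
 *				       __builtin_return_address(0));
 */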
336262306a36Sopenharmony_ci
336362306a36Sopenharmony_ci/**
336462306a36Sopenharmony_ci * __vmalloc_node - allocate virtually contiguous memory
336562306a36Sopenharmony_ci * @size:	    allocation size
336662306a36Sopenharmony_ci * @align:	    desired alignment
336762306a36Sopenharmony_ci * @gfp_mask:	    flags for the page level allocator
336862306a36Sopenharmony_ci * @node:	    node to use for allocation or NUMA_NO_NODE
336962306a36Sopenharmony_ci * @caller:	    caller's return address
337062306a36Sopenharmony_ci *
337162306a36Sopenharmony_ci * Allocate enough pages to cover @size from the page level allocator with
337262306a36Sopenharmony_ci * @gfp_mask flags.  Map them into contiguous kernel virtual space.
337362306a36Sopenharmony_ci *
337462306a36Sopenharmony_ci * Reclaim modifiers in @gfp_mask - __GFP_NORETRY and __GFP_RETRY_MAYFAIL
337562306a36Sopenharmony_ci * are not supported; __GFP_NOFAIL is (see __vmalloc_node_range()).
337662306a36Sopenharmony_ci *
337762306a36Sopenharmony_ci * Any use of gfp flags outside of GFP_KERNEL should be discussed
337862306a36Sopenharmony_ci * with the mm people first.
337962306a36Sopenharmony_ci *
338062306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL on error
338162306a36Sopenharmony_ci */
338262306a36Sopenharmony_civoid *__vmalloc_node(unsigned long size, unsigned long align,
338362306a36Sopenharmony_ci			    gfp_t gfp_mask, int node, const void *caller)
338462306a36Sopenharmony_ci{
338562306a36Sopenharmony_ci	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
338662306a36Sopenharmony_ci				gfp_mask, PAGE_KERNEL, 0, node, caller);
338762306a36Sopenharmony_ci}
338862306a36Sopenharmony_ci/*
338962306a36Sopenharmony_ci * This is only for performance analysis and stress testing of vmalloc.
339062306a36Sopenharmony_ci * It is required by the vmalloc test module; do not use it for anything
339162306a36Sopenharmony_ci * else.
339262306a36Sopenharmony_ci */
339362306a36Sopenharmony_ci#ifdef CONFIG_TEST_VMALLOC_MODULE
339462306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(__vmalloc_node);
339562306a36Sopenharmony_ci#endif
339662306a36Sopenharmony_ci
339762306a36Sopenharmony_civoid *__vmalloc(unsigned long size, gfp_t gfp_mask)
339862306a36Sopenharmony_ci{
339962306a36Sopenharmony_ci	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
340062306a36Sopenharmony_ci				__builtin_return_address(0));
340162306a36Sopenharmony_ci}
340262306a36Sopenharmony_ciEXPORT_SYMBOL(__vmalloc);
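
/*
 * Illustrative sketch (hypothetical caller): a filesystem path that must
 * not recurse into the FS can pass a constrained mask directly.
 *
 *	buf = __vmalloc(len, GFP_NOFS | __GFP_ZERO);
 */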
340362306a36Sopenharmony_ci
340462306a36Sopenharmony_ci/**
340562306a36Sopenharmony_ci * vmalloc - allocate virtually contiguous memory
340662306a36Sopenharmony_ci * @size:    allocation size
340762306a36Sopenharmony_ci *
340862306a36Sopenharmony_ci * Allocate enough pages to cover @size from the page level
340962306a36Sopenharmony_ci * allocator and map them into contiguous kernel virtual space.
341062306a36Sopenharmony_ci *
341162306a36Sopenharmony_ci * For tight control over page level allocator and protection flags
341262306a36Sopenharmony_ci * use __vmalloc() instead.
341362306a36Sopenharmony_ci *
341462306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL on error
341562306a36Sopenharmony_ci */
341662306a36Sopenharmony_civoid *vmalloc(unsigned long size)
341762306a36Sopenharmony_ci{
341862306a36Sopenharmony_ci	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
341962306a36Sopenharmony_ci				__builtin_return_address(0));
342062306a36Sopenharmony_ci}
342162306a36Sopenharmony_ciEXPORT_SYMBOL(vmalloc);
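
/*
 * Minimal usage sketch (illustrative only; 'struct foo_entry' and
 * 'nr_entries' are hypothetical): allocate a large, virtually contiguous
 * table and release it with vfree().
 *
 *	struct foo_entry *tbl = vmalloc(array_size(nr_entries, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */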
342262306a36Sopenharmony_ci
342362306a36Sopenharmony_ci/**
342462306a36Sopenharmony_ci * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
342562306a36Sopenharmony_ci * @size:      allocation size
342662306a36Sopenharmony_ci * @gfp_mask:  flags for the page level allocator
342762306a36Sopenharmony_ci *
342862306a36Sopenharmony_ci * Allocate enough pages to cover @size from the page level
342962306a36Sopenharmony_ci * allocator and map them into contiguous kernel virtual space.
343062306a36Sopenharmony_ci * If @size is greater than or equal to PMD_SIZE, allow using
343162306a36Sopenharmony_ci * huge pages to back the allocation.
343262306a36Sopenharmony_ci *
343362306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL on error
343462306a36Sopenharmony_ci */
343562306a36Sopenharmony_civoid *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
343662306a36Sopenharmony_ci{
343762306a36Sopenharmony_ci	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
343862306a36Sopenharmony_ci				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
343962306a36Sopenharmony_ci				    NUMA_NO_NODE, __builtin_return_address(0));
344062306a36Sopenharmony_ci}
344162306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(vmalloc_huge);
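
/*
 * Illustrative sketch (hypothetical caller): a large hash table can opt in
 * to huge mappings; sizes below PMD_SIZE simply fall back to order-0 pages.
 *
 *	table = vmalloc_huge(nr_buckets * sizeof(struct bucket), GFP_KERNEL);
 */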
344262306a36Sopenharmony_ci
344362306a36Sopenharmony_ci/**
344462306a36Sopenharmony_ci * vzalloc - allocate virtually contiguous memory with zero fill
344562306a36Sopenharmony_ci * @size:    allocation size
344662306a36Sopenharmony_ci *
344762306a36Sopenharmony_ci * Allocate enough pages to cover @size from the page level
344862306a36Sopenharmony_ci * allocator and map them into contiguous kernel virtual space.
344962306a36Sopenharmony_ci * The memory allocated is set to zero.
345062306a36Sopenharmony_ci *
345162306a36Sopenharmony_ci * For tight control over page level allocator and protection flags
345262306a36Sopenharmony_ci * use __vmalloc() instead.
345362306a36Sopenharmony_ci *
345462306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL on error
345562306a36Sopenharmony_ci */
345662306a36Sopenharmony_civoid *vzalloc(unsigned long size)
345762306a36Sopenharmony_ci{
345862306a36Sopenharmony_ci	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
345962306a36Sopenharmony_ci				__builtin_return_address(0));
346062306a36Sopenharmony_ci}
346162306a36Sopenharmony_ciEXPORT_SYMBOL(vzalloc);
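
/*
 * Illustrative sketch (hypothetical caller): vzalloc() removes the need
 * for a separate memset() after allocation.
 *
 *	cfg = vzalloc(sizeof(*cfg));
 *	if (!cfg)
 *		return -ENOMEM;
 */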
346262306a36Sopenharmony_ci
346362306a36Sopenharmony_ci/**
346462306a36Sopenharmony_ci * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
346562306a36Sopenharmony_ci * @size: allocation size
346662306a36Sopenharmony_ci *
346762306a36Sopenharmony_ci * The resulting memory area is zeroed so it can be mapped to userspace
346862306a36Sopenharmony_ci * without leaking data.
346962306a36Sopenharmony_ci *
347062306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL on error
347162306a36Sopenharmony_ci */
347262306a36Sopenharmony_civoid *vmalloc_user(unsigned long size)
347362306a36Sopenharmony_ci{
347462306a36Sopenharmony_ci	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
347562306a36Sopenharmony_ci				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
347662306a36Sopenharmony_ci				    VM_USERMAP, NUMA_NO_NODE,
347762306a36Sopenharmony_ci				    __builtin_return_address(0));
347862306a36Sopenharmony_ci}
347962306a36Sopenharmony_ciEXPORT_SYMBOL(vmalloc_user);
348062306a36Sopenharmony_ci
348162306a36Sopenharmony_ci/**
348262306a36Sopenharmony_ci * vmalloc_node - allocate memory on a specific node
348362306a36Sopenharmony_ci * @size:	  allocation size
348462306a36Sopenharmony_ci * @node:	  numa node
348562306a36Sopenharmony_ci *
348662306a36Sopenharmony_ci * Allocate enough pages to cover @size from the page level
348762306a36Sopenharmony_ci * allocator and map them into contiguous kernel virtual space.
348862306a36Sopenharmony_ci *
348962306a36Sopenharmony_ci * For tight control over page level allocator and protection flags
349062306a36Sopenharmony_ci * use __vmalloc() instead.
349162306a36Sopenharmony_ci *
349262306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL on error
349362306a36Sopenharmony_ci */
349462306a36Sopenharmony_civoid *vmalloc_node(unsigned long size, int node)
349562306a36Sopenharmony_ci{
349662306a36Sopenharmony_ci	return __vmalloc_node(size, 1, GFP_KERNEL, node,
349762306a36Sopenharmony_ci			__builtin_return_address(0));
349862306a36Sopenharmony_ci}
349962306a36Sopenharmony_ciEXPORT_SYMBOL(vmalloc_node);
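
/*
 * Illustrative sketch (hypothetical caller): per-node buffers can be
 * placed close to the CPUs that will use them.
 *
 *	for_each_online_node(nid)
 *		pool[nid] = vmalloc_node(pool_size, nid);
 */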
350062306a36Sopenharmony_ci
350162306a36Sopenharmony_ci/**
350262306a36Sopenharmony_ci * vzalloc_node - allocate memory on a specific node with zero fill
350362306a36Sopenharmony_ci * @size:	allocation size
350462306a36Sopenharmony_ci * @node:	numa node
350562306a36Sopenharmony_ci *
350662306a36Sopenharmony_ci * Allocate enough pages to cover @size from the page level
350762306a36Sopenharmony_ci * allocator and map them into contiguous kernel virtual space.
350862306a36Sopenharmony_ci * The memory allocated is set to zero.
350962306a36Sopenharmony_ci *
351062306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL on error
351162306a36Sopenharmony_ci */
351262306a36Sopenharmony_civoid *vzalloc_node(unsigned long size, int node)
351362306a36Sopenharmony_ci{
351462306a36Sopenharmony_ci	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
351562306a36Sopenharmony_ci				__builtin_return_address(0));
351662306a36Sopenharmony_ci}
351762306a36Sopenharmony_ciEXPORT_SYMBOL(vzalloc_node);
351862306a36Sopenharmony_ci
351962306a36Sopenharmony_ci#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
352062306a36Sopenharmony_ci#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
352162306a36Sopenharmony_ci#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
352262306a36Sopenharmony_ci#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
352362306a36Sopenharmony_ci#else
352462306a36Sopenharmony_ci/*
352562306a36Sopenharmony_ci * 64-bit systems should always have either a DMA or a DMA32 zone. For
352662306a36Sopenharmony_ci * others, GFP_DMA32 should do the right thing and use the normal zone.
352762306a36Sopenharmony_ci */
352862306a36Sopenharmony_ci#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
352962306a36Sopenharmony_ci#endif
353062306a36Sopenharmony_ci
353162306a36Sopenharmony_ci/**
353262306a36Sopenharmony_ci * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
353362306a36Sopenharmony_ci * @size:	allocation size
353462306a36Sopenharmony_ci *
353562306a36Sopenharmony_ci * Allocate enough 32-bit physically addressable pages to cover @size from the
353662306a36Sopenharmony_ci * page level allocator and map them into contiguous kernel virtual space.
353762306a36Sopenharmony_ci *
353862306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL on error
353962306a36Sopenharmony_ci */
354062306a36Sopenharmony_civoid *vmalloc_32(unsigned long size)
354162306a36Sopenharmony_ci{
354262306a36Sopenharmony_ci	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
354362306a36Sopenharmony_ci			__builtin_return_address(0));
354462306a36Sopenharmony_ci}
354562306a36Sopenharmony_ciEXPORT_SYMBOL(vmalloc_32);
354662306a36Sopenharmony_ci
354762306a36Sopenharmony_ci/**
354862306a36Sopenharmony_ci * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
354962306a36Sopenharmony_ci * @size:	     allocation size
355062306a36Sopenharmony_ci *
355162306a36Sopenharmony_ci * The resulting memory area is 32bit addressable and zeroed so it can be
355262306a36Sopenharmony_ci * mapped to userspace without leaking data.
355362306a36Sopenharmony_ci *
355462306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL on error
355562306a36Sopenharmony_ci */
355662306a36Sopenharmony_civoid *vmalloc_32_user(unsigned long size)
355762306a36Sopenharmony_ci{
355862306a36Sopenharmony_ci	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
355962306a36Sopenharmony_ci				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
356062306a36Sopenharmony_ci				    VM_USERMAP, NUMA_NO_NODE,
356162306a36Sopenharmony_ci				    __builtin_return_address(0));
356262306a36Sopenharmony_ci}
356362306a36Sopenharmony_ciEXPORT_SYMBOL(vmalloc_32_user);
356462306a36Sopenharmony_ci
356562306a36Sopenharmony_ci/*
356662306a36Sopenharmony_ci * Atomically zero bytes in the iterator.
356762306a36Sopenharmony_ci *
356862306a36Sopenharmony_ci * Returns the number of zeroed bytes.
356962306a36Sopenharmony_ci */
357062306a36Sopenharmony_cistatic size_t zero_iter(struct iov_iter *iter, size_t count)
357162306a36Sopenharmony_ci{
357262306a36Sopenharmony_ci	size_t remains = count;
357362306a36Sopenharmony_ci
357462306a36Sopenharmony_ci	while (remains > 0) {
357562306a36Sopenharmony_ci		size_t num, copied;
357662306a36Sopenharmony_ci
357762306a36Sopenharmony_ci		num = min_t(size_t, remains, PAGE_SIZE);
357862306a36Sopenharmony_ci		copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
357962306a36Sopenharmony_ci		remains -= copied;
358062306a36Sopenharmony_ci
358162306a36Sopenharmony_ci		if (copied < num)
358262306a36Sopenharmony_ci			break;
358362306a36Sopenharmony_ci	}
358462306a36Sopenharmony_ci
358562306a36Sopenharmony_ci	return count - remains;
358662306a36Sopenharmony_ci}
358762306a36Sopenharmony_ci
358862306a36Sopenharmony_ci/*
358962306a36Sopenharmony_ci * Small helper routine to copy contents from addr into the iterator.
359062306a36Sopenharmony_ci * If a page is not present, zero-fill instead.
359162306a36Sopenharmony_ci *
359262306a36Sopenharmony_ci * Returns the number of copied bytes.
359362306a36Sopenharmony_ci */
359462306a36Sopenharmony_cistatic size_t aligned_vread_iter(struct iov_iter *iter,
359562306a36Sopenharmony_ci				 const char *addr, size_t count)
359662306a36Sopenharmony_ci{
359762306a36Sopenharmony_ci	size_t remains = count;
359862306a36Sopenharmony_ci	struct page *page;
359962306a36Sopenharmony_ci
360062306a36Sopenharmony_ci	while (remains > 0) {
360162306a36Sopenharmony_ci		unsigned long offset, length;
360262306a36Sopenharmony_ci		size_t copied = 0;
360362306a36Sopenharmony_ci
360462306a36Sopenharmony_ci		offset = offset_in_page(addr);
360562306a36Sopenharmony_ci		length = PAGE_SIZE - offset;
360662306a36Sopenharmony_ci		if (length > remains)
360762306a36Sopenharmony_ci			length = remains;
360862306a36Sopenharmony_ci		page = vmalloc_to_page(addr);
360962306a36Sopenharmony_ci		/*
361062306a36Sopenharmony_ci		 * To access this _mapped_ area safely we would need a lock. But
361162306a36Sopenharmony_ci		 * taking a lock here would add overhead to every vmalloc()/vfree()
361262306a36Sopenharmony_ci		 * call just for this rarely used _debug_ interface. Instead of
361362306a36Sopenharmony_ci		 * that, we use a local mapping via
361462306a36Sopenharmony_ci		 * copy_page_to_iter_nofault() and accept a small overhead in
361562306a36Sopenharmony_ci		 * this access function.
361662306a36Sopenharmony_ci		 */
361762306a36Sopenharmony_ci		if (page)
361862306a36Sopenharmony_ci			copied = copy_page_to_iter_nofault(page, offset,
361962306a36Sopenharmony_ci							   length, iter);
362062306a36Sopenharmony_ci		else
362162306a36Sopenharmony_ci			copied = zero_iter(iter, length);
362262306a36Sopenharmony_ci
362362306a36Sopenharmony_ci		addr += copied;
362462306a36Sopenharmony_ci		remains -= copied;
362562306a36Sopenharmony_ci
362662306a36Sopenharmony_ci		if (copied != length)
362762306a36Sopenharmony_ci			break;
362862306a36Sopenharmony_ci	}
362962306a36Sopenharmony_ci
363062306a36Sopenharmony_ci	return count - remains;
363162306a36Sopenharmony_ci}
363262306a36Sopenharmony_ci
363362306a36Sopenharmony_ci/*
363462306a36Sopenharmony_ci * Read from a vm_map_ram region of memory.
363562306a36Sopenharmony_ci *
363662306a36Sopenharmony_ci * Returns the number of copied bytes.
363762306a36Sopenharmony_ci */
363862306a36Sopenharmony_cistatic size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
363962306a36Sopenharmony_ci				  size_t count, unsigned long flags)
364062306a36Sopenharmony_ci{
364162306a36Sopenharmony_ci	char *start;
364262306a36Sopenharmony_ci	struct vmap_block *vb;
364362306a36Sopenharmony_ci	struct xarray *xa;
364462306a36Sopenharmony_ci	unsigned long offset;
364562306a36Sopenharmony_ci	unsigned int rs, re;
364662306a36Sopenharmony_ci	size_t remains, n;
364762306a36Sopenharmony_ci
364862306a36Sopenharmony_ci	/*
364962306a36Sopenharmony_ci	 * If the area was created directly via the vm_map_ram() interface,
365062306a36Sopenharmony_ci	 * without being further subdivided and delegated to a vmap_block,
365162306a36Sopenharmony_ci	 * handle it here.
365262306a36Sopenharmony_ci	 */
365362306a36Sopenharmony_ci	if (!(flags & VMAP_BLOCK))
365462306a36Sopenharmony_ci		return aligned_vread_iter(iter, addr, count);
365562306a36Sopenharmony_ci
365662306a36Sopenharmony_ci	remains = count;
365762306a36Sopenharmony_ci
365862306a36Sopenharmony_ci	/*
365962306a36Sopenharmony_ci	 * The area is split into regions and tracked with vmap_block; read out
366062306a36Sopenharmony_ci	 * each region and zero-fill the holes between regions.
366162306a36Sopenharmony_ci	 */
366262306a36Sopenharmony_ci	xa = addr_to_vb_xa((unsigned long) addr);
366362306a36Sopenharmony_ci	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
366462306a36Sopenharmony_ci	if (!vb)
366562306a36Sopenharmony_ci		goto finished_zero;
366662306a36Sopenharmony_ci
366762306a36Sopenharmony_ci	spin_lock(&vb->lock);
366862306a36Sopenharmony_ci	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
366962306a36Sopenharmony_ci		spin_unlock(&vb->lock);
367062306a36Sopenharmony_ci		goto finished_zero;
367162306a36Sopenharmony_ci	}
367262306a36Sopenharmony_ci
367362306a36Sopenharmony_ci	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
367462306a36Sopenharmony_ci		size_t copied;
367562306a36Sopenharmony_ci
367662306a36Sopenharmony_ci		if (remains == 0)
367762306a36Sopenharmony_ci			goto finished;
367862306a36Sopenharmony_ci
367962306a36Sopenharmony_ci		start = vmap_block_vaddr(vb->va->va_start, rs);
368062306a36Sopenharmony_ci
368162306a36Sopenharmony_ci		if (addr < start) {
368262306a36Sopenharmony_ci			size_t to_zero = min_t(size_t, start - addr, remains);
368362306a36Sopenharmony_ci			size_t zeroed = zero_iter(iter, to_zero);
368462306a36Sopenharmony_ci
368562306a36Sopenharmony_ci			addr += zeroed;
368662306a36Sopenharmony_ci			remains -= zeroed;
368762306a36Sopenharmony_ci
368862306a36Sopenharmony_ci			if (remains == 0 || zeroed != to_zero)
368962306a36Sopenharmony_ci				goto finished;
369062306a36Sopenharmony_ci		}
369162306a36Sopenharmony_ci
369262306a36Sopenharmony_ci		/* Reading may start from the middle of a used region. */
369362306a36Sopenharmony_ci		offset = offset_in_page(addr);
369462306a36Sopenharmony_ci		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
369562306a36Sopenharmony_ci		if (n > remains)
369662306a36Sopenharmony_ci			n = remains;
369762306a36Sopenharmony_ci
369862306a36Sopenharmony_ci		copied = aligned_vread_iter(iter, start + offset, n);
369962306a36Sopenharmony_ci
370062306a36Sopenharmony_ci		addr += copied;
370162306a36Sopenharmony_ci		remains -= copied;
370262306a36Sopenharmony_ci
370362306a36Sopenharmony_ci		if (copied != n)
370462306a36Sopenharmony_ci			goto finished;
370562306a36Sopenharmony_ci	}
370662306a36Sopenharmony_ci
370762306a36Sopenharmony_ci	spin_unlock(&vb->lock);
370862306a36Sopenharmony_ci
370962306a36Sopenharmony_cifinished_zero:
371062306a36Sopenharmony_ci	/* Zero-fill the remaining dirty or free regions. */
371162306a36Sopenharmony_ci	return count - remains + zero_iter(iter, remains);
371262306a36Sopenharmony_cifinished:
371362306a36Sopenharmony_ci	/* We couldn't copy/zero everything */
371462306a36Sopenharmony_ci	spin_unlock(&vb->lock);
371562306a36Sopenharmony_ci	return count - remains;
371662306a36Sopenharmony_ci}
371762306a36Sopenharmony_ci
371862306a36Sopenharmony_ci/**
371962306a36Sopenharmony_ci * vread_iter() - read vmalloc area in a safe way to an iterator.
372062306a36Sopenharmony_ci * @iter:         the iterator to which data should be written.
372162306a36Sopenharmony_ci * @addr:         vm address.
372262306a36Sopenharmony_ci * @count:        number of bytes to be read.
372362306a36Sopenharmony_ci *
372462306a36Sopenharmony_ci * This function checks that addr is a valid vmalloc'ed area and copies
372562306a36Sopenharmony_ci * data from that area to the given iterator. If the given memory range
372662306a36Sopenharmony_ci * of [addr...addr+count) includes some valid address, data is copied to
372762306a36Sopenharmony_ci * the proper area of @iter. If there are memory holes, they will be
372862306a36Sopenharmony_ci * zero-filled. IOREMAP areas are treated as memory holes and no copy is done.
372962306a36Sopenharmony_ci *
373062306a36Sopenharmony_ci * If [addr...addr+count) doesn't include any intersection with a live
373162306a36Sopenharmony_ci * vm_struct area, 0 is returned.
373262306a36Sopenharmony_ci *
373362306a36Sopenharmony_ci * Note: In usual ops, vread_iter() is never necessary because the caller
373462306a36Sopenharmony_ci * should know the vmalloc() area is valid and can use memcpy().
373562306a36Sopenharmony_ci * This is for routines which have to access the vmalloc area without
373662306a36Sopenharmony_ci * any information, such as /proc/kcore.
373762306a36Sopenharmony_ci *
373862306a36Sopenharmony_ci * Return: number of bytes for which addr and @iter should be increased
373962306a36Sopenharmony_ci * (same number as @count) or %0 if [addr...addr+count) doesn't
374062306a36Sopenharmony_ci * include any intersection with a valid vmalloc area
374162306a36Sopenharmony_ci */
374262306a36Sopenharmony_cilong vread_iter(struct iov_iter *iter, const char *addr, size_t count)
374362306a36Sopenharmony_ci{
374462306a36Sopenharmony_ci	struct vmap_area *va;
374562306a36Sopenharmony_ci	struct vm_struct *vm;
374662306a36Sopenharmony_ci	char *vaddr;
374762306a36Sopenharmony_ci	size_t n, size, flags, remains;
374862306a36Sopenharmony_ci
374962306a36Sopenharmony_ci	addr = kasan_reset_tag(addr);
375062306a36Sopenharmony_ci
375162306a36Sopenharmony_ci	/* Don't allow overflow */
375262306a36Sopenharmony_ci	if ((unsigned long) addr + count < count)
375362306a36Sopenharmony_ci		count = -(unsigned long) addr;
375462306a36Sopenharmony_ci
375562306a36Sopenharmony_ci	remains = count;
375662306a36Sopenharmony_ci
375762306a36Sopenharmony_ci	spin_lock(&vmap_area_lock);
375862306a36Sopenharmony_ci	va = find_vmap_area_exceed_addr((unsigned long)addr);
375962306a36Sopenharmony_ci	if (!va)
376062306a36Sopenharmony_ci		goto finished_zero;
376162306a36Sopenharmony_ci
376262306a36Sopenharmony_ci	/* no intersection with a live vmap_area */
376362306a36Sopenharmony_ci	if ((unsigned long)addr + remains <= va->va_start)
376462306a36Sopenharmony_ci		goto finished_zero;
376562306a36Sopenharmony_ci
376662306a36Sopenharmony_ci	list_for_each_entry_from(va, &vmap_area_list, list) {
376762306a36Sopenharmony_ci		size_t copied;
376862306a36Sopenharmony_ci
376962306a36Sopenharmony_ci		if (remains == 0)
377062306a36Sopenharmony_ci			goto finished;
377162306a36Sopenharmony_ci
377262306a36Sopenharmony_ci		vm = va->vm;
377362306a36Sopenharmony_ci		flags = va->flags & VMAP_FLAGS_MASK;
377462306a36Sopenharmony_ci		/*
377562306a36Sopenharmony_ci		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area and needs
377662306a36Sopenharmony_ci		 * to be set together with VMAP_RAM.
377762306a36Sopenharmony_ci		 */
377862306a36Sopenharmony_ci		WARN_ON(flags == VMAP_BLOCK);
377962306a36Sopenharmony_ci
378062306a36Sopenharmony_ci		if (!vm && !flags)
378162306a36Sopenharmony_ci			continue;
378262306a36Sopenharmony_ci
378362306a36Sopenharmony_ci		if (vm && (vm->flags & VM_UNINITIALIZED))
378462306a36Sopenharmony_ci			continue;
378562306a36Sopenharmony_ci
378662306a36Sopenharmony_ci		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
378762306a36Sopenharmony_ci		smp_rmb();
378862306a36Sopenharmony_ci
378962306a36Sopenharmony_ci		vaddr = (char *) va->va_start;
379062306a36Sopenharmony_ci		size = vm ? get_vm_area_size(vm) : va_size(va);
379162306a36Sopenharmony_ci
379262306a36Sopenharmony_ci		if (addr >= vaddr + size)
379362306a36Sopenharmony_ci			continue;
379462306a36Sopenharmony_ci
379562306a36Sopenharmony_ci		if (addr < vaddr) {
379662306a36Sopenharmony_ci			size_t to_zero = min_t(size_t, vaddr - addr, remains);
379762306a36Sopenharmony_ci			size_t zeroed = zero_iter(iter, to_zero);
379862306a36Sopenharmony_ci
379962306a36Sopenharmony_ci			addr += zeroed;
380062306a36Sopenharmony_ci			remains -= zeroed;
380162306a36Sopenharmony_ci
380262306a36Sopenharmony_ci			if (remains == 0 || zeroed != to_zero)
380362306a36Sopenharmony_ci				goto finished;
380462306a36Sopenharmony_ci		}
380562306a36Sopenharmony_ci
380662306a36Sopenharmony_ci		n = vaddr + size - addr;
380762306a36Sopenharmony_ci		if (n > remains)
380862306a36Sopenharmony_ci			n = remains;
380962306a36Sopenharmony_ci
381062306a36Sopenharmony_ci		if (flags & VMAP_RAM)
381162306a36Sopenharmony_ci			copied = vmap_ram_vread_iter(iter, addr, n, flags);
381262306a36Sopenharmony_ci		else if (!(vm->flags & VM_IOREMAP))
381362306a36Sopenharmony_ci			copied = aligned_vread_iter(iter, addr, n);
381462306a36Sopenharmony_ci		else /* IOREMAP area is treated as memory hole */
381562306a36Sopenharmony_ci			copied = zero_iter(iter, n);
381662306a36Sopenharmony_ci
381762306a36Sopenharmony_ci		addr += copied;
381862306a36Sopenharmony_ci		remains -= copied;
381962306a36Sopenharmony_ci
382062306a36Sopenharmony_ci		if (copied != n)
382162306a36Sopenharmony_ci			goto finished;
382262306a36Sopenharmony_ci	}
382362306a36Sopenharmony_ci
382462306a36Sopenharmony_cifinished_zero:
382562306a36Sopenharmony_ci	spin_unlock(&vmap_area_lock);
382662306a36Sopenharmony_ci	/* zero-fill memory holes */
382762306a36Sopenharmony_ci	return count - remains + zero_iter(iter, remains);
382862306a36Sopenharmony_cifinished:
382962306a36Sopenharmony_ci	/* Nothing remains, or we couldn't copy/zero everything. */
383062306a36Sopenharmony_ci	spin_unlock(&vmap_area_lock);
383162306a36Sopenharmony_ci
383262306a36Sopenharmony_ci	return count - remains;
383362306a36Sopenharmony_ci}
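
/*
 * Illustrative sketch (not part of this file): a /proc/kcore-style reader
 * that already holds an ITER_DEST iov_iter could dump a vmalloc range like
 * this; holes and IOREMAP areas come back zero-filled. The function name
 * is hypothetical.
 *
 *	static long dump_vmalloc_range(struct iov_iter *to,
 *				       const char *start, size_t len)
 *	{
 *		return vread_iter(to, start, len);
 *	}
 */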
383462306a36Sopenharmony_ci
383562306a36Sopenharmony_ci/**
383662306a36Sopenharmony_ci * remap_vmalloc_range_partial - map vmalloc pages to userspace
383762306a36Sopenharmony_ci * @vma:		vma to cover
383862306a36Sopenharmony_ci * @uaddr:		target user address to start at
383962306a36Sopenharmony_ci * @kaddr:		virtual address of vmalloc kernel memory
384062306a36Sopenharmony_ci * @pgoff:		offset from @kaddr to start at
384162306a36Sopenharmony_ci * @size:		size of map area
384262306a36Sopenharmony_ci *
384362306a36Sopenharmony_ci * Returns:	0 for success, -Exxx on failure
384462306a36Sopenharmony_ci *
384562306a36Sopenharmony_ci * This function checks that @kaddr is a valid vmalloc'ed area,
384662306a36Sopenharmony_ci * and that it is big enough to cover the range starting at
384762306a36Sopenharmony_ci * @uaddr in @vma. Will return failure if those criteria aren't
384862306a36Sopenharmony_ci * met.
384962306a36Sopenharmony_ci *
385062306a36Sopenharmony_ci * Similar to remap_pfn_range() (see mm/memory.c)
385162306a36Sopenharmony_ci */
385262306a36Sopenharmony_ciint remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
385362306a36Sopenharmony_ci				void *kaddr, unsigned long pgoff,
385462306a36Sopenharmony_ci				unsigned long size)
385562306a36Sopenharmony_ci{
385662306a36Sopenharmony_ci	struct vm_struct *area;
385762306a36Sopenharmony_ci	unsigned long off;
385862306a36Sopenharmony_ci	unsigned long end_index;
385962306a36Sopenharmony_ci
386062306a36Sopenharmony_ci	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
386162306a36Sopenharmony_ci		return -EINVAL;
386262306a36Sopenharmony_ci
386362306a36Sopenharmony_ci	size = PAGE_ALIGN(size);
386462306a36Sopenharmony_ci
386562306a36Sopenharmony_ci	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
386662306a36Sopenharmony_ci		return -EINVAL;
386762306a36Sopenharmony_ci
386862306a36Sopenharmony_ci	area = find_vm_area(kaddr);
386962306a36Sopenharmony_ci	if (!area)
387062306a36Sopenharmony_ci		return -EINVAL;
387162306a36Sopenharmony_ci
387262306a36Sopenharmony_ci	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
387362306a36Sopenharmony_ci		return -EINVAL;
387462306a36Sopenharmony_ci
387562306a36Sopenharmony_ci	if (check_add_overflow(size, off, &end_index) ||
387662306a36Sopenharmony_ci	    end_index > get_vm_area_size(area))
387762306a36Sopenharmony_ci		return -EINVAL;
387862306a36Sopenharmony_ci	kaddr += off;
387962306a36Sopenharmony_ci
388062306a36Sopenharmony_ci	do {
388162306a36Sopenharmony_ci		struct page *page = vmalloc_to_page(kaddr);
388262306a36Sopenharmony_ci		int ret;
388362306a36Sopenharmony_ci
388462306a36Sopenharmony_ci		ret = vm_insert_page(vma, uaddr, page);
388562306a36Sopenharmony_ci		if (ret)
388662306a36Sopenharmony_ci			return ret;
388762306a36Sopenharmony_ci
388862306a36Sopenharmony_ci		uaddr += PAGE_SIZE;
388962306a36Sopenharmony_ci		kaddr += PAGE_SIZE;
389062306a36Sopenharmony_ci		size -= PAGE_SIZE;
389162306a36Sopenharmony_ci	} while (size > 0);
389262306a36Sopenharmony_ci
389362306a36Sopenharmony_ci	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
389462306a36Sopenharmony_ci
389562306a36Sopenharmony_ci	return 0;
389662306a36Sopenharmony_ci}
389762306a36Sopenharmony_ci
389862306a36Sopenharmony_ci/**
389962306a36Sopenharmony_ci * remap_vmalloc_range - map vmalloc pages to userspace
390062306a36Sopenharmony_ci * @vma:		vma to cover (map full range of vma)
390162306a36Sopenharmony_ci * @addr:		vmalloc memory
390262306a36Sopenharmony_ci * @pgoff:		number of pages into addr before first page to map
390362306a36Sopenharmony_ci *
390462306a36Sopenharmony_ci * Returns:	0 for success, -Exxx on failure
390562306a36Sopenharmony_ci *
390662306a36Sopenharmony_ci * This function checks that addr is a valid vmalloc'ed area, and
390762306a36Sopenharmony_ci * that it is big enough to cover the vma. Will return failure if
390862306a36Sopenharmony_ci * those criteria aren't met.
390962306a36Sopenharmony_ci *
391062306a36Sopenharmony_ci * Similar to remap_pfn_range() (see mm/memory.c)
391162306a36Sopenharmony_ci */
391262306a36Sopenharmony_ciint remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
391362306a36Sopenharmony_ci						unsigned long pgoff)
391462306a36Sopenharmony_ci{
391562306a36Sopenharmony_ci	return remap_vmalloc_range_partial(vma, vma->vm_start,
391662306a36Sopenharmony_ci					   addr, pgoff,
391762306a36Sopenharmony_ci					   vma->vm_end - vma->vm_start);
391862306a36Sopenharmony_ci}
391962306a36Sopenharmony_ciEXPORT_SYMBOL(remap_vmalloc_range);
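
/*
 * Illustrative sketch (not part of this file): a driver mmap handler that
 * exposes a vmalloc_user() buffer to userspace. 'struct my_dev' and its
 * 'shared_buf' member are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->shared_buf, vma->vm_pgoff);
 *	}
 */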
392062306a36Sopenharmony_ci
392162306a36Sopenharmony_civoid free_vm_area(struct vm_struct *area)
392262306a36Sopenharmony_ci{
392362306a36Sopenharmony_ci	struct vm_struct *ret;
392462306a36Sopenharmony_ci	ret = remove_vm_area(area->addr);
392562306a36Sopenharmony_ci	BUG_ON(ret != area);
392662306a36Sopenharmony_ci	kfree(area);
392762306a36Sopenharmony_ci}
392862306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(free_vm_area);
392962306a36Sopenharmony_ci
393062306a36Sopenharmony_ci#ifdef CONFIG_SMP
393162306a36Sopenharmony_cistatic struct vmap_area *node_to_va(struct rb_node *n)
393262306a36Sopenharmony_ci{
393362306a36Sopenharmony_ci	return rb_entry_safe(n, struct vmap_area, rb_node);
393462306a36Sopenharmony_ci}
393562306a36Sopenharmony_ci
393662306a36Sopenharmony_ci/**
393762306a36Sopenharmony_ci * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
393862306a36Sopenharmony_ci * @addr: target address
393962306a36Sopenharmony_ci *
394062306a36Sopenharmony_ci * Returns: the vmap_area if it is found. If there is no such area,
394162306a36Sopenharmony_ci *   the highest preceding vmap_area (in reverse order) is returned,
394262306a36Sopenharmony_ci *   i.e. one with va->va_start < addr && va->va_end < addr, or NULL
394362306a36Sopenharmony_ci *   if there are no areas before @addr.
394462306a36Sopenharmony_ci */
394562306a36Sopenharmony_cistatic struct vmap_area *
394662306a36Sopenharmony_cipvm_find_va_enclose_addr(unsigned long addr)
394762306a36Sopenharmony_ci{
394862306a36Sopenharmony_ci	struct vmap_area *va, *tmp;
394962306a36Sopenharmony_ci	struct rb_node *n;
395062306a36Sopenharmony_ci
395162306a36Sopenharmony_ci	n = free_vmap_area_root.rb_node;
395262306a36Sopenharmony_ci	va = NULL;
395362306a36Sopenharmony_ci
395462306a36Sopenharmony_ci	while (n) {
395562306a36Sopenharmony_ci		tmp = rb_entry(n, struct vmap_area, rb_node);
395662306a36Sopenharmony_ci		if (tmp->va_start <= addr) {
395762306a36Sopenharmony_ci			va = tmp;
395862306a36Sopenharmony_ci			if (tmp->va_end >= addr)
395962306a36Sopenharmony_ci				break;
396062306a36Sopenharmony_ci
396162306a36Sopenharmony_ci			n = n->rb_right;
396262306a36Sopenharmony_ci		} else {
396362306a36Sopenharmony_ci			n = n->rb_left;
396462306a36Sopenharmony_ci		}
396562306a36Sopenharmony_ci	}
396662306a36Sopenharmony_ci
396762306a36Sopenharmony_ci	return va;
396862306a36Sopenharmony_ci}
396962306a36Sopenharmony_ci
397062306a36Sopenharmony_ci/**
397162306a36Sopenharmony_ci * pvm_determine_end_from_reverse - find the highest aligned address
397262306a36Sopenharmony_ci * of free block below VMALLOC_END
397362306a36Sopenharmony_ci * @va:
397462306a36Sopenharmony_ci *   in - the VA we start the search (reverse order);
397562306a36Sopenharmony_ci *   out - the VA with the highest aligned end address.
397662306a36Sopenharmony_ci * @align: alignment for required highest address
397762306a36Sopenharmony_ci *
397862306a36Sopenharmony_ci * Returns: determined end address within vmap_area
397962306a36Sopenharmony_ci */
398062306a36Sopenharmony_cistatic unsigned long
398162306a36Sopenharmony_cipvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
398262306a36Sopenharmony_ci{
398362306a36Sopenharmony_ci	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
398462306a36Sopenharmony_ci	unsigned long addr;
398562306a36Sopenharmony_ci
398662306a36Sopenharmony_ci	if (likely(*va)) {
398762306a36Sopenharmony_ci		list_for_each_entry_from_reverse((*va),
398862306a36Sopenharmony_ci				&free_vmap_area_list, list) {
398962306a36Sopenharmony_ci			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
399062306a36Sopenharmony_ci			if ((*va)->va_start < addr)
399162306a36Sopenharmony_ci				return addr;
399262306a36Sopenharmony_ci		}
399362306a36Sopenharmony_ci	}
399462306a36Sopenharmony_ci
399562306a36Sopenharmony_ci	return 0;
399662306a36Sopenharmony_ci}
399762306a36Sopenharmony_ci
399862306a36Sopenharmony_ci/**
399962306a36Sopenharmony_ci * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
400062306a36Sopenharmony_ci * @offsets: array containing offset of each area
400162306a36Sopenharmony_ci * @sizes: array containing size of each area
400262306a36Sopenharmony_ci * @nr_vms: the number of areas to allocate
400362306a36Sopenharmony_ci * @align: alignment, all entries in @offsets and @sizes must be aligned to this
400462306a36Sopenharmony_ci *
400562306a36Sopenharmony_ci * Returns: kmalloc'd vm_struct pointer array pointing to allocated
400662306a36Sopenharmony_ci *	    vm_structs on success, %NULL on failure
400762306a36Sopenharmony_ci *
400862306a36Sopenharmony_ci * Percpu allocator wants to use congruent vm areas so that it can
400962306a36Sopenharmony_ci * maintain the offsets among percpu areas.  This function allocates
401062306a36Sopenharmony_ci * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
401162306a36Sopenharmony_ci * be scattered pretty far, with the distance between two areas easily
401262306a36Sopenharmony_ci * reaching gigabytes.  To avoid interacting with regular vmallocs, these
401362306a36Sopenharmony_ci * areas are allocated from the top.
401462306a36Sopenharmony_ci *
401562306a36Sopenharmony_ci * Despite its complicated look, this allocator is rather simple. It
401662306a36Sopenharmony_ci * does everything top-down and scans free blocks from the end looking
401762306a36Sopenharmony_ci * for matching base. While scanning, if any of the areas do not fit the
401862306a36Sopenharmony_ci * for a matching base. While scanning, if any of the areas does not fit,
401962306a36Sopenharmony_ci * the base address is pulled down to fit the area. Scanning is repeated
402062306a36Sopenharmony_ci * until all the areas fit, and then all necessary data structures are inserted
402162306a36Sopenharmony_ci */
402262306a36Sopenharmony_cistruct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
402362306a36Sopenharmony_ci				     const size_t *sizes, int nr_vms,
402462306a36Sopenharmony_ci				     size_t align)
402562306a36Sopenharmony_ci{
402662306a36Sopenharmony_ci	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
402762306a36Sopenharmony_ci	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
402862306a36Sopenharmony_ci	struct vmap_area **vas, *va;
402962306a36Sopenharmony_ci	struct vm_struct **vms;
403062306a36Sopenharmony_ci	int area, area2, last_area, term_area;
403162306a36Sopenharmony_ci	unsigned long base, start, size, end, last_end, orig_start, orig_end;
403262306a36Sopenharmony_ci	bool purged = false;
403362306a36Sopenharmony_ci
403462306a36Sopenharmony_ci	/* verify parameters and allocate data structures */
403562306a36Sopenharmony_ci	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
403662306a36Sopenharmony_ci	for (last_area = 0, area = 0; area < nr_vms; area++) {
403762306a36Sopenharmony_ci		start = offsets[area];
403862306a36Sopenharmony_ci		end = start + sizes[area];
403962306a36Sopenharmony_ci
404062306a36Sopenharmony_ci		/* is everything aligned properly? */
404162306a36Sopenharmony_ci		BUG_ON(!IS_ALIGNED(offsets[area], align));
404262306a36Sopenharmony_ci		BUG_ON(!IS_ALIGNED(sizes[area], align));
404362306a36Sopenharmony_ci
404462306a36Sopenharmony_ci		/* detect the area with the highest address */
404562306a36Sopenharmony_ci		if (start > offsets[last_area])
404662306a36Sopenharmony_ci			last_area = area;
404762306a36Sopenharmony_ci
404862306a36Sopenharmony_ci		for (area2 = area + 1; area2 < nr_vms; area2++) {
404962306a36Sopenharmony_ci			unsigned long start2 = offsets[area2];
405062306a36Sopenharmony_ci			unsigned long end2 = start2 + sizes[area2];
405162306a36Sopenharmony_ci
405262306a36Sopenharmony_ci			BUG_ON(start2 < end && start < end2);
405362306a36Sopenharmony_ci		}
405462306a36Sopenharmony_ci	}
405562306a36Sopenharmony_ci	last_end = offsets[last_area] + sizes[last_area];
405662306a36Sopenharmony_ci
405762306a36Sopenharmony_ci	if (vmalloc_end - vmalloc_start < last_end) {
405862306a36Sopenharmony_ci		WARN_ON(true);
405962306a36Sopenharmony_ci		return NULL;
406062306a36Sopenharmony_ci	}
406162306a36Sopenharmony_ci
406262306a36Sopenharmony_ci	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
406362306a36Sopenharmony_ci	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
406462306a36Sopenharmony_ci	if (!vas || !vms)
406562306a36Sopenharmony_ci		goto err_free2;
406662306a36Sopenharmony_ci
406762306a36Sopenharmony_ci	for (area = 0; area < nr_vms; area++) {
406862306a36Sopenharmony_ci		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
406962306a36Sopenharmony_ci		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
407062306a36Sopenharmony_ci		if (!vas[area] || !vms[area])
407162306a36Sopenharmony_ci			goto err_free;
407262306a36Sopenharmony_ci	}
407362306a36Sopenharmony_ciretry:
407462306a36Sopenharmony_ci	spin_lock(&free_vmap_area_lock);
407562306a36Sopenharmony_ci
407662306a36Sopenharmony_ci	/* start scanning - we scan from the top, begin with the last area */
407762306a36Sopenharmony_ci	area = term_area = last_area;
407862306a36Sopenharmony_ci	start = offsets[area];
407962306a36Sopenharmony_ci	end = start + sizes[area];
408062306a36Sopenharmony_ci
408162306a36Sopenharmony_ci	va = pvm_find_va_enclose_addr(vmalloc_end);
408262306a36Sopenharmony_ci	base = pvm_determine_end_from_reverse(&va, align) - end;
408362306a36Sopenharmony_ci
408462306a36Sopenharmony_ci	while (true) {
408562306a36Sopenharmony_ci		/*
408662306a36Sopenharmony_ci		 * base might have underflowed, add last_end before
408762306a36Sopenharmony_ci		 * comparing.
408862306a36Sopenharmony_ci		 */
408962306a36Sopenharmony_ci		if (base + last_end < vmalloc_start + last_end)
409062306a36Sopenharmony_ci			goto overflow;
409162306a36Sopenharmony_ci
409262306a36Sopenharmony_ci		/*
409362306a36Sopenharmony_ci		 * A fitting base has not been found.
409462306a36Sopenharmony_ci		 */
409562306a36Sopenharmony_ci		if (va == NULL)
409662306a36Sopenharmony_ci			goto overflow;
409762306a36Sopenharmony_ci
409862306a36Sopenharmony_ci		/*
409962306a36Sopenharmony_ci		 * If required width exceeds current VA block, move
410062306a36Sopenharmony_ci		 * base downwards and then recheck.
410162306a36Sopenharmony_ci		 */
410262306a36Sopenharmony_ci		if (base + end > va->va_end) {
410362306a36Sopenharmony_ci			base = pvm_determine_end_from_reverse(&va, align) - end;
410462306a36Sopenharmony_ci			term_area = area;
410562306a36Sopenharmony_ci			continue;
410662306a36Sopenharmony_ci		}
410762306a36Sopenharmony_ci
410862306a36Sopenharmony_ci		/*
410962306a36Sopenharmony_ci		 * If this VA does not fit, move base downwards and recheck.
411062306a36Sopenharmony_ci		 */
411162306a36Sopenharmony_ci		if (base + start < va->va_start) {
411262306a36Sopenharmony_ci			va = node_to_va(rb_prev(&va->rb_node));
411362306a36Sopenharmony_ci			base = pvm_determine_end_from_reverse(&va, align) - end;
411462306a36Sopenharmony_ci			term_area = area;
411562306a36Sopenharmony_ci			continue;
411662306a36Sopenharmony_ci		}
411762306a36Sopenharmony_ci
411862306a36Sopenharmony_ci		/*
411962306a36Sopenharmony_ci		 * This area fits, move on to the previous one.  If
412062306a36Sopenharmony_ci		 * the previous one is the terminal one, we're done.
412162306a36Sopenharmony_ci		 */
412262306a36Sopenharmony_ci		area = (area + nr_vms - 1) % nr_vms;
412362306a36Sopenharmony_ci		if (area == term_area)
412462306a36Sopenharmony_ci			break;
412562306a36Sopenharmony_ci
412662306a36Sopenharmony_ci		start = offsets[area];
412762306a36Sopenharmony_ci		end = start + sizes[area];
412862306a36Sopenharmony_ci		va = pvm_find_va_enclose_addr(base + end);
412962306a36Sopenharmony_ci	}
413062306a36Sopenharmony_ci
413162306a36Sopenharmony_ci	/* we've found a fitting base, insert all va's */
413262306a36Sopenharmony_ci	for (area = 0; area < nr_vms; area++) {
413362306a36Sopenharmony_ci		int ret;
413462306a36Sopenharmony_ci
413562306a36Sopenharmony_ci		start = base + offsets[area];
413662306a36Sopenharmony_ci		size = sizes[area];
413762306a36Sopenharmony_ci
413862306a36Sopenharmony_ci		va = pvm_find_va_enclose_addr(start);
413962306a36Sopenharmony_ci		if (WARN_ON_ONCE(va == NULL))
414062306a36Sopenharmony_ci			/* It is a BUG(), but trigger recovery instead. */
414162306a36Sopenharmony_ci			goto recovery;
414262306a36Sopenharmony_ci
414362306a36Sopenharmony_ci		ret = adjust_va_to_fit_type(&free_vmap_area_root,
414462306a36Sopenharmony_ci					    &free_vmap_area_list,
414562306a36Sopenharmony_ci					    va, start, size);
414662306a36Sopenharmony_ci		if (WARN_ON_ONCE(unlikely(ret)))
414762306a36Sopenharmony_ci			/* It is a BUG(), but trigger recovery instead. */
414862306a36Sopenharmony_ci			goto recovery;
414962306a36Sopenharmony_ci
415062306a36Sopenharmony_ci		/* Allocated area. */
415162306a36Sopenharmony_ci		va = vas[area];
415262306a36Sopenharmony_ci		va->va_start = start;
415362306a36Sopenharmony_ci		va->va_end = start + size;
415462306a36Sopenharmony_ci	}
415562306a36Sopenharmony_ci
415662306a36Sopenharmony_ci	spin_unlock(&free_vmap_area_lock);
415762306a36Sopenharmony_ci
415862306a36Sopenharmony_ci	/* populate the kasan shadow space */
415962306a36Sopenharmony_ci	for (area = 0; area < nr_vms; area++) {
416062306a36Sopenharmony_ci		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
416162306a36Sopenharmony_ci			goto err_free_shadow;
416262306a36Sopenharmony_ci	}
416362306a36Sopenharmony_ci
416462306a36Sopenharmony_ci	/* insert all vm's */
416562306a36Sopenharmony_ci	spin_lock(&vmap_area_lock);
416662306a36Sopenharmony_ci	for (area = 0; area < nr_vms; area++) {
416762306a36Sopenharmony_ci		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
416862306a36Sopenharmony_ci
416962306a36Sopenharmony_ci		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
417062306a36Sopenharmony_ci				 pcpu_get_vm_areas);
417162306a36Sopenharmony_ci	}
417262306a36Sopenharmony_ci	spin_unlock(&vmap_area_lock);
417362306a36Sopenharmony_ci
417462306a36Sopenharmony_ci	/*
417562306a36Sopenharmony_ci	 * Mark allocated areas as accessible. Do it now as a best-effort
417662306a36Sopenharmony_ci	 * approach, as they can be mapped outside of vmalloc code.
417762306a36Sopenharmony_ci	 * With hardware tag-based KASAN, marking is skipped for
417862306a36Sopenharmony_ci	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
417962306a36Sopenharmony_ci	 */
418062306a36Sopenharmony_ci	for (area = 0; area < nr_vms; area++)
418162306a36Sopenharmony_ci		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
418262306a36Sopenharmony_ci				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
418362306a36Sopenharmony_ci
418462306a36Sopenharmony_ci	kfree(vas);
418562306a36Sopenharmony_ci	return vms;
418662306a36Sopenharmony_ci
418762306a36Sopenharmony_cirecovery:
418862306a36Sopenharmony_ci	/*
418962306a36Sopenharmony_ci	 * Remove previously allocated areas. There is no
419062306a36Sopenharmony_ci	 * need to remove these areas from the busy tree,
419162306a36Sopenharmony_ci	 * because they are inserted there only on the final
419262306a36Sopenharmony_ci	 * step, once pcpu_get_vm_areas() has succeeded.
419362306a36Sopenharmony_ci	 */
419462306a36Sopenharmony_ci	while (area--) {
419562306a36Sopenharmony_ci		orig_start = vas[area]->va_start;
419662306a36Sopenharmony_ci		orig_end = vas[area]->va_end;
419762306a36Sopenharmony_ci		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
419862306a36Sopenharmony_ci				&free_vmap_area_list);
419962306a36Sopenharmony_ci		if (va)
420062306a36Sopenharmony_ci			kasan_release_vmalloc(orig_start, orig_end,
420162306a36Sopenharmony_ci				va->va_start, va->va_end);
420262306a36Sopenharmony_ci		vas[area] = NULL;
420362306a36Sopenharmony_ci	}
420462306a36Sopenharmony_ci
420562306a36Sopenharmony_cioverflow:
420662306a36Sopenharmony_ci	spin_unlock(&free_vmap_area_lock);
420762306a36Sopenharmony_ci	if (!purged) {
420862306a36Sopenharmony_ci		reclaim_and_purge_vmap_areas();
420962306a36Sopenharmony_ci		purged = true;
421062306a36Sopenharmony_ci
421162306a36Sopenharmony_ci		/* Before "retry", check if we recover. */
421262306a36Sopenharmony_ci		for (area = 0; area < nr_vms; area++) {
421362306a36Sopenharmony_ci			if (vas[area])
421462306a36Sopenharmony_ci				continue;
421562306a36Sopenharmony_ci
421662306a36Sopenharmony_ci			vas[area] = kmem_cache_zalloc(
421762306a36Sopenharmony_ci				vmap_area_cachep, GFP_KERNEL);
421862306a36Sopenharmony_ci			if (!vas[area])
421962306a36Sopenharmony_ci				goto err_free;
422062306a36Sopenharmony_ci		}
422162306a36Sopenharmony_ci
422262306a36Sopenharmony_ci		goto retry;
422362306a36Sopenharmony_ci	}
422462306a36Sopenharmony_ci
422562306a36Sopenharmony_cierr_free:
422662306a36Sopenharmony_ci	for (area = 0; area < nr_vms; area++) {
422762306a36Sopenharmony_ci		if (vas[area])
422862306a36Sopenharmony_ci			kmem_cache_free(vmap_area_cachep, vas[area]);
422962306a36Sopenharmony_ci
423062306a36Sopenharmony_ci		kfree(vms[area]);
423162306a36Sopenharmony_ci	}
423262306a36Sopenharmony_cierr_free2:
423362306a36Sopenharmony_ci	kfree(vas);
423462306a36Sopenharmony_ci	kfree(vms);
423562306a36Sopenharmony_ci	return NULL;
423662306a36Sopenharmony_ci
423762306a36Sopenharmony_cierr_free_shadow:
423862306a36Sopenharmony_ci	spin_lock(&free_vmap_area_lock);
423962306a36Sopenharmony_ci	/*
424062306a36Sopenharmony_ci	 * We release all the vmalloc shadows, even the ones for regions that
424162306a36Sopenharmony_ci	 * hadn't been successfully added. This relies on kasan_release_vmalloc
424262306a36Sopenharmony_ci	 * being able to tolerate this case.
424362306a36Sopenharmony_ci	 */
424462306a36Sopenharmony_ci	for (area = 0; area < nr_vms; area++) {
424562306a36Sopenharmony_ci		orig_start = vas[area]->va_start;
424662306a36Sopenharmony_ci		orig_end = vas[area]->va_end;
424762306a36Sopenharmony_ci		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
424862306a36Sopenharmony_ci				&free_vmap_area_list);
424962306a36Sopenharmony_ci		if (va)
425062306a36Sopenharmony_ci			kasan_release_vmalloc(orig_start, orig_end,
425162306a36Sopenharmony_ci				va->va_start, va->va_end);
425262306a36Sopenharmony_ci		vas[area] = NULL;
425362306a36Sopenharmony_ci		kfree(vms[area]);
425462306a36Sopenharmony_ci	}
425562306a36Sopenharmony_ci	spin_unlock(&free_vmap_area_lock);
425662306a36Sopenharmony_ci	kfree(vas);
425762306a36Sopenharmony_ci	kfree(vms);
425862306a36Sopenharmony_ci	return NULL;
425962306a36Sopenharmony_ci}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PRINTK
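/*
 * vmalloc_dump_obj - best-effort description of a vmalloc'ed object
 * @object: address that may lie inside a vmalloc region
 *
 * Prints the page count, start address and allocating caller of the
 * vmalloc region containing @object, if one can be found without
 * blocking. Typically reached via mem_dump_obj() from debugging code.
 *
 * Returns true if a region was found and reported, false otherwise.
 */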
bool vmalloc_dump_obj(void *object)
{
	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
	const void *caller;
	struct vm_struct *vm;
	struct vmap_area *va;
	unsigned long addr;
	unsigned int nr_pages;

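	/*
	 * Best effort only: this may be called from contexts (e.g. an oops
	 * or debug report) where blocking on vmap_area_lock could deadlock,
	 * so give up rather than wait for the lock.
	 */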
	if (!spin_trylock(&vmap_area_lock))
		return false;
	va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
	if (!va) {
		spin_unlock(&vmap_area_lock);
		return false;
	}

	vm = va->vm;
	if (!vm) {
		spin_unlock(&vmap_area_lock);
		return false;
	}
	addr = (unsigned long)vm->addr;
	caller = vm->caller;
	nr_pages = vm->nr_pages;
	spin_unlock(&vmap_area_lock);
	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		nr_pages, addr, caller);
	return true;
}
#endif

#ifdef CONFIG_PROC_FS
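/*
 * The seq_file operations below implement /proc/vmallocinfo. The whole
 * walk of vmap_area_list is performed with vmap_purge_lock and
 * vmap_area_lock held (taken in s_start(), dropped in s_stop()) so that
 * areas are neither purged nor removed while they are being reported.
 */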
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_purge_lock)
	__acquires(&vmap_area_lock)
{
	mutex_lock(&vmap_purge_lock);
	spin_lock(&vmap_area_lock);

	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
	__releases(&vmap_purge_lock)
{
	spin_unlock(&vmap_area_lock);
	mutex_unlock(&vmap_purge_lock);
}

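/*
 * Append per-node page counts (" N<node>=<pages>") for an area. The
 * counter buffer lives in m->private and is sized for nr_node_ids
 * entries by proc_vmalloc_init() below.
 */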
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;
		unsigned int step = 1U << vm_area_page_order(v);

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr += step)
			counters[page_to_nid(v->pages[nr])] += step;
		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

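/*
 * Dump areas that have been vunmap'ed but not yet purged, i.e. entries
 * still sitting on purge_vmap_area_list waiting to be reclaimed.
 */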
static void show_purge_info(struct seq_file *m)
{
	struct vmap_area *va;

	spin_lock(&purge_vmap_area_lock);
	list_for_each_entry(va, &purge_vmap_area_list, list) {
		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
	}
	spin_unlock(&purge_vmap_area_lock);
}

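/*
 * Emit one line per vmap_area. Areas without a struct vm_struct are
 * printed only if they are vm_map_ram() regions; everything else gets
 * its caller, page count, physical address and type flags. An
 * illustrative (not literal) line of output:
 *
 *   0x0000000012345678-0x0000000012355678   65536 load_module+0x123/0x456 pages=16 vmalloc N0=16
 */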
static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	if (!va->vm) {
		if (va->flags & VMAP_RAM)
			seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
				(void *)va->va_start, (void *)va->va_end,
				va->va_end - va->va_start);

		goto final;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_DMA_COHERENT)
		seq_puts(m, " dma-coherent");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');

	/*
	 * As a final step, dump "unpurged" areas.
	 */
final:
	if (list_is_last(&va->list, &vmap_area_list))
		show_purge_info(m);

	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

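/*
 * Register /proc/vmallocinfo. On NUMA kernels the seq_file private
 * allocation provides the per-node counter array consumed by
 * show_numa_info().
 */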
static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif

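/*
 * vmalloc_init - set up the vmalloc/vmap subsystem
 *
 * Creates the vmap_area slab cache, initializes the per-CPU vmap block
 * queues and deferred-vfree lists, imports any early vmlist entries and
 * builds the free-space tree. Called once during early boot, before the
 * first vmalloc() user.
 */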
void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	/*
	 * Create the cache for vmap_area objects.
	 */
	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);

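	/*
	 * Per-CPU state: the vmap block queues backing vm_map_ram() and the
	 * deferred-vfree lists used when vfree() is called from a context
	 * that cannot free immediately (e.g. vfree_atomic()).
	 */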
	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, delayed_vfree_work);
		xa_init(&vbq->vmap_blocks);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (WARN_ON_ONCE(!va))
			continue;

		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	/*
	 * Now we can initialize the free vmap space.
	 */
	vmap_init_free_space();
	vmap_initialized = true;
}