// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge())

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}
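
/*
 * Worked example (illustrative, not from the original source): with a
 * 64-byte cache_line_size(), SLAB_HWCACHE_ALIGN and a 20-byte object,
 * ralign is halved from 64 to 32 (20 <= 32 but 20 > 16), so the result
 * is max(align, 32), raised to arch_slab_minalign() and finally rounded
 * up to a multiple of sizeof(void *).
 */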

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

#ifdef CONFIG_HARDENED_USERCOPY
	if (s->usersize)
		return 1;
#endif

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
#ifdef CONFIG_HARDENED_USERCOPY
	s->useroffset = useroffset;
	s->usersize = usersize;
#endif

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
out:
	return ERR_PTR(err);
}

/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slub_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
	    WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
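
/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * hypothetical cache whose first 16 bytes may be copied to/from user
 * space under the hardened usercopy checks.
 *
 *	struct foo { char uarea[16]; void *priv; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, uarea),
 *				sizeof_field(struct foo, uarea), NULL);
 */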

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
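
/*
 * Minimal usage sketch (illustrative only; struct bar and bar_ctor are
 * hypothetical): create a cache with a constructor, allocate and free
 * one object, then destroy the cache when it is no longer needed.
 *
 *	static void bar_ctor(void *obj)
 *	{
 *		struct bar *b = obj;
 *
 *		spin_lock_init(&b->lock);
 *	}
 *
 *	bar_cache = kmem_cache_create("bar", sizeof(struct bar), 0,
 *				      SLAB_HWCACHE_ALIGN, bar_ctor);
 *	obj = kmem_cache_alloc(bar_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(bar_cache, obj);
 *	kmem_cache_destroy(bar_cache);
 */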

#ifdef SLAB_SUPPORTS_SYSFS
/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 *
 * Note that there will be a slight delay in the deletion of sysfs files
 * if kmem_cache_release() is called indirectly from a work function.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	sysfs_slab_unlink(s);
	sysfs_slab_release(s);
}
#else
static void kmem_cache_release(struct kmem_cache *s)
{
	slab_kmem_cache_release(s);
}
#endif

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
	 * through RCU and the associated kmem_cache are dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished.  As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
		debugfs_slab_release(s);
		kfence_shutdown_cache(s);
		kmem_cache_release(s);
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
		kfence_shutdown_cache(s);
		debugfs_slab_release(s);
	}

	return 0;
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err = -EBUSY;
	bool rcu_set;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_cache(s);
	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
	     __func__, s->name, (void *)_RET_IP_);
out_unlock:
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();
	if (!err && !rcu_set)
		kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifdef CONFIG_PRINTK
/**
 * kmem_valid_obj - does the pointer reference a valid slab object?
 * @object: pointer to query.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_valid_obj(void *object)
{
	struct folio *folio;

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	folio = virt_to_folio(object);
	return folio_test_slab(folio);
}
EXPORT_SYMBOL_GPL(kmem_valid_obj);

static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}

/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * This function will splat if passed a pointer to a non-slab object.
 * If you are not sure what type of object you have, you should instead
 * use mem_dump_obj().
 */
void kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	if (WARN_ON_ONCE(!virt_addr_valid(object)))
		return;
	slab = virt_to_slab(object);
	if (WARN_ON_ONCE(!slab)) {
		pr_cont(" non-slab memory.\n");
		return;
	}
	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
		pr_cont(" size %u", kp.kp_slab_cache->object_size);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_free_stack[i]);
	}

}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
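
/*
 * Illustrative call pattern (sketch, not from the original source): the
 * caller prints a preamble with no trailing newline and only dumps
 * pointers that look like slab objects, e.g.
 *
 *	pr_info("Suspect pointer %px:", ptr);
 *	if (kmem_valid_obj(ptr))
 *		kmem_dump_obj(ptr);
 *	else
 *		pr_cont(" not a slab object.\n");
 */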
#endif

/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * For power of two sizes, guarantee natural alignment for kmalloc
	 * caches, regardless of SL*B debugging options.
	 */
	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
	s->useroffset = useroffset;
	s->usersize = usersize;
#endif

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

static struct kmem_cache *__init create_kmalloc_cache(const char *name,
						      unsigned int size,
						      slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
unsigned long random_kmalloc_seed __ro_after_init;
EXPORT_SYMBOL(random_kmalloc_seed);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have
 * non-power-of-two cache sizes there. The size of larger slabs can be
 * determined using fls.
 */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else {
		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
			return NULL;
		index = fls(size - 1);
	}

	return kmalloc_caches[kmalloc_type(flags, caller)][index];
}
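
/*
 * Worked example (illustrative): kmalloc_slab(100, ...) computes
 * size_index_elem(100) == 12 and size_index[12] == 7, selecting the
 * kmalloc-128 cache; kmalloc_slab(200, ...) is above 192, so it uses
 * fls(199) == 8 and ends up in kmalloc-256.
 */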

size_t kmalloc_size_roundup(size_t size)
{
	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
		/*
		 * The flags don't matter since size_index is common to all.
		 * Neither does the caller for just getting ->object_size.
		 */
		return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
	}

	/* Above the smaller buckets, size is a multiple of page size. */
	if (size && size <= KMALLOC_MAX_SIZE)
		return PAGE_SIZE << get_order(size);

	/*
	 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
	 * and very large size - kmalloc() may fail.
	 */
	return size;

}
EXPORT_SYMBOL(kmalloc_size_roundup);
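
/*
 * Usage sketch (illustrative): callers that want to use the whole
 * bucket, e.g. for a growable buffer, round the request up first so
 * the extra space can be used without over-reading the allocation:
 *
 *	size_t want = kmalloc_size_roundup(len);
 *	buf = kmalloc(want, GFP_KERNEL);
 *	if (buf)
 *		capacity = want;
 */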

#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG_KMEM
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#ifndef CONFIG_SLUB_TINY
#define KMALLOC_RCL_NAME(sz)	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
#else
#define KMALLOC_RCL_NAME(sz)
#endif

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
#define KMA_RAND_1(sz)                  .name[KMALLOC_RANDOM_START +  1] = "kmalloc-rnd-01-" #sz,
#define KMA_RAND_2(sz)  KMA_RAND_1(sz)  .name[KMALLOC_RANDOM_START +  2] = "kmalloc-rnd-02-" #sz,
#define KMA_RAND_3(sz)  KMA_RAND_2(sz)  .name[KMALLOC_RANDOM_START +  3] = "kmalloc-rnd-03-" #sz,
#define KMA_RAND_4(sz)  KMA_RAND_3(sz)  .name[KMALLOC_RANDOM_START +  4] = "kmalloc-rnd-04-" #sz,
#define KMA_RAND_5(sz)  KMA_RAND_4(sz)  .name[KMALLOC_RANDOM_START +  5] = "kmalloc-rnd-05-" #sz,
#define KMA_RAND_6(sz)  KMA_RAND_5(sz)  .name[KMALLOC_RANDOM_START +  6] = "kmalloc-rnd-06-" #sz,
#define KMA_RAND_7(sz)  KMA_RAND_6(sz)  .name[KMALLOC_RANDOM_START +  7] = "kmalloc-rnd-07-" #sz,
#define KMA_RAND_8(sz)  KMA_RAND_7(sz)  .name[KMALLOC_RANDOM_START +  8] = "kmalloc-rnd-08-" #sz,
#define KMA_RAND_9(sz)  KMA_RAND_8(sz)  .name[KMALLOC_RANDOM_START +  9] = "kmalloc-rnd-09-" #sz,
#define KMA_RAND_10(sz) KMA_RAND_9(sz)  .name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
#define KMA_RAND_11(sz) KMA_RAND_10(sz) .name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
#define KMA_RAND_12(sz) KMA_RAND_11(sz) .name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
#define KMA_RAND_13(sz) KMA_RAND_12(sz) .name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
#define KMA_RAND_14(sz) KMA_RAND_13(sz) .name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
#define KMA_RAND_15(sz) KMA_RAND_14(sz) .name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
#else // CONFIG_RANDOM_KMALLOC_CACHES
#define KMALLOC_RANDOM_NAME(N, sz)
#endif

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	KMALLOC_RCL_NAME(__short_size)				\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size)	\
	.size = __size,						\
}

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		!is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}
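
/*
 * Example effect (illustrative): with KMALLOC_MIN_SIZE == 64, all
 * requests up to 64 bytes end up in kmalloc-64 and 65..96-byte requests
 * in kmalloc-128, since the 96-byte cache is not created in that
 * configuration.
 */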

static unsigned int __kmalloc_minalign(void)
{
	unsigned int minalign = dma_get_cache_alignment();

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
	    is_swiotlb_allocated())
		minalign = ARCH_KMALLOC_MINALIGN;

	return max(minalign, arch_slab_minalign());
}

void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
	unsigned int minalign = __kmalloc_minalign();
	unsigned int aligned_size = kmalloc_info[idx].size;
	int aligned_idx = idx;

	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
		flags |= SLAB_NO_MERGE;
#endif

	/*
	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
		flags |= SLAB_NO_MERGE;

	if (minalign > ARCH_KMALLOC_MINALIGN) {
		aligned_size = ALIGN(aligned_size, minalign);
		aligned_idx = __kmalloc_index(aligned_size, false);
	}

	if (!kmalloc_caches[type][aligned_idx])
		kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
					kmalloc_info[aligned_idx].name[type],
					aligned_size, flags);
	if (idx != aligned_idx)
		kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM is defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);

			/*
			 * Caches that are not of a power-of-two size.
			 * These have to be created immediately after the
			 * earlier power-of-two caches.
97362306a36Sopenharmony_ci			 */
97462306a36Sopenharmony_ci			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
97562306a36Sopenharmony_ci					!kmalloc_caches[type][1])
97662306a36Sopenharmony_ci				new_kmalloc_cache(1, type, flags);
97762306a36Sopenharmony_ci			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
97862306a36Sopenharmony_ci					!kmalloc_caches[type][2])
97962306a36Sopenharmony_ci				new_kmalloc_cache(2, type, flags);
98062306a36Sopenharmony_ci		}
98162306a36Sopenharmony_ci	}
98262306a36Sopenharmony_ci#ifdef CONFIG_RANDOM_KMALLOC_CACHES
98362306a36Sopenharmony_ci	random_kmalloc_seed = get_random_u64();
98462306a36Sopenharmony_ci#endif
98562306a36Sopenharmony_ci
98662306a36Sopenharmony_ci	/* Kmalloc array is now usable */
98762306a36Sopenharmony_ci	slab_state = UP;
98862306a36Sopenharmony_ci}
98962306a36Sopenharmony_ci
99062306a36Sopenharmony_civoid free_large_kmalloc(struct folio *folio, void *object)
99162306a36Sopenharmony_ci{
99262306a36Sopenharmony_ci	unsigned int order = folio_order(folio);
99362306a36Sopenharmony_ci
99462306a36Sopenharmony_ci	if (WARN_ON_ONCE(order == 0))
99562306a36Sopenharmony_ci		pr_warn_once("object pointer: 0x%p\n", object);
99662306a36Sopenharmony_ci
99762306a36Sopenharmony_ci	kmemleak_free(object);
99862306a36Sopenharmony_ci	kasan_kfree_large(object);
99962306a36Sopenharmony_ci	kmsan_kfree_large(object);
100062306a36Sopenharmony_ci
100162306a36Sopenharmony_ci	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
100262306a36Sopenharmony_ci			      -(PAGE_SIZE << order));
100362306a36Sopenharmony_ci	__free_pages(folio_page(folio, 0), order);
100462306a36Sopenharmony_ci}
100562306a36Sopenharmony_ci
100662306a36Sopenharmony_cistatic void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
100762306a36Sopenharmony_cistatic __always_inline
100862306a36Sopenharmony_civoid *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
100962306a36Sopenharmony_ci{
101062306a36Sopenharmony_ci	struct kmem_cache *s;
101162306a36Sopenharmony_ci	void *ret;
101262306a36Sopenharmony_ci
101362306a36Sopenharmony_ci	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
101462306a36Sopenharmony_ci		ret = __kmalloc_large_node(size, flags, node);
101562306a36Sopenharmony_ci		trace_kmalloc(caller, ret, size,
101662306a36Sopenharmony_ci			      PAGE_SIZE << get_order(size), flags, node);
101762306a36Sopenharmony_ci		return ret;
101862306a36Sopenharmony_ci	}
101962306a36Sopenharmony_ci
102062306a36Sopenharmony_ci	s = kmalloc_slab(size, flags, caller);
102162306a36Sopenharmony_ci
102262306a36Sopenharmony_ci	if (unlikely(ZERO_OR_NULL_PTR(s)))
102362306a36Sopenharmony_ci		return s;
102462306a36Sopenharmony_ci
102562306a36Sopenharmony_ci	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
102662306a36Sopenharmony_ci	ret = kasan_kmalloc(s, ret, size, flags);
102762306a36Sopenharmony_ci	trace_kmalloc(caller, ret, size, s->size, flags, node);
102862306a36Sopenharmony_ci	return ret;
102962306a36Sopenharmony_ci}
103062306a36Sopenharmony_ci
103162306a36Sopenharmony_civoid *__kmalloc_node(size_t size, gfp_t flags, int node)
103262306a36Sopenharmony_ci{
103362306a36Sopenharmony_ci	return __do_kmalloc_node(size, flags, node, _RET_IP_);
103462306a36Sopenharmony_ci}
103562306a36Sopenharmony_ciEXPORT_SYMBOL(__kmalloc_node);
103662306a36Sopenharmony_ci
103762306a36Sopenharmony_civoid *__kmalloc(size_t size, gfp_t flags)
103862306a36Sopenharmony_ci{
103962306a36Sopenharmony_ci	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
104062306a36Sopenharmony_ci}
104162306a36Sopenharmony_ciEXPORT_SYMBOL(__kmalloc);
104262306a36Sopenharmony_ci
104362306a36Sopenharmony_civoid *__kmalloc_node_track_caller(size_t size, gfp_t flags,
104462306a36Sopenharmony_ci				  int node, unsigned long caller)
104562306a36Sopenharmony_ci{
104662306a36Sopenharmony_ci	return __do_kmalloc_node(size, flags, node, caller);
104762306a36Sopenharmony_ci}
104862306a36Sopenharmony_ciEXPORT_SYMBOL(__kmalloc_node_track_caller);
104962306a36Sopenharmony_ci
105062306a36Sopenharmony_ci/**
105162306a36Sopenharmony_ci * kfree - free previously allocated memory
105262306a36Sopenharmony_ci * @object: pointer returned by kmalloc() or kmem_cache_alloc()
105362306a36Sopenharmony_ci *
105462306a36Sopenharmony_ci * If @object is NULL, no operation is performed.
105562306a36Sopenharmony_ci */
105662306a36Sopenharmony_civoid kfree(const void *object)
105762306a36Sopenharmony_ci{
105862306a36Sopenharmony_ci	struct folio *folio;
105962306a36Sopenharmony_ci	struct slab *slab;
106062306a36Sopenharmony_ci	struct kmem_cache *s;
106162306a36Sopenharmony_ci
106262306a36Sopenharmony_ci	trace_kfree(_RET_IP_, object);
106362306a36Sopenharmony_ci
106462306a36Sopenharmony_ci	if (unlikely(ZERO_OR_NULL_PTR(object)))
106562306a36Sopenharmony_ci		return;
106662306a36Sopenharmony_ci
106762306a36Sopenharmony_ci	folio = virt_to_folio(object);
106862306a36Sopenharmony_ci	if (unlikely(!folio_test_slab(folio))) {
106962306a36Sopenharmony_ci		free_large_kmalloc(folio, (void *)object);
107062306a36Sopenharmony_ci		return;
107162306a36Sopenharmony_ci	}
107262306a36Sopenharmony_ci
107362306a36Sopenharmony_ci	slab = folio_slab(folio);
107462306a36Sopenharmony_ci	s = slab->slab_cache;
107562306a36Sopenharmony_ci	__kmem_cache_free(s, (void *)object, _RET_IP_);
107662306a36Sopenharmony_ci}
107762306a36Sopenharmony_ciEXPORT_SYMBOL(kfree);
107862306a36Sopenharmony_ci
107962306a36Sopenharmony_ci/**
108062306a36Sopenharmony_ci * __ksize -- Report full size of underlying allocation
108162306a36Sopenharmony_ci * @object: pointer to the object
108262306a36Sopenharmony_ci *
108362306a36Sopenharmony_ci * This should only be used internally to query the true size of allocations.
108462306a36Sopenharmony_ci * It is not meant to be a way to discover the usable size of an allocation
108562306a36Sopenharmony_ci * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
108662306a36Sopenharmony_ci * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
108762306a36Sopenharmony_ci * and/or FORTIFY_SOURCE.
108862306a36Sopenharmony_ci *
108962306a36Sopenharmony_ci * Return: size of the actual memory used by @object in bytes
109062306a36Sopenharmony_ci */
109162306a36Sopenharmony_cisize_t __ksize(const void *object)
109262306a36Sopenharmony_ci{
109362306a36Sopenharmony_ci	struct folio *folio;
109462306a36Sopenharmony_ci
109562306a36Sopenharmony_ci	if (unlikely(object == ZERO_SIZE_PTR))
109662306a36Sopenharmony_ci		return 0;
109762306a36Sopenharmony_ci
109862306a36Sopenharmony_ci	folio = virt_to_folio(object);
109962306a36Sopenharmony_ci
110062306a36Sopenharmony_ci	if (unlikely(!folio_test_slab(folio))) {
110162306a36Sopenharmony_ci		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
110262306a36Sopenharmony_ci			return 0;
110362306a36Sopenharmony_ci		if (WARN_ON(object != folio_address(folio)))
110462306a36Sopenharmony_ci			return 0;
110562306a36Sopenharmony_ci		return folio_size(folio);
110662306a36Sopenharmony_ci	}
110762306a36Sopenharmony_ci
110862306a36Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
110962306a36Sopenharmony_ci	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
111062306a36Sopenharmony_ci#endif
111162306a36Sopenharmony_ci
111262306a36Sopenharmony_ci	return slab_ksize(folio_slab(folio)->slab_cache);
111362306a36Sopenharmony_ci}
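/*
 * Illustrative only: rather than asking __ksize()/ksize() how much slack an
 * allocation happens to have, a caller that wants to use the whole bucket
 * should round the request up front with kmalloc_size_roundup(), as the
 * comment above suggests.  The function name below is hypothetical.
 */
static void *example_alloc_full_bucket(size_t want, size_t *usable)
{
	size_t full = kmalloc_size_roundup(want);
	void *buf = kmalloc(full, GFP_KERNEL);

	if (buf)
		*usable = full;	/* every byte up to 'full' is fair game */
	return buf;
}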
111462306a36Sopenharmony_ci
111562306a36Sopenharmony_civoid *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
111662306a36Sopenharmony_ci{
111762306a36Sopenharmony_ci	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
111862306a36Sopenharmony_ci					    size, _RET_IP_);
111962306a36Sopenharmony_ci
112062306a36Sopenharmony_ci	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
112162306a36Sopenharmony_ci
112262306a36Sopenharmony_ci	ret = kasan_kmalloc(s, ret, size, gfpflags);
112362306a36Sopenharmony_ci	return ret;
112462306a36Sopenharmony_ci}
112562306a36Sopenharmony_ciEXPORT_SYMBOL(kmalloc_trace);
112662306a36Sopenharmony_ci
112762306a36Sopenharmony_civoid *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
112862306a36Sopenharmony_ci			 int node, size_t size)
112962306a36Sopenharmony_ci{
113062306a36Sopenharmony_ci	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
113162306a36Sopenharmony_ci
113262306a36Sopenharmony_ci	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
113362306a36Sopenharmony_ci
113462306a36Sopenharmony_ci	ret = kasan_kmalloc(s, ret, size, gfpflags);
113562306a36Sopenharmony_ci	return ret;
113662306a36Sopenharmony_ci}
113762306a36Sopenharmony_ciEXPORT_SYMBOL(kmalloc_node_trace);
113862306a36Sopenharmony_ci
113962306a36Sopenharmony_cigfp_t kmalloc_fix_flags(gfp_t flags)
114062306a36Sopenharmony_ci{
114162306a36Sopenharmony_ci	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
114262306a36Sopenharmony_ci
114362306a36Sopenharmony_ci	flags &= ~GFP_SLAB_BUG_MASK;
114462306a36Sopenharmony_ci	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
114562306a36Sopenharmony_ci			invalid_mask, &invalid_mask, flags, &flags);
114662306a36Sopenharmony_ci	dump_stack();
114762306a36Sopenharmony_ci
114862306a36Sopenharmony_ci	return flags;
114962306a36Sopenharmony_ci}
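/*
 * Illustrative only: GFP_SLAB_BUG_MASK covers bits the slab layer cannot
 * honour (__GFP_HIGHMEM, for example, since slab objects must be directly
 * addressable).  A hypothetical bad caller like the one below would
 * eventually hit the warning above and proceed with the offending bits
 * cleared.
 */
static void *__maybe_unused example_bad_gfp(size_t size)
{
	/* The offending __GFP_HIGHMEM bit is stripped by kmalloc_fix_flags(). */
	return kmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
}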
115062306a36Sopenharmony_ci
115162306a36Sopenharmony_ci/*
115262306a36Sopenharmony_ci * To avoid unnecessary overhead, we pass through large allocation requests
115362306a36Sopenharmony_ci * directly to the page allocator. We use __GFP_COMP because we will need to
115462306a36Sopenharmony_ci * know the allocation order to free the pages properly in kfree().
115562306a36Sopenharmony_ci */
115662306a36Sopenharmony_ci
115762306a36Sopenharmony_cistatic void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
115862306a36Sopenharmony_ci{
115962306a36Sopenharmony_ci	struct page *page;
116062306a36Sopenharmony_ci	void *ptr = NULL;
116162306a36Sopenharmony_ci	unsigned int order = get_order(size);
116262306a36Sopenharmony_ci
116362306a36Sopenharmony_ci	if (unlikely(flags & GFP_SLAB_BUG_MASK))
116462306a36Sopenharmony_ci		flags = kmalloc_fix_flags(flags);
116562306a36Sopenharmony_ci
116662306a36Sopenharmony_ci	flags |= __GFP_COMP;
116762306a36Sopenharmony_ci	page = alloc_pages_node(node, flags, order);
116862306a36Sopenharmony_ci	if (page) {
116962306a36Sopenharmony_ci		ptr = page_address(page);
117062306a36Sopenharmony_ci		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
117162306a36Sopenharmony_ci				      PAGE_SIZE << order);
117262306a36Sopenharmony_ci	}
117362306a36Sopenharmony_ci
117462306a36Sopenharmony_ci	ptr = kasan_kmalloc_large(ptr, size, flags);
117562306a36Sopenharmony_ci	/* As ptr might get tagged, call kmemleak hook after KASAN. */
117662306a36Sopenharmony_ci	kmemleak_alloc(ptr, size, 1, flags);
117762306a36Sopenharmony_ci	kmsan_kmalloc_large(ptr, size, flags);
117862306a36Sopenharmony_ci
117962306a36Sopenharmony_ci	return ptr;
118062306a36Sopenharmony_ci}
118162306a36Sopenharmony_ci
118262306a36Sopenharmony_civoid *kmalloc_large(size_t size, gfp_t flags)
118362306a36Sopenharmony_ci{
118462306a36Sopenharmony_ci	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
118562306a36Sopenharmony_ci
118662306a36Sopenharmony_ci	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
118762306a36Sopenharmony_ci		      flags, NUMA_NO_NODE);
118862306a36Sopenharmony_ci	return ret;
118962306a36Sopenharmony_ci}
119062306a36Sopenharmony_ciEXPORT_SYMBOL(kmalloc_large);
119162306a36Sopenharmony_ci
119262306a36Sopenharmony_civoid *kmalloc_large_node(size_t size, gfp_t flags, int node)
119362306a36Sopenharmony_ci{
119462306a36Sopenharmony_ci	void *ret = __kmalloc_large_node(size, flags, node);
119562306a36Sopenharmony_ci
119662306a36Sopenharmony_ci	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
119762306a36Sopenharmony_ci		      flags, node);
119862306a36Sopenharmony_ci	return ret;
119962306a36Sopenharmony_ci}
120062306a36Sopenharmony_ciEXPORT_SYMBOL(kmalloc_large_node);
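/*
 * Illustrative only: a request above KMALLOC_MAX_CACHE_SIZE is not served
 * from a kmalloc cache at all; it is rounded up to a whole number of pages,
 * so the usable size is PAGE_SIZE << get_order(size).  The function name
 * below is hypothetical.
 */
static void *example_alloc_big_table(size_t size)
{
	void *table = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (table)
		pr_debug("asked for %zu, got %lu usable bytes\n", size,
			 (unsigned long)(PAGE_SIZE << get_order(size)));
	return table;
}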
120162306a36Sopenharmony_ci
120262306a36Sopenharmony_ci#ifdef CONFIG_SLAB_FREELIST_RANDOM
120362306a36Sopenharmony_ci/* Randomize a generic freelist */
120462306a36Sopenharmony_cistatic void freelist_randomize(unsigned int *list,
120562306a36Sopenharmony_ci			       unsigned int count)
120662306a36Sopenharmony_ci{
120762306a36Sopenharmony_ci	unsigned int rand;
120862306a36Sopenharmony_ci	unsigned int i;
120962306a36Sopenharmony_ci
121062306a36Sopenharmony_ci	for (i = 0; i < count; i++)
121162306a36Sopenharmony_ci		list[i] = i;
121262306a36Sopenharmony_ci
121362306a36Sopenharmony_ci	/* Fisher-Yates shuffle */
121462306a36Sopenharmony_ci	for (i = count - 1; i > 0; i--) {
121562306a36Sopenharmony_ci		rand = get_random_u32_below(i + 1);
121662306a36Sopenharmony_ci		swap(list[i], list[rand]);
121762306a36Sopenharmony_ci	}
121862306a36Sopenharmony_ci}
121962306a36Sopenharmony_ci
122062306a36Sopenharmony_ci/* Create a random sequence per cache */
122162306a36Sopenharmony_ciint cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
122262306a36Sopenharmony_ci				    gfp_t gfp)
122362306a36Sopenharmony_ci{
122462306a36Sopenharmony_ci
122562306a36Sopenharmony_ci	if (count < 2 || cachep->random_seq)
122662306a36Sopenharmony_ci		return 0;
122762306a36Sopenharmony_ci
122862306a36Sopenharmony_ci	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
122962306a36Sopenharmony_ci	if (!cachep->random_seq)
123062306a36Sopenharmony_ci		return -ENOMEM;
123162306a36Sopenharmony_ci
123262306a36Sopenharmony_ci	freelist_randomize(cachep->random_seq, count);
123362306a36Sopenharmony_ci	return 0;
123462306a36Sopenharmony_ci}
123562306a36Sopenharmony_ci
123662306a36Sopenharmony_ci/* Destroy the per-cache random freelist sequence */
123762306a36Sopenharmony_civoid cache_random_seq_destroy(struct kmem_cache *cachep)
123862306a36Sopenharmony_ci{
123962306a36Sopenharmony_ci	kfree(cachep->random_seq);
124062306a36Sopenharmony_ci	cachep->random_seq = NULL;
124162306a36Sopenharmony_ci}
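/*
 * Illustrative only: a hypothetical self-check of the contract above --
 * after cache_random_seq_create(), cachep->random_seq holds a permutation
 * of 0..count-1 that an allocator can use as a randomized object order.
 * example_check_seq() is not a real kernel function.
 */
static int __maybe_unused example_check_seq(struct kmem_cache *cachep,
					    unsigned int count)
{
	unsigned int i;
	int err = cache_random_seq_create(cachep, count, GFP_KERNEL);

	if (err)
		return err;
	for (i = 0; i < count; i++)
		if (cachep->random_seq[i] >= count)
			return -EINVAL;	/* would indicate a broken shuffle */
	cache_random_seq_destroy(cachep);
	return 0;
}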
124262306a36Sopenharmony_ci#endif /* CONFIG_SLAB_FREELIST_RANDOM */
124362306a36Sopenharmony_ci
124462306a36Sopenharmony_ci#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
124562306a36Sopenharmony_ci#ifdef CONFIG_SLAB
124662306a36Sopenharmony_ci#define SLABINFO_RIGHTS (0600)
124762306a36Sopenharmony_ci#else
124862306a36Sopenharmony_ci#define SLABINFO_RIGHTS (0400)
124962306a36Sopenharmony_ci#endif
125062306a36Sopenharmony_ci
125162306a36Sopenharmony_cistatic void print_slabinfo_header(struct seq_file *m)
125262306a36Sopenharmony_ci{
125362306a36Sopenharmony_ci	/*
125462306a36Sopenharmony_ci	 * Output format version, so at least we can change it
125562306a36Sopenharmony_ci	 * without _too_ many complaints.
125662306a36Sopenharmony_ci	 */
125762306a36Sopenharmony_ci#ifdef CONFIG_DEBUG_SLAB
125862306a36Sopenharmony_ci	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
125962306a36Sopenharmony_ci#else
126062306a36Sopenharmony_ci	seq_puts(m, "slabinfo - version: 2.1\n");
126162306a36Sopenharmony_ci#endif
126262306a36Sopenharmony_ci	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
126362306a36Sopenharmony_ci	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
126462306a36Sopenharmony_ci	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
126562306a36Sopenharmony_ci#ifdef CONFIG_DEBUG_SLAB
126662306a36Sopenharmony_ci	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
126762306a36Sopenharmony_ci	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
126862306a36Sopenharmony_ci#endif
126962306a36Sopenharmony_ci	seq_putc(m, '\n');
127062306a36Sopenharmony_ci}
127162306a36Sopenharmony_ci
127262306a36Sopenharmony_cistatic void *slab_start(struct seq_file *m, loff_t *pos)
127362306a36Sopenharmony_ci{
127462306a36Sopenharmony_ci	mutex_lock(&slab_mutex);
127562306a36Sopenharmony_ci	return seq_list_start(&slab_caches, *pos);
127662306a36Sopenharmony_ci}
127762306a36Sopenharmony_ci
127862306a36Sopenharmony_cistatic void *slab_next(struct seq_file *m, void *p, loff_t *pos)
127962306a36Sopenharmony_ci{
128062306a36Sopenharmony_ci	return seq_list_next(p, &slab_caches, pos);
128162306a36Sopenharmony_ci}
128262306a36Sopenharmony_ci
128362306a36Sopenharmony_cistatic void slab_stop(struct seq_file *m, void *p)
128462306a36Sopenharmony_ci{
128562306a36Sopenharmony_ci	mutex_unlock(&slab_mutex);
128662306a36Sopenharmony_ci}
128762306a36Sopenharmony_ci
128862306a36Sopenharmony_cistatic void cache_show(struct kmem_cache *s, struct seq_file *m)
128962306a36Sopenharmony_ci{
129062306a36Sopenharmony_ci	struct slabinfo sinfo;
129162306a36Sopenharmony_ci
129262306a36Sopenharmony_ci	memset(&sinfo, 0, sizeof(sinfo));
129362306a36Sopenharmony_ci	get_slabinfo(s, &sinfo);
129462306a36Sopenharmony_ci
129562306a36Sopenharmony_ci	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
129662306a36Sopenharmony_ci		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
129762306a36Sopenharmony_ci		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
129862306a36Sopenharmony_ci
129962306a36Sopenharmony_ci	seq_printf(m, " : tunables %4u %4u %4u",
130062306a36Sopenharmony_ci		   sinfo.limit, sinfo.batchcount, sinfo.shared);
130162306a36Sopenharmony_ci	seq_printf(m, " : slabdata %6lu %6lu %6lu",
130262306a36Sopenharmony_ci		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
130362306a36Sopenharmony_ci	slabinfo_show_stats(m, s);
130462306a36Sopenharmony_ci	seq_putc(m, '\n');
130562306a36Sopenharmony_ci}
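/*
 * Illustrative only: with the header and format strings above, a single
 * cache line in /proc/slabinfo comes out roughly like this (the numbers
 * are made up):
 *
 * kmalloc-64        12800  12800     64   64    1 : tunables    0    0    0 : slabdata    200    200      0
 */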
130662306a36Sopenharmony_ci
130762306a36Sopenharmony_cistatic int slab_show(struct seq_file *m, void *p)
130862306a36Sopenharmony_ci{
130962306a36Sopenharmony_ci	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
131062306a36Sopenharmony_ci
131162306a36Sopenharmony_ci	if (p == slab_caches.next)
131262306a36Sopenharmony_ci		print_slabinfo_header(m);
131362306a36Sopenharmony_ci	cache_show(s, m);
131462306a36Sopenharmony_ci	return 0;
131562306a36Sopenharmony_ci}
131662306a36Sopenharmony_ci
131762306a36Sopenharmony_civoid dump_unreclaimable_slab(void)
131862306a36Sopenharmony_ci{
131962306a36Sopenharmony_ci	struct kmem_cache *s;
132062306a36Sopenharmony_ci	struct slabinfo sinfo;
132162306a36Sopenharmony_ci
132262306a36Sopenharmony_ci	/*
132362306a36Sopenharmony_ci	 * Acquiring slab_mutex here is risky since we do not want to
132462306a36Sopenharmony_ci	 * sleep in the OOM path, but traversing the list without holding
132562306a36Sopenharmony_ci	 * the mutex risks a crash.
132662306a36Sopenharmony_ci	 * Use mutex_trylock to protect the traversal and dump nothing
132762306a36Sopenharmony_ci	 * if the mutex cannot be acquired.
132862306a36Sopenharmony_ci	 */
132962306a36Sopenharmony_ci	if (!mutex_trylock(&slab_mutex)) {
133062306a36Sopenharmony_ci		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
133162306a36Sopenharmony_ci		return;
133262306a36Sopenharmony_ci	}
133362306a36Sopenharmony_ci
133462306a36Sopenharmony_ci	pr_info("Unreclaimable slab info:\n");
133562306a36Sopenharmony_ci	pr_info("Name                      Used          Total\n");
133662306a36Sopenharmony_ci
133762306a36Sopenharmony_ci	list_for_each_entry(s, &slab_caches, list) {
133862306a36Sopenharmony_ci		if (s->flags & SLAB_RECLAIM_ACCOUNT)
133962306a36Sopenharmony_ci			continue;
134062306a36Sopenharmony_ci
134162306a36Sopenharmony_ci		get_slabinfo(s, &sinfo);
134262306a36Sopenharmony_ci
134362306a36Sopenharmony_ci		if (sinfo.num_objs > 0)
134462306a36Sopenharmony_ci			pr_info("%-17s %10luKB %10luKB\n", s->name,
134562306a36Sopenharmony_ci				(sinfo.active_objs * s->size) / 1024,
134662306a36Sopenharmony_ci				(sinfo.num_objs * s->size) / 1024);
134762306a36Sopenharmony_ci	}
134862306a36Sopenharmony_ci	mutex_unlock(&slab_mutex);
134962306a36Sopenharmony_ci}
135062306a36Sopenharmony_ci
135162306a36Sopenharmony_ci/*
135262306a36Sopenharmony_ci * slabinfo_op - iterator that generates /proc/slabinfo
135362306a36Sopenharmony_ci *
135462306a36Sopenharmony_ci * Output layout:
135562306a36Sopenharmony_ci * cache-name
135662306a36Sopenharmony_ci * num-active-objs
135762306a36Sopenharmony_ci * total-objs
135862306a36Sopenharmony_ci * object size
135962306a36Sopenharmony_ci * num-active-slabs
136062306a36Sopenharmony_ci * total-slabs
136162306a36Sopenharmony_ci * num-pages-per-slab
136262306a36Sopenharmony_ci * + further values on SMP and with statistics enabled
136362306a36Sopenharmony_ci */
136462306a36Sopenharmony_cistatic const struct seq_operations slabinfo_op = {
136562306a36Sopenharmony_ci	.start = slab_start,
136662306a36Sopenharmony_ci	.next = slab_next,
136762306a36Sopenharmony_ci	.stop = slab_stop,
136862306a36Sopenharmony_ci	.show = slab_show,
136962306a36Sopenharmony_ci};
137062306a36Sopenharmony_ci
137162306a36Sopenharmony_cistatic int slabinfo_open(struct inode *inode, struct file *file)
137262306a36Sopenharmony_ci{
137362306a36Sopenharmony_ci	return seq_open(file, &slabinfo_op);
137462306a36Sopenharmony_ci}
137562306a36Sopenharmony_ci
137662306a36Sopenharmony_cistatic const struct proc_ops slabinfo_proc_ops = {
137762306a36Sopenharmony_ci	.proc_flags	= PROC_ENTRY_PERMANENT,
137862306a36Sopenharmony_ci	.proc_open	= slabinfo_open,
137962306a36Sopenharmony_ci	.proc_read	= seq_read,
138062306a36Sopenharmony_ci	.proc_write	= slabinfo_write,
138162306a36Sopenharmony_ci	.proc_lseek	= seq_lseek,
138262306a36Sopenharmony_ci	.proc_release	= seq_release,
138362306a36Sopenharmony_ci};
138462306a36Sopenharmony_ci
138562306a36Sopenharmony_cistatic int __init slab_proc_init(void)
138662306a36Sopenharmony_ci{
138762306a36Sopenharmony_ci	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
138862306a36Sopenharmony_ci	return 0;
138962306a36Sopenharmony_ci}
139062306a36Sopenharmony_cimodule_init(slab_proc_init);
139162306a36Sopenharmony_ci
139262306a36Sopenharmony_ci#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
139362306a36Sopenharmony_ci
139462306a36Sopenharmony_cistatic __always_inline __realloc_size(2) void *
139562306a36Sopenharmony_ci__do_krealloc(const void *p, size_t new_size, gfp_t flags)
139662306a36Sopenharmony_ci{
139762306a36Sopenharmony_ci	void *ret;
139862306a36Sopenharmony_ci	size_t ks;
139962306a36Sopenharmony_ci
140062306a36Sopenharmony_ci	/* Check for double-free before calling ksize. */
140162306a36Sopenharmony_ci	if (likely(!ZERO_OR_NULL_PTR(p))) {
140262306a36Sopenharmony_ci		if (!kasan_check_byte(p))
140362306a36Sopenharmony_ci			return NULL;
140462306a36Sopenharmony_ci		ks = ksize(p);
140562306a36Sopenharmony_ci	} else
140662306a36Sopenharmony_ci		ks = 0;
140762306a36Sopenharmony_ci
140862306a36Sopenharmony_ci	/* If the object still fits, repoison it precisely. */
140962306a36Sopenharmony_ci	if (ks >= new_size) {
141062306a36Sopenharmony_ci		p = kasan_krealloc((void *)p, new_size, flags);
141162306a36Sopenharmony_ci		return (void *)p;
141262306a36Sopenharmony_ci	}
141362306a36Sopenharmony_ci
141462306a36Sopenharmony_ci	ret = kmalloc_track_caller(new_size, flags);
141562306a36Sopenharmony_ci	if (ret && p) {
141662306a36Sopenharmony_ci		/* Disable KASAN checks as the object's redzone is accessed. */
141762306a36Sopenharmony_ci		kasan_disable_current();
141862306a36Sopenharmony_ci		memcpy(ret, kasan_reset_tag(p), ks);
141962306a36Sopenharmony_ci		kasan_enable_current();
142062306a36Sopenharmony_ci	}
142162306a36Sopenharmony_ci
142262306a36Sopenharmony_ci	return ret;
142362306a36Sopenharmony_ci}
142462306a36Sopenharmony_ci
142562306a36Sopenharmony_ci/**
142662306a36Sopenharmony_ci * krealloc - reallocate memory. The contents will remain unchanged.
142762306a36Sopenharmony_ci * @p: object to reallocate memory for.
142862306a36Sopenharmony_ci * @new_size: how many bytes of memory are required.
142962306a36Sopenharmony_ci * @flags: the type of memory to allocate.
143062306a36Sopenharmony_ci *
143162306a36Sopenharmony_ci * The contents of the object pointed to are preserved up to the
143262306a36Sopenharmony_ci * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
143362306a36Sopenharmony_ci * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
143462306a36Sopenharmony_ci * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
143562306a36Sopenharmony_ci *
143662306a36Sopenharmony_ci * Return: pointer to the allocated memory or %NULL in case of error
143762306a36Sopenharmony_ci */
143862306a36Sopenharmony_civoid *krealloc(const void *p, size_t new_size, gfp_t flags)
143962306a36Sopenharmony_ci{
144062306a36Sopenharmony_ci	void *ret;
144162306a36Sopenharmony_ci
144262306a36Sopenharmony_ci	if (unlikely(!new_size)) {
144362306a36Sopenharmony_ci		kfree(p);
144462306a36Sopenharmony_ci		return ZERO_SIZE_PTR;
144562306a36Sopenharmony_ci	}
144662306a36Sopenharmony_ci
144762306a36Sopenharmony_ci	ret = __do_krealloc(p, new_size, flags);
144862306a36Sopenharmony_ci	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
144962306a36Sopenharmony_ci		kfree(p);
145062306a36Sopenharmony_ci
145162306a36Sopenharmony_ci	return ret;
145262306a36Sopenharmony_ci}
145362306a36Sopenharmony_ciEXPORT_SYMBOL(krealloc);
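/*
 * Illustrative only: the usual krealloc() growth pattern.  On failure the
 * old buffer is left untouched, so the result must not be assigned over the
 * original pointer directly.  example_grow() and its parameters are
 * hypothetical.
 */
static int example_grow(void **bufp, size_t new_size)
{
	void *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is still valid and unchanged */
	*bufp = tmp;
	return 0;
}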
145462306a36Sopenharmony_ci
145562306a36Sopenharmony_ci/**
145662306a36Sopenharmony_ci * kfree_sensitive - Clear sensitive information in memory before freeing
145762306a36Sopenharmony_ci * @p: object to free memory of
145862306a36Sopenharmony_ci *
145962306a36Sopenharmony_ci * The memory of the object that @p points to is zeroed before it is freed.
146062306a36Sopenharmony_ci * If @p is %NULL, kfree_sensitive() does nothing.
146162306a36Sopenharmony_ci *
146262306a36Sopenharmony_ci * Note: this function zeroes the whole allocated buffer, which can be a
146362306a36Sopenharmony_ci * good deal bigger than the requested size passed to kmalloc(). So be
146462306a36Sopenharmony_ci * careful when using this function in performance-sensitive code.
146562306a36Sopenharmony_ci */
146662306a36Sopenharmony_civoid kfree_sensitive(const void *p)
146762306a36Sopenharmony_ci{
146862306a36Sopenharmony_ci	size_t ks;
146962306a36Sopenharmony_ci	void *mem = (void *)p;
147062306a36Sopenharmony_ci
147162306a36Sopenharmony_ci	ks = ksize(mem);
147262306a36Sopenharmony_ci	if (ks) {
147362306a36Sopenharmony_ci		kasan_unpoison_range(mem, ks);
147462306a36Sopenharmony_ci		memzero_explicit(mem, ks);
147562306a36Sopenharmony_ci	}
147662306a36Sopenharmony_ci	kfree(mem);
147762306a36Sopenharmony_ci}
147862306a36Sopenharmony_ciEXPORT_SYMBOL(kfree_sensitive);
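/*
 * Illustrative only: kfree_sensitive() is intended for buffers holding key
 * material or other secrets; it wipes the whole underlying allocation, not
 * just the requested length.  The names below are hypothetical.
 */
static void example_drop_key(u8 *key_material)
{
	/* memzero_explicit() over the full ksize() extent, then kfree(). */
	kfree_sensitive(key_material);
}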
147962306a36Sopenharmony_ci
148062306a36Sopenharmony_cisize_t ksize(const void *objp)
148162306a36Sopenharmony_ci{
148262306a36Sopenharmony_ci	/*
148362306a36Sopenharmony_ci	 * We need to first check that the pointer to the object is valid.
148462306a36Sopenharmony_ci	 * A KASAN report printed from ksize() is more useful than one
148562306a36Sopenharmony_ci	 * printed later, when the behaviour could already be undefined
148662306a36Sopenharmony_ci	 * due to a potential use-after-free or double-free.
148762306a36Sopenharmony_ci	 *
148862306a36Sopenharmony_ci	 * We use kasan_check_byte(), which is supported for the hardware
148962306a36Sopenharmony_ci	 * tag-based KASAN mode, unlike kasan_check_read/write().
149062306a36Sopenharmony_ci	 *
149162306a36Sopenharmony_ci	 * If the pointed to memory is invalid, we return 0 to avoid users of
149262306a36Sopenharmony_ci	 * ksize() writing to and potentially corrupting the memory region.
149362306a36Sopenharmony_ci	 *
149462306a36Sopenharmony_ci	 * We want to perform the check before __ksize(), to avoid potentially
149562306a36Sopenharmony_ci	 * crashing in __ksize() due to accessing invalid metadata.
149662306a36Sopenharmony_ci	 */
149762306a36Sopenharmony_ci	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
149862306a36Sopenharmony_ci		return 0;
149962306a36Sopenharmony_ci
150062306a36Sopenharmony_ci	return kfence_ksize(objp) ?: __ksize(objp);
150162306a36Sopenharmony_ci}
150262306a36Sopenharmony_ciEXPORT_SYMBOL(ksize);
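/*
 * Illustrative only: ksize() reports the usable size of a live allocation,
 * but new code that wants to exploit the slack should prefer
 * kmalloc_size_roundup() at allocation time (see __ksize() above).  The
 * function name below is hypothetical.
 */
static size_t example_report_waste(const void *obj, size_t requested)
{
	size_t usable = ksize(obj);	/* 0 for NULL or ZERO_SIZE_PTR */

	return usable > requested ? usable - requested : 0;
}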
150362306a36Sopenharmony_ci
150462306a36Sopenharmony_ci/* Tracepoints definitions. */
150562306a36Sopenharmony_ciEXPORT_TRACEPOINT_SYMBOL(kmalloc);
150662306a36Sopenharmony_ciEXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
150762306a36Sopenharmony_ciEXPORT_TRACEPOINT_SYMBOL(kfree);
150862306a36Sopenharmony_ciEXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
150962306a36Sopenharmony_ci
151062306a36Sopenharmony_ciint should_failslab(struct kmem_cache *s, gfp_t gfpflags)
151162306a36Sopenharmony_ci{
151262306a36Sopenharmony_ci	if (__should_failslab(s, gfpflags))
151362306a36Sopenharmony_ci		return -ENOMEM;
151462306a36Sopenharmony_ci	return 0;
151562306a36Sopenharmony_ci}
151662306a36Sopenharmony_ciALLOW_ERROR_INJECTION(should_failslab, ERRNO);