18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0
28c2ecf20Sopenharmony_ci/*
38c2ecf20Sopenharmony_ci * SLUB: A slab allocator that limits cache line use instead of queuing
48c2ecf20Sopenharmony_ci * objects in per cpu and per node lists.
58c2ecf20Sopenharmony_ci *
68c2ecf20Sopenharmony_ci * The allocator synchronizes using per slab locks or atomic operations
78c2ecf20Sopenharmony_ci * and only uses a centralized lock to manage a pool of partial slabs.
88c2ecf20Sopenharmony_ci *
98c2ecf20Sopenharmony_ci * (C) 2007 SGI, Christoph Lameter
108c2ecf20Sopenharmony_ci * (C) 2011 Linux Foundation, Christoph Lameter
118c2ecf20Sopenharmony_ci */
128c2ecf20Sopenharmony_ci
138c2ecf20Sopenharmony_ci#include <linux/mm.h>
148c2ecf20Sopenharmony_ci#include <linux/swap.h> /* struct reclaim_state */
158c2ecf20Sopenharmony_ci#include <linux/module.h>
168c2ecf20Sopenharmony_ci#include <linux/bit_spinlock.h>
178c2ecf20Sopenharmony_ci#include <linux/interrupt.h>
188c2ecf20Sopenharmony_ci#include <linux/swab.h>
198c2ecf20Sopenharmony_ci#include <linux/bitops.h>
208c2ecf20Sopenharmony_ci#include <linux/slab.h>
218c2ecf20Sopenharmony_ci#include "slab.h"
228c2ecf20Sopenharmony_ci#include <linux/proc_fs.h>
238c2ecf20Sopenharmony_ci#include <linux/seq_file.h>
248c2ecf20Sopenharmony_ci#include <linux/kasan.h>
258c2ecf20Sopenharmony_ci#include <linux/cpu.h>
268c2ecf20Sopenharmony_ci#include <linux/cpuset.h>
278c2ecf20Sopenharmony_ci#include <linux/mempolicy.h>
288c2ecf20Sopenharmony_ci#include <linux/ctype.h>
298c2ecf20Sopenharmony_ci#include <linux/debugobjects.h>
308c2ecf20Sopenharmony_ci#include <linux/kallsyms.h>
318c2ecf20Sopenharmony_ci#include <linux/memory.h>
328c2ecf20Sopenharmony_ci#include <linux/math64.h>
338c2ecf20Sopenharmony_ci#include <linux/fault-inject.h>
348c2ecf20Sopenharmony_ci#include <linux/stacktrace.h>
358c2ecf20Sopenharmony_ci#include <linux/prefetch.h>
368c2ecf20Sopenharmony_ci#include <linux/memcontrol.h>
378c2ecf20Sopenharmony_ci#include <linux/random.h>
388c2ecf20Sopenharmony_ci
398c2ecf20Sopenharmony_ci#include <trace/events/kmem.h>
408c2ecf20Sopenharmony_ci
418c2ecf20Sopenharmony_ci#include "internal.h"
428c2ecf20Sopenharmony_ci
438c2ecf20Sopenharmony_ci/*
448c2ecf20Sopenharmony_ci * Lock order:
458c2ecf20Sopenharmony_ci *   1. slab_mutex (Global Mutex)
468c2ecf20Sopenharmony_ci *   2. node->list_lock
478c2ecf20Sopenharmony_ci *   3. slab_lock(page) (Only on some arches and for debugging)
488c2ecf20Sopenharmony_ci *
498c2ecf20Sopenharmony_ci *   slab_mutex
508c2ecf20Sopenharmony_ci *
518c2ecf20Sopenharmony_ci *   The role of the slab_mutex is to protect the list of all the slabs
528c2ecf20Sopenharmony_ci *   and to synchronize major metadata changes to slab cache structures.
538c2ecf20Sopenharmony_ci *
548c2ecf20Sopenharmony_ci *   The slab_lock is only used for debugging and on arches that do not
558c2ecf20Sopenharmony_ci *   have the ability to do a cmpxchg_double. It only protects:
568c2ecf20Sopenharmony_ci *	A. page->freelist	-> List of free objects in a page
578c2ecf20Sopenharmony_ci *	B. page->inuse		-> Number of objects in use
588c2ecf20Sopenharmony_ci *	C. page->objects	-> Number of objects in page
598c2ecf20Sopenharmony_ci *	D. page->frozen		-> frozen state
608c2ecf20Sopenharmony_ci *
618c2ecf20Sopenharmony_ci *   If a slab is frozen then it is exempt from list management. It is not
628c2ecf20Sopenharmony_ci *   on any list except per cpu partial list. The processor that froze the
638c2ecf20Sopenharmony_ci *   slab is the one who can perform list operations on the page. Other
648c2ecf20Sopenharmony_ci *   processors may put objects onto the freelist but the processor that
658c2ecf20Sopenharmony_ci *   froze the slab is the only one that can retrieve the objects from the
668c2ecf20Sopenharmony_ci *   page's freelist.
678c2ecf20Sopenharmony_ci *
688c2ecf20Sopenharmony_ci *   The list_lock protects the partial and full list on each node and
698c2ecf20Sopenharmony_ci *   the partial slab counter. If taken then no new slabs may be added or
708c2ecf20Sopenharmony_ci *   removed from the lists nor make the number of partial slabs be modified.
718c2ecf20Sopenharmony_ci *   (Note that the total number of slabs is an atomic value that may be
728c2ecf20Sopenharmony_ci *   modified without taking the list lock).
738c2ecf20Sopenharmony_ci *
748c2ecf20Sopenharmony_ci *   The list_lock is a centralized lock and thus we avoid taking it as
758c2ecf20Sopenharmony_ci *   much as possible. As long as SLUB does not have to handle partial
768c2ecf20Sopenharmony_ci *   slabs, operations can continue without any centralized lock. F.e.
778c2ecf20Sopenharmony_ci *   allocating a long series of objects that fill up slabs does not require
788c2ecf20Sopenharmony_ci *   the list lock.
798c2ecf20Sopenharmony_ci *   Interrupts are disabled during allocation and deallocation in order to
808c2ecf20Sopenharmony_ci *   make the slab allocator safe to use in the context of an irq. In addition
818c2ecf20Sopenharmony_ci *   interrupts are disabled to ensure that the processor does not change
828c2ecf20Sopenharmony_ci *   while handling per_cpu slabs, due to kernel preemption.
838c2ecf20Sopenharmony_ci *
848c2ecf20Sopenharmony_ci * SLUB assigns one slab for allocation to each processor.
858c2ecf20Sopenharmony_ci * Allocations only occur from these slabs called cpu slabs.
868c2ecf20Sopenharmony_ci *
878c2ecf20Sopenharmony_ci * Slabs with free elements are kept on a partial list and during regular
888c2ecf20Sopenharmony_ci * operations no list for full slabs is used. If an object in a full slab is
898c2ecf20Sopenharmony_ci * freed then the slab will show up again on the partial lists.
908c2ecf20Sopenharmony_ci * We track full slabs for debugging purposes though because otherwise we
918c2ecf20Sopenharmony_ci * cannot scan all objects.
928c2ecf20Sopenharmony_ci *
938c2ecf20Sopenharmony_ci * Slabs are freed when they become empty. Teardown and setup is
948c2ecf20Sopenharmony_ci * minimal so we rely on the page allocators per cpu caches for
958c2ecf20Sopenharmony_ci * fast frees and allocs.
968c2ecf20Sopenharmony_ci *
978c2ecf20Sopenharmony_ci * page->frozen		The slab is frozen and exempt from list processing.
988c2ecf20Sopenharmony_ci * 			This means that the slab is dedicated to a purpose
998c2ecf20Sopenharmony_ci * 			such as satisfying allocations for a specific
1008c2ecf20Sopenharmony_ci * 			processor. Objects may be freed in the slab while
1018c2ecf20Sopenharmony_ci * 			it is frozen but slab_free will then skip the usual
1028c2ecf20Sopenharmony_ci * 			list operations. It is up to the processor holding
1038c2ecf20Sopenharmony_ci * 			the slab to integrate the slab into the slab lists
1048c2ecf20Sopenharmony_ci * 			when the slab is no longer needed.
1058c2ecf20Sopenharmony_ci *
1068c2ecf20Sopenharmony_ci * 			One use of this flag is to mark slabs that are
1078c2ecf20Sopenharmony_ci * 			used for allocations. Then such a slab becomes a cpu
1088c2ecf20Sopenharmony_ci * 			slab. The cpu slab may be equipped with an additional
1098c2ecf20Sopenharmony_ci * 			freelist that allows lockless access to
1108c2ecf20Sopenharmony_ci * 			free objects in addition to the regular freelist
1118c2ecf20Sopenharmony_ci * 			that requires the slab lock.
1128c2ecf20Sopenharmony_ci *
1138c2ecf20Sopenharmony_ci * SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
1148c2ecf20Sopenharmony_ci * 			options set. This moves	slab handling out of
1158c2ecf20Sopenharmony_ci * 			the fast path and disables lockless freelists.
1168c2ecf20Sopenharmony_ci */
1178c2ecf20Sopenharmony_ci
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
/*
 * Static key gating the slow debug paths; defaults to enabled only when
 * the kernel was built with CONFIG_SLUB_DEBUG_ON.
 */
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif
1258c2ecf20Sopenharmony_ci
/* True if any of the SLAB_DEBUG_FLAGS debug options is enabled for @s. */
static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}
1308c2ecf20Sopenharmony_ci
1318c2ecf20Sopenharmony_civoid *fixup_red_left(struct kmem_cache *s, void *p)
1328c2ecf20Sopenharmony_ci{
1338c2ecf20Sopenharmony_ci	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
1348c2ecf20Sopenharmony_ci		p += s->red_left_pad;
1358c2ecf20Sopenharmony_ci
1368c2ecf20Sopenharmony_ci	return p;
1378c2ecf20Sopenharmony_ci}
1388c2ecf20Sopenharmony_ci
1398c2ecf20Sopenharmony_cistatic inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
1408c2ecf20Sopenharmony_ci{
1418c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_CPU_PARTIAL
1428c2ecf20Sopenharmony_ci	return !kmem_cache_debug(s);
1438c2ecf20Sopenharmony_ci#else
1448c2ecf20Sopenharmony_ci	return false;
1458c2ecf20Sopenharmony_ci#endif
1468c2ecf20Sopenharmony_ci}
1478c2ecf20Sopenharmony_ci
1488c2ecf20Sopenharmony_ci/*
1498c2ecf20Sopenharmony_ci * Issues still to be resolved:
1508c2ecf20Sopenharmony_ci *
1518c2ecf20Sopenharmony_ci * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
1528c2ecf20Sopenharmony_ci *
1538c2ecf20Sopenharmony_ci * - Variable sizing of the per node arrays
1548c2ecf20Sopenharmony_ci */
1558c2ecf20Sopenharmony_ci
1568c2ecf20Sopenharmony_ci/* Enable to test recovery from slab corruption on boot */
1578c2ecf20Sopenharmony_ci#undef SLUB_RESILIENCY_TEST
1588c2ecf20Sopenharmony_ci
1598c2ecf20Sopenharmony_ci/* Enable to log cmpxchg failures */
1608c2ecf20Sopenharmony_ci#undef SLUB_DEBUG_CMPXCHG
1618c2ecf20Sopenharmony_ci
1628c2ecf20Sopenharmony_ci/*
1638c2ecf20Sopenharmony_ci * Minimum number of partial slabs. These will be left on the partial
1648c2ecf20Sopenharmony_ci * lists even if they are empty. kmem_cache_shrink may reclaim them.
1658c2ecf20Sopenharmony_ci */
1668c2ecf20Sopenharmony_ci#define MIN_PARTIAL 5
1678c2ecf20Sopenharmony_ci
1688c2ecf20Sopenharmony_ci/*
1698c2ecf20Sopenharmony_ci * Maximum number of desirable partial slabs.
1708c2ecf20Sopenharmony_ci * The existence of more partial slabs makes kmem_cache_shrink
1718c2ecf20Sopenharmony_ci * sort the partial list by the number of objects in use.
1728c2ecf20Sopenharmony_ci */
1738c2ecf20Sopenharmony_ci#define MAX_PARTIAL 10
1748c2ecf20Sopenharmony_ci
1758c2ecf20Sopenharmony_ci#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
1768c2ecf20Sopenharmony_ci				SLAB_POISON | SLAB_STORE_USER)
1778c2ecf20Sopenharmony_ci
1788c2ecf20Sopenharmony_ci/*
1798c2ecf20Sopenharmony_ci * These debug flags cannot use CMPXCHG because there might be consistency
1808c2ecf20Sopenharmony_ci * issues when checking or reading debug information
1818c2ecf20Sopenharmony_ci */
1828c2ecf20Sopenharmony_ci#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
1838c2ecf20Sopenharmony_ci				SLAB_TRACE)
1848c2ecf20Sopenharmony_ci
1858c2ecf20Sopenharmony_ci
1868c2ecf20Sopenharmony_ci/*
1878c2ecf20Sopenharmony_ci * Debugging flags that require metadata to be stored in the slab.  These get
1888c2ecf20Sopenharmony_ci * disabled when slub_debug=O is used and a cache's min order increases with
1898c2ecf20Sopenharmony_ci * metadata.
1908c2ecf20Sopenharmony_ci */
1918c2ecf20Sopenharmony_ci#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
1928c2ecf20Sopenharmony_ci
/* Page order and object count are packed into one word (see oo_make()). */
#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
2028c2ecf20Sopenharmony_ci
/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16	/* max stack-trace depth stored per event */
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};
2168c2ecf20Sopenharmony_ci
/* Index selecting one of the two per-object track slots. */
enum track_item { TRACK_ALLOC, TRACK_FREE };
2188c2ecf20Sopenharmony_ci
#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
/* Without sysfs the registration hooks become no-ops reporting success. */
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif
2278c2ecf20Sopenharmony_ci
/*
 * Bump the per-cpu statistics counter @si of cache @s.
 * Compiles to nothing unless CONFIG_SLUB_STATS is set.
 */
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
2388c2ecf20Sopenharmony_ci
2398c2ecf20Sopenharmony_ci/********************************************************************
2408c2ecf20Sopenharmony_ci * 			Core slab cache functions
2418c2ecf20Sopenharmony_ci *******************************************************************/
2428c2ecf20Sopenharmony_ci
/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 *
 * The transform is its own inverse: applying it twice with the same
 * @ptr_addr yields the original pointer, so this one helper both
 * encodes (set_freepointer) and decodes (get_freepointer) pointers.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slub() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turns
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
	return ptr;
#endif
}
2688c2ecf20Sopenharmony_ci
/*
 * Decode and return the freelist pointer stored at @ptr_addr,
 * undoing the hardening obfuscation when it is enabled.
 */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	unsigned long addr = (unsigned long)ptr_addr;
	void *stored = (void *)*(unsigned long *)addr;

	return freelist_ptr(s, stored, addr);
}
2768c2ecf20Sopenharmony_ci
2778c2ecf20Sopenharmony_cistatic inline void *get_freepointer(struct kmem_cache *s, void *object)
2788c2ecf20Sopenharmony_ci{
2798c2ecf20Sopenharmony_ci	return freelist_dereference(s, object + s->offset);
2808c2ecf20Sopenharmony_ci}
2818c2ecf20Sopenharmony_ci
/* Prefetch the cache line holding @object's free pointer. */
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}
2868c2ecf20Sopenharmony_ci
/*
 * Fetch @object's freelist pointer without risking a fault.
 *
 * Under debug_pagealloc the plain dereference may not be safe, so the
 * value is read with copy_from_kernel_nofault(); otherwise fall back
 * to the ordinary get_freepointer().
 */
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}
2998c2ecf20Sopenharmony_ci
/* Store (possibly obfuscated) free pointer @fp into @object. */
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}
3108c2ecf20Sopenharmony_ci
/*
 * Loop over all objects in a slab.
 * @__addr is the slab base address; the start is adjusted past the left
 * red zone (fixup_red_left) and __p advances in object-size steps.
 */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)
3168c2ecf20Sopenharmony_ci
3178c2ecf20Sopenharmony_cistatic inline unsigned int order_objects(unsigned int order, unsigned int size)
3188c2ecf20Sopenharmony_ci{
3198c2ecf20Sopenharmony_ci	return ((unsigned int)PAGE_SIZE << order) / size;
3208c2ecf20Sopenharmony_ci}
3218c2ecf20Sopenharmony_ci
3228c2ecf20Sopenharmony_cistatic inline struct kmem_cache_order_objects oo_make(unsigned int order,
3238c2ecf20Sopenharmony_ci		unsigned int size)
3248c2ecf20Sopenharmony_ci{
3258c2ecf20Sopenharmony_ci	struct kmem_cache_order_objects x = {
3268c2ecf20Sopenharmony_ci		(order << OO_SHIFT) + order_objects(order, size)
3278c2ecf20Sopenharmony_ci	};
3288c2ecf20Sopenharmony_ci
3298c2ecf20Sopenharmony_ci	return x;
3308c2ecf20Sopenharmony_ci}
3318c2ecf20Sopenharmony_ci
/* High bits of the packed word hold the page order. */
static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}
3368c2ecf20Sopenharmony_ci
/* Low OO_SHIFT bits of the packed word hold the object count. */
static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
3418c2ecf20Sopenharmony_ci
/*
 * Per slab locking using the pagelock: the lock is the PG_locked bit
 * in page->flags, taken as a bit spinlock.
 */
static __always_inline void slab_lock(struct page *page)
{
	/* Must be called on the head page of a compound page. */
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}
3508c2ecf20Sopenharmony_ci
/* Release the per-slab bit spinlock taken by slab_lock(). */
static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}
3568c2ecf20Sopenharmony_ci
/*
 * Atomically replace (freelist, counters) of @page if both still hold
 * the old values; returns true on success, false so the caller retries.
 *
 * Interrupts must be disabled (for the fallback code to work right).
 */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	/* Fast path: hardware double-word cmpxchg, no slab_lock needed. */
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		/* Fallback: emulate the double cmpxchg under slab_lock. */
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	/* Lost the race: pause briefly before the caller retries. */
	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
3948c2ecf20Sopenharmony_ci
/*
 * Same as __cmpxchg_double_slab() but callable with interrupts enabled:
 * the locked fallback disables interrupts itself around the update.
 */
static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	/* Lost the race: pause briefly before the caller retries. */
	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
4358c2ecf20Sopenharmony_ci
#ifdef CONFIG_SLUB_DEBUG
/*
 * Single shared scratch bitmap filled by get_map(); object_map_lock
 * serializes its users (released again via put_map()).
 */
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);
4398c2ecf20Sopenharmony_ci
/*
 * Determine a map of object in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 *
 * Returns the shared object_map bitmap with a bit set for every object
 * currently on the page's freelist (i.e. the free ones); the caller
 * must release it with put_map().
 */
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
	__acquires(&object_map_lock)
{
	void *p;
	void *addr = page_address(page);

	VM_BUG_ON(!irqs_disabled());

	spin_lock(&object_map_lock);

	bitmap_zero(object_map, page->objects);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), object_map);

	return object_map;
}
4638c2ecf20Sopenharmony_ci
/* Release the shared object_map bitmap acquired by get_map(). */
static void put_map(unsigned long *map) __releases(&object_map_lock)
{
	VM_BUG_ON(map != object_map);
	spin_unlock(&object_map_lock);
}
4698c2ecf20Sopenharmony_ci
4708c2ecf20Sopenharmony_cistatic inline unsigned int size_from_object(struct kmem_cache *s)
4718c2ecf20Sopenharmony_ci{
4728c2ecf20Sopenharmony_ci	if (s->flags & SLAB_RED_ZONE)
4738c2ecf20Sopenharmony_ci		return s->size - s->red_left_pad;
4748c2ecf20Sopenharmony_ci
4758c2ecf20Sopenharmony_ci	return s->size;
4768c2ecf20Sopenharmony_ci}
4778c2ecf20Sopenharmony_ci
4788c2ecf20Sopenharmony_cistatic inline void *restore_red_left(struct kmem_cache *s, void *p)
4798c2ecf20Sopenharmony_ci{
4808c2ecf20Sopenharmony_ci	if (s->flags & SLAB_RED_ZONE)
4818c2ecf20Sopenharmony_ci		p -= s->red_left_pad;
4828c2ecf20Sopenharmony_ci
4838c2ecf20Sopenharmony_ci	return p;
4848c2ecf20Sopenharmony_ci}
4858c2ecf20Sopenharmony_ci
/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;	/* no debug flags enabled by default */
#endif

/* Debug option string; presumably the slub_debug= parameter -- verify. */
static char *slub_debug_string;
/* Nonzero when metadata debug flags get dropped (see slub_debug=O above). */
static int disable_higher_order_debug;
4978c2ecf20Sopenharmony_ci
4988c2ecf20Sopenharmony_ci/*
4998c2ecf20Sopenharmony_ci * slub is about to manipulate internal object metadata.  This memory lies
5008c2ecf20Sopenharmony_ci * outside the range of the allocated object, so accessing it would normally
5018c2ecf20Sopenharmony_ci * be reported by kasan as a bounds error.  metadata_access_enable() is used
5028c2ecf20Sopenharmony_ci * to tell kasan that these accesses are OK.
5038c2ecf20Sopenharmony_ci */
/* Suspend KASAN checking so metadata outside the object can be accessed. */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}
5088c2ecf20Sopenharmony_ci
/* Re-arm KASAN checking after metadata_access_enable(). */
static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}
5138c2ecf20Sopenharmony_ci
5148c2ecf20Sopenharmony_ci/*
5158c2ecf20Sopenharmony_ci * Object debugging
5168c2ecf20Sopenharmony_ci */
5178c2ecf20Sopenharmony_ci
5188c2ecf20Sopenharmony_ci/* Verify that a pointer has an address that is valid within a slab page */
5198c2ecf20Sopenharmony_cistatic inline int check_valid_pointer(struct kmem_cache *s,
5208c2ecf20Sopenharmony_ci				struct page *page, void *object)
5218c2ecf20Sopenharmony_ci{
5228c2ecf20Sopenharmony_ci	void *base;
5238c2ecf20Sopenharmony_ci
5248c2ecf20Sopenharmony_ci	if (!object)
5258c2ecf20Sopenharmony_ci		return 1;
5268c2ecf20Sopenharmony_ci
5278c2ecf20Sopenharmony_ci	base = page_address(page);
5288c2ecf20Sopenharmony_ci	object = kasan_reset_tag(object);
5298c2ecf20Sopenharmony_ci	object = restore_red_left(s, object);
5308c2ecf20Sopenharmony_ci	if (object < base || object >= base + page->objects * s->size ||
5318c2ecf20Sopenharmony_ci		(object - base) % s->size) {
5328c2ecf20Sopenharmony_ci		return 0;
5338c2ecf20Sopenharmony_ci	}
5348c2ecf20Sopenharmony_ci
5358c2ecf20Sopenharmony_ci	return 1;
5368c2ecf20Sopenharmony_ci}
5378c2ecf20Sopenharmony_ci
/* Hex-dump @length bytes at @addr with KASAN checking suspended. */
static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
			length, 1);
	metadata_access_disable();
}
5468c2ecf20Sopenharmony_ci
/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	/* Free pointer lives past the object when offset >= inuse. */
	return s->offset >= s->inuse;
}
5548c2ecf20Sopenharmony_ci
5558c2ecf20Sopenharmony_ci/*
5568c2ecf20Sopenharmony_ci * Return offset of the end of info block which is inuse + free pointer if
5578c2ecf20Sopenharmony_ci * not overlapping with object.
5588c2ecf20Sopenharmony_ci */
5598c2ecf20Sopenharmony_cistatic inline unsigned int get_info_end(struct kmem_cache *s)
5608c2ecf20Sopenharmony_ci{
5618c2ecf20Sopenharmony_ci	if (freeptr_outside_object(s))
5628c2ecf20Sopenharmony_ci		return s->inuse + sizeof(void *);
5638c2ecf20Sopenharmony_ci	else
5648c2ecf20Sopenharmony_ci		return s->inuse;
5658c2ecf20Sopenharmony_ci}
5668c2ecf20Sopenharmony_ci
5678c2ecf20Sopenharmony_cistatic struct track *get_track(struct kmem_cache *s, void *object,
5688c2ecf20Sopenharmony_ci	enum track_item alloc)
5698c2ecf20Sopenharmony_ci{
5708c2ecf20Sopenharmony_ci	struct track *p;
5718c2ecf20Sopenharmony_ci
5728c2ecf20Sopenharmony_ci	p = object + get_info_end(s);
5738c2ecf20Sopenharmony_ci
5748c2ecf20Sopenharmony_ci	return p + alloc;
5758c2ecf20Sopenharmony_ci}
5768c2ecf20Sopenharmony_ci
/*
 * Record an alloc or free event (@alloc selects the slot) for @object:
 * caller address, cpu, pid, jiffies timestamp and, with
 * CONFIG_STACKTRACE, a short backtrace.  addr == 0 clears the slot.
 */
static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		unsigned int nr_entries;

		/* Skip 3 frames so the trace omits the tracking internals. */
		metadata_access_enable();
		nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
		metadata_access_disable();

		/* Zero-terminate a partially filled trace. */
		if (nr_entries < TRACK_ADDRS_COUNT)
			p->addrs[nr_entries] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else {
		memset(p, 0, sizeof(struct track));
	}
}
6018c2ecf20Sopenharmony_ci
6028c2ecf20Sopenharmony_cistatic void init_tracking(struct kmem_cache *s, void *object)
6038c2ecf20Sopenharmony_ci{
6048c2ecf20Sopenharmony_ci	if (!(s->flags & SLAB_STORE_USER))
6058c2ecf20Sopenharmony_ci		return;
6068c2ecf20Sopenharmony_ci
6078c2ecf20Sopenharmony_ci	set_track(s, object, TRACK_FREE, 0UL);
6088c2ecf20Sopenharmony_ci	set_track(s, object, TRACK_ALLOC, 0UL);
6098c2ecf20Sopenharmony_ci}
6108c2ecf20Sopenharmony_ci
/*
 * Print one track record (label @s) with age relative to @pr_time.
 * Silent when no address was ever recorded.
 */
static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	if (!t->addr)
		return;

	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		/* The saved trace is zero-terminated unless every slot is used. */
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}
6298c2ecf20Sopenharmony_ci
6308c2ecf20Sopenharmony_civoid print_tracking(struct kmem_cache *s, void *object)
6318c2ecf20Sopenharmony_ci{
6328c2ecf20Sopenharmony_ci	unsigned long pr_time = jiffies;
6338c2ecf20Sopenharmony_ci	if (!(s->flags & SLAB_STORE_USER))
6348c2ecf20Sopenharmony_ci		return;
6358c2ecf20Sopenharmony_ci
6368c2ecf20Sopenharmony_ci	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
6378c2ecf20Sopenharmony_ci	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
6388c2ecf20Sopenharmony_ci}
6398c2ecf20Sopenharmony_ci
/* Dump the identifying state of a slab page for debug reports. */
static void print_page_info(struct page *page)
{
	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
	       page, page->objects, page->inuse, page->freelist, page->flags);

}
6468c2ecf20Sopenharmony_ci
/*
 * Print a framed "BUG" banner for cache @s and taint the kernel.
 * fmt/... describe the specific inconsistency that was found.
 */
static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	/* %pV consumes the va_list, so va_end() must come after the pr_err. */
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	va_end(args);
}
6628c2ecf20Sopenharmony_ci
/* Print a "FIX" line describing the repair that was applied to cache @s. */
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}
6748c2ecf20Sopenharmony_ci
6758c2ecf20Sopenharmony_cistatic bool freelist_corrupted(struct kmem_cache *s, struct page *page,
6768c2ecf20Sopenharmony_ci			       void **freelist, void *nextfree)
6778c2ecf20Sopenharmony_ci{
6788c2ecf20Sopenharmony_ci	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
6798c2ecf20Sopenharmony_ci	    !check_valid_pointer(s, page, nextfree) && freelist) {
6808c2ecf20Sopenharmony_ci		object_err(s, page, *freelist, "Freechain corrupt");
6818c2ecf20Sopenharmony_ci		*freelist = NULL;
6828c2ecf20Sopenharmony_ci		slab_fix(s, "Isolate corrupted freechain");
6838c2ecf20Sopenharmony_ci		return true;
6848c2ecf20Sopenharmony_ci	}
6858c2ecf20Sopenharmony_ci
6868c2ecf20Sopenharmony_ci	return false;
6878c2ecf20Sopenharmony_ci}
6888c2ecf20Sopenharmony_ci
/*
 * Dump everything known about object @p: tracking info, the page it lives
 * on, surrounding red zones, the object bytes and any trailing metadata
 * padding, followed by a stack trace.
 */
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	/* Left red zone sits immediately before the object. */
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	/* Cap the object dump at one page worth of bytes. */
	print_section(KERN_ERR,         "Object   ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	/* Right red zone fills the gap between object_size and inuse. */
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
			s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding  ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}
7278c2ecf20Sopenharmony_ci
/* Report a per-object inconsistency: BUG banner plus full object dump. */
void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}
7348c2ecf20Sopenharmony_ci
/*
 * Report a slab-wide inconsistency (no specific object to dump):
 * BUG banner, page info and a stack trace.
 */
static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	/* Format into a buffer first; slab_bug() takes a plain string. */
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}
7488c2ecf20Sopenharmony_ci
7498c2ecf20Sopenharmony_cistatic void init_object(struct kmem_cache *s, void *object, u8 val)
7508c2ecf20Sopenharmony_ci{
7518c2ecf20Sopenharmony_ci	u8 *p = object;
7528c2ecf20Sopenharmony_ci
7538c2ecf20Sopenharmony_ci	if (s->flags & SLAB_RED_ZONE)
7548c2ecf20Sopenharmony_ci		memset(p - s->red_left_pad, val, s->red_left_pad);
7558c2ecf20Sopenharmony_ci
7568c2ecf20Sopenharmony_ci	if (s->flags & __OBJECT_POISON) {
7578c2ecf20Sopenharmony_ci		memset(p, POISON_FREE, s->object_size - 1);
7588c2ecf20Sopenharmony_ci		p[s->object_size - 1] = POISON_END;
7598c2ecf20Sopenharmony_ci	}
7608c2ecf20Sopenharmony_ci
7618c2ecf20Sopenharmony_ci	if (s->flags & SLAB_RED_ZONE)
7628c2ecf20Sopenharmony_ci		memset(p + s->object_size, val, s->inuse - s->object_size);
7638c2ecf20Sopenharmony_ci}
7648c2ecf20Sopenharmony_ci
7658c2ecf20Sopenharmony_cistatic void restore_bytes(struct kmem_cache *s, char *message, u8 data,
7668c2ecf20Sopenharmony_ci						void *from, void *to)
7678c2ecf20Sopenharmony_ci{
7688c2ecf20Sopenharmony_ci	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
7698c2ecf20Sopenharmony_ci	memset(from, data, to - from);
7708c2ecf20Sopenharmony_ci}
7718c2ecf20Sopenharmony_ci
/*
 * Verify that @bytes bytes at @start all equal @value.  On mismatch,
 * report the corrupted range (labelled @what), repair it and return 0;
 * return 1 when the range is clean.
 */
static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = page_address(page);

	metadata_access_enable();
	fault = memchr_inv(start, value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	/* Trim trailing bytes that still hold the expected value. */
	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	pr_err("INFO: 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault - addr,
					fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}
7998c2ecf20Sopenharmony_ci
8008c2ecf20Sopenharmony_ci/*
8018c2ecf20Sopenharmony_ci * Object layout:
8028c2ecf20Sopenharmony_ci *
8038c2ecf20Sopenharmony_ci * object address
8048c2ecf20Sopenharmony_ci * 	Bytes of the object to be managed.
8058c2ecf20Sopenharmony_ci * 	If the freepointer may overlay the object then the free
8068c2ecf20Sopenharmony_ci *	pointer is at the middle of the object.
8078c2ecf20Sopenharmony_ci *
8088c2ecf20Sopenharmony_ci * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
8098c2ecf20Sopenharmony_ci * 	0xa5 (POISON_END)
8108c2ecf20Sopenharmony_ci *
8118c2ecf20Sopenharmony_ci * object + s->object_size
8128c2ecf20Sopenharmony_ci * 	Padding to reach word boundary. This is also used for Redzoning.
8138c2ecf20Sopenharmony_ci * 	Padding is extended by another word if Redzoning is enabled and
8148c2ecf20Sopenharmony_ci * 	object_size == inuse.
8158c2ecf20Sopenharmony_ci *
8168c2ecf20Sopenharmony_ci * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
8178c2ecf20Sopenharmony_ci * 	0xcc (RED_ACTIVE) for objects in use.
8188c2ecf20Sopenharmony_ci *
8198c2ecf20Sopenharmony_ci * object + s->inuse
8208c2ecf20Sopenharmony_ci * 	Meta data starts here.
8218c2ecf20Sopenharmony_ci *
8228c2ecf20Sopenharmony_ci * 	A. Free pointer (if we cannot overwrite object on free)
8238c2ecf20Sopenharmony_ci * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
8258c2ecf20Sopenharmony_ci * 		one word if debugging is on to be able to detect writes
8268c2ecf20Sopenharmony_ci * 		before the word boundary.
8278c2ecf20Sopenharmony_ci *
8288c2ecf20Sopenharmony_ci *	Padding is done using 0x5a (POISON_INUSE)
8298c2ecf20Sopenharmony_ci *
8308c2ecf20Sopenharmony_ci * object + s->size
8318c2ecf20Sopenharmony_ci * 	Nothing is used beyond s->size.
8328c2ecf20Sopenharmony_ci *
8338c2ecf20Sopenharmony_ci * If slabcaches are merged then the object_size and inuse boundaries are mostly
8348c2ecf20Sopenharmony_ci * ignored. And therefore no slab options that rely on these boundaries
8358c2ecf20Sopenharmony_ci * may be used with merged slabcaches.
8368c2ecf20Sopenharmony_ci */
8378c2ecf20Sopenharmony_ci
/*
 * Check the POISON_INUSE padding between the end of the object metadata
 * (info block, tracking, KASAN data) and s->size.  Returns 1 when intact
 * or absent; reports and repairs otherwise.
 */
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		/* No padding present. */
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}
8548c2ecf20Sopenharmony_ci
/*
 * Check the pad bytes at the end of a slab page.  Returns 1 when clean
 * (or when poisoning is off); otherwise reports, restores the poison
 * pattern and returns 0.
 */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = page_size(page);
	end = start + length;
	/* The pad is the page tail too small to hold another object. */
	remainder = length % s->size;
	if (!remainder)
		return 1;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(pad, POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	/* Narrow the report down to the last corrupted byte. */
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
			fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
	return 0;
}
8918c2ecf20Sopenharmony_ci
/*
 * Validate one object's red zones, poison and free pointer.  @val is the
 * red zone byte expected for the object's state (SLUB_RED_ACTIVE for
 * allocated, SLUB_RED_INACTIVE for free).  Returns 0 on fatal corruption.
 */
static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		/* No red zoning: alignment padding should still hold the poison. */
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		/* Free objects must hold the poison pattern plus the end marker. */
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "End Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}
9478c2ecf20Sopenharmony_ci
/*
 * Sanity-check page-level state: the slab bit and the object counters
 * against the bounds implied by the page order.  Also verifies (and
 * repairs) the trailing slab padding.  Returns 0 on fatal corruption.
 */
static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}
9748c2ecf20Sopenharmony_ci
/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 * Returns 1 if @search was found; with search == NULL it walks (and
 * repairs) the whole list and returns 1 when the counters were sane.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	/* Walk the chain; the nr bound stops infinite loops on cycles. */
	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				/* Truncate the chain at the last good object. */
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				/* The head itself is bad: drop the whole freelist. */
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	/* Repair the counters so later checks operate on sane values. */
	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}
10278c2ecf20Sopenharmony_ci
/*
 * Emit a trace line when SLAB_TRACE is set; @alloc is 1 for allocation,
 * 0 for free.  Frees additionally dump the object contents.
 */
static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}
10458c2ecf20Sopenharmony_ci
10468c2ecf20Sopenharmony_ci/*
10478c2ecf20Sopenharmony_ci * Tracking of fully allocated slabs for debugging purposes.
10488c2ecf20Sopenharmony_ci */
10498c2ecf20Sopenharmony_cistatic void add_full(struct kmem_cache *s,
10508c2ecf20Sopenharmony_ci	struct kmem_cache_node *n, struct page *page)
10518c2ecf20Sopenharmony_ci{
10528c2ecf20Sopenharmony_ci	if (!(s->flags & SLAB_STORE_USER))
10538c2ecf20Sopenharmony_ci		return;
10548c2ecf20Sopenharmony_ci
10558c2ecf20Sopenharmony_ci	lockdep_assert_held(&n->list_lock);
10568c2ecf20Sopenharmony_ci	list_add(&page->slab_list, &n->full);
10578c2ecf20Sopenharmony_ci}
10588c2ecf20Sopenharmony_ci
10598c2ecf20Sopenharmony_cistatic void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
10608c2ecf20Sopenharmony_ci{
10618c2ecf20Sopenharmony_ci	if (!(s->flags & SLAB_STORE_USER))
10628c2ecf20Sopenharmony_ci		return;
10638c2ecf20Sopenharmony_ci
10648c2ecf20Sopenharmony_ci	lockdep_assert_held(&n->list_lock);
10658c2ecf20Sopenharmony_ci	list_del(&page->slab_list);
10668c2ecf20Sopenharmony_ci}
10678c2ecf20Sopenharmony_ci
10688c2ecf20Sopenharmony_ci/* Tracking of the number of slabs for debugging purposes */
10698c2ecf20Sopenharmony_cistatic inline unsigned long slabs_node(struct kmem_cache *s, int node)
10708c2ecf20Sopenharmony_ci{
10718c2ecf20Sopenharmony_ci	struct kmem_cache_node *n = get_node(s, node);
10728c2ecf20Sopenharmony_ci
10738c2ecf20Sopenharmony_ci	return atomic_long_read(&n->nr_slabs);
10748c2ecf20Sopenharmony_ci}
10758c2ecf20Sopenharmony_ci
/* Slab count for a node whose kmem_cache_node is already in hand. */
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}
10808c2ecf20Sopenharmony_ci
/* Account one new slab carrying @objects objects on @node. */
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
/*
 * Account removal of a slab with @objects objects from @node.
 * NOTE(review): unlike inc_slabs_node() there is no NULL check here —
 * n is dereferenced unconditionally, presumably because by the time a
 * slab is freed the node structure must exist.
 */
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}
11038c2ecf20Sopenharmony_ci
11048c2ecf20Sopenharmony_ci/* Object debug checks for alloc/free paths */
/*
 * Initialize a new object's debug state (inactive red zone/poison and
 * cleared track records) when any relevant debug flag is enabled.
 */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}
11148c2ecf20Sopenharmony_ci
/*
 * Fill a freshly allocated slab page with POISON_INUSE so writes to
 * unallocated areas are detectable (SLAB_POISON only).
 */
static
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(addr, POISON_INUSE, page_size(page));
	metadata_access_disable();
}
11258c2ecf20Sopenharmony_ci
/*
 * Consistency checks run on the allocation path: slab state, object
 * pointer validity and the object's inactive debug state.  Returns 0
 * when corruption was detected.
 */
static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct page *page, void *object)
{
	if (!check_slab(s, page))
		return 0;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}
11428c2ecf20Sopenharmony_ci
/*
 * Debug hook for the slow allocation path: optionally verify the object,
 * record the allocation track, trace the event and set up the object's
 * active red zone/poison.  Returns 0 on failure after marking the slab
 * fully used to contain the damage.
 */
static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, page, object))
			goto bad;
	}

	/* Success perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}
11728c2ecf20Sopenharmony_ci
/*
 * Consistency checks run on the free path: pointer validity, double
 * free, the object's active debug state, and that the object actually
 * belongs to this cache.  Returns 0 when the free must be rejected.
 */
static inline int free_consistency_checks(struct kmem_cache *s,
		struct page *page, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		return 0;
	}

	/* Already on the freelist means a double free. */
	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		return 0;

	/* Object freed into a cache other than the one it came from? */
	if (unlikely(s != page->slab_cache)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!page->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}
12048c2ecf20Sopenharmony_ci
/*
 * Supports checking bulk free of a constructed freelist.
 *
 * Walks the freelist from @head to @tail, running the cache's enabled debug
 * checks on every object and recording free-tracking / trace / poison state.
 * Takes the node's list_lock and the slab lock for the duration of the walk.
 *
 * Returns 1 if every object passed, 0 if any check failed (in which case the
 * offending object is reported and must not be freed).
 */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	void *object = head;
	int cnt = 0;		/* objects processed so far */
	unsigned long flags;
	int ret = 0;		/* assume failure until the whole list passes */

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(page);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, page))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, page, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	/* The caller's count and the actual list length should agree */
	if (cnt != bulk_cnt)
		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}
12578c2ecf20Sopenharmony_ci
/*
 * Parse a block of slub_debug options. Blocks are delimited by ';'
 *
 * @str:    start of block
 * @flags:  returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs:  return start of list of slabs, or NULL when there's no list
 * @init:   assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			/* '-' explicitly requests no debugging at all */
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			/* Only warn during boot parsing; stay quiet later */
			if (init)
				pr_err("slub_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	/* Anything after ',' is this block's comma-separated slab name list */
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}
13458c2ecf20Sopenharmony_ci
/*
 * Process the "slub_debug=..." kernel boot parameter.
 *
 * The value may contain multiple ';'-separated blocks; each block is either
 * a set of global debug flags or flags restricted to a slab name list (the
 * per-cache matching happens later in kmem_cache_flags()). Always returns 1
 * so the parameter is treated as handled.
 */
static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	slab_flags_t global_flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	global_flags = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			/* A block without a slab list changes the global flags */
			global_flags = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with list of
	 * slabs means debugging is only changed for those slabs, so the global
	 * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			global_flags = slub_debug;
		/* Keep the raw string for per-cache parsing in kmem_cache_flags() */
		slub_debug_string = saved_str;
	}
out:
	slub_debug = global_flags;
	/* Static key gates the debug paths; enable it if anything is on */
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

__setup("slub_debug", setup_slub_debug);
13988c2ecf20Sopenharmony_ci
/*
 * kmem_cache_flags - apply debugging options to the cache
 * @object_size:	the size of an object without meta data
 * @flags:		flags to set
 * @name:		name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the select slabs will receive the debug option(s).
 *
 * Return: @flags with the first matching block's debug flags OR'ed in, or
 * with the global slub_debug flags when no block's slab list matches @name.
 */
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;

	len = strlen(name);
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			/* A list entry ends at ',' or at the next block's start */
			end = strchrnul(iter, ',');
			if (next_block && next_block < end)
				end = next_block - 1;

			/* A '*' turns the entry into a prefix match up to the '*' */
			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				/*
				 * Compare over the longer of the two lengths so
				 * only an exact match (not a prefix) succeeds.
				 */
				cmplen = max_t(size_t, len, (end - iter));

			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}

			if (!*end || *end == ';')
				break;
			iter = end + 1;
		}
	}

	return flags | slub_debug;
}
14538c2ecf20Sopenharmony_ci#else /* !CONFIG_SLUB_DEBUG */
/*
 * CONFIG_SLUB_DEBUG is disabled: provide no-op stubs and trivial constants
 * so callers can be written without #ifdefs.
 */
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}
static inline
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr) { return 0; }

/* Checks always "pass" when debugging is compiled out */
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

/* Without debugging, freelist corruption cannot be detected */
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	return false;
}
14988c2ecf20Sopenharmony_ci#endif /* CONFIG_SLUB_DEBUG */
14998c2ecf20Sopenharmony_ci
15008c2ecf20Sopenharmony_ci/*
15018c2ecf20Sopenharmony_ci * Hooks for other subsystems that check memory allocations. In a typical
15028c2ecf20Sopenharmony_ci * production configuration these hooks all should produce no code at all.
15038c2ecf20Sopenharmony_ci */
15048c2ecf20Sopenharmony_cistatic inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
15058c2ecf20Sopenharmony_ci{
15068c2ecf20Sopenharmony_ci	ptr = kasan_kmalloc_large(ptr, size, flags);
15078c2ecf20Sopenharmony_ci	/* As ptr might get tagged, call kmemleak hook after KASAN. */
15088c2ecf20Sopenharmony_ci	kmemleak_alloc(ptr, size, 1, flags);
15098c2ecf20Sopenharmony_ci	return ptr;
15108c2ecf20Sopenharmony_ci}
15118c2ecf20Sopenharmony_ci
/*
 * Debug hook for freeing large (page allocator backed) kmalloc objects:
 * unregister from kmemleak first, then run KASAN's large-free hook.
 */
static __always_inline void kfree_hook(void *x)
{
	kmemleak_free(x);
	kasan_kfree_large(x, _RET_IP_);
}
15178c2ecf20Sopenharmony_ci
/*
 * Per-object debug hook run on every slab free.
 *
 * Returns true if KASAN wants to delay the object's reuse (see the
 * quarantine note below), in which case the caller must not put the
 * object back on a freelist yet.
 */
static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);

	/*
	 * Trouble is that we may no longer disable interrupts in the fast path
	 * So in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
#ifdef CONFIG_LOCKDEP
	{
		unsigned long flags;

		local_irq_save(flags);
		debug_check_no_locks_freed(x, s->object_size);
		local_irq_restore(flags);
	}
#endif
	/* SLAB_DEBUG_OBJECTS caches manage their own debugobjects state */
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);

	/* Use KCSAN to help debug racy use-after-free. */
	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
		__kcsan_check_access(x, s->object_size,
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);

	/* KASAN might put x into memory quarantine, delaying its reuse */
	return kasan_slab_free(s, x, _RET_IP_);
}
15478c2ecf20Sopenharmony_ci
/*
 * Run slab_free_hook() on every object of the constructed freelist
 * [*head, *tail] and rebuild the list with only the objects whose reuse
 * does not have to be delayed. *cnt is decremented for each object held
 * back. On return, *tail is NULL when the rebuilt list has at most one
 * object (mirroring the single-object convention used on entry).
 *
 * Returns true if at least one object remains for the caller to free.
 */
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
					   void **head, void **tail,
					   int *cnt)
{

	void *object;
	void *next = *head;
	void *old_tail = *tail ? *tail : *head;
	int rsize;

	/* Head and tail of the reconstructed freelist */
	*head = NULL;
	*tail = NULL;

	do {
		object = next;
		next = get_freepointer(s, object);

		if (slab_want_init_on_free(s)) {
			/*
			 * Clear the object and the metadata, but don't touch
			 * the redzone.
			 */
			memset(object, 0, s->object_size);
			rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
							   : 0;
			memset((char *)object + s->inuse, 0,
			       s->size - s->inuse - rsize);

		}
		/* If object's reuse doesn't have to be delayed */
		if (!slab_free_hook(s, object)) {
			/* Move object to the new freelist */
			set_freepointer(s, object, *head);
			*head = object;
			if (!*tail)
				*tail = object;
		} else {
			/*
			 * Adjust the reconstructed freelist depth
			 * accordingly if object's reuse is delayed.
			 */
			--(*cnt);
		}
	} while (object != old_tail);

	/* Single-object (or empty) list: signal with a NULL tail */
	if (*head == *tail)
		*tail = NULL;

	return *head != NULL;
}
15998c2ecf20Sopenharmony_ci
/*
 * Initialize one object inside a freshly allocated slab: set up debug
 * metadata, let KASAN initialize the object (which may return a different,
 * e.g. tagged, pointer), and run the constructor with the object data
 * temporarily unpoisoned. Returns the pointer to use from now on.
 */
static void *setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	object = kasan_init_slab_obj(s, object);
	if (unlikely(s->ctor)) {
		kasan_unpoison_object_data(s, object);
		s->ctor(object);
		kasan_poison_object_data(s, object);
	}
	return object;
}
16128c2ecf20Sopenharmony_ci
16138c2ecf20Sopenharmony_ci/*
16148c2ecf20Sopenharmony_ci * Slab allocation and freeing
16158c2ecf20Sopenharmony_ci */
16168c2ecf20Sopenharmony_cistatic inline struct page *alloc_slab_page(struct kmem_cache *s,
16178c2ecf20Sopenharmony_ci		gfp_t flags, int node, struct kmem_cache_order_objects oo)
16188c2ecf20Sopenharmony_ci{
16198c2ecf20Sopenharmony_ci	struct page *page;
16208c2ecf20Sopenharmony_ci	unsigned int order = oo_order(oo);
16218c2ecf20Sopenharmony_ci
16228c2ecf20Sopenharmony_ci	if (node == NUMA_NO_NODE)
16238c2ecf20Sopenharmony_ci		page = alloc_pages(flags, order);
16248c2ecf20Sopenharmony_ci	else
16258c2ecf20Sopenharmony_ci		page = __alloc_pages_node(node, flags, order);
16268c2ecf20Sopenharmony_ci
16278c2ecf20Sopenharmony_ci	if (page)
16288c2ecf20Sopenharmony_ci		account_slab_page(page, order, s);
16298c2ecf20Sopenharmony_ci
16308c2ecf20Sopenharmony_ci	return page;
16318c2ecf20Sopenharmony_ci}
16328c2ecf20Sopenharmony_ci
16338c2ecf20Sopenharmony_ci#ifdef CONFIG_SLAB_FREELIST_RANDOM
/*
 * Pre-initialize the random sequence cache for @s.
 * Creates s->random_seq (one entry per object of a full slab) and scales
 * each entry by the object size so entries become byte offsets within the
 * slab. Returns 0 on success or if already initialized, or the error from
 * cache_random_seq_create().
 */
static int init_cache_random_seq(struct kmem_cache *s)
{
	unsigned int count = oo_objects(s->oo);
	int err;

	/* Bailout if already initialised */
	if (s->random_seq)
		return 0;

	err = cache_random_seq_create(s, count, GFP_KERNEL);
	if (err) {
		pr_err("SLUB: Unable to initialize free list for %s\n",
			s->name);
		return err;
	}

	/* Transform to an offset on the set of pages */
	if (s->random_seq) {
		unsigned int i;

		/* Scale indices by object size: index -> byte offset */
		for (i = 0; i < count; i++)
			s->random_seq[i] *= s->size;
	}
	return 0;
}
16608c2ecf20Sopenharmony_ci
/* Initialize each random sequence freelist per cache */
static void __init init_freelist_randomization(void)
{
	struct kmem_cache *s;

	/* slab_mutex protects the walk over the global slab_caches list */
	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list)
		init_cache_random_seq(s);

	mutex_unlock(&slab_mutex);
}
16738c2ecf20Sopenharmony_ci
/*
 * Get the next entry on the pre-computed freelist randomized.
 * @pos is the cursor into s->random_seq (wraps at @freelist_count);
 * entries at or beyond @page_limit are skipped, since a fallback
 * lower-order page holds fewer objects than the precomputed sequence
 * assumes. Returns the object's address within the slab at @start.
 */
static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual size defined by the cache.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}
16958c2ecf20Sopenharmony_ci
/*
 * Shuffle the single linked freelist based on a random pre-computed sequence.
 * Builds page->freelist by chaining all objects of @page in the order given
 * by s->random_seq, starting at a random position in the sequence.
 * Returns false (leaving the freelist untouched) when there is nothing to
 * shuffle: fewer than two objects or no random sequence available.
 */
static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	if (page->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	/* Random starting point makes each slab's order unique */
	pos = get_random_int() % freelist_count;

	page_limit = page->objects * s->size;
	start = fixup_red_left(s, page_address(page));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, page, &pos, start, page_limit,
				freelist_count);
	cur = setup_object(s, page, cur);
	page->freelist = cur;

	/* Link the remaining objects in randomized order */
	for (idx = 1; idx < page->objects; idx++) {
		next = next_freelist_entry(s, page, &pos, start, page_limit,
			freelist_count);
		next = setup_object(s, page, next);
		set_freepointer(s, cur, next);
		cur = next;
	}
	set_freepointer(s, cur, NULL);

	return true;
}
17308c2ecf20Sopenharmony_ci#else
/* Freelist randomization disabled: keep the sequential freelist */
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	return false;
}
17408c2ecf20Sopenharmony_ci#endif /* CONFIG_SLAB_FREELIST_RANDOM */
17418c2ecf20Sopenharmony_ci
/*
 * Allocate and fully initialize a new slab for cache @s: allocate the
 * backing pages (falling back to the cache's minimum order under memory
 * pressure), mark the page as a slab, and build the initial freelist
 * (randomized when freelist shuffling is enabled, sequential otherwise).
 * Returns the new slab page, or NULL on allocation failure.
 */
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	void *start, *p, *next;
	int idx;
	bool shuffle;

	flags &= gfp_allowed_mask;

	/* We may sleep below; re-enable irqs while the allocation runs */
	if (gfpflags_allow_blocking(flags))
		local_irq_enable();

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall-back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(s, alloc_gfp, node, oo);
		if (unlikely(!page))
			goto out;
		stat(s, ORDER_FALLBACK);
	}

	page->objects = oo_objects(oo);

	page->slab_cache = s;
	__SetPageSlab(page);
	/* Propagate the pfmemalloc reserve status to the slab */
	if (page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	kasan_poison_slab(page);

	start = page_address(page);

	setup_page_debug(s, page, start);

	shuffle = shuffle_freelist(s, page);

	if (!shuffle) {
		/* Sequential freelist: chain objects in address order */
		start = fixup_red_left(s, start);
		start = setup_object(s, page, start);
		page->freelist = start;
		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
			next = p + s->size;
			next = setup_object(s, page, next);
			set_freepointer(s, p, next);
			p = next;
		}
		set_freepointer(s, p, NULL);
	}

	page->inuse = page->objects;
	/* NOTE(review): frozen appears to mark cpu-slab ownership — confirm */
	page->frozen = 1;

out:
	/* Restore the irq state callers expect */
	if (gfpflags_allow_blocking(flags))
		local_irq_disable();
	if (!page)
		return NULL;

	inc_slabs_node(s, page_to_nid(page), page->objects);

	return page;
}
18218c2ecf20Sopenharmony_ci
18228c2ecf20Sopenharmony_cistatic struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
18238c2ecf20Sopenharmony_ci{
18248c2ecf20Sopenharmony_ci	if (unlikely(flags & GFP_SLAB_BUG_MASK))
18258c2ecf20Sopenharmony_ci		flags = kmalloc_fix_flags(flags);
18268c2ecf20Sopenharmony_ci
18278c2ecf20Sopenharmony_ci	return allocate_slab(s,
18288c2ecf20Sopenharmony_ci		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
18298c2ecf20Sopenharmony_ci}
18308c2ecf20Sopenharmony_ci
/*
 * Return a slab's pages to the page allocator.
 * With consistency checks enabled, first validate the slab padding and
 * every object's inactive redzone so corruption is reported before the
 * memory is gone. Also clears the slab page state and updates reclaim
 * and slab accounting.
 */
static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int order = compound_order(page);
	int pages = 1 << order;

	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
		void *p;

		slab_pad_check(s, page);
		for_each_object(p, s, page_address(page),
						page->objects)
			check_object(s, page, p, SLUB_RED_INACTIVE);
	}

	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);

	page->mapping = NULL;
	/* Credit freed pages to the ongoing reclaim, if any */
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += pages;
	unaccount_slab_page(page, order, s);
	__free_pages(page, order);
}
18548c2ecf20Sopenharmony_ci
18558c2ecf20Sopenharmony_cistatic void rcu_free_slab(struct rcu_head *h)
18568c2ecf20Sopenharmony_ci{
18578c2ecf20Sopenharmony_ci	struct page *page = container_of(h, struct page, rcu_head);
18588c2ecf20Sopenharmony_ci
18598c2ecf20Sopenharmony_ci	__free_slab(page->slab_cache, page);
18608c2ecf20Sopenharmony_ci}
18618c2ecf20Sopenharmony_ci
18628c2ecf20Sopenharmony_cistatic void free_slab(struct kmem_cache *s, struct page *page)
18638c2ecf20Sopenharmony_ci{
18648c2ecf20Sopenharmony_ci	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
18658c2ecf20Sopenharmony_ci		call_rcu(&page->rcu_head, rcu_free_slab);
18668c2ecf20Sopenharmony_ci	} else
18678c2ecf20Sopenharmony_ci		__free_slab(s, page);
18688c2ecf20Sopenharmony_ci}
18698c2ecf20Sopenharmony_ci
18708c2ecf20Sopenharmony_cistatic void discard_slab(struct kmem_cache *s, struct page *page)
18718c2ecf20Sopenharmony_ci{
18728c2ecf20Sopenharmony_ci	dec_slabs_node(s, page_to_nid(page), page->objects);
18738c2ecf20Sopenharmony_ci	free_slab(s, page);
18748c2ecf20Sopenharmony_ci}
18758c2ecf20Sopenharmony_ci
18768c2ecf20Sopenharmony_ci/*
18778c2ecf20Sopenharmony_ci * Management of partially allocated slabs.
18788c2ecf20Sopenharmony_ci */
18798c2ecf20Sopenharmony_cistatic inline void
18808c2ecf20Sopenharmony_ci__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
18818c2ecf20Sopenharmony_ci{
18828c2ecf20Sopenharmony_ci	n->nr_partial++;
18838c2ecf20Sopenharmony_ci	if (tail == DEACTIVATE_TO_TAIL)
18848c2ecf20Sopenharmony_ci		list_add_tail(&page->slab_list, &n->partial);
18858c2ecf20Sopenharmony_ci	else
18868c2ecf20Sopenharmony_ci		list_add(&page->slab_list, &n->partial);
18878c2ecf20Sopenharmony_ci}
18888c2ecf20Sopenharmony_ci
/* Locked variant of __add_partial(): asserts n->list_lock is held. */
static inline void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	lockdep_assert_held(&n->list_lock);
	__add_partial(n, page, tail);
}
18958c2ecf20Sopenharmony_ci
/* Unlink @page from the node's partial list. Requires n->list_lock. */
static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
{
	lockdep_assert_held(&n->list_lock);
	list_del(&page->slab_list);
	n->nr_partial--;
}
19038c2ecf20Sopenharmony_ci
19048c2ecf20Sopenharmony_ci/*
19058c2ecf20Sopenharmony_ci * Remove slab from the partial list, freeze it and
19068c2ecf20Sopenharmony_ci * return the pointer to the freelist.
19078c2ecf20Sopenharmony_ci *
19088c2ecf20Sopenharmony_ci * Returns a list of objects or NULL if it fails.
19098c2ecf20Sopenharmony_ci */
static inline void *acquire_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page,
		int mode, int *objects)
{
	void *freelist;
	unsigned long counters;
	struct page new;

	lockdep_assert_held(&n->list_lock);

	/*
	 * Zap the freelist and set the frozen bit.
	 * The old freelist is the list of objects for the
	 * per cpu allocation list.
	 */
	freelist = page->freelist;
	counters = page->counters;
	new.counters = counters;
	/* Tell the caller how many free objects this slab contributes */
	*objects = new.objects - new.inuse;
	if (mode) {
		/* Take the entire freelist for use as the cpu slab */
		new.inuse = page->objects;
		new.freelist = NULL;
	} else {
		new.freelist = freelist;
	}

	VM_BUG_ON(new.frozen);
	new.frozen = 1;

	if (!__cmpxchg_double_slab(s, page,
			freelist, counters,
			new.freelist, new.counters,
			"acquire_slab"))
		/* Lost a race against a concurrent freelist update */
		return NULL;

	remove_partial(n, page);
	WARN_ON(!freelist);
	return freelist;
}
19498c2ecf20Sopenharmony_ci
19508c2ecf20Sopenharmony_cistatic void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
19518c2ecf20Sopenharmony_cistatic inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
19528c2ecf20Sopenharmony_ci
19538c2ecf20Sopenharmony_ci/*
19548c2ecf20Sopenharmony_ci * Try to allocate a partial slab from a specific node.
19558c2ecf20Sopenharmony_ci */
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
				struct kmem_cache_cpu *c, gfp_t flags)
{
	struct page *page, *page2;
	void *object = NULL;
	unsigned int available = 0;
	int objects;

	/*
	 * Racy check. If we mistakenly see no partial slabs then we
	 * just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then get_partial()
	 * will return NULL.
	 */
	if (!n || !n->nr_partial)
		return NULL;

	spin_lock(&n->list_lock);
	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
		void *t;

		/* Skip pfmemalloc slabs unless the allocation may use them */
		if (!pfmemalloc_match(page, flags))
			continue;

		/* mode = (object == NULL): first slab is taken whole */
		t = acquire_slab(s, n, page, object == NULL, &objects);
		if (!t)
			break;

		available += objects;
		if (!object) {
			/* First acquired slab becomes the new cpu slab */
			c->page = page;
			stat(s, ALLOC_FROM_PARTIAL);
			object = t;
		} else {
			/* Further slabs are parked on the percpu partial list */
			put_cpu_partial(s, page, 0);
			stat(s, CPU_PARTIAL_NODE);
		}
		/*
		 * Stop once we hold more than half of the configured
		 * cpu partial objects, or immediately when cpu partial
		 * lists are not available.
		 */
		if (!kmem_cache_has_cpu_partial(s)
			|| available > slub_cpu_partial(s) / 2)
			break;

	}
	spin_unlock(&n->list_lock);
	return object;
}
20018c2ecf20Sopenharmony_ci
20028c2ecf20Sopenharmony_ci/*
20038c2ecf20Sopenharmony_ci * Get a page from somewhere. Search in increasing NUMA distances.
20048c2ecf20Sopenharmony_ci */
static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
		struct kmem_cache_cpu *c)
{
#ifdef CONFIG_NUMA
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type highest_zoneidx = gfp_zone(flags);
	void *object;
	unsigned int cpuset_mems_cookie;

	/*
	 * The defrag ratio allows a configuration of the tradeoffs between
	 * inter node defragmentation and node local allocations. A lower
	 * defrag_ratio increases the tendency to do local allocations
	 * instead of attempting to obtain partial slabs from other nodes.
	 *
	 * If the defrag_ratio is set to 0 then kmalloc() always
	 * returns node local objects. If the ratio is higher then kmalloc()
	 * may return off node objects because partial slabs are obtained
	 * from other nodes and filled up.
	 *
	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
	 * (which makes defrag_ratio = 1000) then every (well almost)
	 * allocation will first attempt to defrag slab caches on other nodes.
	 * This means scanning over all nodes to look for partial slabs which
	 * may be expensive if we do it every time we are trying to find a slab
	 * with available objects.
	 */
	if (!s->remote_node_defrag_ratio ||
			get_cycles() % 1024 > s->remote_node_defrag_ratio)
		return NULL;

	/* Retry the zonelist scan if cpuset mems_allowed changed under us */
	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		zonelist = node_zonelist(mempolicy_slab_node(), flags);
		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
			struct kmem_cache_node *n;

			n = get_node(s, zone_to_nid(zone));

			/* Only raid nodes that hold more than min_partial slabs */
			if (n && cpuset_zone_allowed(zone, flags) &&
					n->nr_partial > s->min_partial) {
				object = get_partial_node(s, n, c, flags);
				if (object) {
					/*
					 * Don't check read_mems_allowed_retry()
					 * here - if mems_allowed was updated in
					 * parallel, that was a harmless race
					 * between allocation and the cpuset
					 * update
					 */
					return object;
				}
			}
		}
	} while (read_mems_allowed_retry(cpuset_mems_cookie));
#endif	/* CONFIG_NUMA */
	return NULL;
}
20658c2ecf20Sopenharmony_ci
20668c2ecf20Sopenharmony_ci/*
20678c2ecf20Sopenharmony_ci * Get a partial page, lock it and return it.
20688c2ecf20Sopenharmony_ci */
20698c2ecf20Sopenharmony_cistatic void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
20708c2ecf20Sopenharmony_ci		struct kmem_cache_cpu *c)
20718c2ecf20Sopenharmony_ci{
20728c2ecf20Sopenharmony_ci	void *object;
20738c2ecf20Sopenharmony_ci	int searchnode = node;
20748c2ecf20Sopenharmony_ci
20758c2ecf20Sopenharmony_ci	if (node == NUMA_NO_NODE)
20768c2ecf20Sopenharmony_ci		searchnode = numa_mem_id();
20778c2ecf20Sopenharmony_ci
20788c2ecf20Sopenharmony_ci	object = get_partial_node(s, get_node(s, searchnode), c, flags);
20798c2ecf20Sopenharmony_ci	if (object || node != NUMA_NO_NODE)
20808c2ecf20Sopenharmony_ci		return object;
20818c2ecf20Sopenharmony_ci
20828c2ecf20Sopenharmony_ci	return get_any_partial(s, flags, c);
20838c2ecf20Sopenharmony_ci}
20848c2ecf20Sopenharmony_ci
#ifdef CONFIG_PREEMPTION
/*
 * Calculate the next globally unique transaction for disambiguation
 * during cmpxchg. The transactions start with the cpu number and are then
 * incremented by CONFIG_NR_CPUS.
 */
#define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
#else
/*
 * No preemption supported therefore also no need to check for
 * different cpus.
 */
#define TID_STEP 1
#endif

/* Advance a transaction id to its next value for the same cpu. */
static inline unsigned long next_tid(unsigned long tid)
{
	return tid + TID_STEP;
}

#ifdef SLUB_DEBUG_CMPXCHG
/* Extract the cpu number encoded in the low bits of a tid. */
static inline unsigned int tid_to_cpu(unsigned long tid)
{
	return tid % TID_STEP;
}

/* Extract the per-cpu transaction (event) count from a tid. */
static inline unsigned long tid_to_event(unsigned long tid)
{
	return tid / TID_STEP;
}
#endif

/* Initial tid for a cpu: the cpu number itself, event count zero. */
static inline unsigned int init_tid(int cpu)
{
	return cpu;
}
21218c2ecf20Sopenharmony_ci
/*
 * Report (in SLUB_DEBUG_CMPXCHG builds) why a tid-based cmpxchg in
 * operation @n failed, and account the failure in the cache statistics.
 */
static inline void note_cmpxchg_failure(const char *n,
		const struct kmem_cache *s, unsigned long tid)
{
#ifdef SLUB_DEBUG_CMPXCHG
	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);

	pr_info("%s %s: cmpxchg redo ", n, s->name);

#ifdef CONFIG_PREEMPTION
	/* Preemption may have migrated us to a different cpu */
	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
		pr_warn("due to cpu change %d -> %d\n",
			tid_to_cpu(tid), tid_to_cpu(actual_tid));
	else
#endif
	if (tid_to_event(tid) != tid_to_event(actual_tid))
		pr_warn("due to cpu running other code. Event %ld->%ld\n",
			tid_to_event(tid), tid_to_event(actual_tid));
	else
		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
			actual_tid, tid, next_tid(tid));
#endif
	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
}
21458c2ecf20Sopenharmony_ci
/* Seed every possible cpu's transaction id with its cpu number. */
static void init_kmem_cache_cpus(struct kmem_cache *s)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
}
21538c2ecf20Sopenharmony_ci
21548c2ecf20Sopenharmony_ci/*
21558c2ecf20Sopenharmony_ci * Remove the cpu slab
21568c2ecf20Sopenharmony_ci */
static void deactivate_slab(struct kmem_cache *s, struct page *page,
				void *freelist, struct kmem_cache_cpu *c)
{
	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	int lock = 0;
	enum slab_modes l = M_NONE, m = M_NONE;
	void *nextfree;
	int tail = DEACTIVATE_TO_HEAD;
	struct page new;
	struct page old;

	/*
	 * A non-empty page->freelist on a frozen slab means objects were
	 * freed remotely while this cpu owned it; queue such slabs at the
	 * tail of the partial list.
	 */
	if (page->freelist) {
		stat(s, DEACTIVATE_REMOTE_FREES);
		tail = DEACTIVATE_TO_TAIL;
	}

	/*
	 * Stage one: Free all available per cpu objects back
	 * to the page freelist while it is still frozen. Leave the
	 * last one.
	 *
	 * There is no need to take the list->lock because the page
	 * is still frozen.
	 */
	while (freelist && (nextfree = get_freepointer(s, freelist))) {
		void *prior;
		unsigned long counters;

		/*
		 * If 'nextfree' is invalid, it is possible that the object at
		 * 'freelist' is already corrupted.  So isolate all objects
		 * starting at 'freelist'.
		 */
		if (freelist_corrupted(s, page, &freelist, nextfree))
			break;

		do {
			prior = page->freelist;
			counters = page->counters;
			set_freepointer(s, freelist, prior);
			new.counters = counters;
			new.inuse--;
			VM_BUG_ON(!new.frozen);

		} while (!__cmpxchg_double_slab(s, page,
			prior, counters,
			freelist, new.counters,
			"drain percpu freelist"));

		freelist = nextfree;
	}

	/*
	 * Stage two: Ensure that the page is unfrozen while the
	 * list presence reflects the actual number of objects
	 * during unfreeze.
	 *
	 * We setup the list membership and then perform a cmpxchg
	 * with the count. If there is a mismatch then the page
	 * is not unfrozen but the page is on the wrong list.
	 *
	 * Then we restart the process which may have to remove
	 * the page from the list that we just put it on again
	 * because the number of objects in the slab may have
	 * changed.
	 */
redo:

	old.freelist = page->freelist;
	old.counters = page->counters;
	VM_BUG_ON(!old.frozen);

	/* Determine target state of the slab */
	new.counters = old.counters;
	if (freelist) {
		/* Chain the retained last object onto the page freelist */
		new.inuse--;
		set_freepointer(s, freelist, old.freelist);
		new.freelist = freelist;
	} else
		new.freelist = old.freelist;

	new.frozen = 0;

	/* Empty slab with enough partials already queued: free it */
	if (!new.inuse && n->nr_partial >= s->min_partial)
		m = M_FREE;
	else if (new.freelist) {
		m = M_PARTIAL;
		if (!lock) {
			lock = 1;
			/*
			 * Taking the spinlock removes the possibility
			 * that acquire_slab() will see a slab page that
			 * is frozen
			 */
			spin_lock(&n->list_lock);
		}
	} else {
		m = M_FULL;
#ifdef CONFIG_SLUB_DEBUG
		if ((s->flags & SLAB_STORE_USER) && !lock) {
			lock = 1;
			/*
			 * This also ensures that the scanning of full
			 * slabs from diagnostic functions will not see
			 * any frozen slabs.
			 */
			spin_lock(&n->list_lock);
		}
#endif
	}

	/* Move the page between lists if the target state changed */
	if (l != m) {
		if (l == M_PARTIAL)
			remove_partial(n, page);
		else if (l == M_FULL)
			remove_full(s, n, page);

		if (m == M_PARTIAL)
			add_partial(n, page, tail);
		else if (m == M_FULL)
			add_full(s, n, page);
	}

	/* Remember the list state in case the cmpxchg below fails */
	l = m;
	if (!__cmpxchg_double_slab(s, page,
				old.freelist, old.counters,
				new.freelist, new.counters,
				"unfreezing slab"))
		goto redo;

	if (lock)
		spin_unlock(&n->list_lock);

	if (m == M_PARTIAL)
		stat(s, tail);
	else if (m == M_FULL)
		stat(s, DEACTIVATE_FULL);
	else if (m == M_FREE) {
		stat(s, DEACTIVATE_EMPTY);
		discard_slab(s, page);
		stat(s, FREE_SLAB);
	}

	/* Slab is gone from this cpu; bump the tid so that concurrent
	 * fastpath cmpxchgs using the old tid fail. */
	c->page = NULL;
	c->freelist = NULL;
	c->tid = next_tid(c->tid);
}
23058c2ecf20Sopenharmony_ci
23068c2ecf20Sopenharmony_ci/*
23078c2ecf20Sopenharmony_ci * Unfreeze all the cpu partial slabs.
23088c2ecf20Sopenharmony_ci *
23098c2ecf20Sopenharmony_ci * This function must be called with interrupts disabled
23108c2ecf20Sopenharmony_ci * for the cpu using c (or some other guarantee must be there
23118c2ecf20Sopenharmony_ci * to guarantee no concurrent accesses).
23128c2ecf20Sopenharmony_ci */
static void unfreeze_partials(struct kmem_cache *s,
		struct kmem_cache_cpu *c)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct kmem_cache_node *n = NULL, *n2 = NULL;
	struct page *page, *discard_page = NULL;

	while ((page = slub_percpu_partial(c))) {
		struct page new;
		struct page old;

		/* Detach this page from the percpu partial list */
		slub_set_percpu_partial(c, page);

		/*
		 * Batch lock acquisition: keep holding one node's
		 * list_lock across consecutive pages on the same node.
		 */
		n2 = get_node(s, page_to_nid(page));
		if (n != n2) {
			if (n)
				spin_unlock(&n->list_lock);

			n = n2;
			spin_lock(&n->list_lock);
		}

		do {

			old.freelist = page->freelist;
			old.counters = page->counters;
			VM_BUG_ON(!old.frozen);

			new.counters = old.counters;
			new.freelist = old.freelist;

			new.frozen = 0;

		} while (!__cmpxchg_double_slab(s, page,
				old.freelist, old.counters,
				new.freelist, new.counters,
				"unfreezing slab"));

		/* Empty slabs beyond min_partial are queued for discard */
		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
			page->next = discard_page;
			discard_page = page;
		} else {
			add_partial(n, page, DEACTIVATE_TO_TAIL);
			stat(s, FREE_ADD_PARTIAL);
		}
	}

	if (n)
		spin_unlock(&n->list_lock);

	/* Free the empty slabs after the list_lock has been dropped */
	while (discard_page) {
		page = discard_page;
		discard_page = discard_page->next;

		stat(s, DEACTIVATE_EMPTY);
		discard_slab(s, page);
		stat(s, FREE_SLAB);
	}
#endif	/* CONFIG_SLUB_CPU_PARTIAL */
}
23738c2ecf20Sopenharmony_ci
23748c2ecf20Sopenharmony_ci/*
23758c2ecf20Sopenharmony_ci * Put a page that was just frozen (in __slab_free|get_partial_node) into a
23768c2ecf20Sopenharmony_ci * partial page slot if available.
23778c2ecf20Sopenharmony_ci *
23788c2ecf20Sopenharmony_ci * If we did not find a slot then simply move all the partials to the
23798c2ecf20Sopenharmony_ci * per node partial list.
23808c2ecf20Sopenharmony_ci */
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *oldpage;
	int pages;
	int pobjects;

	preempt_disable();
	do {
		pages = 0;
		pobjects = 0;
		oldpage = this_cpu_read(s->cpu_slab->partial);

		if (oldpage) {
			pobjects = oldpage->pobjects;
			pages = oldpage->pages;
			if (drain && pobjects > slub_cpu_partial(s)) {
				unsigned long flags;
				/*
				 * partial array is full. Move the existing
				 * set to the per node partial list.
				 */
				local_irq_save(flags);
				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
				local_irq_restore(flags);
				oldpage = NULL;
				pobjects = 0;
				pages = 0;
				stat(s, CPU_PARTIAL_DRAIN);
			}
		}

		/*
		 * Push @page onto the percpu partial list; the head page
		 * carries the running totals for the whole list.
		 */
		pages++;
		pobjects += page->objects - page->inuse;

		page->pages = pages;
		page->pobjects = pobjects;
		page->next = oldpage;

	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
								!= oldpage);
	/* cpu partials disabled (slub_cpu_partial() == 0): drain at once */
	if (unlikely(!slub_cpu_partial(s))) {
		unsigned long flags;

		local_irq_save(flags);
		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
		local_irq_restore(flags);
	}
	preempt_enable();
#endif	/* CONFIG_SLUB_CPU_PARTIAL */
}
24328c2ecf20Sopenharmony_ci
/* Deactivate the current cpu slab of @c back to the node lists. */
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	stat(s, CPUSLAB_FLUSH);
	deactivate_slab(s, c->page, c->freelist, c);
}
24388c2ecf20Sopenharmony_ci
24398c2ecf20Sopenharmony_ci/*
24408c2ecf20Sopenharmony_ci * Flush cpu slab.
24418c2ecf20Sopenharmony_ci *
24428c2ecf20Sopenharmony_ci * Called from IPI handler with interrupts disabled.
24438c2ecf20Sopenharmony_ci */
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

	/* Give the active cpu slab back to the node lists ... */
	if (c->page)
		flush_slab(s, c);

	/* ... and drain the percpu partial list as well */
	unfreeze_partials(s, c);
}
24538c2ecf20Sopenharmony_ci
/* IPI callback: flush this cpu's slabs for the cache passed in @d. */
static void flush_cpu_slab(void *d)
{
	__flush_cpu_slab((struct kmem_cache *)d, smp_processor_id());
}
24608c2ecf20Sopenharmony_ci
24618c2ecf20Sopenharmony_cistatic bool has_cpu_slab(int cpu, void *info)
24628c2ecf20Sopenharmony_ci{
24638c2ecf20Sopenharmony_ci	struct kmem_cache *s = info;
24648c2ecf20Sopenharmony_ci	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
24658c2ecf20Sopenharmony_ci
24668c2ecf20Sopenharmony_ci	return c->page || slub_percpu_partial(c);
24678c2ecf20Sopenharmony_ci}
24688c2ecf20Sopenharmony_ci
/*
 * Flush the cpu slabs of every cpu that actually holds slab state for
 * this cache, by IPI. has_cpu_slab() filters out cpus with nothing to do.
 */
static void flush_all(struct kmem_cache *s)
{
	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
}
24738c2ecf20Sopenharmony_ci
/*
 * Use the cpu notifier to ensure that the cpu slabs are flushed when
 * necessary.
 */
static int slub_cpu_dead(unsigned int cpu)
{
	struct kmem_cache *s;
	unsigned long flags;

	/*
	 * Walk every cache and flush the dead cpu's slab state back to the
	 * node lists. slab_mutex protects the cache list; interrupts are
	 * disabled around each flush because __flush_cpu_slab() requires it.
	 */
	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		local_irq_save(flags);
		__flush_cpu_slab(s, cpu);
		local_irq_restore(flags);
	}
	mutex_unlock(&slab_mutex);
	return 0;
}
24928c2ecf20Sopenharmony_ci
24938c2ecf20Sopenharmony_ci/*
24948c2ecf20Sopenharmony_ci * Check if the objects in a per cpu structure fit numa
24958c2ecf20Sopenharmony_ci * locality expectations.
24968c2ecf20Sopenharmony_ci */
/*
 * Does @page satisfy the NUMA node constraint of the request?
 * node == NUMA_NO_NODE matches any page; without CONFIG_NUMA
 * everything matches.
 */
static inline int node_match(struct page *page, int node)
{
#ifdef CONFIG_NUMA
	if (node == NUMA_NO_NODE)
		return 1;
	return page_to_nid(page) == node;
#else
	return 1;
#endif
}
25058c2ecf20Sopenharmony_ci
25068c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
/* Number of free (unallocated) objects in a slab page. */
static int count_free(struct page *page)
{
	return page->objects - page->inuse;
}
25118c2ecf20Sopenharmony_ci
/* Total number of objects (allocated or not) on the given node. */
static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->total_objects);
}
25168c2ecf20Sopenharmony_ci#endif /* CONFIG_SLUB_DEBUG */
25178c2ecf20Sopenharmony_ci
25188c2ecf20Sopenharmony_ci#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
25198c2ecf20Sopenharmony_cistatic unsigned long count_partial(struct kmem_cache_node *n,
25208c2ecf20Sopenharmony_ci					int (*get_count)(struct page *))
25218c2ecf20Sopenharmony_ci{
25228c2ecf20Sopenharmony_ci	unsigned long flags;
25238c2ecf20Sopenharmony_ci	unsigned long x = 0;
25248c2ecf20Sopenharmony_ci	struct page *page;
25258c2ecf20Sopenharmony_ci
25268c2ecf20Sopenharmony_ci	spin_lock_irqsave(&n->list_lock, flags);
25278c2ecf20Sopenharmony_ci	list_for_each_entry(page, &n->partial, slab_list)
25288c2ecf20Sopenharmony_ci		x += get_count(page);
25298c2ecf20Sopenharmony_ci	spin_unlock_irqrestore(&n->list_lock, flags);
25308c2ecf20Sopenharmony_ci	return x;
25318c2ecf20Sopenharmony_ci}
25328c2ecf20Sopenharmony_ci#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
25338c2ecf20Sopenharmony_ci
/*
 * Ratelimited diagnostic dump when an allocation from this cache fails:
 * cache geometry plus per-node slab/object/free counts. Compiled out
 * (to an empty function) without CONFIG_SLUB_DEBUG.
 */
static noinline void
slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
{
#ifdef CONFIG_SLUB_DEBUG
	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	int node;
	struct kmem_cache_node *n;

	/* Stay silent if the caller opted out or we are being too noisy. */
	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
		return;

	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
		s->name, s->object_size, s->size, oo_order(s->oo),
		oo_order(s->min));

	if (oo_order(s->min) > get_order(s->object_size))
		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
			s->name);

	for_each_kmem_cache_node(s, node, n) {
		unsigned long nr_slabs;
		unsigned long nr_objs;
		unsigned long nr_free;

		nr_free  = count_partial(n, count_free);
		nr_slabs = node_nr_slabs(n);
		nr_objs  = node_nr_objs(n);

		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
			node, nr_slabs, nr_objs, nr_free);
	}
#endif
}
25708c2ecf20Sopenharmony_ci
/*
 * Obtain a freelist for the cpu slab: first try to refill from the node
 * partial lists, otherwise allocate a fresh slab page and install it as
 * the cpu slab. On success *pc points at the (possibly refetched) per
 * cpu structure whose ->page was set. Returns NULL if no memory is
 * available.
 *
 * NOTE(review): the raw_cpu_ptr() below relies on not migrating cpus;
 * callers appear to run with interrupts disabled (see ___slab_alloc()).
 */
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
			int node, struct kmem_cache_cpu **pc)
{
	void *freelist;
	struct kmem_cache_cpu *c = *pc;
	struct page *page;

	/* Constructed objects must not also be zeroed by the allocator. */
	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));

	freelist = get_partial(s, flags, node, c);

	if (freelist)
		return freelist;

	page = new_slab(s, flags, node);
	if (page) {
		c = raw_cpu_ptr(s->cpu_slab);
		/* A slab may have been loaded meanwhile; deactivate it first. */
		if (c->page)
			flush_slab(s, c);

		/*
		 * No other reference to the page yet so we can
		 * muck around with it freely without cmpxchg
		 */
		freelist = page->freelist;
		page->freelist = NULL;

		stat(s, ALLOC_SLAB);
		c->page = page;
		*pc = c;
	}

	return freelist;
}
26058c2ecf20Sopenharmony_ci
26068c2ecf20Sopenharmony_cistatic inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
26078c2ecf20Sopenharmony_ci{
26088c2ecf20Sopenharmony_ci	if (unlikely(PageSlabPfmemalloc(page)))
26098c2ecf20Sopenharmony_ci		return gfp_pfmemalloc_allowed(gfpflags);
26108c2ecf20Sopenharmony_ci
26118c2ecf20Sopenharmony_ci	return true;
26128c2ecf20Sopenharmony_ci}
26138c2ecf20Sopenharmony_ci
26148c2ecf20Sopenharmony_ci/*
26158c2ecf20Sopenharmony_ci * Check the page->freelist of a page and either transfer the freelist to the
26168c2ecf20Sopenharmony_ci * per cpu freelist or deactivate the page.
26178c2ecf20Sopenharmony_ci *
26188c2ecf20Sopenharmony_ci * The page is still frozen if the return value is not NULL.
26198c2ecf20Sopenharmony_ci *
26208c2ecf20Sopenharmony_ci * If this function returns NULL then the page has been unfrozen.
26218c2ecf20Sopenharmony_ci *
26228c2ecf20Sopenharmony_ci * This function must be called with interrupt disabled.
26238c2ecf20Sopenharmony_ci */
static inline void *get_freelist(struct kmem_cache *s, struct page *page)
{
	struct page new;
	unsigned long counters;
	void *freelist;

	do {
		/* Snapshot the freelist and counters for the cmpxchg below. */
		freelist = page->freelist;
		counters = page->counters;

		new.counters = counters;
		VM_BUG_ON(!new.frozen);

		/*
		 * Take all objects: mark everything in use and detach the
		 * freelist. Keep the page frozen only if it still had
		 * objects; otherwise it is unfrozen and abandoned.
		 */
		new.inuse = page->objects;
		new.frozen = freelist != NULL;

	} while (!__cmpxchg_double_slab(s, page,
		freelist, counters,
		NULL, new.counters,
		"get_freelist"));

	return freelist;
}
26478c2ecf20Sopenharmony_ci
26488c2ecf20Sopenharmony_ci/*
26498c2ecf20Sopenharmony_ci * Slow path. The lockless freelist is empty or we need to perform
26508c2ecf20Sopenharmony_ci * debugging duties.
26518c2ecf20Sopenharmony_ci *
26528c2ecf20Sopenharmony_ci * Processing is still very fast if new objects have been freed to the
26538c2ecf20Sopenharmony_ci * regular freelist. In that case we simply take over the regular freelist
26548c2ecf20Sopenharmony_ci * as the lockless freelist and zap the regular freelist.
26558c2ecf20Sopenharmony_ci *
26568c2ecf20Sopenharmony_ci * If that is not working then we fall back to the partial lists. We take the
26578c2ecf20Sopenharmony_ci * first element of the freelist as the object to allocate now and move the
26588c2ecf20Sopenharmony_ci * rest of the freelist to the lockless freelist.
26598c2ecf20Sopenharmony_ci *
26608c2ecf20Sopenharmony_ci * And if we were unable to get a new slab from the partial slab lists then
26618c2ecf20Sopenharmony_ci * we need to allocate a new slab. This is the slowest path since it involves
26628c2ecf20Sopenharmony_ci * a call to the page allocator and the setup of a new slab.
26638c2ecf20Sopenharmony_ci *
26648c2ecf20Sopenharmony_ci * Version of __slab_alloc to use when we know that interrupts are
26658c2ecf20Sopenharmony_ci * already disabled (which is the case for bulk allocation).
26668c2ecf20Sopenharmony_ci */
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			  unsigned long addr, struct kmem_cache_cpu *c)
{
	void *freelist;
	struct page *page;

	stat(s, ALLOC_SLOWPATH);

	page = c->page;
	if (!page) {
		/*
		 * if the node is not online or has no normal memory, just
		 * ignore the node constraint
		 */
		if (unlikely(node != NUMA_NO_NODE &&
			     !node_state(node, N_NORMAL_MEMORY)))
			node = NUMA_NO_NODE;
		goto new_slab;
	}
redo:

	if (unlikely(!node_match(page, node))) {
		/*
		 * same as above but node_match() being false already
		 * implies node != NUMA_NO_NODE
		 */
		if (!node_state(node, N_NORMAL_MEMORY)) {
			node = NUMA_NO_NODE;
			goto redo;
		} else {
			stat(s, ALLOC_NODE_MISMATCH);
			deactivate_slab(s, page, c->freelist, c);
			goto new_slab;
		}
	}

	/*
	 * By rights, we should be searching for a slab page that was
	 * PFMEMALLOC but right now, we are losing the pfmemalloc
	 * information when the page leaves the per-cpu allocator
	 */
	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
		deactivate_slab(s, page, c->freelist, c);
		goto new_slab;
	}

	/* must check again c->freelist in case of cpu migration or IRQ */
	freelist = c->freelist;
	if (freelist)
		goto load_freelist;

	freelist = get_freelist(s, page);

	if (!freelist) {
		/* Page had no objects left; get_freelist() unfroze it. */
		c->page = NULL;
		c->tid = next_tid(c->tid);
		stat(s, DEACTIVATE_BYPASS);
		goto new_slab;
	}

	stat(s, ALLOC_REFILL);

load_freelist:
	/*
	 * freelist is pointing to the list of objects to be used.
	 * page is pointing to the page from which the objects are obtained.
	 * That page must be frozen for per cpu allocations to work.
	 */
	VM_BUG_ON(!c->page->frozen);
	c->freelist = get_freepointer(s, freelist);
	c->tid = next_tid(c->tid);
	return freelist;

new_slab:

	/* Try a page from the per-cpu partial list before allocating. */
	if (slub_percpu_partial(c)) {
		page = c->page = slub_percpu_partial(c);
		slub_set_percpu_partial(c, page);
		stat(s, CPU_PARTIAL_ALLOC);
		goto redo;
	}

	freelist = new_slab_objects(s, gfpflags, node, &c);

	if (unlikely(!freelist)) {
		slab_out_of_memory(s, gfpflags, node);
		return NULL;
	}

	page = c->page;
	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
		goto load_freelist;

	/* Only entered in the debug case */
	if (kmem_cache_debug(s) &&
			!alloc_debug_processing(s, page, freelist, addr))
		goto new_slab;	/* Slab failed checks. Next slab needed */

	/* Debug or pfmemalloc mismatch: hand out one object, drop the rest. */
	deactivate_slab(s, page, get_freepointer(s, freelist), c);
	return freelist;
}
27688c2ecf20Sopenharmony_ci
/*
 * Another variant that disables interrupts and compensates for possible
 * cpu changes by refetching the per cpu area pointer.
 */
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
			  unsigned long addr, struct kmem_cache_cpu *c)
{
	void *p;
	unsigned long flags;

	/* ___slab_alloc() requires interrupts to be disabled throughout. */
	local_irq_save(flags);
#ifdef CONFIG_PREEMPTION
	/*
	 * We may have been preempted and rescheduled on a different
	 * cpu before disabling interrupts. Need to reload cpu area
	 * pointer.
	 */
	c = this_cpu_ptr(s->cpu_slab);
#endif

	p = ___slab_alloc(s, gfpflags, node, addr, c);
	local_irq_restore(flags);
	return p;
}
27938c2ecf20Sopenharmony_ci
27948c2ecf20Sopenharmony_ci/*
27958c2ecf20Sopenharmony_ci * If the object has been wiped upon free, make sure it's fully initialized by
27968c2ecf20Sopenharmony_ci * zeroing out freelist pointer.
27978c2ecf20Sopenharmony_ci */
27988c2ecf20Sopenharmony_cistatic __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
27998c2ecf20Sopenharmony_ci						   void *obj)
28008c2ecf20Sopenharmony_ci{
28018c2ecf20Sopenharmony_ci	if (unlikely(slab_want_init_on_free(s)) && obj)
28028c2ecf20Sopenharmony_ci		memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
28038c2ecf20Sopenharmony_ci}
28048c2ecf20Sopenharmony_ci
28058c2ecf20Sopenharmony_ci/*
28068c2ecf20Sopenharmony_ci * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
28078c2ecf20Sopenharmony_ci * have the fastpath folded into their functions. So no function call
28088c2ecf20Sopenharmony_ci * overhead for requests that can be satisfied on the fastpath.
28098c2ecf20Sopenharmony_ci *
28108c2ecf20Sopenharmony_ci * The fastpath works by first checking if the lockless freelist can be used.
28118c2ecf20Sopenharmony_ci * If not then __slab_alloc is called for slow processing.
28128c2ecf20Sopenharmony_ci *
28138c2ecf20Sopenharmony_ci * Otherwise we can simply pick the next object from the lockless free list.
28148c2ecf20Sopenharmony_ci */
/*
 * Lockless fastpath allocation: pick the first object off the per-cpu
 * freelist with a tid-protected cmpxchg_double, falling back to
 * __slab_alloc() when the freelist is empty or the cpu slab does not
 * match the requested node.
 */
static __always_inline void *slab_alloc_node(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr)
{
	void *object;
	struct kmem_cache_cpu *c;
	struct page *page;
	unsigned long tid;
	struct obj_cgroup *objcg = NULL;

	/* May redirect to a memcg cache or return NULL (e.g. fault injection). */
	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
	if (!s)
		return NULL;
redo:
	/*
	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
	 * enabled. We may switch back and forth between cpus while
	 * reading from one cpu area. That does not matter as long
	 * as we end up on the original cpu again when doing the cmpxchg.
	 *
	 * We should guarantee that tid and kmem_cache are retrieved on
	 * the same cpu. It could be different if CONFIG_PREEMPTION so we need
	 * to check if it is matched or not.
	 */
	do {
		tid = this_cpu_read(s->cpu_slab->tid);
		c = raw_cpu_ptr(s->cpu_slab);
	} while (IS_ENABLED(CONFIG_PREEMPTION) &&
		 unlikely(tid != READ_ONCE(c->tid)));

	/*
	 * Irqless object alloc/free algorithm used here depends on sequence
	 * of fetching cpu_slab's data. tid should be fetched before anything
	 * on c to guarantee that object and page associated with previous tid
	 * won't be used with current tid. If we fetch tid first, object and
	 * page could be one associated with next tid and our alloc/free
	 * request will fail. In this case, we will retry. So, no problem.
	 */
	barrier();

	/*
	 * The transaction ids are globally unique per cpu and per operation on
	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
	 * occurs on the right processor and that there was no operation on the
	 * linked list in between.
	 */

	object = c->freelist;
	page = c->page;
	if (unlikely(!object || !page || !node_match(page, node))) {
		object = __slab_alloc(s, gfpflags, node, addr, c);
	} else {
		void *next_object = get_freepointer_safe(s, object);

		/*
		 * The cmpxchg will only match if there was no additional
		 * operation and if we are on the right processor.
		 *
		 * The cmpxchg does the following atomically (without lock
		 * semantics!)
		 * 1. Relocate first pointer to the current per cpu area.
		 * 2. Verify that tid and freelist have not been changed
		 * 3. If they were not changed replace tid and freelist
		 *
		 * Since this is without lock semantics the protection is only
		 * against code executing on this cpu *not* from access by
		 * other cpus.
		 */
		if (unlikely(!this_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				object, tid,
				next_object, next_tid(tid)))) {

			note_cmpxchg_failure("slab_alloc", s, tid);
			goto redo;
		}
		prefetch_freepointer(s, next_object);
		stat(s, ALLOC_FASTPATH);
	}

	/* Zero the in-object freelist pointer if init_on_free is active. */
	maybe_wipe_obj_freeptr(s, object);

	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
		memset(object, 0, s->object_size);

	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);

	return object;
}
29038c2ecf20Sopenharmony_ci
/*
 * Allocate with no NUMA node preference: thin wrapper around
 * slab_alloc_node() with node == NUMA_NO_NODE.
 */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, unsigned long addr)
{
	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
}
29098c2ecf20Sopenharmony_ci
/**
 * kmem_cache_alloc - allocate an object from a cache
 * @s: the cache to allocate from
 * @gfpflags: GFP allocation flags
 *
 * Return: pointer to the new object, or NULL on failure.
 */
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	void *ret = slab_alloc(s, gfpflags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
				s->size, gfpflags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
29208c2ecf20Sopenharmony_ci
29218c2ecf20Sopenharmony_ci#ifdef CONFIG_TRACING
/*
 * Like kmem_cache_alloc() but emits a kmalloc tracepoint with the
 * caller-requested @size and tells KASAN the usable object size.
 * Used by the kmalloc() fast paths when tracing is enabled.
 */
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
29308c2ecf20Sopenharmony_ci#endif
29318c2ecf20Sopenharmony_ci
29328c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - allocate an object from a cache on a given node
 * @s: the cache to allocate from
 * @gfpflags: GFP allocation flags
 * @node: preferred NUMA node (or NUMA_NO_NODE for no preference)
 *
 * Return: pointer to the new object, or NULL on failure.
 */
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    s->object_size, s->size, gfpflags, node);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
29438c2ecf20Sopenharmony_ci
29448c2ecf20Sopenharmony_ci#ifdef CONFIG_TRACING
/*
 * Node-aware variant of kmem_cache_alloc_trace(): emits a kmalloc_node
 * tracepoint with the caller-requested @size and informs KASAN.
 */
void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
				    gfp_t gfpflags,
				    int node, size_t size)
{
	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret,
			   size, s->size, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
29588c2ecf20Sopenharmony_ci#endif
29598c2ecf20Sopenharmony_ci#endif	/* CONFIG_NUMA */
29608c2ecf20Sopenharmony_ci
29618c2ecf20Sopenharmony_ci/*
29628c2ecf20Sopenharmony_ci * Slow path handling. This may still be called frequently since objects
29638c2ecf20Sopenharmony_ci * have a longer lifetime than the cpu slabs in most processing loads.
29648c2ecf20Sopenharmony_ci *
29658c2ecf20Sopenharmony_ci * So we still attempt to reduce cache line usage. Just take the slab
29668c2ecf20Sopenharmony_ci * lock and free the item. If there is no additional partial page
29678c2ecf20Sopenharmony_ci * handling required then we can return immediately.
29688c2ecf20Sopenharmony_ci */
static void __slab_free(struct kmem_cache *s, struct page *page,
			void *head, void *tail, int cnt,
			unsigned long addr)

{
	void *prior;
	int was_frozen;
	struct page new;
	unsigned long counters;
	struct kmem_cache_node *n = NULL;
	unsigned long flags;

	stat(s, FREE_SLOWPATH);

	if (kmem_cache_debug(s) &&
	    !free_debug_processing(s, page, head, tail, cnt, addr))
		return;

	/*
	 * cmpxchg retry loop: splice head..tail onto the page freelist.
	 * The list_lock is only taken speculatively when the result may
	 * require list manipulation, and dropped again on retry.
	 */
	do {
		if (unlikely(n)) {
			spin_unlock_irqrestore(&n->list_lock, flags);
			n = NULL;
		}
		prior = page->freelist;
		counters = page->counters;
		set_freepointer(s, tail, prior);
		new.counters = counters;
		was_frozen = new.frozen;
		new.inuse -= cnt;
		if ((!new.inuse || !prior) && !was_frozen) {

			if (kmem_cache_has_cpu_partial(s) && !prior) {

				/*
				 * Slab was on no list before and will be
				 * partially empty
				 * We can defer the list move and instead
				 * freeze it.
				 */
				new.frozen = 1;

			} else { /* Needs to be taken off a list */

				n = get_node(s, page_to_nid(page));
				/*
				 * Speculatively acquire the list_lock.
				 * If the cmpxchg does not succeed then we may
				 * drop the list_lock without any processing.
				 *
				 * Otherwise the list_lock will synchronize with
				 * other processors updating the list of slabs.
				 */
				spin_lock_irqsave(&n->list_lock, flags);

			}
		}

	} while (!cmpxchg_double_slab(s, page,
		prior, counters,
		head, new.counters,
		"__slab_free"));

	if (likely(!n)) {

		if (likely(was_frozen)) {
			/*
			 * The list lock was not taken therefore no list
			 * activity can be necessary.
			 */
			stat(s, FREE_FROZEN);
		} else if (new.frozen) {
			/*
			 * If we just froze the page then put it onto the
			 * per cpu partial list.
			 */
			put_cpu_partial(s, page, 1);
			stat(s, CPU_PARTIAL_FREE);
		}

		return;
	}

	/* Page became empty and the node already has enough partial slabs. */
	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
		goto slab_empty;

	/*
	 * Objects left in the slab. If it was not on the partial list before
	 * then add it.
	 */
	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
		remove_full(s, n, page);
		add_partial(n, page, DEACTIVATE_TO_TAIL);
		stat(s, FREE_ADD_PARTIAL);
	}
	spin_unlock_irqrestore(&n->list_lock, flags);
	return;

slab_empty:
	if (prior) {
		/*
		 * Slab on the partial list.
		 */
		remove_partial(n, page);
		stat(s, FREE_REMOVE_PARTIAL);
	} else {
		/* Slab must be on the full list */
		remove_full(s, n, page);
	}

	spin_unlock_irqrestore(&n->list_lock, flags);
	stat(s, FREE_SLAB);
	discard_slab(s, page);
}
30828c2ecf20Sopenharmony_ci
30838c2ecf20Sopenharmony_ci/*
30848c2ecf20Sopenharmony_ci * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
30858c2ecf20Sopenharmony_ci * can perform fastpath freeing without additional function calls.
30868c2ecf20Sopenharmony_ci *
30878c2ecf20Sopenharmony_ci * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
30898c2ecf20Sopenharmony_ci * the item before.
30908c2ecf20Sopenharmony_ci *
30918c2ecf20Sopenharmony_ci * If fastpath is not possible then fall back to __slab_free where we deal
30928c2ecf20Sopenharmony_ci * with all sorts of special processing.
30938c2ecf20Sopenharmony_ci *
30948c2ecf20Sopenharmony_ci * Bulk free of a freelist with several objects (all pointing to the
30958c2ecf20Sopenharmony_ci * same page) possible by specifying head and tail ptr, plus objects
30968c2ecf20Sopenharmony_ci * count (cnt). Bulk free indicated by tail pointer being set.
30978c2ecf20Sopenharmony_ci */
static __always_inline void do_slab_free(struct kmem_cache *s,
				struct page *page, void *head, void *tail,
				int cnt, unsigned long addr)
{
	/* Single-object free passes tail == NULL; the chain is just head. */
	void *tail_obj = tail ? : head;
	struct kmem_cache_cpu *c;
	unsigned long tid;

	/* memcg_slab_free_hook() is already called for bulk free. */
	if (!tail)
		memcg_slab_free_hook(s, &head, 1);
redo:
	/*
	 * Determine the current cpu's per cpu slab.
	 * The cpu may change afterward. However that does not matter since
	 * data is retrieved via this pointer. If we are on the same cpu
	 * during the cmpxchg then the free will succeed.
	 */
	do {
		tid = this_cpu_read(s->cpu_slab->tid);
		c = raw_cpu_ptr(s->cpu_slab);
	} while (IS_ENABLED(CONFIG_PREEMPTION) &&
		 unlikely(tid != READ_ONCE(c->tid)));

	/* Same with comment on barrier() in slab_alloc_node() */
	barrier();

	if (likely(page == c->page)) {
		void **freelist = READ_ONCE(c->freelist);

		/* Link the freed chain in front of the current cpu freelist. */
		set_freepointer(s, tail_obj, freelist);

		/*
		 * Publish the new freelist head and advance the tid in one
		 * atomic step. Failure means someone else touched this cpu
		 * slab in between, so restart from the tid/c snapshot.
		 */
		if (unlikely(!this_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
				freelist, tid,
				head, next_tid(tid)))) {

			note_cmpxchg_failure("slab_free", s, tid);
			goto redo;
		}
		stat(s, FREE_FASTPATH);
	} else
		/* Not this cpu's active slab: take the slow path. */
		__slab_free(s, page, head, tail_obj, cnt, addr);

}
31438c2ecf20Sopenharmony_ci
31448c2ecf20Sopenharmony_cistatic __always_inline void slab_free(struct kmem_cache *s, struct page *page,
31458c2ecf20Sopenharmony_ci				      void *head, void *tail, int cnt,
31468c2ecf20Sopenharmony_ci				      unsigned long addr)
31478c2ecf20Sopenharmony_ci{
31488c2ecf20Sopenharmony_ci	/*
31498c2ecf20Sopenharmony_ci	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
31508c2ecf20Sopenharmony_ci	 * to remove objects, whose reuse must be delayed.
31518c2ecf20Sopenharmony_ci	 */
31528c2ecf20Sopenharmony_ci	if (slab_free_freelist_hook(s, &head, &tail, &cnt))
31538c2ecf20Sopenharmony_ci		do_slab_free(s, page, head, tail, cnt, addr);
31548c2ecf20Sopenharmony_ci}
31558c2ecf20Sopenharmony_ci
#ifdef CONFIG_KASAN_GENERIC
/*
 * Direct free entry point used only under CONFIG_KASAN_GENERIC. Goes
 * straight to do_slab_free(), bypassing the freelist hook that
 * slab_free() would run — NOTE(review): presumably so objects released
 * by KASAN itself are not re-intercepted; confirm against mm/kasan.
 */
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
{
	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
}
#endif
31628c2ecf20Sopenharmony_ci
31638c2ecf20Sopenharmony_civoid kmem_cache_free(struct kmem_cache *s, void *x)
31648c2ecf20Sopenharmony_ci{
31658c2ecf20Sopenharmony_ci	s = cache_from_obj(s, x);
31668c2ecf20Sopenharmony_ci	if (!s)
31678c2ecf20Sopenharmony_ci		return;
31688c2ecf20Sopenharmony_ci	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
31698c2ecf20Sopenharmony_ci	trace_kmem_cache_free(_RET_IP_, x);
31708c2ecf20Sopenharmony_ci}
31718c2ecf20Sopenharmony_ciEXPORT_SYMBOL(kmem_cache_free);
31728c2ecf20Sopenharmony_ci
/*
 * A chain of objects, all belonging to the same slab page, detached from
 * the caller's pointer array by build_detached_freelist().
 */
struct detached_freelist {
	struct page *page;	/* slab page all chained objects belong to */
	void *tail;		/* last object of the chain */
	void *freelist;		/* head of the chain */
	int cnt;		/* number of objects in the chain */
	struct kmem_cache *s;	/* cache the objects were allocated from */
};
31808c2ecf20Sopenharmony_ci
31818c2ecf20Sopenharmony_ci/*
31828c2ecf20Sopenharmony_ci * This function progressively scans the array with free objects (with
31838c2ecf20Sopenharmony_ci * a limited look ahead) and extract objects belonging to the same
31848c2ecf20Sopenharmony_ci * page.  It builds a detached freelist directly within the given
31858c2ecf20Sopenharmony_ci * page/objects.  This can happen without any need for
31868c2ecf20Sopenharmony_ci * synchronization, because the objects are owned by running process.
 * The freelist is built up as a singly linked list in the objects.
31888c2ecf20Sopenharmony_ci * The idea is, that this detached freelist can then be bulk
31898c2ecf20Sopenharmony_ci * transferred to the real freelist(s), but only requiring a single
31908c2ecf20Sopenharmony_ci * synchronization primitive.  Look ahead in the array is limited due
31918c2ecf20Sopenharmony_ci * to performance reasons.
31928c2ecf20Sopenharmony_ci */
static inline
int build_detached_freelist(struct kmem_cache *s, size_t size,
			    void **p, struct detached_freelist *df)
{
	size_t first_skipped_index = 0;
	int lookahead = 3;
	void *object;
	struct page *page;

	/* Always re-init detached_freelist */
	df->page = NULL;

	/* Scan backwards for the last not-yet-processed (non-NULL) entry. */
	do {
		object = p[--size];
		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
	} while (!object && size);

	/* Everything was already processed: no freelist to build. */
	if (!object)
		return 0;

	page = virt_to_head_page(object);
	if (!s) {
		/* Handle kmalloc'ed objects (caller passed s == NULL) */
		if (unlikely(!PageSlab(page))) {
			/*
			 * Large allocation served by the page allocator
			 * rather than a slab: release the pages directly.
			 */
			BUG_ON(!PageCompound(page));
			kfree_hook(object);
			__free_pages(page, compound_order(page));
			p[size] = NULL; /* mark object processed */
			return size;
		}
		/* Derive kmem_cache from object */
		df->s = page->slab_cache;
	} else {
		df->s = cache_from_obj(s, object); /* Support for memcg */
	}

	/* Start new detached freelist */
	df->page = page;
	set_freepointer(df->s, object, NULL);
	df->tail = object;
	df->freelist = object;
	p[size] = NULL; /* mark object processed */
	df->cnt = 1;

	while (size) {
		object = p[--size];
		if (!object)
			continue; /* Skip processed objects */

		/* df->page is always set at this point */
		if (df->page == virt_to_head_page(object)) {
			/* Same page: prepend the object to the chain. */
			set_freepointer(df->s, object, df->freelist);
			df->freelist = object;
			df->cnt++;
			p[size] = NULL; /* mark object processed */

			continue;
		}

		/* Limit look ahead search */
		if (!--lookahead)
			break;

		/*
		 * Remember the first entry belonging to another page so
		 * the caller can resume the scan from there.
		 */
		if (!first_skipped_index)
			first_skipped_index = size + 1;
	}

	return first_skipped_index;
}
32638c2ecf20Sopenharmony_ci
32648c2ecf20Sopenharmony_ci/* Note that interrupts must be enabled when calling this function. */
32658c2ecf20Sopenharmony_civoid kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
32668c2ecf20Sopenharmony_ci{
32678c2ecf20Sopenharmony_ci	if (WARN_ON(!size))
32688c2ecf20Sopenharmony_ci		return;
32698c2ecf20Sopenharmony_ci
32708c2ecf20Sopenharmony_ci	memcg_slab_free_hook(s, p, size);
32718c2ecf20Sopenharmony_ci	do {
32728c2ecf20Sopenharmony_ci		struct detached_freelist df;
32738c2ecf20Sopenharmony_ci
32748c2ecf20Sopenharmony_ci		size = build_detached_freelist(s, size, p, &df);
32758c2ecf20Sopenharmony_ci		if (!df.page)
32768c2ecf20Sopenharmony_ci			continue;
32778c2ecf20Sopenharmony_ci
32788c2ecf20Sopenharmony_ci		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
32798c2ecf20Sopenharmony_ci	} while (likely(size));
32808c2ecf20Sopenharmony_ci}
32818c2ecf20Sopenharmony_ciEXPORT_SYMBOL(kmem_cache_free_bulk);
32828c2ecf20Sopenharmony_ci
32838c2ecf20Sopenharmony_ci/* Note that interrupts must be enabled when calling this function. */
/*
 * Allocate 'size' objects from 's' into p[]. Returns the number of
 * objects allocated (== size) on success, or 0 on failure after handing
 * back any partially allocated objects.
 */
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	struct kmem_cache_cpu *c;
	int i;
	struct obj_cgroup *objcg = NULL;

	/* memcg and kmem_cache debug support */
	s = slab_pre_alloc_hook(s, &objcg, size, flags);
	if (unlikely(!s))
		return false;
	/*
	 * Drain objects in the per cpu slab, while disabling local
	 * IRQs, which protects against PREEMPT and interrupts
	 * handlers invoking normal fastpath.
	 */
	local_irq_disable();
	c = this_cpu_ptr(s->cpu_slab);

	for (i = 0; i < size; i++) {
		void *object = c->freelist;

		if (unlikely(!object)) {
			/*
			 * We may have removed an object from c->freelist using
			 * the fastpath in the previous iteration; in that case,
			 * c->tid has not been bumped yet.
			 * Since ___slab_alloc() may reenable interrupts while
			 * allocating memory, we should bump c->tid now.
			 */
			c->tid = next_tid(c->tid);

			/*
			 * Invoking slow path likely have side-effect
			 * of re-populating per CPU c->freelist
			 */
			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
					    _RET_IP_, c);
			if (unlikely(!p[i]))
				goto error;

			/*
			 * Reload the per cpu pointer: the slow path may
			 * have reenabled IRQs (see above), so we could be
			 * running on a different cpu now.
			 */
			c = this_cpu_ptr(s->cpu_slab);
			maybe_wipe_obj_freeptr(s, p[i]);

			continue; /* goto for-loop */
		}
		/* Fastpath: pop the first object off the cpu freelist. */
		c->freelist = get_freepointer(s, object);
		p[i] = object;
		maybe_wipe_obj_freeptr(s, p[i]);
	}
	/* Close the transaction opened by the fastpath pops above. */
	c->tid = next_tid(c->tid);
	local_irq_enable();

	/* Clear memory outside IRQ disabled fastpath loop */
	if (unlikely(slab_want_init_on_alloc(flags, s))) {
		int j;

		for (j = 0; j < i; j++)
			memset(p[j], 0, s->object_size);
	}

	/* memcg and kmem_cache debug support */
	slab_post_alloc_hook(s, objcg, flags, size, p);
	return i;
error:
	local_irq_enable();
	/* Report only the i objects actually obtained, then return them. */
	slab_post_alloc_hook(s, objcg, flags, i, p);
	__kmem_cache_free_bulk(s, i, p);
	return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
33558c2ecf20Sopenharmony_ci
33568c2ecf20Sopenharmony_ci
33578c2ecf20Sopenharmony_ci/*
33588c2ecf20Sopenharmony_ci * Object placement in a slab is made very easy because we always start at
33598c2ecf20Sopenharmony_ci * offset 0. If we tune the size of the object to the alignment then we can
33608c2ecf20Sopenharmony_ci * get the required alignment by putting one properly sized object after
33618c2ecf20Sopenharmony_ci * another.
33628c2ecf20Sopenharmony_ci *
33638c2ecf20Sopenharmony_ci * Notice that the allocation order determines the sizes of the per cpu
33648c2ecf20Sopenharmony_ci * caches. Each processor has always one slab available for allocations.
33658c2ecf20Sopenharmony_ci * Increasing the allocation order reduces the number of times that slabs
33668c2ecf20Sopenharmony_ci * must be moved on and off the partial lists and is therefore a factor in
33678c2ecf20Sopenharmony_ci * locking overhead.
33688c2ecf20Sopenharmony_ci */
33698c2ecf20Sopenharmony_ci
33708c2ecf20Sopenharmony_ci/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
33728c2ecf20Sopenharmony_ci * and slab fragmentation. A higher order reduces the number of partial slabs
33738c2ecf20Sopenharmony_ci * and increases the number of allocations possible without having to
33748c2ecf20Sopenharmony_ci * take the list_lock.
33758c2ecf20Sopenharmony_ci */
/* Floor for every order computed by slab_order(). */
static unsigned int slub_min_order;
/* Order cap tried first by calculate_order() before falling back to MAX_ORDER. */
static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
/* Desired minimum objects per slab; 0 means derive from nr_cpu_ids. */
static unsigned int slub_min_objects;
33798c2ecf20Sopenharmony_ci
33808c2ecf20Sopenharmony_ci/*
33818c2ecf20Sopenharmony_ci * Calculate the order of allocation given an slab object size.
33828c2ecf20Sopenharmony_ci *
33838c2ecf20Sopenharmony_ci * The order of allocation has significant impact on performance and other
33848c2ecf20Sopenharmony_ci * system components. Generally order 0 allocations should be preferred since
33858c2ecf20Sopenharmony_ci * order 0 does not cause fragmentation in the page allocator. Larger objects
33868c2ecf20Sopenharmony_ci * be problematic to put into order 0 slabs because there may be too much
33878c2ecf20Sopenharmony_ci * unused space left. We go to a higher order if more than 1/16th of the slab
33888c2ecf20Sopenharmony_ci * would be wasted.
33898c2ecf20Sopenharmony_ci *
33908c2ecf20Sopenharmony_ci * In order to reach satisfactory performance we must ensure that a minimum
33918c2ecf20Sopenharmony_ci * number of objects is in one slab. Otherwise we may generate too much
33928c2ecf20Sopenharmony_ci * activity on the partial lists which requires taking the list_lock. This is
33938c2ecf20Sopenharmony_ci * less a concern for large slabs though which are rarely used.
33948c2ecf20Sopenharmony_ci *
33958c2ecf20Sopenharmony_ci * slub_max_order specifies the order where we begin to stop considering the
33968c2ecf20Sopenharmony_ci * number of objects in a slab as critical. If we reach slub_max_order then
33978c2ecf20Sopenharmony_ci * we try to keep the page order as low as possible. So we accept more waste
33988c2ecf20Sopenharmony_ci * of space in favor of a small page order.
33998c2ecf20Sopenharmony_ci *
34008c2ecf20Sopenharmony_ci * Higher order allocations also allow the placement of more objects in a
34018c2ecf20Sopenharmony_ci * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
34038c2ecf20Sopenharmony_ci * the smallest order which will fit the object.
34048c2ecf20Sopenharmony_ci */
34058c2ecf20Sopenharmony_cistatic inline unsigned int slab_order(unsigned int size,
34068c2ecf20Sopenharmony_ci		unsigned int min_objects, unsigned int max_order,
34078c2ecf20Sopenharmony_ci		unsigned int fract_leftover)
34088c2ecf20Sopenharmony_ci{
34098c2ecf20Sopenharmony_ci	unsigned int min_order = slub_min_order;
34108c2ecf20Sopenharmony_ci	unsigned int order;
34118c2ecf20Sopenharmony_ci
34128c2ecf20Sopenharmony_ci	if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
34138c2ecf20Sopenharmony_ci		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
34148c2ecf20Sopenharmony_ci
34158c2ecf20Sopenharmony_ci	for (order = max(min_order, (unsigned int)get_order(min_objects * size));
34168c2ecf20Sopenharmony_ci			order <= max_order; order++) {
34178c2ecf20Sopenharmony_ci
34188c2ecf20Sopenharmony_ci		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
34198c2ecf20Sopenharmony_ci		unsigned int rem;
34208c2ecf20Sopenharmony_ci
34218c2ecf20Sopenharmony_ci		rem = slab_size % size;
34228c2ecf20Sopenharmony_ci
34238c2ecf20Sopenharmony_ci		if (rem <= slab_size / fract_leftover)
34248c2ecf20Sopenharmony_ci			break;
34258c2ecf20Sopenharmony_ci	}
34268c2ecf20Sopenharmony_ci
34278c2ecf20Sopenharmony_ci	return order;
34288c2ecf20Sopenharmony_ci}
34298c2ecf20Sopenharmony_ci
34308c2ecf20Sopenharmony_cistatic inline int calculate_order(unsigned int size)
34318c2ecf20Sopenharmony_ci{
34328c2ecf20Sopenharmony_ci	unsigned int order;
34338c2ecf20Sopenharmony_ci	unsigned int min_objects;
34348c2ecf20Sopenharmony_ci	unsigned int max_objects;
34358c2ecf20Sopenharmony_ci
34368c2ecf20Sopenharmony_ci	/*
34378c2ecf20Sopenharmony_ci	 * Attempt to find best configuration for a slab. This
34388c2ecf20Sopenharmony_ci	 * works by first attempting to generate a layout with
34398c2ecf20Sopenharmony_ci	 * the best configuration and backing off gradually.
34408c2ecf20Sopenharmony_ci	 *
34418c2ecf20Sopenharmony_ci	 * First we increase the acceptable waste in a slab. Then
34428c2ecf20Sopenharmony_ci	 * we reduce the minimum objects required in a slab.
34438c2ecf20Sopenharmony_ci	 */
34448c2ecf20Sopenharmony_ci	min_objects = slub_min_objects;
34458c2ecf20Sopenharmony_ci	if (!min_objects)
34468c2ecf20Sopenharmony_ci		min_objects = 4 * (fls(nr_cpu_ids) + 1);
34478c2ecf20Sopenharmony_ci	max_objects = order_objects(slub_max_order, size);
34488c2ecf20Sopenharmony_ci	min_objects = min(min_objects, max_objects);
34498c2ecf20Sopenharmony_ci
34508c2ecf20Sopenharmony_ci	while (min_objects > 1) {
34518c2ecf20Sopenharmony_ci		unsigned int fraction;
34528c2ecf20Sopenharmony_ci
34538c2ecf20Sopenharmony_ci		fraction = 16;
34548c2ecf20Sopenharmony_ci		while (fraction >= 4) {
34558c2ecf20Sopenharmony_ci			order = slab_order(size, min_objects,
34568c2ecf20Sopenharmony_ci					slub_max_order, fraction);
34578c2ecf20Sopenharmony_ci			if (order <= slub_max_order)
34588c2ecf20Sopenharmony_ci				return order;
34598c2ecf20Sopenharmony_ci			fraction /= 2;
34608c2ecf20Sopenharmony_ci		}
34618c2ecf20Sopenharmony_ci		min_objects--;
34628c2ecf20Sopenharmony_ci	}
34638c2ecf20Sopenharmony_ci
34648c2ecf20Sopenharmony_ci	/*
34658c2ecf20Sopenharmony_ci	 * We were unable to place multiple objects in a slab. Now
34668c2ecf20Sopenharmony_ci	 * lets see if we can place a single object there.
34678c2ecf20Sopenharmony_ci	 */
34688c2ecf20Sopenharmony_ci	order = slab_order(size, 1, slub_max_order, 1);
34698c2ecf20Sopenharmony_ci	if (order <= slub_max_order)
34708c2ecf20Sopenharmony_ci		return order;
34718c2ecf20Sopenharmony_ci
34728c2ecf20Sopenharmony_ci	/*
34738c2ecf20Sopenharmony_ci	 * Doh this slab cannot be placed using slub_max_order.
34748c2ecf20Sopenharmony_ci	 */
34758c2ecf20Sopenharmony_ci	order = slab_order(size, 1, MAX_ORDER, 1);
34768c2ecf20Sopenharmony_ci	if (order < MAX_ORDER)
34778c2ecf20Sopenharmony_ci		return order;
34788c2ecf20Sopenharmony_ci	return -ENOSYS;
34798c2ecf20Sopenharmony_ci}
34808c2ecf20Sopenharmony_ci
34818c2ecf20Sopenharmony_cistatic void
34828c2ecf20Sopenharmony_ciinit_kmem_cache_node(struct kmem_cache_node *n)
34838c2ecf20Sopenharmony_ci{
34848c2ecf20Sopenharmony_ci	n->nr_partial = 0;
34858c2ecf20Sopenharmony_ci	spin_lock_init(&n->list_lock);
34868c2ecf20Sopenharmony_ci	INIT_LIST_HEAD(&n->partial);
34878c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
34888c2ecf20Sopenharmony_ci	atomic_long_set(&n->nr_slabs, 0);
34898c2ecf20Sopenharmony_ci	atomic_long_set(&n->total_objects, 0);
34908c2ecf20Sopenharmony_ci	INIT_LIST_HEAD(&n->full);
34918c2ecf20Sopenharmony_ci#endif
34928c2ecf20Sopenharmony_ci}
34938c2ecf20Sopenharmony_ci
/*
 * Allocate and initialize the per cpu structures of cache 's'.
 * Returns 1 on success, 0 on allocation failure.
 */
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
	/*
	 * Compile-time check that the early percpu area is large enough
	 * for the cpu slab structures of all kmalloc size classes.
	 */
	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));

	/*
	 * Must align to double word boundary for the double cmpxchg
	 * instructions to work; see __pcpu_double_call_return_bool().
	 */
	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
				     2 * sizeof(void *));

	if (!s->cpu_slab)
		return 0;

	init_kmem_cache_cpus(s);

	return 1;
}
35138c2ecf20Sopenharmony_ci
35148c2ecf20Sopenharmony_cistatic struct kmem_cache *kmem_cache_node;
35158c2ecf20Sopenharmony_ci
35168c2ecf20Sopenharmony_ci/*
35178c2ecf20Sopenharmony_ci * No kmalloc_node yet so do it by hand. We know that this is the first
35188c2ecf20Sopenharmony_ci * slab on the node for this slabcache. There are no concurrent accesses
35198c2ecf20Sopenharmony_ci * possible.
35208c2ecf20Sopenharmony_ci *
35218c2ecf20Sopenharmony_ci * Note that this function only works on the kmem_cache_node
35228c2ecf20Sopenharmony_ci * when allocating for the kmem_cache_node. This is used for bootstrapping
35238c2ecf20Sopenharmony_ci * memory on a fresh node that has no slab structures yet.
35248c2ecf20Sopenharmony_ci */
static void early_kmem_cache_node_alloc(int node)
{
	struct page *page;
	struct kmem_cache_node *n;

	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));

	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);

	BUG_ON(!page);
	if (page_to_nid(page) != node) {
		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
	}

	/* Carve the first object out of the fresh slab by hand. */
	n = page->freelist;
	BUG_ON(!n);
#ifdef CONFIG_SLUB_DEBUG
	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
	init_tracking(kmem_cache_node, n);
#endif
	/*
	 * Mark the object in use for KASAN; note that the possibly
	 * updated pointer returned here is the one used from now on.
	 */
	n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
		      GFP_KERNEL);
	/* Account the object as allocated from the slab. */
	page->freelist = get_freepointer(kmem_cache_node, n);
	page->inuse = 1;
	page->frozen = 0;
	kmem_cache_node->node[node] = n;
	init_kmem_cache_node(n);
	inc_slabs_node(kmem_cache_node, node, page->objects);

	/*
	 * No locks need to be taken here as it has just been
	 * initialized and there is no concurrent access.
	 */
	__add_partial(n, page, DEACTIVATE_TO_HEAD);
}
35618c2ecf20Sopenharmony_ci
/* Return every per node structure of 's' to the kmem_cache_node cache. */
static void free_kmem_cache_nodes(struct kmem_cache *s)
{
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n) {
		/* Clear the slot before freeing the structure it points to. */
		s->node[node] = NULL;
		kmem_cache_free(kmem_cache_node, n);
	}
}
35728c2ecf20Sopenharmony_ci
/*
 * Final teardown of a cache: destroy the freelist randomization state,
 * then free the per cpu and per node structures.
 */
void __kmem_cache_release(struct kmem_cache *s)
{
	cache_random_seq_destroy(s);
	free_percpu(s->cpu_slab);
	free_kmem_cache_nodes(s);
}
35798c2ecf20Sopenharmony_ci
/*
 * Allocate and initialize a per node structure for every normal-memory
 * node. Returns 1 on success; on failure releases whatever was set up
 * and returns 0.
 */
static int init_kmem_cache_nodes(struct kmem_cache *s)
{
	int node;

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n;

		/*
		 * While the allocator is still coming up the node
		 * structure has to be carved out of a raw slab page.
		 */
		if (slab_state == DOWN) {
			early_kmem_cache_node_alloc(node);
			continue;
		}
		n = kmem_cache_alloc_node(kmem_cache_node,
						GFP_KERNEL, node);

		if (!n) {
			/* Roll back the nodes allocated so far. */
			free_kmem_cache_nodes(s);
			return 0;
		}

		init_kmem_cache_node(n);
		s->node[node] = n;
	}
	return 1;
}
36048c2ecf20Sopenharmony_ci
36058c2ecf20Sopenharmony_cistatic void set_min_partial(struct kmem_cache *s, unsigned long min)
36068c2ecf20Sopenharmony_ci{
36078c2ecf20Sopenharmony_ci	if (min < MIN_PARTIAL)
36088c2ecf20Sopenharmony_ci		min = MIN_PARTIAL;
36098c2ecf20Sopenharmony_ci	else if (min > MAX_PARTIAL)
36108c2ecf20Sopenharmony_ci		min = MAX_PARTIAL;
36118c2ecf20Sopenharmony_ci	s->min_partial = min;
36128c2ecf20Sopenharmony_ci}
36138c2ecf20Sopenharmony_ci
static void set_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	unsigned int nr_objects;

	/*
	 * cpu_partial is the maximum number of objects kept in the
	 * per cpu partial lists of a processor.
	 *
	 * Per cpu partial lists mainly contain slabs that just have one
	 * object freed. If they are used for allocation then they can be
	 * filled up again with minimal effort. The slab will never hit the
	 * per node partial lists and therefore no locking will be required.
	 *
	 * This setting also determines
	 *
	 * A) The number of objects from per cpu partial slabs dumped to the
	 *    per node list when we reach the limit.
	 * B) The number of objects in cpu partial slabs to extract from the
	 *    per node list when we run out of per cpu objects. We only fetch
	 *    50% to keep some capacity around for frees.
	 */
	if (!kmem_cache_has_cpu_partial(s))
		nr_objects = 0;
	else if (s->size >= PAGE_SIZE)
		nr_objects = 2;
	else if (s->size >= 1024)
		nr_objects = 6;
	else if (s->size >= 256)
		nr_objects = 13;
	else
		nr_objects = 30;

	slub_set_cpu_partial(s, nr_objects);
#endif
}
36468c2ecf20Sopenharmony_ci
36478c2ecf20Sopenharmony_ci/*
36488c2ecf20Sopenharmony_ci * calculate_sizes() determines the order and the distribution of data within
36498c2ecf20Sopenharmony_ci * a slab object.
36508c2ecf20Sopenharmony_ci */
36518c2ecf20Sopenharmony_cistatic int calculate_sizes(struct kmem_cache *s, int forced_order)
36528c2ecf20Sopenharmony_ci{
36538c2ecf20Sopenharmony_ci	slab_flags_t flags = s->flags;
36548c2ecf20Sopenharmony_ci	unsigned int size = s->object_size;
36558c2ecf20Sopenharmony_ci	unsigned int order;
36568c2ecf20Sopenharmony_ci
36578c2ecf20Sopenharmony_ci	/*
36588c2ecf20Sopenharmony_ci	 * Round up object size to the next word boundary. We can only
36598c2ecf20Sopenharmony_ci	 * place the free pointer at word boundaries and this determines
36608c2ecf20Sopenharmony_ci	 * the possible location of the free pointer.
36618c2ecf20Sopenharmony_ci	 */
36628c2ecf20Sopenharmony_ci	size = ALIGN(size, sizeof(void *));
36638c2ecf20Sopenharmony_ci
36648c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
36658c2ecf20Sopenharmony_ci	/*
36668c2ecf20Sopenharmony_ci	 * Determine if we can poison the object itself. If the user of
36678c2ecf20Sopenharmony_ci	 * the slab may touch the object after free or before allocation
36688c2ecf20Sopenharmony_ci	 * then we should never poison the object itself.
36698c2ecf20Sopenharmony_ci	 */
36708c2ecf20Sopenharmony_ci	if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
36718c2ecf20Sopenharmony_ci			!s->ctor)
36728c2ecf20Sopenharmony_ci		s->flags |= __OBJECT_POISON;
36738c2ecf20Sopenharmony_ci	else
36748c2ecf20Sopenharmony_ci		s->flags &= ~__OBJECT_POISON;
36758c2ecf20Sopenharmony_ci
36768c2ecf20Sopenharmony_ci
36778c2ecf20Sopenharmony_ci	/*
36788c2ecf20Sopenharmony_ci	 * If we are Redzoning then check if there is some space between the
36798c2ecf20Sopenharmony_ci	 * end of the object and the free pointer. If not then add an
36808c2ecf20Sopenharmony_ci	 * additional word to have some bytes to store Redzone information.
36818c2ecf20Sopenharmony_ci	 */
36828c2ecf20Sopenharmony_ci	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
36838c2ecf20Sopenharmony_ci		size += sizeof(void *);
36848c2ecf20Sopenharmony_ci#endif
36858c2ecf20Sopenharmony_ci
36868c2ecf20Sopenharmony_ci	/*
36878c2ecf20Sopenharmony_ci	 * With that we have determined the number of bytes in actual use
36888c2ecf20Sopenharmony_ci	 * by the object and redzoning.
36898c2ecf20Sopenharmony_ci	 */
36908c2ecf20Sopenharmony_ci	s->inuse = size;
36918c2ecf20Sopenharmony_ci
36928c2ecf20Sopenharmony_ci	if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
36938c2ecf20Sopenharmony_ci	    ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
36948c2ecf20Sopenharmony_ci	    s->ctor) {
36958c2ecf20Sopenharmony_ci		/*
36968c2ecf20Sopenharmony_ci		 * Relocate free pointer after the object if it is not
36978c2ecf20Sopenharmony_ci		 * permitted to overwrite the first word of the object on
36988c2ecf20Sopenharmony_ci		 * kmem_cache_free.
36998c2ecf20Sopenharmony_ci		 *
37008c2ecf20Sopenharmony_ci		 * This is the case if we do RCU, have a constructor or
37018c2ecf20Sopenharmony_ci		 * destructor, are poisoning the objects, or are
37028c2ecf20Sopenharmony_ci		 * redzoning an object smaller than sizeof(void *).
37038c2ecf20Sopenharmony_ci		 *
37048c2ecf20Sopenharmony_ci		 * The assumption that s->offset >= s->inuse means free
37058c2ecf20Sopenharmony_ci		 * pointer is outside of the object is used in the
37068c2ecf20Sopenharmony_ci		 * freeptr_outside_object() function. If that is no
37078c2ecf20Sopenharmony_ci		 * longer true, the function needs to be modified.
37088c2ecf20Sopenharmony_ci		 */
37098c2ecf20Sopenharmony_ci		s->offset = size;
37108c2ecf20Sopenharmony_ci		size += sizeof(void *);
37118c2ecf20Sopenharmony_ci	} else {
37128c2ecf20Sopenharmony_ci		/*
37138c2ecf20Sopenharmony_ci		 * Store freelist pointer near middle of object to keep
37148c2ecf20Sopenharmony_ci		 * it away from the edges of the object to avoid small
37158c2ecf20Sopenharmony_ci		 * sized over/underflows from neighboring allocations.
37168c2ecf20Sopenharmony_ci		 */
37178c2ecf20Sopenharmony_ci		s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
37188c2ecf20Sopenharmony_ci	}
37198c2ecf20Sopenharmony_ci
37208c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
37218c2ecf20Sopenharmony_ci	if (flags & SLAB_STORE_USER)
37228c2ecf20Sopenharmony_ci		/*
37238c2ecf20Sopenharmony_ci		 * Need to store information about allocs and frees after
37248c2ecf20Sopenharmony_ci		 * the object.
37258c2ecf20Sopenharmony_ci		 */
37268c2ecf20Sopenharmony_ci		size += 2 * sizeof(struct track);
37278c2ecf20Sopenharmony_ci#endif
37288c2ecf20Sopenharmony_ci
37298c2ecf20Sopenharmony_ci	kasan_cache_create(s, &size, &s->flags);
37308c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
37318c2ecf20Sopenharmony_ci	if (flags & SLAB_RED_ZONE) {
37328c2ecf20Sopenharmony_ci		/*
37338c2ecf20Sopenharmony_ci		 * Add some empty padding so that we can catch
37348c2ecf20Sopenharmony_ci		 * overwrites from earlier objects rather than let
37358c2ecf20Sopenharmony_ci		 * tracking information or the free pointer be
37368c2ecf20Sopenharmony_ci		 * corrupted if a user writes before the start
37378c2ecf20Sopenharmony_ci		 * of the object.
37388c2ecf20Sopenharmony_ci		 */
37398c2ecf20Sopenharmony_ci		size += sizeof(void *);
37408c2ecf20Sopenharmony_ci
37418c2ecf20Sopenharmony_ci		s->red_left_pad = sizeof(void *);
37428c2ecf20Sopenharmony_ci		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
37438c2ecf20Sopenharmony_ci		size += s->red_left_pad;
37448c2ecf20Sopenharmony_ci	}
37458c2ecf20Sopenharmony_ci#endif
37468c2ecf20Sopenharmony_ci
37478c2ecf20Sopenharmony_ci	/*
37488c2ecf20Sopenharmony_ci	 * SLUB stores one object immediately after another beginning from
37498c2ecf20Sopenharmony_ci	 * offset 0. In order to align the objects we have to simply size
37508c2ecf20Sopenharmony_ci	 * each object to conform to the alignment.
37518c2ecf20Sopenharmony_ci	 */
37528c2ecf20Sopenharmony_ci	size = ALIGN(size, s->align);
37538c2ecf20Sopenharmony_ci	s->size = size;
37548c2ecf20Sopenharmony_ci	s->reciprocal_size = reciprocal_value(size);
37558c2ecf20Sopenharmony_ci	if (forced_order >= 0)
37568c2ecf20Sopenharmony_ci		order = forced_order;
37578c2ecf20Sopenharmony_ci	else
37588c2ecf20Sopenharmony_ci		order = calculate_order(size);
37598c2ecf20Sopenharmony_ci
37608c2ecf20Sopenharmony_ci	if ((int)order < 0)
37618c2ecf20Sopenharmony_ci		return 0;
37628c2ecf20Sopenharmony_ci
37638c2ecf20Sopenharmony_ci	s->allocflags = 0;
37648c2ecf20Sopenharmony_ci	if (order)
37658c2ecf20Sopenharmony_ci		s->allocflags |= __GFP_COMP;
37668c2ecf20Sopenharmony_ci
37678c2ecf20Sopenharmony_ci	if (s->flags & SLAB_CACHE_DMA)
37688c2ecf20Sopenharmony_ci		s->allocflags |= GFP_DMA;
37698c2ecf20Sopenharmony_ci
37708c2ecf20Sopenharmony_ci	if (s->flags & SLAB_CACHE_DMA32)
37718c2ecf20Sopenharmony_ci		s->allocflags |= GFP_DMA32;
37728c2ecf20Sopenharmony_ci
37738c2ecf20Sopenharmony_ci	if (s->flags & SLAB_RECLAIM_ACCOUNT)
37748c2ecf20Sopenharmony_ci		s->allocflags |= __GFP_RECLAIMABLE;
37758c2ecf20Sopenharmony_ci
37768c2ecf20Sopenharmony_ci	/*
37778c2ecf20Sopenharmony_ci	 * Determine the number of objects per slab
37788c2ecf20Sopenharmony_ci	 */
37798c2ecf20Sopenharmony_ci	s->oo = oo_make(order, size);
37808c2ecf20Sopenharmony_ci	s->min = oo_make(get_order(size), size);
37818c2ecf20Sopenharmony_ci	if (oo_objects(s->oo) > oo_objects(s->max))
37828c2ecf20Sopenharmony_ci		s->max = s->oo;
37838c2ecf20Sopenharmony_ci
37848c2ecf20Sopenharmony_ci	return !!oo_objects(s->oo);
37858c2ecf20Sopenharmony_ci}
37868c2ecf20Sopenharmony_ci
37878c2ecf20Sopenharmony_cistatic int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
37888c2ecf20Sopenharmony_ci{
37898c2ecf20Sopenharmony_ci	s->flags = kmem_cache_flags(s->size, flags, s->name);
37908c2ecf20Sopenharmony_ci#ifdef CONFIG_SLAB_FREELIST_HARDENED
37918c2ecf20Sopenharmony_ci	s->random = get_random_long();
37928c2ecf20Sopenharmony_ci#endif
37938c2ecf20Sopenharmony_ci
37948c2ecf20Sopenharmony_ci	if (!calculate_sizes(s, -1))
37958c2ecf20Sopenharmony_ci		goto error;
37968c2ecf20Sopenharmony_ci	if (disable_higher_order_debug) {
37978c2ecf20Sopenharmony_ci		/*
37988c2ecf20Sopenharmony_ci		 * Disable debugging flags that store metadata if the min slab
37998c2ecf20Sopenharmony_ci		 * order increased.
38008c2ecf20Sopenharmony_ci		 */
38018c2ecf20Sopenharmony_ci		if (get_order(s->size) > get_order(s->object_size)) {
38028c2ecf20Sopenharmony_ci			s->flags &= ~DEBUG_METADATA_FLAGS;
38038c2ecf20Sopenharmony_ci			s->offset = 0;
38048c2ecf20Sopenharmony_ci			if (!calculate_sizes(s, -1))
38058c2ecf20Sopenharmony_ci				goto error;
38068c2ecf20Sopenharmony_ci		}
38078c2ecf20Sopenharmony_ci	}
38088c2ecf20Sopenharmony_ci
38098c2ecf20Sopenharmony_ci#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
38108c2ecf20Sopenharmony_ci    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
38118c2ecf20Sopenharmony_ci	if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
38128c2ecf20Sopenharmony_ci		/* Enable fast mode */
38138c2ecf20Sopenharmony_ci		s->flags |= __CMPXCHG_DOUBLE;
38148c2ecf20Sopenharmony_ci#endif
38158c2ecf20Sopenharmony_ci
38168c2ecf20Sopenharmony_ci	/*
38178c2ecf20Sopenharmony_ci	 * The larger the object size is, the more pages we want on the partial
38188c2ecf20Sopenharmony_ci	 * list to avoid pounding the page allocator excessively.
38198c2ecf20Sopenharmony_ci	 */
38208c2ecf20Sopenharmony_ci	set_min_partial(s, ilog2(s->size) / 2);
38218c2ecf20Sopenharmony_ci
38228c2ecf20Sopenharmony_ci	set_cpu_partial(s);
38238c2ecf20Sopenharmony_ci
38248c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA
38258c2ecf20Sopenharmony_ci	s->remote_node_defrag_ratio = 1000;
38268c2ecf20Sopenharmony_ci#endif
38278c2ecf20Sopenharmony_ci
38288c2ecf20Sopenharmony_ci	/* Initialize the pre-computed randomized freelist if slab is up */
38298c2ecf20Sopenharmony_ci	if (slab_state >= UP) {
38308c2ecf20Sopenharmony_ci		if (init_cache_random_seq(s))
38318c2ecf20Sopenharmony_ci			goto error;
38328c2ecf20Sopenharmony_ci	}
38338c2ecf20Sopenharmony_ci
38348c2ecf20Sopenharmony_ci	if (!init_kmem_cache_nodes(s))
38358c2ecf20Sopenharmony_ci		goto error;
38368c2ecf20Sopenharmony_ci
38378c2ecf20Sopenharmony_ci	if (alloc_kmem_cache_cpus(s))
38388c2ecf20Sopenharmony_ci		return 0;
38398c2ecf20Sopenharmony_ci
38408c2ecf20Sopenharmony_cierror:
38418c2ecf20Sopenharmony_ci	__kmem_cache_release(s);
38428c2ecf20Sopenharmony_ci	return -EINVAL;
38438c2ecf20Sopenharmony_ci}
38448c2ecf20Sopenharmony_ci
38458c2ecf20Sopenharmony_cistatic void list_slab_objects(struct kmem_cache *s, struct page *page,
38468c2ecf20Sopenharmony_ci			      const char *text)
38478c2ecf20Sopenharmony_ci{
38488c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
38498c2ecf20Sopenharmony_ci	void *addr = page_address(page);
38508c2ecf20Sopenharmony_ci	unsigned long *map;
38518c2ecf20Sopenharmony_ci	void *p;
38528c2ecf20Sopenharmony_ci
38538c2ecf20Sopenharmony_ci	slab_err(s, page, text, s->name);
38548c2ecf20Sopenharmony_ci	slab_lock(page);
38558c2ecf20Sopenharmony_ci
38568c2ecf20Sopenharmony_ci	map = get_map(s, page);
38578c2ecf20Sopenharmony_ci	for_each_object(p, s, addr, page->objects) {
38588c2ecf20Sopenharmony_ci
38598c2ecf20Sopenharmony_ci		if (!test_bit(__obj_to_index(s, addr, p), map)) {
38608c2ecf20Sopenharmony_ci			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
38618c2ecf20Sopenharmony_ci			print_tracking(s, p);
38628c2ecf20Sopenharmony_ci		}
38638c2ecf20Sopenharmony_ci	}
38648c2ecf20Sopenharmony_ci	put_map(map);
38658c2ecf20Sopenharmony_ci	slab_unlock(page);
38668c2ecf20Sopenharmony_ci#endif
38678c2ecf20Sopenharmony_ci}
38688c2ecf20Sopenharmony_ci
38698c2ecf20Sopenharmony_ci/*
38708c2ecf20Sopenharmony_ci * Attempt to free all partial slabs on a node.
38718c2ecf20Sopenharmony_ci * This is called from __kmem_cache_shutdown(). We must take list_lock
38728c2ecf20Sopenharmony_ci * because sysfs file might still access partial list after the shutdowning.
38738c2ecf20Sopenharmony_ci */
38748c2ecf20Sopenharmony_cistatic void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
38758c2ecf20Sopenharmony_ci{
38768c2ecf20Sopenharmony_ci	LIST_HEAD(discard);
38778c2ecf20Sopenharmony_ci	struct page *page, *h;
38788c2ecf20Sopenharmony_ci
38798c2ecf20Sopenharmony_ci	BUG_ON(irqs_disabled());
38808c2ecf20Sopenharmony_ci	spin_lock_irq(&n->list_lock);
38818c2ecf20Sopenharmony_ci	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
38828c2ecf20Sopenharmony_ci		if (!page->inuse) {
38838c2ecf20Sopenharmony_ci			remove_partial(n, page);
38848c2ecf20Sopenharmony_ci			list_add(&page->slab_list, &discard);
38858c2ecf20Sopenharmony_ci		} else {
38868c2ecf20Sopenharmony_ci			list_slab_objects(s, page,
38878c2ecf20Sopenharmony_ci			  "Objects remaining in %s on __kmem_cache_shutdown()");
38888c2ecf20Sopenharmony_ci		}
38898c2ecf20Sopenharmony_ci	}
38908c2ecf20Sopenharmony_ci	spin_unlock_irq(&n->list_lock);
38918c2ecf20Sopenharmony_ci
38928c2ecf20Sopenharmony_ci	list_for_each_entry_safe(page, h, &discard, slab_list)
38938c2ecf20Sopenharmony_ci		discard_slab(s, page);
38948c2ecf20Sopenharmony_ci}
38958c2ecf20Sopenharmony_ci
38968c2ecf20Sopenharmony_cibool __kmem_cache_empty(struct kmem_cache *s)
38978c2ecf20Sopenharmony_ci{
38988c2ecf20Sopenharmony_ci	int node;
38998c2ecf20Sopenharmony_ci	struct kmem_cache_node *n;
39008c2ecf20Sopenharmony_ci
39018c2ecf20Sopenharmony_ci	for_each_kmem_cache_node(s, node, n)
39028c2ecf20Sopenharmony_ci		if (n->nr_partial || slabs_node(s, node))
39038c2ecf20Sopenharmony_ci			return false;
39048c2ecf20Sopenharmony_ci	return true;
39058c2ecf20Sopenharmony_ci}
39068c2ecf20Sopenharmony_ci
39078c2ecf20Sopenharmony_ci/*
39088c2ecf20Sopenharmony_ci * Release all resources used by a slab cache.
39098c2ecf20Sopenharmony_ci */
39108c2ecf20Sopenharmony_ciint __kmem_cache_shutdown(struct kmem_cache *s)
39118c2ecf20Sopenharmony_ci{
39128c2ecf20Sopenharmony_ci	int node;
39138c2ecf20Sopenharmony_ci	struct kmem_cache_node *n;
39148c2ecf20Sopenharmony_ci
39158c2ecf20Sopenharmony_ci	flush_all(s);
39168c2ecf20Sopenharmony_ci	/* Attempt to free all objects */
39178c2ecf20Sopenharmony_ci	for_each_kmem_cache_node(s, node, n) {
39188c2ecf20Sopenharmony_ci		free_partial(s, n);
39198c2ecf20Sopenharmony_ci		if (n->nr_partial || slabs_node(s, node))
39208c2ecf20Sopenharmony_ci			return 1;
39218c2ecf20Sopenharmony_ci	}
39228c2ecf20Sopenharmony_ci	return 0;
39238c2ecf20Sopenharmony_ci}
39248c2ecf20Sopenharmony_ci
39258c2ecf20Sopenharmony_ci/********************************************************************
39268c2ecf20Sopenharmony_ci *		Kmalloc subsystem
39278c2ecf20Sopenharmony_ci *******************************************************************/
39288c2ecf20Sopenharmony_ci
39298c2ecf20Sopenharmony_cistatic int __init setup_slub_min_order(char *str)
39308c2ecf20Sopenharmony_ci{
39318c2ecf20Sopenharmony_ci	get_option(&str, (int *)&slub_min_order);
39328c2ecf20Sopenharmony_ci
39338c2ecf20Sopenharmony_ci	return 1;
39348c2ecf20Sopenharmony_ci}
39358c2ecf20Sopenharmony_ci
39368c2ecf20Sopenharmony_ci__setup("slub_min_order=", setup_slub_min_order);
39378c2ecf20Sopenharmony_ci
39388c2ecf20Sopenharmony_cistatic int __init setup_slub_max_order(char *str)
39398c2ecf20Sopenharmony_ci{
39408c2ecf20Sopenharmony_ci	get_option(&str, (int *)&slub_max_order);
39418c2ecf20Sopenharmony_ci	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
39428c2ecf20Sopenharmony_ci
39438c2ecf20Sopenharmony_ci	return 1;
39448c2ecf20Sopenharmony_ci}
39458c2ecf20Sopenharmony_ci
39468c2ecf20Sopenharmony_ci__setup("slub_max_order=", setup_slub_max_order);
39478c2ecf20Sopenharmony_ci
39488c2ecf20Sopenharmony_cistatic int __init setup_slub_min_objects(char *str)
39498c2ecf20Sopenharmony_ci{
39508c2ecf20Sopenharmony_ci	get_option(&str, (int *)&slub_min_objects);
39518c2ecf20Sopenharmony_ci
39528c2ecf20Sopenharmony_ci	return 1;
39538c2ecf20Sopenharmony_ci}
39548c2ecf20Sopenharmony_ci
39558c2ecf20Sopenharmony_ci__setup("slub_min_objects=", setup_slub_min_objects);
39568c2ecf20Sopenharmony_ci
39578c2ecf20Sopenharmony_civoid *__kmalloc(size_t size, gfp_t flags)
39588c2ecf20Sopenharmony_ci{
39598c2ecf20Sopenharmony_ci	struct kmem_cache *s;
39608c2ecf20Sopenharmony_ci	void *ret;
39618c2ecf20Sopenharmony_ci
39628c2ecf20Sopenharmony_ci	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
39638c2ecf20Sopenharmony_ci		return kmalloc_large(size, flags);
39648c2ecf20Sopenharmony_ci
39658c2ecf20Sopenharmony_ci	s = kmalloc_slab(size, flags);
39668c2ecf20Sopenharmony_ci
39678c2ecf20Sopenharmony_ci	if (unlikely(ZERO_OR_NULL_PTR(s)))
39688c2ecf20Sopenharmony_ci		return s;
39698c2ecf20Sopenharmony_ci
39708c2ecf20Sopenharmony_ci	ret = slab_alloc(s, flags, _RET_IP_);
39718c2ecf20Sopenharmony_ci
39728c2ecf20Sopenharmony_ci	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
39738c2ecf20Sopenharmony_ci
39748c2ecf20Sopenharmony_ci	ret = kasan_kmalloc(s, ret, size, flags);
39758c2ecf20Sopenharmony_ci
39768c2ecf20Sopenharmony_ci	return ret;
39778c2ecf20Sopenharmony_ci}
39788c2ecf20Sopenharmony_ciEXPORT_SYMBOL(__kmalloc);
39798c2ecf20Sopenharmony_ci
39808c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA
39818c2ecf20Sopenharmony_cistatic void *kmalloc_large_node(size_t size, gfp_t flags, int node)
39828c2ecf20Sopenharmony_ci{
39838c2ecf20Sopenharmony_ci	struct page *page;
39848c2ecf20Sopenharmony_ci	void *ptr = NULL;
39858c2ecf20Sopenharmony_ci	unsigned int order = get_order(size);
39868c2ecf20Sopenharmony_ci
39878c2ecf20Sopenharmony_ci	flags |= __GFP_COMP;
39888c2ecf20Sopenharmony_ci	page = alloc_pages_node(node, flags, order);
39898c2ecf20Sopenharmony_ci	if (page) {
39908c2ecf20Sopenharmony_ci		ptr = page_address(page);
39918c2ecf20Sopenharmony_ci		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
39928c2ecf20Sopenharmony_ci				      PAGE_SIZE << order);
39938c2ecf20Sopenharmony_ci	}
39948c2ecf20Sopenharmony_ci
39958c2ecf20Sopenharmony_ci	return kmalloc_large_node_hook(ptr, size, flags);
39968c2ecf20Sopenharmony_ci}
39978c2ecf20Sopenharmony_ci
39988c2ecf20Sopenharmony_civoid *__kmalloc_node(size_t size, gfp_t flags, int node)
39998c2ecf20Sopenharmony_ci{
40008c2ecf20Sopenharmony_ci	struct kmem_cache *s;
40018c2ecf20Sopenharmony_ci	void *ret;
40028c2ecf20Sopenharmony_ci
40038c2ecf20Sopenharmony_ci	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
40048c2ecf20Sopenharmony_ci		ret = kmalloc_large_node(size, flags, node);
40058c2ecf20Sopenharmony_ci
40068c2ecf20Sopenharmony_ci		trace_kmalloc_node(_RET_IP_, ret,
40078c2ecf20Sopenharmony_ci				   size, PAGE_SIZE << get_order(size),
40088c2ecf20Sopenharmony_ci				   flags, node);
40098c2ecf20Sopenharmony_ci
40108c2ecf20Sopenharmony_ci		return ret;
40118c2ecf20Sopenharmony_ci	}
40128c2ecf20Sopenharmony_ci
40138c2ecf20Sopenharmony_ci	s = kmalloc_slab(size, flags);
40148c2ecf20Sopenharmony_ci
40158c2ecf20Sopenharmony_ci	if (unlikely(ZERO_OR_NULL_PTR(s)))
40168c2ecf20Sopenharmony_ci		return s;
40178c2ecf20Sopenharmony_ci
40188c2ecf20Sopenharmony_ci	ret = slab_alloc_node(s, flags, node, _RET_IP_);
40198c2ecf20Sopenharmony_ci
40208c2ecf20Sopenharmony_ci	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
40218c2ecf20Sopenharmony_ci
40228c2ecf20Sopenharmony_ci	ret = kasan_kmalloc(s, ret, size, flags);
40238c2ecf20Sopenharmony_ci
40248c2ecf20Sopenharmony_ci	return ret;
40258c2ecf20Sopenharmony_ci}
40268c2ecf20Sopenharmony_ciEXPORT_SYMBOL(__kmalloc_node);
40278c2ecf20Sopenharmony_ci#endif	/* CONFIG_NUMA */
40288c2ecf20Sopenharmony_ci
40298c2ecf20Sopenharmony_ci#ifdef CONFIG_HARDENED_USERCOPY
40308c2ecf20Sopenharmony_ci/*
40318c2ecf20Sopenharmony_ci * Rejects incorrectly sized objects and objects that are to be copied
40328c2ecf20Sopenharmony_ci * to/from userspace but do not fall entirely within the containing slab
40338c2ecf20Sopenharmony_ci * cache's usercopy region.
40348c2ecf20Sopenharmony_ci *
40358c2ecf20Sopenharmony_ci * Returns NULL if check passes, otherwise const char * to name of cache
40368c2ecf20Sopenharmony_ci * to indicate an error.
40378c2ecf20Sopenharmony_ci */
40388c2ecf20Sopenharmony_civoid __check_heap_object(const void *ptr, unsigned long n, struct page *page,
40398c2ecf20Sopenharmony_ci			 bool to_user)
40408c2ecf20Sopenharmony_ci{
40418c2ecf20Sopenharmony_ci	struct kmem_cache *s;
40428c2ecf20Sopenharmony_ci	unsigned int offset;
40438c2ecf20Sopenharmony_ci	size_t object_size;
40448c2ecf20Sopenharmony_ci
40458c2ecf20Sopenharmony_ci	ptr = kasan_reset_tag(ptr);
40468c2ecf20Sopenharmony_ci
40478c2ecf20Sopenharmony_ci	/* Find object and usable object size. */
40488c2ecf20Sopenharmony_ci	s = page->slab_cache;
40498c2ecf20Sopenharmony_ci
40508c2ecf20Sopenharmony_ci	/* Reject impossible pointers. */
40518c2ecf20Sopenharmony_ci	if (ptr < page_address(page))
40528c2ecf20Sopenharmony_ci		usercopy_abort("SLUB object not in SLUB page?!", NULL,
40538c2ecf20Sopenharmony_ci			       to_user, 0, n);
40548c2ecf20Sopenharmony_ci
40558c2ecf20Sopenharmony_ci	/* Find offset within object. */
40568c2ecf20Sopenharmony_ci	offset = (ptr - page_address(page)) % s->size;
40578c2ecf20Sopenharmony_ci
40588c2ecf20Sopenharmony_ci	/* Adjust for redzone and reject if within the redzone. */
40598c2ecf20Sopenharmony_ci	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
40608c2ecf20Sopenharmony_ci		if (offset < s->red_left_pad)
40618c2ecf20Sopenharmony_ci			usercopy_abort("SLUB object in left red zone",
40628c2ecf20Sopenharmony_ci				       s->name, to_user, offset, n);
40638c2ecf20Sopenharmony_ci		offset -= s->red_left_pad;
40648c2ecf20Sopenharmony_ci	}
40658c2ecf20Sopenharmony_ci
40668c2ecf20Sopenharmony_ci	/* Allow address range falling entirely within usercopy region. */
40678c2ecf20Sopenharmony_ci	if (offset >= s->useroffset &&
40688c2ecf20Sopenharmony_ci	    offset - s->useroffset <= s->usersize &&
40698c2ecf20Sopenharmony_ci	    n <= s->useroffset - offset + s->usersize)
40708c2ecf20Sopenharmony_ci		return;
40718c2ecf20Sopenharmony_ci
40728c2ecf20Sopenharmony_ci	/*
40738c2ecf20Sopenharmony_ci	 * If the copy is still within the allocated object, produce
40748c2ecf20Sopenharmony_ci	 * a warning instead of rejecting the copy. This is intended
40758c2ecf20Sopenharmony_ci	 * to be a temporary method to find any missing usercopy
40768c2ecf20Sopenharmony_ci	 * whitelists.
40778c2ecf20Sopenharmony_ci	 */
40788c2ecf20Sopenharmony_ci	object_size = slab_ksize(s);
40798c2ecf20Sopenharmony_ci	if (usercopy_fallback &&
40808c2ecf20Sopenharmony_ci	    offset <= object_size && n <= object_size - offset) {
40818c2ecf20Sopenharmony_ci		usercopy_warn("SLUB object", s->name, to_user, offset, n);
40828c2ecf20Sopenharmony_ci		return;
40838c2ecf20Sopenharmony_ci	}
40848c2ecf20Sopenharmony_ci
40858c2ecf20Sopenharmony_ci	usercopy_abort("SLUB object", s->name, to_user, offset, n);
40868c2ecf20Sopenharmony_ci}
40878c2ecf20Sopenharmony_ci#endif /* CONFIG_HARDENED_USERCOPY */
40888c2ecf20Sopenharmony_ci
40898c2ecf20Sopenharmony_cisize_t __ksize(const void *object)
40908c2ecf20Sopenharmony_ci{
40918c2ecf20Sopenharmony_ci	struct page *page;
40928c2ecf20Sopenharmony_ci
40938c2ecf20Sopenharmony_ci	if (unlikely(object == ZERO_SIZE_PTR))
40948c2ecf20Sopenharmony_ci		return 0;
40958c2ecf20Sopenharmony_ci
40968c2ecf20Sopenharmony_ci	page = virt_to_head_page(object);
40978c2ecf20Sopenharmony_ci
40988c2ecf20Sopenharmony_ci	if (unlikely(!PageSlab(page))) {
40998c2ecf20Sopenharmony_ci		WARN_ON(!PageCompound(page));
41008c2ecf20Sopenharmony_ci		return page_size(page);
41018c2ecf20Sopenharmony_ci	}
41028c2ecf20Sopenharmony_ci
41038c2ecf20Sopenharmony_ci	return slab_ksize(page->slab_cache);
41048c2ecf20Sopenharmony_ci}
41058c2ecf20Sopenharmony_ciEXPORT_SYMBOL(__ksize);
41068c2ecf20Sopenharmony_ci
41078c2ecf20Sopenharmony_civoid kfree(const void *x)
41088c2ecf20Sopenharmony_ci{
41098c2ecf20Sopenharmony_ci	struct page *page;
41108c2ecf20Sopenharmony_ci	void *object = (void *)x;
41118c2ecf20Sopenharmony_ci
41128c2ecf20Sopenharmony_ci	trace_kfree(_RET_IP_, x);
41138c2ecf20Sopenharmony_ci
41148c2ecf20Sopenharmony_ci	if (unlikely(ZERO_OR_NULL_PTR(x)))
41158c2ecf20Sopenharmony_ci		return;
41168c2ecf20Sopenharmony_ci
41178c2ecf20Sopenharmony_ci	page = virt_to_head_page(x);
41188c2ecf20Sopenharmony_ci	if (unlikely(!PageSlab(page))) {
41198c2ecf20Sopenharmony_ci		unsigned int order = compound_order(page);
41208c2ecf20Sopenharmony_ci
41218c2ecf20Sopenharmony_ci		BUG_ON(!PageCompound(page));
41228c2ecf20Sopenharmony_ci		kfree_hook(object);
41238c2ecf20Sopenharmony_ci		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
41248c2ecf20Sopenharmony_ci				      -(PAGE_SIZE << order));
41258c2ecf20Sopenharmony_ci		__free_pages(page, order);
41268c2ecf20Sopenharmony_ci		return;
41278c2ecf20Sopenharmony_ci	}
41288c2ecf20Sopenharmony_ci	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
41298c2ecf20Sopenharmony_ci}
41308c2ecf20Sopenharmony_ciEXPORT_SYMBOL(kfree);
41318c2ecf20Sopenharmony_ci
41328c2ecf20Sopenharmony_ci#define SHRINK_PROMOTE_MAX 32
41338c2ecf20Sopenharmony_ci
41348c2ecf20Sopenharmony_ci/*
41358c2ecf20Sopenharmony_ci * kmem_cache_shrink discards empty slabs and promotes the slabs filled
41368c2ecf20Sopenharmony_ci * up most to the head of the partial lists. New allocations will then
41378c2ecf20Sopenharmony_ci * fill those up and thus they can be removed from the partial lists.
41388c2ecf20Sopenharmony_ci *
41398c2ecf20Sopenharmony_ci * The slabs with the least items are placed last. This results in them
41408c2ecf20Sopenharmony_ci * being allocated from last increasing the chance that the last objects
41418c2ecf20Sopenharmony_ci * are freed in them.
41428c2ecf20Sopenharmony_ci */
41438c2ecf20Sopenharmony_ciint __kmem_cache_shrink(struct kmem_cache *s)
41448c2ecf20Sopenharmony_ci{
41458c2ecf20Sopenharmony_ci	int node;
41468c2ecf20Sopenharmony_ci	int i;
41478c2ecf20Sopenharmony_ci	struct kmem_cache_node *n;
41488c2ecf20Sopenharmony_ci	struct page *page;
41498c2ecf20Sopenharmony_ci	struct page *t;
41508c2ecf20Sopenharmony_ci	struct list_head discard;
41518c2ecf20Sopenharmony_ci	struct list_head promote[SHRINK_PROMOTE_MAX];
41528c2ecf20Sopenharmony_ci	unsigned long flags;
41538c2ecf20Sopenharmony_ci	int ret = 0;
41548c2ecf20Sopenharmony_ci
41558c2ecf20Sopenharmony_ci	flush_all(s);
41568c2ecf20Sopenharmony_ci	for_each_kmem_cache_node(s, node, n) {
41578c2ecf20Sopenharmony_ci		INIT_LIST_HEAD(&discard);
41588c2ecf20Sopenharmony_ci		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
41598c2ecf20Sopenharmony_ci			INIT_LIST_HEAD(promote + i);
41608c2ecf20Sopenharmony_ci
41618c2ecf20Sopenharmony_ci		spin_lock_irqsave(&n->list_lock, flags);
41628c2ecf20Sopenharmony_ci
41638c2ecf20Sopenharmony_ci		/*
41648c2ecf20Sopenharmony_ci		 * Build lists of slabs to discard or promote.
41658c2ecf20Sopenharmony_ci		 *
41668c2ecf20Sopenharmony_ci		 * Note that concurrent frees may occur while we hold the
41678c2ecf20Sopenharmony_ci		 * list_lock. page->inuse here is the upper limit.
41688c2ecf20Sopenharmony_ci		 */
41698c2ecf20Sopenharmony_ci		list_for_each_entry_safe(page, t, &n->partial, slab_list) {
41708c2ecf20Sopenharmony_ci			int free = page->objects - page->inuse;
41718c2ecf20Sopenharmony_ci
41728c2ecf20Sopenharmony_ci			/* Do not reread page->inuse */
41738c2ecf20Sopenharmony_ci			barrier();
41748c2ecf20Sopenharmony_ci
41758c2ecf20Sopenharmony_ci			/* We do not keep full slabs on the list */
41768c2ecf20Sopenharmony_ci			BUG_ON(free <= 0);
41778c2ecf20Sopenharmony_ci
41788c2ecf20Sopenharmony_ci			if (free == page->objects) {
41798c2ecf20Sopenharmony_ci				list_move(&page->slab_list, &discard);
41808c2ecf20Sopenharmony_ci				n->nr_partial--;
41818c2ecf20Sopenharmony_ci			} else if (free <= SHRINK_PROMOTE_MAX)
41828c2ecf20Sopenharmony_ci				list_move(&page->slab_list, promote + free - 1);
41838c2ecf20Sopenharmony_ci		}
41848c2ecf20Sopenharmony_ci
41858c2ecf20Sopenharmony_ci		/*
41868c2ecf20Sopenharmony_ci		 * Promote the slabs filled up most to the head of the
41878c2ecf20Sopenharmony_ci		 * partial list.
41888c2ecf20Sopenharmony_ci		 */
41898c2ecf20Sopenharmony_ci		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
41908c2ecf20Sopenharmony_ci			list_splice(promote + i, &n->partial);
41918c2ecf20Sopenharmony_ci
41928c2ecf20Sopenharmony_ci		spin_unlock_irqrestore(&n->list_lock, flags);
41938c2ecf20Sopenharmony_ci
41948c2ecf20Sopenharmony_ci		/* Release empty slabs */
41958c2ecf20Sopenharmony_ci		list_for_each_entry_safe(page, t, &discard, slab_list)
41968c2ecf20Sopenharmony_ci			discard_slab(s, page);
41978c2ecf20Sopenharmony_ci
41988c2ecf20Sopenharmony_ci		if (slabs_node(s, node))
41998c2ecf20Sopenharmony_ci			ret = 1;
42008c2ecf20Sopenharmony_ci	}
42018c2ecf20Sopenharmony_ci
42028c2ecf20Sopenharmony_ci	return ret;
42038c2ecf20Sopenharmony_ci}
42048c2ecf20Sopenharmony_ci
42058c2ecf20Sopenharmony_cistatic int slab_mem_going_offline_callback(void *arg)
42068c2ecf20Sopenharmony_ci{
42078c2ecf20Sopenharmony_ci	struct kmem_cache *s;
42088c2ecf20Sopenharmony_ci
42098c2ecf20Sopenharmony_ci	mutex_lock(&slab_mutex);
42108c2ecf20Sopenharmony_ci	list_for_each_entry(s, &slab_caches, list)
42118c2ecf20Sopenharmony_ci		__kmem_cache_shrink(s);
42128c2ecf20Sopenharmony_ci	mutex_unlock(&slab_mutex);
42138c2ecf20Sopenharmony_ci
42148c2ecf20Sopenharmony_ci	return 0;
42158c2ecf20Sopenharmony_ci}
42168c2ecf20Sopenharmony_ci
42178c2ecf20Sopenharmony_cistatic void slab_mem_offline_callback(void *arg)
42188c2ecf20Sopenharmony_ci{
42198c2ecf20Sopenharmony_ci	struct kmem_cache_node *n;
42208c2ecf20Sopenharmony_ci	struct kmem_cache *s;
42218c2ecf20Sopenharmony_ci	struct memory_notify *marg = arg;
42228c2ecf20Sopenharmony_ci	int offline_node;
42238c2ecf20Sopenharmony_ci
42248c2ecf20Sopenharmony_ci	offline_node = marg->status_change_nid_normal;
42258c2ecf20Sopenharmony_ci
42268c2ecf20Sopenharmony_ci	/*
42278c2ecf20Sopenharmony_ci	 * If the node still has available memory. we need kmem_cache_node
42288c2ecf20Sopenharmony_ci	 * for it yet.
42298c2ecf20Sopenharmony_ci	 */
42308c2ecf20Sopenharmony_ci	if (offline_node < 0)
42318c2ecf20Sopenharmony_ci		return;
42328c2ecf20Sopenharmony_ci
42338c2ecf20Sopenharmony_ci	mutex_lock(&slab_mutex);
42348c2ecf20Sopenharmony_ci	list_for_each_entry(s, &slab_caches, list) {
42358c2ecf20Sopenharmony_ci		n = get_node(s, offline_node);
42368c2ecf20Sopenharmony_ci		if (n) {
42378c2ecf20Sopenharmony_ci			/*
42388c2ecf20Sopenharmony_ci			 * if n->nr_slabs > 0, slabs still exist on the node
42398c2ecf20Sopenharmony_ci			 * that is going down. We were unable to free them,
42408c2ecf20Sopenharmony_ci			 * and offline_pages() function shouldn't call this
42418c2ecf20Sopenharmony_ci			 * callback. So, we must fail.
42428c2ecf20Sopenharmony_ci			 */
42438c2ecf20Sopenharmony_ci			BUG_ON(slabs_node(s, offline_node));
42448c2ecf20Sopenharmony_ci
42458c2ecf20Sopenharmony_ci			s->node[offline_node] = NULL;
42468c2ecf20Sopenharmony_ci			kmem_cache_free(kmem_cache_node, n);
42478c2ecf20Sopenharmony_ci		}
42488c2ecf20Sopenharmony_ci	}
42498c2ecf20Sopenharmony_ci	mutex_unlock(&slab_mutex);
42508c2ecf20Sopenharmony_ci}
42518c2ecf20Sopenharmony_ci
42528c2ecf20Sopenharmony_cistatic int slab_mem_going_online_callback(void *arg)
42538c2ecf20Sopenharmony_ci{
42548c2ecf20Sopenharmony_ci	struct kmem_cache_node *n;
42558c2ecf20Sopenharmony_ci	struct kmem_cache *s;
42568c2ecf20Sopenharmony_ci	struct memory_notify *marg = arg;
42578c2ecf20Sopenharmony_ci	int nid = marg->status_change_nid_normal;
42588c2ecf20Sopenharmony_ci	int ret = 0;
42598c2ecf20Sopenharmony_ci
42608c2ecf20Sopenharmony_ci	/*
42618c2ecf20Sopenharmony_ci	 * If the node's memory is already available, then kmem_cache_node is
42628c2ecf20Sopenharmony_ci	 * already created. Nothing to do.
42638c2ecf20Sopenharmony_ci	 */
42648c2ecf20Sopenharmony_ci	if (nid < 0)
42658c2ecf20Sopenharmony_ci		return 0;
42668c2ecf20Sopenharmony_ci
42678c2ecf20Sopenharmony_ci	/*
42688c2ecf20Sopenharmony_ci	 * We are bringing a node online. No memory is available yet. We must
42698c2ecf20Sopenharmony_ci	 * allocate a kmem_cache_node structure in order to bring the node
42708c2ecf20Sopenharmony_ci	 * online.
42718c2ecf20Sopenharmony_ci	 */
42728c2ecf20Sopenharmony_ci	mutex_lock(&slab_mutex);
42738c2ecf20Sopenharmony_ci	list_for_each_entry(s, &slab_caches, list) {
42748c2ecf20Sopenharmony_ci		/*
42758c2ecf20Sopenharmony_ci		 * XXX: kmem_cache_alloc_node will fallback to other nodes
42768c2ecf20Sopenharmony_ci		 *      since memory is not yet available from the node that
42778c2ecf20Sopenharmony_ci		 *      is brought up.
42788c2ecf20Sopenharmony_ci		 */
42798c2ecf20Sopenharmony_ci		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
42808c2ecf20Sopenharmony_ci		if (!n) {
42818c2ecf20Sopenharmony_ci			ret = -ENOMEM;
42828c2ecf20Sopenharmony_ci			goto out;
42838c2ecf20Sopenharmony_ci		}
42848c2ecf20Sopenharmony_ci		init_kmem_cache_node(n);
42858c2ecf20Sopenharmony_ci		s->node[nid] = n;
42868c2ecf20Sopenharmony_ci	}
42878c2ecf20Sopenharmony_ciout:
42888c2ecf20Sopenharmony_ci	mutex_unlock(&slab_mutex);
42898c2ecf20Sopenharmony_ci	return ret;
42908c2ecf20Sopenharmony_ci}
42918c2ecf20Sopenharmony_ci
42928c2ecf20Sopenharmony_cistatic int slab_memory_callback(struct notifier_block *self,
42938c2ecf20Sopenharmony_ci				unsigned long action, void *arg)
42948c2ecf20Sopenharmony_ci{
42958c2ecf20Sopenharmony_ci	int ret = 0;
42968c2ecf20Sopenharmony_ci
42978c2ecf20Sopenharmony_ci	switch (action) {
42988c2ecf20Sopenharmony_ci	case MEM_GOING_ONLINE:
42998c2ecf20Sopenharmony_ci		ret = slab_mem_going_online_callback(arg);
43008c2ecf20Sopenharmony_ci		break;
43018c2ecf20Sopenharmony_ci	case MEM_GOING_OFFLINE:
43028c2ecf20Sopenharmony_ci		ret = slab_mem_going_offline_callback(arg);
43038c2ecf20Sopenharmony_ci		break;
43048c2ecf20Sopenharmony_ci	case MEM_OFFLINE:
43058c2ecf20Sopenharmony_ci	case MEM_CANCEL_ONLINE:
43068c2ecf20Sopenharmony_ci		slab_mem_offline_callback(arg);
43078c2ecf20Sopenharmony_ci		break;
43088c2ecf20Sopenharmony_ci	case MEM_ONLINE:
43098c2ecf20Sopenharmony_ci	case MEM_CANCEL_OFFLINE:
43108c2ecf20Sopenharmony_ci		break;
43118c2ecf20Sopenharmony_ci	}
43128c2ecf20Sopenharmony_ci	if (ret)
43138c2ecf20Sopenharmony_ci		ret = notifier_from_errno(ret);
43148c2ecf20Sopenharmony_ci	else
43158c2ecf20Sopenharmony_ci		ret = NOTIFY_OK;
43168c2ecf20Sopenharmony_ci	return ret;
43178c2ecf20Sopenharmony_ci}
43188c2ecf20Sopenharmony_ci
43198c2ecf20Sopenharmony_cistatic struct notifier_block slab_memory_callback_nb = {
43208c2ecf20Sopenharmony_ci	.notifier_call = slab_memory_callback,
43218c2ecf20Sopenharmony_ci	.priority = SLAB_CALLBACK_PRI,
43228c2ecf20Sopenharmony_ci};
43238c2ecf20Sopenharmony_ci
43248c2ecf20Sopenharmony_ci/********************************************************************
43258c2ecf20Sopenharmony_ci *			Basic setup of slabs
43268c2ecf20Sopenharmony_ci *******************************************************************/
43278c2ecf20Sopenharmony_ci
43288c2ecf20Sopenharmony_ci/*
43298c2ecf20Sopenharmony_ci * Used for early kmem_cache structures that were allocated using
43308c2ecf20Sopenharmony_ci * the page allocator. Allocate them properly then fix up the pointers
43318c2ecf20Sopenharmony_ci * that may be pointing to the wrong kmem_cache structure.
43328c2ecf20Sopenharmony_ci */
43338c2ecf20Sopenharmony_ci
43348c2ecf20Sopenharmony_cistatic struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
43358c2ecf20Sopenharmony_ci{
43368c2ecf20Sopenharmony_ci	int node;
43378c2ecf20Sopenharmony_ci	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
43388c2ecf20Sopenharmony_ci	struct kmem_cache_node *n;
43398c2ecf20Sopenharmony_ci
43408c2ecf20Sopenharmony_ci	memcpy(s, static_cache, kmem_cache->object_size);
43418c2ecf20Sopenharmony_ci
43428c2ecf20Sopenharmony_ci	/*
43438c2ecf20Sopenharmony_ci	 * This runs very early, and only the boot processor is supposed to be
43448c2ecf20Sopenharmony_ci	 * up.  Even if it weren't true, IRQs are not up so we couldn't fire
43458c2ecf20Sopenharmony_ci	 * IPIs around.
43468c2ecf20Sopenharmony_ci	 */
43478c2ecf20Sopenharmony_ci	__flush_cpu_slab(s, smp_processor_id());
43488c2ecf20Sopenharmony_ci	for_each_kmem_cache_node(s, node, n) {
43498c2ecf20Sopenharmony_ci		struct page *p;
43508c2ecf20Sopenharmony_ci
43518c2ecf20Sopenharmony_ci		list_for_each_entry(p, &n->partial, slab_list)
43528c2ecf20Sopenharmony_ci			p->slab_cache = s;
43538c2ecf20Sopenharmony_ci
43548c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
43558c2ecf20Sopenharmony_ci		list_for_each_entry(p, &n->full, slab_list)
43568c2ecf20Sopenharmony_ci			p->slab_cache = s;
43578c2ecf20Sopenharmony_ci#endif
43588c2ecf20Sopenharmony_ci	}
43598c2ecf20Sopenharmony_ci	list_add(&s->list, &slab_caches);
43608c2ecf20Sopenharmony_ci	return s;
43618c2ecf20Sopenharmony_ci}
43628c2ecf20Sopenharmony_ci
43638c2ecf20Sopenharmony_civoid __init kmem_cache_init(void)
43648c2ecf20Sopenharmony_ci{
43658c2ecf20Sopenharmony_ci	static __initdata struct kmem_cache boot_kmem_cache,
43668c2ecf20Sopenharmony_ci		boot_kmem_cache_node;
43678c2ecf20Sopenharmony_ci
43688c2ecf20Sopenharmony_ci	if (debug_guardpage_minorder())
43698c2ecf20Sopenharmony_ci		slub_max_order = 0;
43708c2ecf20Sopenharmony_ci
43718c2ecf20Sopenharmony_ci	kmem_cache_node = &boot_kmem_cache_node;
43728c2ecf20Sopenharmony_ci	kmem_cache = &boot_kmem_cache;
43738c2ecf20Sopenharmony_ci
43748c2ecf20Sopenharmony_ci	create_boot_cache(kmem_cache_node, "kmem_cache_node",
43758c2ecf20Sopenharmony_ci		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0);
43768c2ecf20Sopenharmony_ci
43778c2ecf20Sopenharmony_ci	register_hotmemory_notifier(&slab_memory_callback_nb);
43788c2ecf20Sopenharmony_ci
43798c2ecf20Sopenharmony_ci	/* Able to allocate the per node structures */
43808c2ecf20Sopenharmony_ci	slab_state = PARTIAL;
43818c2ecf20Sopenharmony_ci
43828c2ecf20Sopenharmony_ci	create_boot_cache(kmem_cache, "kmem_cache",
43838c2ecf20Sopenharmony_ci			offsetof(struct kmem_cache, node) +
43848c2ecf20Sopenharmony_ci				nr_node_ids * sizeof(struct kmem_cache_node *),
43858c2ecf20Sopenharmony_ci		       SLAB_HWCACHE_ALIGN, 0, 0);
43868c2ecf20Sopenharmony_ci
43878c2ecf20Sopenharmony_ci	kmem_cache = bootstrap(&boot_kmem_cache);
43888c2ecf20Sopenharmony_ci	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
43898c2ecf20Sopenharmony_ci
43908c2ecf20Sopenharmony_ci	/* Now we can use the kmem_cache to allocate kmalloc slabs */
43918c2ecf20Sopenharmony_ci	setup_kmalloc_cache_index_table();
43928c2ecf20Sopenharmony_ci	create_kmalloc_caches(0);
43938c2ecf20Sopenharmony_ci
43948c2ecf20Sopenharmony_ci	/* Setup random freelists for each cache */
43958c2ecf20Sopenharmony_ci	init_freelist_randomization();
43968c2ecf20Sopenharmony_ci
43978c2ecf20Sopenharmony_ci	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
43988c2ecf20Sopenharmony_ci				  slub_cpu_dead);
43998c2ecf20Sopenharmony_ci
44008c2ecf20Sopenharmony_ci	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
44018c2ecf20Sopenharmony_ci		cache_line_size(),
44028c2ecf20Sopenharmony_ci		slub_min_order, slub_max_order, slub_min_objects,
44038c2ecf20Sopenharmony_ci		nr_cpu_ids, nr_node_ids);
44048c2ecf20Sopenharmony_ci}
44058c2ecf20Sopenharmony_ci
44068c2ecf20Sopenharmony_civoid __init kmem_cache_init_late(void)
44078c2ecf20Sopenharmony_ci{
44088c2ecf20Sopenharmony_ci}
44098c2ecf20Sopenharmony_ci
44108c2ecf20Sopenharmony_cistruct kmem_cache *
44118c2ecf20Sopenharmony_ci__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
44128c2ecf20Sopenharmony_ci		   slab_flags_t flags, void (*ctor)(void *))
44138c2ecf20Sopenharmony_ci{
44148c2ecf20Sopenharmony_ci	struct kmem_cache *s;
44158c2ecf20Sopenharmony_ci
44168c2ecf20Sopenharmony_ci	s = find_mergeable(size, align, flags, name, ctor);
44178c2ecf20Sopenharmony_ci	if (s) {
44188c2ecf20Sopenharmony_ci		s->refcount++;
44198c2ecf20Sopenharmony_ci
44208c2ecf20Sopenharmony_ci		/*
44218c2ecf20Sopenharmony_ci		 * Adjust the object sizes so that we clear
44228c2ecf20Sopenharmony_ci		 * the complete object on kzalloc.
44238c2ecf20Sopenharmony_ci		 */
44248c2ecf20Sopenharmony_ci		s->object_size = max(s->object_size, size);
44258c2ecf20Sopenharmony_ci		s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
44268c2ecf20Sopenharmony_ci
44278c2ecf20Sopenharmony_ci		if (sysfs_slab_alias(s, name)) {
44288c2ecf20Sopenharmony_ci			s->refcount--;
44298c2ecf20Sopenharmony_ci			s = NULL;
44308c2ecf20Sopenharmony_ci		}
44318c2ecf20Sopenharmony_ci	}
44328c2ecf20Sopenharmony_ci
44338c2ecf20Sopenharmony_ci	return s;
44348c2ecf20Sopenharmony_ci}
44358c2ecf20Sopenharmony_ci
44368c2ecf20Sopenharmony_ciint __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
44378c2ecf20Sopenharmony_ci{
44388c2ecf20Sopenharmony_ci	int err;
44398c2ecf20Sopenharmony_ci
44408c2ecf20Sopenharmony_ci	err = kmem_cache_open(s, flags);
44418c2ecf20Sopenharmony_ci	if (err)
44428c2ecf20Sopenharmony_ci		return err;
44438c2ecf20Sopenharmony_ci
44448c2ecf20Sopenharmony_ci	/* Mutex is not taken during early boot */
44458c2ecf20Sopenharmony_ci	if (slab_state <= UP)
44468c2ecf20Sopenharmony_ci		return 0;
44478c2ecf20Sopenharmony_ci
44488c2ecf20Sopenharmony_ci	err = sysfs_slab_add(s);
44498c2ecf20Sopenharmony_ci	if (err)
44508c2ecf20Sopenharmony_ci		__kmem_cache_release(s);
44518c2ecf20Sopenharmony_ci
44528c2ecf20Sopenharmony_ci	return err;
44538c2ecf20Sopenharmony_ci}
44548c2ecf20Sopenharmony_ci
44558c2ecf20Sopenharmony_civoid *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
44568c2ecf20Sopenharmony_ci{
44578c2ecf20Sopenharmony_ci	struct kmem_cache *s;
44588c2ecf20Sopenharmony_ci	void *ret;
44598c2ecf20Sopenharmony_ci
44608c2ecf20Sopenharmony_ci	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
44618c2ecf20Sopenharmony_ci		return kmalloc_large(size, gfpflags);
44628c2ecf20Sopenharmony_ci
44638c2ecf20Sopenharmony_ci	s = kmalloc_slab(size, gfpflags);
44648c2ecf20Sopenharmony_ci
44658c2ecf20Sopenharmony_ci	if (unlikely(ZERO_OR_NULL_PTR(s)))
44668c2ecf20Sopenharmony_ci		return s;
44678c2ecf20Sopenharmony_ci
44688c2ecf20Sopenharmony_ci	ret = slab_alloc(s, gfpflags, caller);
44698c2ecf20Sopenharmony_ci
44708c2ecf20Sopenharmony_ci	/* Honor the call site pointer we received. */
44718c2ecf20Sopenharmony_ci	trace_kmalloc(caller, ret, size, s->size, gfpflags);
44728c2ecf20Sopenharmony_ci
44738c2ecf20Sopenharmony_ci	return ret;
44748c2ecf20Sopenharmony_ci}
44758c2ecf20Sopenharmony_ciEXPORT_SYMBOL(__kmalloc_track_caller);
44768c2ecf20Sopenharmony_ci
44778c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA
44788c2ecf20Sopenharmony_civoid *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
44798c2ecf20Sopenharmony_ci					int node, unsigned long caller)
44808c2ecf20Sopenharmony_ci{
44818c2ecf20Sopenharmony_ci	struct kmem_cache *s;
44828c2ecf20Sopenharmony_ci	void *ret;
44838c2ecf20Sopenharmony_ci
44848c2ecf20Sopenharmony_ci	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
44858c2ecf20Sopenharmony_ci		ret = kmalloc_large_node(size, gfpflags, node);
44868c2ecf20Sopenharmony_ci
44878c2ecf20Sopenharmony_ci		trace_kmalloc_node(caller, ret,
44888c2ecf20Sopenharmony_ci				   size, PAGE_SIZE << get_order(size),
44898c2ecf20Sopenharmony_ci				   gfpflags, node);
44908c2ecf20Sopenharmony_ci
44918c2ecf20Sopenharmony_ci		return ret;
44928c2ecf20Sopenharmony_ci	}
44938c2ecf20Sopenharmony_ci
44948c2ecf20Sopenharmony_ci	s = kmalloc_slab(size, gfpflags);
44958c2ecf20Sopenharmony_ci
44968c2ecf20Sopenharmony_ci	if (unlikely(ZERO_OR_NULL_PTR(s)))
44978c2ecf20Sopenharmony_ci		return s;
44988c2ecf20Sopenharmony_ci
44998c2ecf20Sopenharmony_ci	ret = slab_alloc_node(s, gfpflags, node, caller);
45008c2ecf20Sopenharmony_ci
45018c2ecf20Sopenharmony_ci	/* Honor the call site pointer we received. */
45028c2ecf20Sopenharmony_ci	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
45038c2ecf20Sopenharmony_ci
45048c2ecf20Sopenharmony_ci	return ret;
45058c2ecf20Sopenharmony_ci}
45068c2ecf20Sopenharmony_ciEXPORT_SYMBOL(__kmalloc_node_track_caller);
45078c2ecf20Sopenharmony_ci#endif
45088c2ecf20Sopenharmony_ci
45098c2ecf20Sopenharmony_ci#ifdef CONFIG_SYSFS
45108c2ecf20Sopenharmony_cistatic int count_inuse(struct page *page)
45118c2ecf20Sopenharmony_ci{
45128c2ecf20Sopenharmony_ci	return page->inuse;
45138c2ecf20Sopenharmony_ci}
45148c2ecf20Sopenharmony_ci
45158c2ecf20Sopenharmony_cistatic int count_total(struct page *page)
45168c2ecf20Sopenharmony_ci{
45178c2ecf20Sopenharmony_ci	return page->objects;
45188c2ecf20Sopenharmony_ci}
45198c2ecf20Sopenharmony_ci#endif
45208c2ecf20Sopenharmony_ci
45218c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
45228c2ecf20Sopenharmony_cistatic void validate_slab(struct kmem_cache *s, struct page *page)
45238c2ecf20Sopenharmony_ci{
45248c2ecf20Sopenharmony_ci	void *p;
45258c2ecf20Sopenharmony_ci	void *addr = page_address(page);
45268c2ecf20Sopenharmony_ci	unsigned long *map;
45278c2ecf20Sopenharmony_ci
45288c2ecf20Sopenharmony_ci	slab_lock(page);
45298c2ecf20Sopenharmony_ci
45308c2ecf20Sopenharmony_ci	if (!check_slab(s, page) || !on_freelist(s, page, NULL))
45318c2ecf20Sopenharmony_ci		goto unlock;
45328c2ecf20Sopenharmony_ci
45338c2ecf20Sopenharmony_ci	/* Now we know that a valid freelist exists */
45348c2ecf20Sopenharmony_ci	map = get_map(s, page);
45358c2ecf20Sopenharmony_ci	for_each_object(p, s, addr, page->objects) {
45368c2ecf20Sopenharmony_ci		u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
45378c2ecf20Sopenharmony_ci			 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
45388c2ecf20Sopenharmony_ci
45398c2ecf20Sopenharmony_ci		if (!check_object(s, page, p, val))
45408c2ecf20Sopenharmony_ci			break;
45418c2ecf20Sopenharmony_ci	}
45428c2ecf20Sopenharmony_ci	put_map(map);
45438c2ecf20Sopenharmony_ciunlock:
45448c2ecf20Sopenharmony_ci	slab_unlock(page);
45458c2ecf20Sopenharmony_ci}
45468c2ecf20Sopenharmony_ci
45478c2ecf20Sopenharmony_cistatic int validate_slab_node(struct kmem_cache *s,
45488c2ecf20Sopenharmony_ci		struct kmem_cache_node *n)
45498c2ecf20Sopenharmony_ci{
45508c2ecf20Sopenharmony_ci	unsigned long count = 0;
45518c2ecf20Sopenharmony_ci	struct page *page;
45528c2ecf20Sopenharmony_ci	unsigned long flags;
45538c2ecf20Sopenharmony_ci
45548c2ecf20Sopenharmony_ci	spin_lock_irqsave(&n->list_lock, flags);
45558c2ecf20Sopenharmony_ci
45568c2ecf20Sopenharmony_ci	list_for_each_entry(page, &n->partial, slab_list) {
45578c2ecf20Sopenharmony_ci		validate_slab(s, page);
45588c2ecf20Sopenharmony_ci		count++;
45598c2ecf20Sopenharmony_ci	}
45608c2ecf20Sopenharmony_ci	if (count != n->nr_partial)
45618c2ecf20Sopenharmony_ci		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
45628c2ecf20Sopenharmony_ci		       s->name, count, n->nr_partial);
45638c2ecf20Sopenharmony_ci
45648c2ecf20Sopenharmony_ci	if (!(s->flags & SLAB_STORE_USER))
45658c2ecf20Sopenharmony_ci		goto out;
45668c2ecf20Sopenharmony_ci
45678c2ecf20Sopenharmony_ci	list_for_each_entry(page, &n->full, slab_list) {
45688c2ecf20Sopenharmony_ci		validate_slab(s, page);
45698c2ecf20Sopenharmony_ci		count++;
45708c2ecf20Sopenharmony_ci	}
45718c2ecf20Sopenharmony_ci	if (count != atomic_long_read(&n->nr_slabs))
45728c2ecf20Sopenharmony_ci		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
45738c2ecf20Sopenharmony_ci		       s->name, count, atomic_long_read(&n->nr_slabs));
45748c2ecf20Sopenharmony_ci
45758c2ecf20Sopenharmony_ciout:
45768c2ecf20Sopenharmony_ci	spin_unlock_irqrestore(&n->list_lock, flags);
45778c2ecf20Sopenharmony_ci	return count;
45788c2ecf20Sopenharmony_ci}
45798c2ecf20Sopenharmony_ci
45808c2ecf20Sopenharmony_cistatic long validate_slab_cache(struct kmem_cache *s)
45818c2ecf20Sopenharmony_ci{
45828c2ecf20Sopenharmony_ci	int node;
45838c2ecf20Sopenharmony_ci	unsigned long count = 0;
45848c2ecf20Sopenharmony_ci	struct kmem_cache_node *n;
45858c2ecf20Sopenharmony_ci
45868c2ecf20Sopenharmony_ci	flush_all(s);
45878c2ecf20Sopenharmony_ci	for_each_kmem_cache_node(s, node, n)
45888c2ecf20Sopenharmony_ci		count += validate_slab_node(s, n);
45898c2ecf20Sopenharmony_ci
45908c2ecf20Sopenharmony_ci	return count;
45918c2ecf20Sopenharmony_ci}
45928c2ecf20Sopenharmony_ci/*
45938c2ecf20Sopenharmony_ci * Generate lists of code addresses where slabcache objects are allocated
45948c2ecf20Sopenharmony_ci * and freed.
45958c2ecf20Sopenharmony_ci */
45968c2ecf20Sopenharmony_ci
45978c2ecf20Sopenharmony_cistruct location {
45988c2ecf20Sopenharmony_ci	unsigned long count;
45998c2ecf20Sopenharmony_ci	unsigned long addr;
46008c2ecf20Sopenharmony_ci	long long sum_time;
46018c2ecf20Sopenharmony_ci	long min_time;
46028c2ecf20Sopenharmony_ci	long max_time;
46038c2ecf20Sopenharmony_ci	long min_pid;
46048c2ecf20Sopenharmony_ci	long max_pid;
46058c2ecf20Sopenharmony_ci	DECLARE_BITMAP(cpus, NR_CPUS);
46068c2ecf20Sopenharmony_ci	nodemask_t nodes;
46078c2ecf20Sopenharmony_ci};
46088c2ecf20Sopenharmony_ci
46098c2ecf20Sopenharmony_cistruct loc_track {
46108c2ecf20Sopenharmony_ci	unsigned long max;
46118c2ecf20Sopenharmony_ci	unsigned long count;
46128c2ecf20Sopenharmony_ci	struct location *loc;
46138c2ecf20Sopenharmony_ci};
46148c2ecf20Sopenharmony_ci
46158c2ecf20Sopenharmony_cistatic void free_loc_track(struct loc_track *t)
46168c2ecf20Sopenharmony_ci{
46178c2ecf20Sopenharmony_ci	if (t->max)
46188c2ecf20Sopenharmony_ci		free_pages((unsigned long)t->loc,
46198c2ecf20Sopenharmony_ci			get_order(sizeof(struct location) * t->max));
46208c2ecf20Sopenharmony_ci}
46218c2ecf20Sopenharmony_ci
46228c2ecf20Sopenharmony_cistatic int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
46238c2ecf20Sopenharmony_ci{
46248c2ecf20Sopenharmony_ci	struct location *l;
46258c2ecf20Sopenharmony_ci	int order;
46268c2ecf20Sopenharmony_ci
46278c2ecf20Sopenharmony_ci	order = get_order(sizeof(struct location) * max);
46288c2ecf20Sopenharmony_ci
46298c2ecf20Sopenharmony_ci	l = (void *)__get_free_pages(flags, order);
46308c2ecf20Sopenharmony_ci	if (!l)
46318c2ecf20Sopenharmony_ci		return 0;
46328c2ecf20Sopenharmony_ci
46338c2ecf20Sopenharmony_ci	if (t->count) {
46348c2ecf20Sopenharmony_ci		memcpy(l, t->loc, sizeof(struct location) * t->count);
46358c2ecf20Sopenharmony_ci		free_loc_track(t);
46368c2ecf20Sopenharmony_ci	}
46378c2ecf20Sopenharmony_ci	t->max = max;
46388c2ecf20Sopenharmony_ci	t->loc = l;
46398c2ecf20Sopenharmony_ci	return 1;
46408c2ecf20Sopenharmony_ci}
46418c2ecf20Sopenharmony_ci
46428c2ecf20Sopenharmony_cistatic int add_location(struct loc_track *t, struct kmem_cache *s,
46438c2ecf20Sopenharmony_ci				const struct track *track)
46448c2ecf20Sopenharmony_ci{
46458c2ecf20Sopenharmony_ci	long start, end, pos;
46468c2ecf20Sopenharmony_ci	struct location *l;
46478c2ecf20Sopenharmony_ci	unsigned long caddr;
46488c2ecf20Sopenharmony_ci	unsigned long age = jiffies - track->when;
46498c2ecf20Sopenharmony_ci
46508c2ecf20Sopenharmony_ci	start = -1;
46518c2ecf20Sopenharmony_ci	end = t->count;
46528c2ecf20Sopenharmony_ci
46538c2ecf20Sopenharmony_ci	for ( ; ; ) {
46548c2ecf20Sopenharmony_ci		pos = start + (end - start + 1) / 2;
46558c2ecf20Sopenharmony_ci
46568c2ecf20Sopenharmony_ci		/*
46578c2ecf20Sopenharmony_ci		 * There is nothing at "end". If we end up there
46588c2ecf20Sopenharmony_ci		 * we need to add something to before end.
46598c2ecf20Sopenharmony_ci		 */
46608c2ecf20Sopenharmony_ci		if (pos == end)
46618c2ecf20Sopenharmony_ci			break;
46628c2ecf20Sopenharmony_ci
46638c2ecf20Sopenharmony_ci		caddr = t->loc[pos].addr;
46648c2ecf20Sopenharmony_ci		if (track->addr == caddr) {
46658c2ecf20Sopenharmony_ci
46668c2ecf20Sopenharmony_ci			l = &t->loc[pos];
46678c2ecf20Sopenharmony_ci			l->count++;
46688c2ecf20Sopenharmony_ci			if (track->when) {
46698c2ecf20Sopenharmony_ci				l->sum_time += age;
46708c2ecf20Sopenharmony_ci				if (age < l->min_time)
46718c2ecf20Sopenharmony_ci					l->min_time = age;
46728c2ecf20Sopenharmony_ci				if (age > l->max_time)
46738c2ecf20Sopenharmony_ci					l->max_time = age;
46748c2ecf20Sopenharmony_ci
46758c2ecf20Sopenharmony_ci				if (track->pid < l->min_pid)
46768c2ecf20Sopenharmony_ci					l->min_pid = track->pid;
46778c2ecf20Sopenharmony_ci				if (track->pid > l->max_pid)
46788c2ecf20Sopenharmony_ci					l->max_pid = track->pid;
46798c2ecf20Sopenharmony_ci
46808c2ecf20Sopenharmony_ci				cpumask_set_cpu(track->cpu,
46818c2ecf20Sopenharmony_ci						to_cpumask(l->cpus));
46828c2ecf20Sopenharmony_ci			}
46838c2ecf20Sopenharmony_ci			node_set(page_to_nid(virt_to_page(track)), l->nodes);
46848c2ecf20Sopenharmony_ci			return 1;
46858c2ecf20Sopenharmony_ci		}
46868c2ecf20Sopenharmony_ci
46878c2ecf20Sopenharmony_ci		if (track->addr < caddr)
46888c2ecf20Sopenharmony_ci			end = pos;
46898c2ecf20Sopenharmony_ci		else
46908c2ecf20Sopenharmony_ci			start = pos;
46918c2ecf20Sopenharmony_ci	}
46928c2ecf20Sopenharmony_ci
46938c2ecf20Sopenharmony_ci	/*
46948c2ecf20Sopenharmony_ci	 * Not found. Insert new tracking element.
46958c2ecf20Sopenharmony_ci	 */
46968c2ecf20Sopenharmony_ci	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
46978c2ecf20Sopenharmony_ci		return 0;
46988c2ecf20Sopenharmony_ci
46998c2ecf20Sopenharmony_ci	l = t->loc + pos;
47008c2ecf20Sopenharmony_ci	if (pos < t->count)
47018c2ecf20Sopenharmony_ci		memmove(l + 1, l,
47028c2ecf20Sopenharmony_ci			(t->count - pos) * sizeof(struct location));
47038c2ecf20Sopenharmony_ci	t->count++;
47048c2ecf20Sopenharmony_ci	l->count = 1;
47058c2ecf20Sopenharmony_ci	l->addr = track->addr;
47068c2ecf20Sopenharmony_ci	l->sum_time = age;
47078c2ecf20Sopenharmony_ci	l->min_time = age;
47088c2ecf20Sopenharmony_ci	l->max_time = age;
47098c2ecf20Sopenharmony_ci	l->min_pid = track->pid;
47108c2ecf20Sopenharmony_ci	l->max_pid = track->pid;
47118c2ecf20Sopenharmony_ci	cpumask_clear(to_cpumask(l->cpus));
47128c2ecf20Sopenharmony_ci	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
47138c2ecf20Sopenharmony_ci	nodes_clear(l->nodes);
47148c2ecf20Sopenharmony_ci	node_set(page_to_nid(virt_to_page(track)), l->nodes);
47158c2ecf20Sopenharmony_ci	return 1;
47168c2ecf20Sopenharmony_ci}
47178c2ecf20Sopenharmony_ci
47188c2ecf20Sopenharmony_cistatic void process_slab(struct loc_track *t, struct kmem_cache *s,
47198c2ecf20Sopenharmony_ci		struct page *page, enum track_item alloc)
47208c2ecf20Sopenharmony_ci{
47218c2ecf20Sopenharmony_ci	void *addr = page_address(page);
47228c2ecf20Sopenharmony_ci	void *p;
47238c2ecf20Sopenharmony_ci	unsigned long *map;
47248c2ecf20Sopenharmony_ci
47258c2ecf20Sopenharmony_ci	map = get_map(s, page);
47268c2ecf20Sopenharmony_ci	for_each_object(p, s, addr, page->objects)
47278c2ecf20Sopenharmony_ci		if (!test_bit(__obj_to_index(s, addr, p), map))
47288c2ecf20Sopenharmony_ci			add_location(t, s, get_track(s, p, alloc));
47298c2ecf20Sopenharmony_ci	put_map(map);
47308c2ecf20Sopenharmony_ci}
47318c2ecf20Sopenharmony_ci
47328c2ecf20Sopenharmony_cistatic int list_locations(struct kmem_cache *s, char *buf,
47338c2ecf20Sopenharmony_ci					enum track_item alloc)
47348c2ecf20Sopenharmony_ci{
47358c2ecf20Sopenharmony_ci	int len = 0;
47368c2ecf20Sopenharmony_ci	unsigned long i;
47378c2ecf20Sopenharmony_ci	struct loc_track t = { 0, 0, NULL };
47388c2ecf20Sopenharmony_ci	int node;
47398c2ecf20Sopenharmony_ci	struct kmem_cache_node *n;
47408c2ecf20Sopenharmony_ci
47418c2ecf20Sopenharmony_ci	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
47428c2ecf20Sopenharmony_ci			     GFP_KERNEL)) {
47438c2ecf20Sopenharmony_ci		return sprintf(buf, "Out of memory\n");
47448c2ecf20Sopenharmony_ci	}
47458c2ecf20Sopenharmony_ci	/* Push back cpu slabs */
47468c2ecf20Sopenharmony_ci	flush_all(s);
47478c2ecf20Sopenharmony_ci
47488c2ecf20Sopenharmony_ci	for_each_kmem_cache_node(s, node, n) {
47498c2ecf20Sopenharmony_ci		unsigned long flags;
47508c2ecf20Sopenharmony_ci		struct page *page;
47518c2ecf20Sopenharmony_ci
47528c2ecf20Sopenharmony_ci		if (!atomic_long_read(&n->nr_slabs))
47538c2ecf20Sopenharmony_ci			continue;
47548c2ecf20Sopenharmony_ci
47558c2ecf20Sopenharmony_ci		spin_lock_irqsave(&n->list_lock, flags);
47568c2ecf20Sopenharmony_ci		list_for_each_entry(page, &n->partial, slab_list)
47578c2ecf20Sopenharmony_ci			process_slab(&t, s, page, alloc);
47588c2ecf20Sopenharmony_ci		list_for_each_entry(page, &n->full, slab_list)
47598c2ecf20Sopenharmony_ci			process_slab(&t, s, page, alloc);
47608c2ecf20Sopenharmony_ci		spin_unlock_irqrestore(&n->list_lock, flags);
47618c2ecf20Sopenharmony_ci	}
47628c2ecf20Sopenharmony_ci
47638c2ecf20Sopenharmony_ci	for (i = 0; i < t.count; i++) {
47648c2ecf20Sopenharmony_ci		struct location *l = &t.loc[i];
47658c2ecf20Sopenharmony_ci
47668c2ecf20Sopenharmony_ci		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
47678c2ecf20Sopenharmony_ci			break;
47688c2ecf20Sopenharmony_ci		len += sprintf(buf + len, "%7ld ", l->count);
47698c2ecf20Sopenharmony_ci
47708c2ecf20Sopenharmony_ci		if (l->addr)
47718c2ecf20Sopenharmony_ci			len += sprintf(buf + len, "%pS", (void *)l->addr);
47728c2ecf20Sopenharmony_ci		else
47738c2ecf20Sopenharmony_ci			len += sprintf(buf + len, "<not-available>");
47748c2ecf20Sopenharmony_ci
47758c2ecf20Sopenharmony_ci		if (l->sum_time != l->min_time) {
47768c2ecf20Sopenharmony_ci			len += sprintf(buf + len, " age=%ld/%ld/%ld",
47778c2ecf20Sopenharmony_ci				l->min_time,
47788c2ecf20Sopenharmony_ci				(long)div_u64(l->sum_time, l->count),
47798c2ecf20Sopenharmony_ci				l->max_time);
47808c2ecf20Sopenharmony_ci		} else
47818c2ecf20Sopenharmony_ci			len += sprintf(buf + len, " age=%ld",
47828c2ecf20Sopenharmony_ci				l->min_time);
47838c2ecf20Sopenharmony_ci
47848c2ecf20Sopenharmony_ci		if (l->min_pid != l->max_pid)
47858c2ecf20Sopenharmony_ci			len += sprintf(buf + len, " pid=%ld-%ld",
47868c2ecf20Sopenharmony_ci				l->min_pid, l->max_pid);
47878c2ecf20Sopenharmony_ci		else
47888c2ecf20Sopenharmony_ci			len += sprintf(buf + len, " pid=%ld",
47898c2ecf20Sopenharmony_ci				l->min_pid);
47908c2ecf20Sopenharmony_ci
47918c2ecf20Sopenharmony_ci		if (num_online_cpus() > 1 &&
47928c2ecf20Sopenharmony_ci				!cpumask_empty(to_cpumask(l->cpus)) &&
47938c2ecf20Sopenharmony_ci				len < PAGE_SIZE - 60)
47948c2ecf20Sopenharmony_ci			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
47958c2ecf20Sopenharmony_ci					 " cpus=%*pbl",
47968c2ecf20Sopenharmony_ci					 cpumask_pr_args(to_cpumask(l->cpus)));
47978c2ecf20Sopenharmony_ci
47988c2ecf20Sopenharmony_ci		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
47998c2ecf20Sopenharmony_ci				len < PAGE_SIZE - 60)
48008c2ecf20Sopenharmony_ci			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
48018c2ecf20Sopenharmony_ci					 " nodes=%*pbl",
48028c2ecf20Sopenharmony_ci					 nodemask_pr_args(&l->nodes));
48038c2ecf20Sopenharmony_ci
48048c2ecf20Sopenharmony_ci		len += sprintf(buf + len, "\n");
48058c2ecf20Sopenharmony_ci	}
48068c2ecf20Sopenharmony_ci
48078c2ecf20Sopenharmony_ci	free_loc_track(&t);
48088c2ecf20Sopenharmony_ci	if (!t.count)
48098c2ecf20Sopenharmony_ci		len += sprintf(buf, "No data\n");
48108c2ecf20Sopenharmony_ci	return len;
48118c2ecf20Sopenharmony_ci}
48128c2ecf20Sopenharmony_ci#endif	/* CONFIG_SLUB_DEBUG */
48138c2ecf20Sopenharmony_ci
48148c2ecf20Sopenharmony_ci#ifdef SLUB_RESILIENCY_TEST
48158c2ecf20Sopenharmony_cistatic void __init resiliency_test(void)
48168c2ecf20Sopenharmony_ci{
48178c2ecf20Sopenharmony_ci	u8 *p;
48188c2ecf20Sopenharmony_ci	int type = KMALLOC_NORMAL;
48198c2ecf20Sopenharmony_ci
48208c2ecf20Sopenharmony_ci	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
48218c2ecf20Sopenharmony_ci
48228c2ecf20Sopenharmony_ci	pr_err("SLUB resiliency testing\n");
48238c2ecf20Sopenharmony_ci	pr_err("-----------------------\n");
48248c2ecf20Sopenharmony_ci	pr_err("A. Corruption after allocation\n");
48258c2ecf20Sopenharmony_ci
48268c2ecf20Sopenharmony_ci	p = kzalloc(16, GFP_KERNEL);
48278c2ecf20Sopenharmony_ci	p[16] = 0x12;
48288c2ecf20Sopenharmony_ci	pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
48298c2ecf20Sopenharmony_ci	       p + 16);
48308c2ecf20Sopenharmony_ci
48318c2ecf20Sopenharmony_ci	validate_slab_cache(kmalloc_caches[type][4]);
48328c2ecf20Sopenharmony_ci
48338c2ecf20Sopenharmony_ci	/* Hmmm... The next two are dangerous */
48348c2ecf20Sopenharmony_ci	p = kzalloc(32, GFP_KERNEL);
48358c2ecf20Sopenharmony_ci	p[32 + sizeof(void *)] = 0x34;
48368c2ecf20Sopenharmony_ci	pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
48378c2ecf20Sopenharmony_ci	       p);
48388c2ecf20Sopenharmony_ci	pr_err("If allocated object is overwritten then not detectable\n\n");
48398c2ecf20Sopenharmony_ci
48408c2ecf20Sopenharmony_ci	validate_slab_cache(kmalloc_caches[type][5]);
48418c2ecf20Sopenharmony_ci	p = kzalloc(64, GFP_KERNEL);
48428c2ecf20Sopenharmony_ci	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
48438c2ecf20Sopenharmony_ci	*p = 0x56;
48448c2ecf20Sopenharmony_ci	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
48458c2ecf20Sopenharmony_ci	       p);
48468c2ecf20Sopenharmony_ci	pr_err("If allocated object is overwritten then not detectable\n\n");
48478c2ecf20Sopenharmony_ci	validate_slab_cache(kmalloc_caches[type][6]);
48488c2ecf20Sopenharmony_ci
48498c2ecf20Sopenharmony_ci	pr_err("\nB. Corruption after free\n");
48508c2ecf20Sopenharmony_ci	p = kzalloc(128, GFP_KERNEL);
48518c2ecf20Sopenharmony_ci	kfree(p);
48528c2ecf20Sopenharmony_ci	*p = 0x78;
48538c2ecf20Sopenharmony_ci	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
48548c2ecf20Sopenharmony_ci	validate_slab_cache(kmalloc_caches[type][7]);
48558c2ecf20Sopenharmony_ci
48568c2ecf20Sopenharmony_ci	p = kzalloc(256, GFP_KERNEL);
48578c2ecf20Sopenharmony_ci	kfree(p);
48588c2ecf20Sopenharmony_ci	p[50] = 0x9a;
48598c2ecf20Sopenharmony_ci	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
48608c2ecf20Sopenharmony_ci	validate_slab_cache(kmalloc_caches[type][8]);
48618c2ecf20Sopenharmony_ci
48628c2ecf20Sopenharmony_ci	p = kzalloc(512, GFP_KERNEL);
48638c2ecf20Sopenharmony_ci	kfree(p);
48648c2ecf20Sopenharmony_ci	p[512] = 0xab;
48658c2ecf20Sopenharmony_ci	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
48668c2ecf20Sopenharmony_ci	validate_slab_cache(kmalloc_caches[type][9]);
48678c2ecf20Sopenharmony_ci}
48688c2ecf20Sopenharmony_ci#else
48698c2ecf20Sopenharmony_ci#ifdef CONFIG_SYSFS
48708c2ecf20Sopenharmony_cistatic void resiliency_test(void) {};
48718c2ecf20Sopenharmony_ci#endif
48728c2ecf20Sopenharmony_ci#endif	/* SLUB_RESILIENCY_TEST */
48738c2ecf20Sopenharmony_ci
48748c2ecf20Sopenharmony_ci#ifdef CONFIG_SYSFS
48758c2ecf20Sopenharmony_cienum slab_stat_type {
48768c2ecf20Sopenharmony_ci	SL_ALL,			/* All slabs */
48778c2ecf20Sopenharmony_ci	SL_PARTIAL,		/* Only partially allocated slabs */
48788c2ecf20Sopenharmony_ci	SL_CPU,			/* Only slabs used for cpu caches */
48798c2ecf20Sopenharmony_ci	SL_OBJECTS,		/* Determine allocated objects not slabs */
48808c2ecf20Sopenharmony_ci	SL_TOTAL		/* Determine object capacity not slabs */
48818c2ecf20Sopenharmony_ci};
48828c2ecf20Sopenharmony_ci
48838c2ecf20Sopenharmony_ci#define SO_ALL		(1 << SL_ALL)
48848c2ecf20Sopenharmony_ci#define SO_PARTIAL	(1 << SL_PARTIAL)
48858c2ecf20Sopenharmony_ci#define SO_CPU		(1 << SL_CPU)
48868c2ecf20Sopenharmony_ci#define SO_OBJECTS	(1 << SL_OBJECTS)
48878c2ecf20Sopenharmony_ci#define SO_TOTAL	(1 << SL_TOTAL)
48888c2ecf20Sopenharmony_ci
48898c2ecf20Sopenharmony_ci#ifdef CONFIG_MEMCG
48908c2ecf20Sopenharmony_cistatic bool memcg_sysfs_enabled = IS_ENABLED(CONFIG_SLUB_MEMCG_SYSFS_ON);
48918c2ecf20Sopenharmony_ci
48928c2ecf20Sopenharmony_cistatic int __init setup_slub_memcg_sysfs(char *str)
48938c2ecf20Sopenharmony_ci{
48948c2ecf20Sopenharmony_ci	int v;
48958c2ecf20Sopenharmony_ci
48968c2ecf20Sopenharmony_ci	if (get_option(&str, &v) > 0)
48978c2ecf20Sopenharmony_ci		memcg_sysfs_enabled = v;
48988c2ecf20Sopenharmony_ci
48998c2ecf20Sopenharmony_ci	return 1;
49008c2ecf20Sopenharmony_ci}
49018c2ecf20Sopenharmony_ci
49028c2ecf20Sopenharmony_ci__setup("slub_memcg_sysfs=", setup_slub_memcg_sysfs);
49038c2ecf20Sopenharmony_ci#endif
49048c2ecf20Sopenharmony_ci
49058c2ecf20Sopenharmony_cistatic ssize_t show_slab_objects(struct kmem_cache *s,
49068c2ecf20Sopenharmony_ci			    char *buf, unsigned long flags)
49078c2ecf20Sopenharmony_ci{
49088c2ecf20Sopenharmony_ci	unsigned long total = 0;
49098c2ecf20Sopenharmony_ci	int node;
49108c2ecf20Sopenharmony_ci	int x;
49118c2ecf20Sopenharmony_ci	unsigned long *nodes;
49128c2ecf20Sopenharmony_ci
49138c2ecf20Sopenharmony_ci	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
49148c2ecf20Sopenharmony_ci	if (!nodes)
49158c2ecf20Sopenharmony_ci		return -ENOMEM;
49168c2ecf20Sopenharmony_ci
49178c2ecf20Sopenharmony_ci	if (flags & SO_CPU) {
49188c2ecf20Sopenharmony_ci		int cpu;
49198c2ecf20Sopenharmony_ci
49208c2ecf20Sopenharmony_ci		for_each_possible_cpu(cpu) {
49218c2ecf20Sopenharmony_ci			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
49228c2ecf20Sopenharmony_ci							       cpu);
49238c2ecf20Sopenharmony_ci			int node;
49248c2ecf20Sopenharmony_ci			struct page *page;
49258c2ecf20Sopenharmony_ci
49268c2ecf20Sopenharmony_ci			page = READ_ONCE(c->page);
49278c2ecf20Sopenharmony_ci			if (!page)
49288c2ecf20Sopenharmony_ci				continue;
49298c2ecf20Sopenharmony_ci
49308c2ecf20Sopenharmony_ci			node = page_to_nid(page);
49318c2ecf20Sopenharmony_ci			if (flags & SO_TOTAL)
49328c2ecf20Sopenharmony_ci				x = page->objects;
49338c2ecf20Sopenharmony_ci			else if (flags & SO_OBJECTS)
49348c2ecf20Sopenharmony_ci				x = page->inuse;
49358c2ecf20Sopenharmony_ci			else
49368c2ecf20Sopenharmony_ci				x = 1;
49378c2ecf20Sopenharmony_ci
49388c2ecf20Sopenharmony_ci			total += x;
49398c2ecf20Sopenharmony_ci			nodes[node] += x;
49408c2ecf20Sopenharmony_ci
49418c2ecf20Sopenharmony_ci			page = slub_percpu_partial_read_once(c);
49428c2ecf20Sopenharmony_ci			if (page) {
49438c2ecf20Sopenharmony_ci				node = page_to_nid(page);
49448c2ecf20Sopenharmony_ci				if (flags & SO_TOTAL)
49458c2ecf20Sopenharmony_ci					WARN_ON_ONCE(1);
49468c2ecf20Sopenharmony_ci				else if (flags & SO_OBJECTS)
49478c2ecf20Sopenharmony_ci					WARN_ON_ONCE(1);
49488c2ecf20Sopenharmony_ci				else
49498c2ecf20Sopenharmony_ci					x = page->pages;
49508c2ecf20Sopenharmony_ci				total += x;
49518c2ecf20Sopenharmony_ci				nodes[node] += x;
49528c2ecf20Sopenharmony_ci			}
49538c2ecf20Sopenharmony_ci		}
49548c2ecf20Sopenharmony_ci	}
49558c2ecf20Sopenharmony_ci
49568c2ecf20Sopenharmony_ci	/*
49578c2ecf20Sopenharmony_ci	 * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
49588c2ecf20Sopenharmony_ci	 * already held which will conflict with an existing lock order:
49598c2ecf20Sopenharmony_ci	 *
49608c2ecf20Sopenharmony_ci	 * mem_hotplug_lock->slab_mutex->kernfs_mutex
49618c2ecf20Sopenharmony_ci	 *
49628c2ecf20Sopenharmony_ci	 * We don't really need mem_hotplug_lock (to hold off
49638c2ecf20Sopenharmony_ci	 * slab_mem_going_offline_callback) here because slab's memory hot
49648c2ecf20Sopenharmony_ci	 * unplug code doesn't destroy the kmem_cache->node[] data.
49658c2ecf20Sopenharmony_ci	 */
49668c2ecf20Sopenharmony_ci
49678c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
49688c2ecf20Sopenharmony_ci	if (flags & SO_ALL) {
49698c2ecf20Sopenharmony_ci		struct kmem_cache_node *n;
49708c2ecf20Sopenharmony_ci
49718c2ecf20Sopenharmony_ci		for_each_kmem_cache_node(s, node, n) {
49728c2ecf20Sopenharmony_ci
49738c2ecf20Sopenharmony_ci			if (flags & SO_TOTAL)
49748c2ecf20Sopenharmony_ci				x = atomic_long_read(&n->total_objects);
49758c2ecf20Sopenharmony_ci			else if (flags & SO_OBJECTS)
49768c2ecf20Sopenharmony_ci				x = atomic_long_read(&n->total_objects) -
49778c2ecf20Sopenharmony_ci					count_partial(n, count_free);
49788c2ecf20Sopenharmony_ci			else
49798c2ecf20Sopenharmony_ci				x = atomic_long_read(&n->nr_slabs);
49808c2ecf20Sopenharmony_ci			total += x;
49818c2ecf20Sopenharmony_ci			nodes[node] += x;
49828c2ecf20Sopenharmony_ci		}
49838c2ecf20Sopenharmony_ci
49848c2ecf20Sopenharmony_ci	} else
49858c2ecf20Sopenharmony_ci#endif
49868c2ecf20Sopenharmony_ci	if (flags & SO_PARTIAL) {
49878c2ecf20Sopenharmony_ci		struct kmem_cache_node *n;
49888c2ecf20Sopenharmony_ci
49898c2ecf20Sopenharmony_ci		for_each_kmem_cache_node(s, node, n) {
49908c2ecf20Sopenharmony_ci			if (flags & SO_TOTAL)
49918c2ecf20Sopenharmony_ci				x = count_partial(n, count_total);
49928c2ecf20Sopenharmony_ci			else if (flags & SO_OBJECTS)
49938c2ecf20Sopenharmony_ci				x = count_partial(n, count_inuse);
49948c2ecf20Sopenharmony_ci			else
49958c2ecf20Sopenharmony_ci				x = n->nr_partial;
49968c2ecf20Sopenharmony_ci			total += x;
49978c2ecf20Sopenharmony_ci			nodes[node] += x;
49988c2ecf20Sopenharmony_ci		}
49998c2ecf20Sopenharmony_ci	}
50008c2ecf20Sopenharmony_ci	x = sprintf(buf, "%lu", total);
50018c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA
50028c2ecf20Sopenharmony_ci	for (node = 0; node < nr_node_ids; node++)
50038c2ecf20Sopenharmony_ci		if (nodes[node])
50048c2ecf20Sopenharmony_ci			x += sprintf(buf + x, " N%d=%lu",
50058c2ecf20Sopenharmony_ci					node, nodes[node]);
50068c2ecf20Sopenharmony_ci#endif
50078c2ecf20Sopenharmony_ci	kfree(nodes);
50088c2ecf20Sopenharmony_ci	return x + sprintf(buf + x, "\n");
50098c2ecf20Sopenharmony_ci}
50108c2ecf20Sopenharmony_ci
50118c2ecf20Sopenharmony_ci#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
50128c2ecf20Sopenharmony_ci#define to_slab(n) container_of(n, struct kmem_cache, kobj)
50138c2ecf20Sopenharmony_ci
50148c2ecf20Sopenharmony_cistruct slab_attribute {
50158c2ecf20Sopenharmony_ci	struct attribute attr;
50168c2ecf20Sopenharmony_ci	ssize_t (*show)(struct kmem_cache *s, char *buf);
50178c2ecf20Sopenharmony_ci	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
50188c2ecf20Sopenharmony_ci};
50198c2ecf20Sopenharmony_ci
50208c2ecf20Sopenharmony_ci#define SLAB_ATTR_RO(_name) \
50218c2ecf20Sopenharmony_ci	static struct slab_attribute _name##_attr = \
50228c2ecf20Sopenharmony_ci	__ATTR(_name, 0400, _name##_show, NULL)
50238c2ecf20Sopenharmony_ci
50248c2ecf20Sopenharmony_ci#define SLAB_ATTR(_name) \
50258c2ecf20Sopenharmony_ci	static struct slab_attribute _name##_attr =  \
50268c2ecf20Sopenharmony_ci	__ATTR(_name, 0600, _name##_show, _name##_store)
50278c2ecf20Sopenharmony_ci
50288c2ecf20Sopenharmony_cistatic ssize_t slab_size_show(struct kmem_cache *s, char *buf)
50298c2ecf20Sopenharmony_ci{
50308c2ecf20Sopenharmony_ci	return sprintf(buf, "%u\n", s->size);
50318c2ecf20Sopenharmony_ci}
50328c2ecf20Sopenharmony_ciSLAB_ATTR_RO(slab_size);
50338c2ecf20Sopenharmony_ci
50348c2ecf20Sopenharmony_cistatic ssize_t align_show(struct kmem_cache *s, char *buf)
50358c2ecf20Sopenharmony_ci{
50368c2ecf20Sopenharmony_ci	return sprintf(buf, "%u\n", s->align);
50378c2ecf20Sopenharmony_ci}
50388c2ecf20Sopenharmony_ciSLAB_ATTR_RO(align);
50398c2ecf20Sopenharmony_ci
50408c2ecf20Sopenharmony_cistatic ssize_t object_size_show(struct kmem_cache *s, char *buf)
50418c2ecf20Sopenharmony_ci{
50428c2ecf20Sopenharmony_ci	return sprintf(buf, "%u\n", s->object_size);
50438c2ecf20Sopenharmony_ci}
50448c2ecf20Sopenharmony_ciSLAB_ATTR_RO(object_size);
50458c2ecf20Sopenharmony_ci
50468c2ecf20Sopenharmony_cistatic ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
50478c2ecf20Sopenharmony_ci{
50488c2ecf20Sopenharmony_ci	return sprintf(buf, "%u\n", oo_objects(s->oo));
50498c2ecf20Sopenharmony_ci}
50508c2ecf20Sopenharmony_ciSLAB_ATTR_RO(objs_per_slab);
50518c2ecf20Sopenharmony_ci
50528c2ecf20Sopenharmony_cistatic ssize_t order_show(struct kmem_cache *s, char *buf)
50538c2ecf20Sopenharmony_ci{
50548c2ecf20Sopenharmony_ci	return sprintf(buf, "%u\n", oo_order(s->oo));
50558c2ecf20Sopenharmony_ci}
50568c2ecf20Sopenharmony_ciSLAB_ATTR_RO(order);
50578c2ecf20Sopenharmony_ci
50588c2ecf20Sopenharmony_cistatic ssize_t min_partial_show(struct kmem_cache *s, char *buf)
50598c2ecf20Sopenharmony_ci{
50608c2ecf20Sopenharmony_ci	return sprintf(buf, "%lu\n", s->min_partial);
50618c2ecf20Sopenharmony_ci}
50628c2ecf20Sopenharmony_ci
50638c2ecf20Sopenharmony_cistatic ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
50648c2ecf20Sopenharmony_ci				 size_t length)
50658c2ecf20Sopenharmony_ci{
50668c2ecf20Sopenharmony_ci	unsigned long min;
50678c2ecf20Sopenharmony_ci	int err;
50688c2ecf20Sopenharmony_ci
50698c2ecf20Sopenharmony_ci	err = kstrtoul(buf, 10, &min);
50708c2ecf20Sopenharmony_ci	if (err)
50718c2ecf20Sopenharmony_ci		return err;
50728c2ecf20Sopenharmony_ci
50738c2ecf20Sopenharmony_ci	set_min_partial(s, min);
50748c2ecf20Sopenharmony_ci	return length;
50758c2ecf20Sopenharmony_ci}
50768c2ecf20Sopenharmony_ciSLAB_ATTR(min_partial);
50778c2ecf20Sopenharmony_ci
50788c2ecf20Sopenharmony_cistatic ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
50798c2ecf20Sopenharmony_ci{
50808c2ecf20Sopenharmony_ci	return sprintf(buf, "%u\n", slub_cpu_partial(s));
50818c2ecf20Sopenharmony_ci}
50828c2ecf20Sopenharmony_ci
50838c2ecf20Sopenharmony_cistatic ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
50848c2ecf20Sopenharmony_ci				 size_t length)
50858c2ecf20Sopenharmony_ci{
50868c2ecf20Sopenharmony_ci	unsigned int objects;
50878c2ecf20Sopenharmony_ci	int err;
50888c2ecf20Sopenharmony_ci
50898c2ecf20Sopenharmony_ci	err = kstrtouint(buf, 10, &objects);
50908c2ecf20Sopenharmony_ci	if (err)
50918c2ecf20Sopenharmony_ci		return err;
50928c2ecf20Sopenharmony_ci	if (objects && !kmem_cache_has_cpu_partial(s))
50938c2ecf20Sopenharmony_ci		return -EINVAL;
50948c2ecf20Sopenharmony_ci
50958c2ecf20Sopenharmony_ci	slub_set_cpu_partial(s, objects);
50968c2ecf20Sopenharmony_ci	flush_all(s);
50978c2ecf20Sopenharmony_ci	return length;
50988c2ecf20Sopenharmony_ci}
50998c2ecf20Sopenharmony_ciSLAB_ATTR(cpu_partial);
51008c2ecf20Sopenharmony_ci
51018c2ecf20Sopenharmony_cistatic ssize_t ctor_show(struct kmem_cache *s, char *buf)
51028c2ecf20Sopenharmony_ci{
51038c2ecf20Sopenharmony_ci	if (!s->ctor)
51048c2ecf20Sopenharmony_ci		return 0;
51058c2ecf20Sopenharmony_ci	return sprintf(buf, "%pS\n", s->ctor);
51068c2ecf20Sopenharmony_ci}
51078c2ecf20Sopenharmony_ciSLAB_ATTR_RO(ctor);
51088c2ecf20Sopenharmony_ci
51098c2ecf20Sopenharmony_cistatic ssize_t aliases_show(struct kmem_cache *s, char *buf)
51108c2ecf20Sopenharmony_ci{
51118c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
51128c2ecf20Sopenharmony_ci}
51138c2ecf20Sopenharmony_ciSLAB_ATTR_RO(aliases);
51148c2ecf20Sopenharmony_ci
51158c2ecf20Sopenharmony_cistatic ssize_t partial_show(struct kmem_cache *s, char *buf)
51168c2ecf20Sopenharmony_ci{
51178c2ecf20Sopenharmony_ci	return show_slab_objects(s, buf, SO_PARTIAL);
51188c2ecf20Sopenharmony_ci}
51198c2ecf20Sopenharmony_ciSLAB_ATTR_RO(partial);
51208c2ecf20Sopenharmony_ci
51218c2ecf20Sopenharmony_cistatic ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
51228c2ecf20Sopenharmony_ci{
51238c2ecf20Sopenharmony_ci	return show_slab_objects(s, buf, SO_CPU);
51248c2ecf20Sopenharmony_ci}
51258c2ecf20Sopenharmony_ciSLAB_ATTR_RO(cpu_slabs);
51268c2ecf20Sopenharmony_ci
51278c2ecf20Sopenharmony_cistatic ssize_t objects_show(struct kmem_cache *s, char *buf)
51288c2ecf20Sopenharmony_ci{
51298c2ecf20Sopenharmony_ci	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
51308c2ecf20Sopenharmony_ci}
51318c2ecf20Sopenharmony_ciSLAB_ATTR_RO(objects);
51328c2ecf20Sopenharmony_ci
51338c2ecf20Sopenharmony_cistatic ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
51348c2ecf20Sopenharmony_ci{
51358c2ecf20Sopenharmony_ci	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
51368c2ecf20Sopenharmony_ci}
51378c2ecf20Sopenharmony_ciSLAB_ATTR_RO(objects_partial);
51388c2ecf20Sopenharmony_ci
51398c2ecf20Sopenharmony_cistatic ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
51408c2ecf20Sopenharmony_ci{
51418c2ecf20Sopenharmony_ci	int objects = 0;
51428c2ecf20Sopenharmony_ci	int pages = 0;
51438c2ecf20Sopenharmony_ci	int cpu;
51448c2ecf20Sopenharmony_ci	int len;
51458c2ecf20Sopenharmony_ci
51468c2ecf20Sopenharmony_ci	for_each_online_cpu(cpu) {
51478c2ecf20Sopenharmony_ci		struct page *page;
51488c2ecf20Sopenharmony_ci
51498c2ecf20Sopenharmony_ci		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
51508c2ecf20Sopenharmony_ci
51518c2ecf20Sopenharmony_ci		if (page) {
51528c2ecf20Sopenharmony_ci			pages += page->pages;
51538c2ecf20Sopenharmony_ci			objects += page->pobjects;
51548c2ecf20Sopenharmony_ci		}
51558c2ecf20Sopenharmony_ci	}
51568c2ecf20Sopenharmony_ci
51578c2ecf20Sopenharmony_ci	len = sprintf(buf, "%d(%d)", objects, pages);
51588c2ecf20Sopenharmony_ci
51598c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
51608c2ecf20Sopenharmony_ci	for_each_online_cpu(cpu) {
51618c2ecf20Sopenharmony_ci		struct page *page;
51628c2ecf20Sopenharmony_ci
51638c2ecf20Sopenharmony_ci		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
51648c2ecf20Sopenharmony_ci
51658c2ecf20Sopenharmony_ci		if (page && len < PAGE_SIZE - 20)
51668c2ecf20Sopenharmony_ci			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
51678c2ecf20Sopenharmony_ci				page->pobjects, page->pages);
51688c2ecf20Sopenharmony_ci	}
51698c2ecf20Sopenharmony_ci#endif
51708c2ecf20Sopenharmony_ci	return len + sprintf(buf + len, "\n");
51718c2ecf20Sopenharmony_ci}
51728c2ecf20Sopenharmony_ciSLAB_ATTR_RO(slabs_cpu_partial);
51738c2ecf20Sopenharmony_ci
51748c2ecf20Sopenharmony_cistatic ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
51758c2ecf20Sopenharmony_ci{
51768c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
51778c2ecf20Sopenharmony_ci}
51788c2ecf20Sopenharmony_ciSLAB_ATTR_RO(reclaim_account);
51798c2ecf20Sopenharmony_ci
51808c2ecf20Sopenharmony_cistatic ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
51818c2ecf20Sopenharmony_ci{
51828c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
51838c2ecf20Sopenharmony_ci}
51848c2ecf20Sopenharmony_ciSLAB_ATTR_RO(hwcache_align);
51858c2ecf20Sopenharmony_ci
51868c2ecf20Sopenharmony_ci#ifdef CONFIG_ZONE_DMA
51878c2ecf20Sopenharmony_cistatic ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
51888c2ecf20Sopenharmony_ci{
51898c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
51908c2ecf20Sopenharmony_ci}
51918c2ecf20Sopenharmony_ciSLAB_ATTR_RO(cache_dma);
51928c2ecf20Sopenharmony_ci#endif
51938c2ecf20Sopenharmony_ci
51948c2ecf20Sopenharmony_cistatic ssize_t usersize_show(struct kmem_cache *s, char *buf)
51958c2ecf20Sopenharmony_ci{
51968c2ecf20Sopenharmony_ci	return sprintf(buf, "%u\n", s->usersize);
51978c2ecf20Sopenharmony_ci}
51988c2ecf20Sopenharmony_ciSLAB_ATTR_RO(usersize);
51998c2ecf20Sopenharmony_ci
52008c2ecf20Sopenharmony_cistatic ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
52018c2ecf20Sopenharmony_ci{
52028c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
52038c2ecf20Sopenharmony_ci}
52048c2ecf20Sopenharmony_ciSLAB_ATTR_RO(destroy_by_rcu);
52058c2ecf20Sopenharmony_ci
52068c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
52078c2ecf20Sopenharmony_cistatic ssize_t slabs_show(struct kmem_cache *s, char *buf)
52088c2ecf20Sopenharmony_ci{
52098c2ecf20Sopenharmony_ci	return show_slab_objects(s, buf, SO_ALL);
52108c2ecf20Sopenharmony_ci}
52118c2ecf20Sopenharmony_ciSLAB_ATTR_RO(slabs);
52128c2ecf20Sopenharmony_ci
52138c2ecf20Sopenharmony_cistatic ssize_t total_objects_show(struct kmem_cache *s, char *buf)
52148c2ecf20Sopenharmony_ci{
52158c2ecf20Sopenharmony_ci	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
52168c2ecf20Sopenharmony_ci}
52178c2ecf20Sopenharmony_ciSLAB_ATTR_RO(total_objects);
52188c2ecf20Sopenharmony_ci
52198c2ecf20Sopenharmony_cistatic ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
52208c2ecf20Sopenharmony_ci{
52218c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
52228c2ecf20Sopenharmony_ci}
52238c2ecf20Sopenharmony_ciSLAB_ATTR_RO(sanity_checks);
52248c2ecf20Sopenharmony_ci
52258c2ecf20Sopenharmony_cistatic ssize_t trace_show(struct kmem_cache *s, char *buf)
52268c2ecf20Sopenharmony_ci{
52278c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
52288c2ecf20Sopenharmony_ci}
52298c2ecf20Sopenharmony_ciSLAB_ATTR_RO(trace);
52308c2ecf20Sopenharmony_ci
52318c2ecf20Sopenharmony_cistatic ssize_t red_zone_show(struct kmem_cache *s, char *buf)
52328c2ecf20Sopenharmony_ci{
52338c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
52348c2ecf20Sopenharmony_ci}
52358c2ecf20Sopenharmony_ci
52368c2ecf20Sopenharmony_ciSLAB_ATTR_RO(red_zone);
52378c2ecf20Sopenharmony_ci
52388c2ecf20Sopenharmony_cistatic ssize_t poison_show(struct kmem_cache *s, char *buf)
52398c2ecf20Sopenharmony_ci{
52408c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
52418c2ecf20Sopenharmony_ci}
52428c2ecf20Sopenharmony_ci
52438c2ecf20Sopenharmony_ciSLAB_ATTR_RO(poison);
52448c2ecf20Sopenharmony_ci
52458c2ecf20Sopenharmony_cistatic ssize_t store_user_show(struct kmem_cache *s, char *buf)
52468c2ecf20Sopenharmony_ci{
52478c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
52488c2ecf20Sopenharmony_ci}
52498c2ecf20Sopenharmony_ci
52508c2ecf20Sopenharmony_ciSLAB_ATTR_RO(store_user);
52518c2ecf20Sopenharmony_ci
52528c2ecf20Sopenharmony_cistatic ssize_t validate_show(struct kmem_cache *s, char *buf)
52538c2ecf20Sopenharmony_ci{
52548c2ecf20Sopenharmony_ci	return 0;
52558c2ecf20Sopenharmony_ci}
52568c2ecf20Sopenharmony_ci
52578c2ecf20Sopenharmony_cistatic ssize_t validate_store(struct kmem_cache *s,
52588c2ecf20Sopenharmony_ci			const char *buf, size_t length)
52598c2ecf20Sopenharmony_ci{
52608c2ecf20Sopenharmony_ci	int ret = -EINVAL;
52618c2ecf20Sopenharmony_ci
52628c2ecf20Sopenharmony_ci	if (buf[0] == '1') {
52638c2ecf20Sopenharmony_ci		ret = validate_slab_cache(s);
52648c2ecf20Sopenharmony_ci		if (ret >= 0)
52658c2ecf20Sopenharmony_ci			ret = length;
52668c2ecf20Sopenharmony_ci	}
52678c2ecf20Sopenharmony_ci	return ret;
52688c2ecf20Sopenharmony_ci}
52698c2ecf20Sopenharmony_ciSLAB_ATTR(validate);
52708c2ecf20Sopenharmony_ci
52718c2ecf20Sopenharmony_cistatic ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
52728c2ecf20Sopenharmony_ci{
52738c2ecf20Sopenharmony_ci	if (!(s->flags & SLAB_STORE_USER))
52748c2ecf20Sopenharmony_ci		return -ENOSYS;
52758c2ecf20Sopenharmony_ci	return list_locations(s, buf, TRACK_ALLOC);
52768c2ecf20Sopenharmony_ci}
52778c2ecf20Sopenharmony_ciSLAB_ATTR_RO(alloc_calls);
52788c2ecf20Sopenharmony_ci
52798c2ecf20Sopenharmony_cistatic ssize_t free_calls_show(struct kmem_cache *s, char *buf)
52808c2ecf20Sopenharmony_ci{
52818c2ecf20Sopenharmony_ci	if (!(s->flags & SLAB_STORE_USER))
52828c2ecf20Sopenharmony_ci		return -ENOSYS;
52838c2ecf20Sopenharmony_ci	return list_locations(s, buf, TRACK_FREE);
52848c2ecf20Sopenharmony_ci}
52858c2ecf20Sopenharmony_ciSLAB_ATTR_RO(free_calls);
52868c2ecf20Sopenharmony_ci#endif /* CONFIG_SLUB_DEBUG */
52878c2ecf20Sopenharmony_ci
52888c2ecf20Sopenharmony_ci#ifdef CONFIG_FAILSLAB
52898c2ecf20Sopenharmony_cistatic ssize_t failslab_show(struct kmem_cache *s, char *buf)
52908c2ecf20Sopenharmony_ci{
52918c2ecf20Sopenharmony_ci	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
52928c2ecf20Sopenharmony_ci}
52938c2ecf20Sopenharmony_ciSLAB_ATTR_RO(failslab);
52948c2ecf20Sopenharmony_ci#endif
52958c2ecf20Sopenharmony_ci
52968c2ecf20Sopenharmony_cistatic ssize_t shrink_show(struct kmem_cache *s, char *buf)
52978c2ecf20Sopenharmony_ci{
52988c2ecf20Sopenharmony_ci	return 0;
52998c2ecf20Sopenharmony_ci}
53008c2ecf20Sopenharmony_ci
53018c2ecf20Sopenharmony_cistatic ssize_t shrink_store(struct kmem_cache *s,
53028c2ecf20Sopenharmony_ci			const char *buf, size_t length)
53038c2ecf20Sopenharmony_ci{
53048c2ecf20Sopenharmony_ci	if (buf[0] == '1')
53058c2ecf20Sopenharmony_ci		kmem_cache_shrink(s);
53068c2ecf20Sopenharmony_ci	else
53078c2ecf20Sopenharmony_ci		return -EINVAL;
53088c2ecf20Sopenharmony_ci	return length;
53098c2ecf20Sopenharmony_ci}
53108c2ecf20Sopenharmony_ciSLAB_ATTR(shrink);
53118c2ecf20Sopenharmony_ci
53128c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA
53138c2ecf20Sopenharmony_cistatic ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
53148c2ecf20Sopenharmony_ci{
53158c2ecf20Sopenharmony_ci	return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
53168c2ecf20Sopenharmony_ci}
53178c2ecf20Sopenharmony_ci
53188c2ecf20Sopenharmony_cistatic ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
53198c2ecf20Sopenharmony_ci				const char *buf, size_t length)
53208c2ecf20Sopenharmony_ci{
53218c2ecf20Sopenharmony_ci	unsigned int ratio;
53228c2ecf20Sopenharmony_ci	int err;
53238c2ecf20Sopenharmony_ci
53248c2ecf20Sopenharmony_ci	err = kstrtouint(buf, 10, &ratio);
53258c2ecf20Sopenharmony_ci	if (err)
53268c2ecf20Sopenharmony_ci		return err;
53278c2ecf20Sopenharmony_ci	if (ratio > 100)
53288c2ecf20Sopenharmony_ci		return -ERANGE;
53298c2ecf20Sopenharmony_ci
53308c2ecf20Sopenharmony_ci	s->remote_node_defrag_ratio = ratio * 10;
53318c2ecf20Sopenharmony_ci
53328c2ecf20Sopenharmony_ci	return length;
53338c2ecf20Sopenharmony_ci}
53348c2ecf20Sopenharmony_ciSLAB_ATTR(remote_node_defrag_ratio);
53358c2ecf20Sopenharmony_ci#endif
53368c2ecf20Sopenharmony_ci
53378c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_STATS
53388c2ecf20Sopenharmony_cistatic int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
53398c2ecf20Sopenharmony_ci{
53408c2ecf20Sopenharmony_ci	unsigned long sum  = 0;
53418c2ecf20Sopenharmony_ci	int cpu;
53428c2ecf20Sopenharmony_ci	int len;
53438c2ecf20Sopenharmony_ci	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
53448c2ecf20Sopenharmony_ci
53458c2ecf20Sopenharmony_ci	if (!data)
53468c2ecf20Sopenharmony_ci		return -ENOMEM;
53478c2ecf20Sopenharmony_ci
53488c2ecf20Sopenharmony_ci	for_each_online_cpu(cpu) {
53498c2ecf20Sopenharmony_ci		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
53508c2ecf20Sopenharmony_ci
53518c2ecf20Sopenharmony_ci		data[cpu] = x;
53528c2ecf20Sopenharmony_ci		sum += x;
53538c2ecf20Sopenharmony_ci	}
53548c2ecf20Sopenharmony_ci
53558c2ecf20Sopenharmony_ci	len = sprintf(buf, "%lu", sum);
53568c2ecf20Sopenharmony_ci
53578c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP
53588c2ecf20Sopenharmony_ci	for_each_online_cpu(cpu) {
53598c2ecf20Sopenharmony_ci		if (data[cpu] && len < PAGE_SIZE - 20)
53608c2ecf20Sopenharmony_ci			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
53618c2ecf20Sopenharmony_ci	}
53628c2ecf20Sopenharmony_ci#endif
53638c2ecf20Sopenharmony_ci	kfree(data);
53648c2ecf20Sopenharmony_ci	return len + sprintf(buf + len, "\n");
53658c2ecf20Sopenharmony_ci}
53668c2ecf20Sopenharmony_ci
53678c2ecf20Sopenharmony_cistatic void clear_stat(struct kmem_cache *s, enum stat_item si)
53688c2ecf20Sopenharmony_ci{
53698c2ecf20Sopenharmony_ci	int cpu;
53708c2ecf20Sopenharmony_ci
53718c2ecf20Sopenharmony_ci	for_each_online_cpu(cpu)
53728c2ecf20Sopenharmony_ci		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
53738c2ecf20Sopenharmony_ci}
53748c2ecf20Sopenharmony_ci
53758c2ecf20Sopenharmony_ci#define STAT_ATTR(si, text) 					\
53768c2ecf20Sopenharmony_cistatic ssize_t text##_show(struct kmem_cache *s, char *buf)	\
53778c2ecf20Sopenharmony_ci{								\
53788c2ecf20Sopenharmony_ci	return show_stat(s, buf, si);				\
53798c2ecf20Sopenharmony_ci}								\
53808c2ecf20Sopenharmony_cistatic ssize_t text##_store(struct kmem_cache *s,		\
53818c2ecf20Sopenharmony_ci				const char *buf, size_t length)	\
53828c2ecf20Sopenharmony_ci{								\
53838c2ecf20Sopenharmony_ci	if (buf[0] != '0')					\
53848c2ecf20Sopenharmony_ci		return -EINVAL;					\
53858c2ecf20Sopenharmony_ci	clear_stat(s, si);					\
53868c2ecf20Sopenharmony_ci	return length;						\
53878c2ecf20Sopenharmony_ci}								\
53888c2ecf20Sopenharmony_ciSLAB_ATTR(text);						\
53898c2ecf20Sopenharmony_ci
53908c2ecf20Sopenharmony_ciSTAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
53918c2ecf20Sopenharmony_ciSTAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
53928c2ecf20Sopenharmony_ciSTAT_ATTR(FREE_FASTPATH, free_fastpath);
53938c2ecf20Sopenharmony_ciSTAT_ATTR(FREE_SLOWPATH, free_slowpath);
53948c2ecf20Sopenharmony_ciSTAT_ATTR(FREE_FROZEN, free_frozen);
53958c2ecf20Sopenharmony_ciSTAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
53968c2ecf20Sopenharmony_ciSTAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
53978c2ecf20Sopenharmony_ciSTAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
53988c2ecf20Sopenharmony_ciSTAT_ATTR(ALLOC_SLAB, alloc_slab);
53998c2ecf20Sopenharmony_ciSTAT_ATTR(ALLOC_REFILL, alloc_refill);
54008c2ecf20Sopenharmony_ciSTAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
54018c2ecf20Sopenharmony_ciSTAT_ATTR(FREE_SLAB, free_slab);
54028c2ecf20Sopenharmony_ciSTAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
54038c2ecf20Sopenharmony_ciSTAT_ATTR(DEACTIVATE_FULL, deactivate_full);
54048c2ecf20Sopenharmony_ciSTAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
54058c2ecf20Sopenharmony_ciSTAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
54068c2ecf20Sopenharmony_ciSTAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
54078c2ecf20Sopenharmony_ciSTAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
54088c2ecf20Sopenharmony_ciSTAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
54098c2ecf20Sopenharmony_ciSTAT_ATTR(ORDER_FALLBACK, order_fallback);
54108c2ecf20Sopenharmony_ciSTAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
54118c2ecf20Sopenharmony_ciSTAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
54128c2ecf20Sopenharmony_ciSTAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
54138c2ecf20Sopenharmony_ciSTAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
54148c2ecf20Sopenharmony_ciSTAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
54158c2ecf20Sopenharmony_ciSTAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
54168c2ecf20Sopenharmony_ci#endif	/* CONFIG_SLUB_STATS */
54178c2ecf20Sopenharmony_ci
54188c2ecf20Sopenharmony_cistatic struct attribute *slab_attrs[] = {
54198c2ecf20Sopenharmony_ci	&slab_size_attr.attr,
54208c2ecf20Sopenharmony_ci	&object_size_attr.attr,
54218c2ecf20Sopenharmony_ci	&objs_per_slab_attr.attr,
54228c2ecf20Sopenharmony_ci	&order_attr.attr,
54238c2ecf20Sopenharmony_ci	&min_partial_attr.attr,
54248c2ecf20Sopenharmony_ci	&cpu_partial_attr.attr,
54258c2ecf20Sopenharmony_ci	&objects_attr.attr,
54268c2ecf20Sopenharmony_ci	&objects_partial_attr.attr,
54278c2ecf20Sopenharmony_ci	&partial_attr.attr,
54288c2ecf20Sopenharmony_ci	&cpu_slabs_attr.attr,
54298c2ecf20Sopenharmony_ci	&ctor_attr.attr,
54308c2ecf20Sopenharmony_ci	&aliases_attr.attr,
54318c2ecf20Sopenharmony_ci	&align_attr.attr,
54328c2ecf20Sopenharmony_ci	&hwcache_align_attr.attr,
54338c2ecf20Sopenharmony_ci	&reclaim_account_attr.attr,
54348c2ecf20Sopenharmony_ci	&destroy_by_rcu_attr.attr,
54358c2ecf20Sopenharmony_ci	&shrink_attr.attr,
54368c2ecf20Sopenharmony_ci	&slabs_cpu_partial_attr.attr,
54378c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
54388c2ecf20Sopenharmony_ci	&total_objects_attr.attr,
54398c2ecf20Sopenharmony_ci	&slabs_attr.attr,
54408c2ecf20Sopenharmony_ci	&sanity_checks_attr.attr,
54418c2ecf20Sopenharmony_ci	&trace_attr.attr,
54428c2ecf20Sopenharmony_ci	&red_zone_attr.attr,
54438c2ecf20Sopenharmony_ci	&poison_attr.attr,
54448c2ecf20Sopenharmony_ci	&store_user_attr.attr,
54458c2ecf20Sopenharmony_ci	&validate_attr.attr,
54468c2ecf20Sopenharmony_ci	&alloc_calls_attr.attr,
54478c2ecf20Sopenharmony_ci	&free_calls_attr.attr,
54488c2ecf20Sopenharmony_ci#endif
54498c2ecf20Sopenharmony_ci#ifdef CONFIG_ZONE_DMA
54508c2ecf20Sopenharmony_ci	&cache_dma_attr.attr,
54518c2ecf20Sopenharmony_ci#endif
54528c2ecf20Sopenharmony_ci#ifdef CONFIG_NUMA
54538c2ecf20Sopenharmony_ci	&remote_node_defrag_ratio_attr.attr,
54548c2ecf20Sopenharmony_ci#endif
54558c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_STATS
54568c2ecf20Sopenharmony_ci	&alloc_fastpath_attr.attr,
54578c2ecf20Sopenharmony_ci	&alloc_slowpath_attr.attr,
54588c2ecf20Sopenharmony_ci	&free_fastpath_attr.attr,
54598c2ecf20Sopenharmony_ci	&free_slowpath_attr.attr,
54608c2ecf20Sopenharmony_ci	&free_frozen_attr.attr,
54618c2ecf20Sopenharmony_ci	&free_add_partial_attr.attr,
54628c2ecf20Sopenharmony_ci	&free_remove_partial_attr.attr,
54638c2ecf20Sopenharmony_ci	&alloc_from_partial_attr.attr,
54648c2ecf20Sopenharmony_ci	&alloc_slab_attr.attr,
54658c2ecf20Sopenharmony_ci	&alloc_refill_attr.attr,
54668c2ecf20Sopenharmony_ci	&alloc_node_mismatch_attr.attr,
54678c2ecf20Sopenharmony_ci	&free_slab_attr.attr,
54688c2ecf20Sopenharmony_ci	&cpuslab_flush_attr.attr,
54698c2ecf20Sopenharmony_ci	&deactivate_full_attr.attr,
54708c2ecf20Sopenharmony_ci	&deactivate_empty_attr.attr,
54718c2ecf20Sopenharmony_ci	&deactivate_to_head_attr.attr,
54728c2ecf20Sopenharmony_ci	&deactivate_to_tail_attr.attr,
54738c2ecf20Sopenharmony_ci	&deactivate_remote_frees_attr.attr,
54748c2ecf20Sopenharmony_ci	&deactivate_bypass_attr.attr,
54758c2ecf20Sopenharmony_ci	&order_fallback_attr.attr,
54768c2ecf20Sopenharmony_ci	&cmpxchg_double_fail_attr.attr,
54778c2ecf20Sopenharmony_ci	&cmpxchg_double_cpu_fail_attr.attr,
54788c2ecf20Sopenharmony_ci	&cpu_partial_alloc_attr.attr,
54798c2ecf20Sopenharmony_ci	&cpu_partial_free_attr.attr,
54808c2ecf20Sopenharmony_ci	&cpu_partial_node_attr.attr,
54818c2ecf20Sopenharmony_ci	&cpu_partial_drain_attr.attr,
54828c2ecf20Sopenharmony_ci#endif
54838c2ecf20Sopenharmony_ci#ifdef CONFIG_FAILSLAB
54848c2ecf20Sopenharmony_ci	&failslab_attr.attr,
54858c2ecf20Sopenharmony_ci#endif
54868c2ecf20Sopenharmony_ci	&usersize_attr.attr,
54878c2ecf20Sopenharmony_ci
54888c2ecf20Sopenharmony_ci	NULL
54898c2ecf20Sopenharmony_ci};
54908c2ecf20Sopenharmony_ci
54918c2ecf20Sopenharmony_cistatic const struct attribute_group slab_attr_group = {
54928c2ecf20Sopenharmony_ci	.attrs = slab_attrs,
54938c2ecf20Sopenharmony_ci};
54948c2ecf20Sopenharmony_ci
54958c2ecf20Sopenharmony_cistatic ssize_t slab_attr_show(struct kobject *kobj,
54968c2ecf20Sopenharmony_ci				struct attribute *attr,
54978c2ecf20Sopenharmony_ci				char *buf)
54988c2ecf20Sopenharmony_ci{
54998c2ecf20Sopenharmony_ci	struct slab_attribute *attribute;
55008c2ecf20Sopenharmony_ci	struct kmem_cache *s;
55018c2ecf20Sopenharmony_ci	int err;
55028c2ecf20Sopenharmony_ci
55038c2ecf20Sopenharmony_ci	attribute = to_slab_attr(attr);
55048c2ecf20Sopenharmony_ci	s = to_slab(kobj);
55058c2ecf20Sopenharmony_ci
55068c2ecf20Sopenharmony_ci	if (!attribute->show)
55078c2ecf20Sopenharmony_ci		return -EIO;
55088c2ecf20Sopenharmony_ci
55098c2ecf20Sopenharmony_ci	err = attribute->show(s, buf);
55108c2ecf20Sopenharmony_ci
55118c2ecf20Sopenharmony_ci	return err;
55128c2ecf20Sopenharmony_ci}
55138c2ecf20Sopenharmony_ci
55148c2ecf20Sopenharmony_cistatic ssize_t slab_attr_store(struct kobject *kobj,
55158c2ecf20Sopenharmony_ci				struct attribute *attr,
55168c2ecf20Sopenharmony_ci				const char *buf, size_t len)
55178c2ecf20Sopenharmony_ci{
55188c2ecf20Sopenharmony_ci	struct slab_attribute *attribute;
55198c2ecf20Sopenharmony_ci	struct kmem_cache *s;
55208c2ecf20Sopenharmony_ci	int err;
55218c2ecf20Sopenharmony_ci
55228c2ecf20Sopenharmony_ci	attribute = to_slab_attr(attr);
55238c2ecf20Sopenharmony_ci	s = to_slab(kobj);
55248c2ecf20Sopenharmony_ci
55258c2ecf20Sopenharmony_ci	if (!attribute->store)
55268c2ecf20Sopenharmony_ci		return -EIO;
55278c2ecf20Sopenharmony_ci
55288c2ecf20Sopenharmony_ci	err = attribute->store(s, buf, len);
55298c2ecf20Sopenharmony_ci	return err;
55308c2ecf20Sopenharmony_ci}
55318c2ecf20Sopenharmony_ci
55328c2ecf20Sopenharmony_cistatic void kmem_cache_release(struct kobject *k)
55338c2ecf20Sopenharmony_ci{
55348c2ecf20Sopenharmony_ci	slab_kmem_cache_release(to_slab(k));
55358c2ecf20Sopenharmony_ci}
55368c2ecf20Sopenharmony_ci
55378c2ecf20Sopenharmony_cistatic const struct sysfs_ops slab_sysfs_ops = {
55388c2ecf20Sopenharmony_ci	.show = slab_attr_show,
55398c2ecf20Sopenharmony_ci	.store = slab_attr_store,
55408c2ecf20Sopenharmony_ci};
55418c2ecf20Sopenharmony_ci
55428c2ecf20Sopenharmony_cistatic struct kobj_type slab_ktype = {
55438c2ecf20Sopenharmony_ci	.sysfs_ops = &slab_sysfs_ops,
55448c2ecf20Sopenharmony_ci	.release = kmem_cache_release,
55458c2ecf20Sopenharmony_ci};
55468c2ecf20Sopenharmony_ci
55478c2ecf20Sopenharmony_cistatic struct kset *slab_kset;
55488c2ecf20Sopenharmony_ci
55498c2ecf20Sopenharmony_cistatic inline struct kset *cache_kset(struct kmem_cache *s)
55508c2ecf20Sopenharmony_ci{
55518c2ecf20Sopenharmony_ci	return slab_kset;
55528c2ecf20Sopenharmony_ci}
55538c2ecf20Sopenharmony_ci
55548c2ecf20Sopenharmony_ci#define ID_STR_LENGTH 64
55558c2ecf20Sopenharmony_ci
55568c2ecf20Sopenharmony_ci/* Create a unique string id for a slab cache:
55578c2ecf20Sopenharmony_ci *
55588c2ecf20Sopenharmony_ci * Format	:[flags-]size
55598c2ecf20Sopenharmony_ci */
55608c2ecf20Sopenharmony_cistatic char *create_unique_id(struct kmem_cache *s)
55618c2ecf20Sopenharmony_ci{
55628c2ecf20Sopenharmony_ci	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
55638c2ecf20Sopenharmony_ci	char *p = name;
55648c2ecf20Sopenharmony_ci
55658c2ecf20Sopenharmony_ci	if (!name)
55668c2ecf20Sopenharmony_ci		return ERR_PTR(-ENOMEM);
55678c2ecf20Sopenharmony_ci
55688c2ecf20Sopenharmony_ci	*p++ = ':';
55698c2ecf20Sopenharmony_ci	/*
55708c2ecf20Sopenharmony_ci	 * First flags affecting slabcache operations. We will only
55718c2ecf20Sopenharmony_ci	 * get here for aliasable slabs so we do not need to support
55728c2ecf20Sopenharmony_ci	 * too many flags. The flags here must cover all flags that
55738c2ecf20Sopenharmony_ci	 * are matched during merging to guarantee that the id is
55748c2ecf20Sopenharmony_ci	 * unique.
55758c2ecf20Sopenharmony_ci	 */
55768c2ecf20Sopenharmony_ci	if (s->flags & SLAB_CACHE_DMA)
55778c2ecf20Sopenharmony_ci		*p++ = 'd';
55788c2ecf20Sopenharmony_ci	if (s->flags & SLAB_CACHE_DMA32)
55798c2ecf20Sopenharmony_ci		*p++ = 'D';
55808c2ecf20Sopenharmony_ci	if (s->flags & SLAB_RECLAIM_ACCOUNT)
55818c2ecf20Sopenharmony_ci		*p++ = 'a';
55828c2ecf20Sopenharmony_ci	if (s->flags & SLAB_CONSISTENCY_CHECKS)
55838c2ecf20Sopenharmony_ci		*p++ = 'F';
55848c2ecf20Sopenharmony_ci	if (s->flags & SLAB_ACCOUNT)
55858c2ecf20Sopenharmony_ci		*p++ = 'A';
55868c2ecf20Sopenharmony_ci	if (p != name + 1)
55878c2ecf20Sopenharmony_ci		*p++ = '-';
55888c2ecf20Sopenharmony_ci	p += sprintf(p, "%07u", s->size);
55898c2ecf20Sopenharmony_ci
55908c2ecf20Sopenharmony_ci	BUG_ON(p > name + ID_STR_LENGTH - 1);
55918c2ecf20Sopenharmony_ci	return name;
55928c2ecf20Sopenharmony_ci}
55938c2ecf20Sopenharmony_ci
/*
 * Register cache @s in sysfs.  Mergeable caches are added under a
 * generated unique id with a symlink alias bearing s->name; unmergeable
 * (typically debug) caches use s->name directly as the directory name.
 */
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	struct kset *kset = cache_kset(s);
	int unmergeable = slab_unmergeable(s);

	if (!kset) {
		/*
		 * No kset means sysfs is unavailable for this cache; just
		 * init the kobject so later del/put calls remain safe.
		 */
		kobject_init(&s->kobj, &slab_ktype);
		return 0;
	}

	/* Debug metadata defeats merging, so treat the cache as unique. */
	if (!unmergeable && disable_higher_order_debug &&
			(slub_debug & DEBUG_METADATA_FLAGS))
		unmergeable = 1;

	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
		if (IS_ERR(name))
			return PTR_ERR(name);
	}

	s->kobj.kset = kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err)
		goto out;

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		goto out_del_kobj;

	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
	}
out:
	/* The unique id was kmalloc'ed by create_unique_id(); drop it. */
	if (!unmergeable)
		kfree(name);
	return err;
out_del_kobj:
	kobject_del(&s->kobj);
	goto out;
}
56498c2ecf20Sopenharmony_ci
56508c2ecf20Sopenharmony_civoid sysfs_slab_unlink(struct kmem_cache *s)
56518c2ecf20Sopenharmony_ci{
56528c2ecf20Sopenharmony_ci	if (slab_state >= FULL)
56538c2ecf20Sopenharmony_ci		kobject_del(&s->kobj);
56548c2ecf20Sopenharmony_ci}
56558c2ecf20Sopenharmony_ci
56568c2ecf20Sopenharmony_civoid sysfs_slab_release(struct kmem_cache *s)
56578c2ecf20Sopenharmony_ci{
56588c2ecf20Sopenharmony_ci	if (slab_state >= FULL)
56598c2ecf20Sopenharmony_ci		kobject_put(&s->kobj);
56608c2ecf20Sopenharmony_ci}
56618c2ecf20Sopenharmony_ci
/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;		/* cache the alias points at */
	const char *name;		/* alias name (cache's proper name) */
	struct saved_alias *next;	/* singly-linked list link */
};

/* Head of the buffered alias list, drained by slab_sysfs_init(). */
static struct saved_alias *alias_list;
56738c2ecf20Sopenharmony_ci
56748c2ecf20Sopenharmony_cistatic int sysfs_slab_alias(struct kmem_cache *s, const char *name)
56758c2ecf20Sopenharmony_ci{
56768c2ecf20Sopenharmony_ci	struct saved_alias *al;
56778c2ecf20Sopenharmony_ci
56788c2ecf20Sopenharmony_ci	if (slab_state == FULL) {
56798c2ecf20Sopenharmony_ci		/*
56808c2ecf20Sopenharmony_ci		 * If we have a leftover link then remove it.
56818c2ecf20Sopenharmony_ci		 */
56828c2ecf20Sopenharmony_ci		sysfs_remove_link(&slab_kset->kobj, name);
56838c2ecf20Sopenharmony_ci		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
56848c2ecf20Sopenharmony_ci	}
56858c2ecf20Sopenharmony_ci
56868c2ecf20Sopenharmony_ci	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
56878c2ecf20Sopenharmony_ci	if (!al)
56888c2ecf20Sopenharmony_ci		return -ENOMEM;
56898c2ecf20Sopenharmony_ci
56908c2ecf20Sopenharmony_ci	al->s = s;
56918c2ecf20Sopenharmony_ci	al->name = name;
56928c2ecf20Sopenharmony_ci	al->next = alias_list;
56938c2ecf20Sopenharmony_ci	alias_list = al;
56948c2ecf20Sopenharmony_ci	return 0;
56958c2ecf20Sopenharmony_ci}
56968c2ecf20Sopenharmony_ci
/*
 * Late-boot setup of /sys/kernel/slab: create the kset, register every
 * cache created so far, and replay aliases buffered before sysfs was up.
 */
static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	/* Prevent caches from coming or going while we register them. */
	mutex_lock(&slab_mutex);

	slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
	if (!slab_kset) {
		mutex_unlock(&slab_mutex);
		pr_err("Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	/* From here on sysfs_slab_alias() creates links directly. */
	slab_state = FULL;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
			       s->name);
	}

	/* Drain the aliases queued while sysfs was unavailable. */
	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
			       al->name);
		kfree(al);
	}

	mutex_unlock(&slab_mutex);
	resiliency_test();
	return 0;
}
57358c2ecf20Sopenharmony_ci
57368c2ecf20Sopenharmony_ci__initcall(slab_sysfs_init);
57378c2ecf20Sopenharmony_ci#endif /* CONFIG_SYSFS */
57388c2ecf20Sopenharmony_ci
57398c2ecf20Sopenharmony_ci/*
57408c2ecf20Sopenharmony_ci * The /proc/slabinfo ABI
57418c2ecf20Sopenharmony_ci */
57428c2ecf20Sopenharmony_ci#ifdef CONFIG_SLUB_DEBUG
57438c2ecf20Sopenharmony_civoid get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
57448c2ecf20Sopenharmony_ci{
57458c2ecf20Sopenharmony_ci	unsigned long nr_slabs = 0;
57468c2ecf20Sopenharmony_ci	unsigned long nr_objs = 0;
57478c2ecf20Sopenharmony_ci	unsigned long nr_free = 0;
57488c2ecf20Sopenharmony_ci	int node;
57498c2ecf20Sopenharmony_ci	struct kmem_cache_node *n;
57508c2ecf20Sopenharmony_ci
57518c2ecf20Sopenharmony_ci	for_each_kmem_cache_node(s, node, n) {
57528c2ecf20Sopenharmony_ci		nr_slabs += node_nr_slabs(n);
57538c2ecf20Sopenharmony_ci		nr_objs += node_nr_objs(n);
57548c2ecf20Sopenharmony_ci		nr_free += count_partial(n, count_free);
57558c2ecf20Sopenharmony_ci	}
57568c2ecf20Sopenharmony_ci
57578c2ecf20Sopenharmony_ci	sinfo->active_objs = nr_objs - nr_free;
57588c2ecf20Sopenharmony_ci	sinfo->num_objs = nr_objs;
57598c2ecf20Sopenharmony_ci	sinfo->active_slabs = nr_slabs;
57608c2ecf20Sopenharmony_ci	sinfo->num_slabs = nr_slabs;
57618c2ecf20Sopenharmony_ci	sinfo->objects_per_slab = oo_objects(s->oo);
57628c2ecf20Sopenharmony_ci	sinfo->cache_order = oo_order(s->oo);
57638c2ecf20Sopenharmony_ci}
57648c2ecf20Sopenharmony_ci
/* Intentionally empty: SLUB appends no extra stats to /proc/slabinfo. */
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
{
}
57688c2ecf20Sopenharmony_ci
/*
 * Writes to /proc/slabinfo are not supported by SLUB; tuning is done
 * through the per-cache files under /sys/kernel/slab instead.
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EIO;
}
57748c2ecf20Sopenharmony_ci#endif /* CONFIG_SLUB_DEBUG */
5775