// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
168c2ecf20Sopenharmony_ci
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"
#include "../slab.h"
428c2ecf20Sopenharmony_ci
438c2ecf20Sopenharmony_cidepot_stack_handle_t kasan_save_stack(gfp_t flags)
448c2ecf20Sopenharmony_ci{
458c2ecf20Sopenharmony_ci	unsigned long entries[KASAN_STACK_DEPTH];
468c2ecf20Sopenharmony_ci	unsigned int nr_entries;
478c2ecf20Sopenharmony_ci
488c2ecf20Sopenharmony_ci	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
498c2ecf20Sopenharmony_ci	nr_entries = filter_irq_stacks(entries, nr_entries);
508c2ecf20Sopenharmony_ci	return stack_depot_save(entries, nr_entries, flags);
518c2ecf20Sopenharmony_ci}
528c2ecf20Sopenharmony_ci
538c2ecf20Sopenharmony_civoid kasan_set_track(struct kasan_track *track, gfp_t flags)
548c2ecf20Sopenharmony_ci{
558c2ecf20Sopenharmony_ci	track->pid = current->pid;
568c2ecf20Sopenharmony_ci	track->stack = kasan_save_stack(flags);
578c2ecf20Sopenharmony_ci}
588c2ecf20Sopenharmony_ci
/*
 * Adjust the per-task KASAN nesting depth counter.  The counter gates
 * report generation elsewhere (the report path checks kasan_depth);
 * enable/disable calls are expected to be paired so the counter nests.
 * NOTE(review): the exact polarity (which value suppresses reports) is
 * defined by the report code, not visible here — confirm there.
 */
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
638c2ecf20Sopenharmony_ci
/*
 * Counterpart of kasan_enable_current(): decrements the per-task
 * kasan_depth nesting counter.  Must be balanced with the increment.
 */
void kasan_disable_current(void)
{
	current->kasan_depth--;
}
688c2ecf20Sopenharmony_ci
/*
 * Out-of-line check used by outline-instrumented code: validate a read
 * of @size bytes at @p against the shadow.  Returns false (after
 * reporting) if the access is invalid.
 */
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);
748c2ecf20Sopenharmony_ci
/*
 * Same as __kasan_check_read() but for a write access (write == true),
 * so write-only bugs are attributed correctly in reports.
 */
bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);
808c2ecf20Sopenharmony_ci
#undef memset
/*
 * KASAN interceptor for memset(): check the destination range as a
 * write, then delegate to the architecture's uninstrumented __memset().
 * On a failed check the write is skipped and NULL is returned.
 */
void *memset(void *addr, int c, size_t len)
{
	if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}
898c2ecf20Sopenharmony_ci
#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
/*
 * KASAN interceptor for memmove(): check @src as a read and @dest as a
 * write before delegating to __memmove().  Returns NULL (copy skipped)
 * if either check fails.
 */
void *memmove(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif
1018c2ecf20Sopenharmony_ci
#undef memcpy
/*
 * KASAN interceptor for memcpy(): check @src as a read and @dest as a
 * write before delegating to __memcpy().  Returns NULL (copy skipped)
 * if either check fails.
 */
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
1118c2ecf20Sopenharmony_ci
/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 * Every shadow byte written with 'value' covers one shadow granule
 * (KASAN_SHADOW_SCALE_SIZE bytes) of the region.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	/* __memset is used to avoid recursing into the interceptor above. */
	__memset(shadow_start, value, shadow_end - shadow_start);
}
1328c2ecf20Sopenharmony_ci
/*
 * Unpoison the shadow for 'size' bytes at 'address'.  Whole granules are
 * marked with the mode's accessible value (get_tag() of the pointer).
 * When 'size' is not granule-aligned, the last shadow byte is encoded
 * specially: the pointer tag for SW_TAGS, or the number of accessible
 * bytes in the partial granule for generic KASAN.
 */
void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	kasan_poison_shadow(address, size, tag);

	if (size & KASAN_SHADOW_MASK) {
		/* Shadow byte covering the trailing partial granule. */
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else
			*shadow = size & KASAN_SHADOW_MASK;
	}
}
1558c2ecf20Sopenharmony_ci
/* Unpoison @task's stack from its base up to the given stack pointer. */
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	const void *stack_base = task_stack_page(task);

	kasan_unpoison_shadow(stack_base, sp - stack_base);
}
1638c2ecf20Sopenharmony_ci
/* Unpoison the entire stack for a task (base to base + THREAD_SIZE). */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}
1698c2ecf20Sopenharmony_ci
/*
 * Unpoison the stack for the current task beyond a watermark sp value.
 * Called from assembly (hence asmlinkage), e.g. on resume paths.
 */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}
1828c2ecf20Sopenharmony_ci
1838c2ecf20Sopenharmony_civoid kasan_alloc_pages(struct page *page, unsigned int order)
1848c2ecf20Sopenharmony_ci{
1858c2ecf20Sopenharmony_ci	u8 tag;
1868c2ecf20Sopenharmony_ci	unsigned long i;
1878c2ecf20Sopenharmony_ci
1888c2ecf20Sopenharmony_ci	if (unlikely(PageHighMem(page)))
1898c2ecf20Sopenharmony_ci		return;
1908c2ecf20Sopenharmony_ci
1918c2ecf20Sopenharmony_ci	tag = random_tag();
1928c2ecf20Sopenharmony_ci	for (i = 0; i < (1 << order); i++)
1938c2ecf20Sopenharmony_ci		page_kasan_tag_set(page + i, tag);
1948c2ecf20Sopenharmony_ci	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
1958c2ecf20Sopenharmony_ci}
1968c2ecf20Sopenharmony_ci
1978c2ecf20Sopenharmony_civoid kasan_free_pages(struct page *page, unsigned int order)
1988c2ecf20Sopenharmony_ci{
1998c2ecf20Sopenharmony_ci	if (likely(!PageHighMem(page)))
2008c2ecf20Sopenharmony_ci		kasan_poison_shadow(page_address(page),
2018c2ecf20Sopenharmony_ci				PAGE_SIZE << order,
2028c2ecf20Sopenharmony_ci				KASAN_FREE_PAGE);
2038c2ecf20Sopenharmony_ci}
2048c2ecf20Sopenharmony_ci
2058c2ecf20Sopenharmony_ci/*
2068c2ecf20Sopenharmony_ci * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
2078c2ecf20Sopenharmony_ci * For larger allocations larger redzones are used.
2088c2ecf20Sopenharmony_ci */
2098c2ecf20Sopenharmony_cistatic inline unsigned int optimal_redzone(unsigned int object_size)
2108c2ecf20Sopenharmony_ci{
2118c2ecf20Sopenharmony_ci	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
2128c2ecf20Sopenharmony_ci		return 0;
2138c2ecf20Sopenharmony_ci
2148c2ecf20Sopenharmony_ci	return
2158c2ecf20Sopenharmony_ci		object_size <= 64        - 16   ? 16 :
2168c2ecf20Sopenharmony_ci		object_size <= 128       - 32   ? 32 :
2178c2ecf20Sopenharmony_ci		object_size <= 512       - 64   ? 64 :
2188c2ecf20Sopenharmony_ci		object_size <= 4096      - 128  ? 128 :
2198c2ecf20Sopenharmony_ci		object_size <= (1 << 14) - 256  ? 256 :
2208c2ecf20Sopenharmony_ci		object_size <= (1 << 15) - 512  ? 512 :
2218c2ecf20Sopenharmony_ci		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
2228c2ecf20Sopenharmony_ci}
2238c2ecf20Sopenharmony_ci
/*
 * Grow a cache's object layout to make room for KASAN metadata
 * (alloc meta, optional free meta, and a redzone), recording the
 * metadata offsets in cache->kasan_info.  If the metadata cannot fit
 * within KMALLOC_MAX_SIZE, KASAN is disabled for this cache and the
 * original size is restored.
 */
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * Add free meta.  Only needed (for generic KASAN) when the free
	 * meta can't live inside the object itself: RCU-typesafe caches
	 * and caches with constructors must keep the object intact, and
	 * too-small objects simply don't have the room.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	/* Metadata already added may partially serve as the redzone. */
	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size -	(*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	/* Ensure a full redzone, but never exceed the kmalloc limit. */
	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
2648c2ecf20Sopenharmony_ci
2658c2ecf20Sopenharmony_cisize_t kasan_metadata_size(struct kmem_cache *cache)
2668c2ecf20Sopenharmony_ci{
2678c2ecf20Sopenharmony_ci	return (cache->kasan_info.alloc_meta_offset ?
2688c2ecf20Sopenharmony_ci		sizeof(struct kasan_alloc_meta) : 0) +
2698c2ecf20Sopenharmony_ci		(cache->kasan_info.free_meta_offset ?
2708c2ecf20Sopenharmony_ci		sizeof(struct kasan_free_meta) : 0);
2718c2ecf20Sopenharmony_ci}
2728c2ecf20Sopenharmony_ci
/*
 * Return the allocation-tracking metadata located at the cache's
 * alloc_meta_offset past the object's start.
 */
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}
2788c2ecf20Sopenharmony_ci
/*
 * Return the free-tracking metadata located at the cache's
 * free_meta_offset past the object's start.  The size cap is asserted
 * at build time because the free meta may be stored inside freed
 * objects, which can be as small as 32 bytes.
 */
struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}
2858c2ecf20Sopenharmony_ci
/*
 * Poison a freshly allocated slab page as redzone and reset the
 * page tags, so only explicitly unpoisoned objects are accessible.
 */
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison_shadow(page_address(page), page_size(page),
			KASAN_KMALLOC_REDZONE);
}
2958c2ecf20Sopenharmony_ci
/* Make a slab object's payload accessible (e.g. around ctor runs). */
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}
3008c2ecf20Sopenharmony_ci
/*
 * Re-poison a slab object's payload as redzone; the size is rounded up
 * to a whole shadow granule so the entire object is covered.
 */
void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}
3078c2ecf20Sopenharmony_ci
/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 *
 * @init is true when called from slab-object initialization (as opposed
 * to allocation); @keep_tag requests reuse of the pointer's current tag.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}
3538c2ecf20Sopenharmony_ci
/*
 * Per-object initialization hook: zero the alloc metadata and, for
 * SW_TAGS, embed the preassigned tag into the returned pointer.
 * Returns the (possibly retagged) object pointer.
 */
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	/* Caches without SLAB_KASAN carry no metadata to initialize. */
	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}
3718c2ecf20Sopenharmony_ci
3728c2ecf20Sopenharmony_cistatic inline bool shadow_invalid(u8 tag, s8 shadow_byte)
3738c2ecf20Sopenharmony_ci{
3748c2ecf20Sopenharmony_ci	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
3758c2ecf20Sopenharmony_ci		return shadow_byte < 0 ||
3768c2ecf20Sopenharmony_ci			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
3778c2ecf20Sopenharmony_ci
3788c2ecf20Sopenharmony_ci	/* else CONFIG_KASAN_SW_TAGS: */
3798c2ecf20Sopenharmony_ci	if ((u8)shadow_byte == KASAN_TAG_INVALID)
3808c2ecf20Sopenharmony_ci		return true;
3818c2ecf20Sopenharmony_ci	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
3828c2ecf20Sopenharmony_ci		return true;
3838c2ecf20Sopenharmony_ci
3848c2ecf20Sopenharmony_ci	return false;
3858c2ecf20Sopenharmony_ci}
3868c2ecf20Sopenharmony_ci
/*
 * Common slab-free hook.  Validates the free (pointer must be the start
 * of an object and the shadow must show a valid state for this tag),
 * poisons the object, and optionally places it in the quarantine.
 * Returns true when the allocator must NOT free the object now (either
 * an invalid free was reported, or the object went into quarantine).
 */
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	/* Keep the tagged pointer for reporting; work on the untagged one. */
	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	/* Freeing a pointer that isn't an object's start address is a bug. */
	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	/* Detect double-free / invalid-free via the first shadow byte. */
	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	/* No quarantine bookkeeping if disabled or cache has no metadata. */
	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(get_free_info(cache, object), cache);

	/* Only generic KASAN defers the actual free via quarantine. */
	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}
4288c2ecf20Sopenharmony_ci
/* Public slab-free hook: full checking with quarantine enabled. */
bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}
4338c2ecf20Sopenharmony_ci
/*
 * Common allocation hook: unpoison the requested @size bytes, poison the
 * remainder of the object as redzone, record the allocation stack, and
 * return the (possibly retagged) pointer.  @keep_tag preserves the
 * pointer's existing tag (kmalloc/krealloc path).
 */
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	/* Allocation is a convenient point to drain the quarantine. */
	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	/* Redzone spans from the end of the request to the object's end. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}
4658c2ecf20Sopenharmony_ci
/*
 * Slab allocation hook: the whole object is the usable size, and a
 * fresh tag is assigned (keep_tag == false).
 */
void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}
4718c2ecf20Sopenharmony_ci
/*
 * kmalloc hook: unpoison only the requested @size and keep the tag that
 * kasan_slab_alloc() already assigned (keep_tag == true).
 */
void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);
4788c2ecf20Sopenharmony_ci
/*
 * Hook for large (page-allocator backed) kmalloc: unpoison the requested
 * @size and poison the rest of the page block as a page redzone.
 */
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	/* Allocation is a convenient point to drain the quarantine. */
	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}
5038c2ecf20Sopenharmony_ci
5048c2ecf20Sopenharmony_civoid * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
5058c2ecf20Sopenharmony_ci{
5068c2ecf20Sopenharmony_ci	struct page *page;
5078c2ecf20Sopenharmony_ci
5088c2ecf20Sopenharmony_ci	if (unlikely(object == ZERO_SIZE_PTR))
5098c2ecf20Sopenharmony_ci		return (void *)object;
5108c2ecf20Sopenharmony_ci
5118c2ecf20Sopenharmony_ci	page = virt_to_head_page(object);
5128c2ecf20Sopenharmony_ci
5138c2ecf20Sopenharmony_ci	if (unlikely(!PageSlab(page)))
5148c2ecf20Sopenharmony_ci		return kasan_kmalloc_large(object, size, flags);
5158c2ecf20Sopenharmony_ci	else
5168c2ecf20Sopenharmony_ci		return __kasan_kmalloc(page->slab_cache, object, size,
5178c2ecf20Sopenharmony_ci						flags, true);
5188c2ecf20Sopenharmony_ci}
5198c2ecf20Sopenharmony_ci
/*
 * Poison memory being kfree()'d.  Page-allocator backed allocations are
 * validated (pointer must be the page start) and poisoned as free pages;
 * slab objects go through the common slab-free path without quarantine.
 */
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}
5368c2ecf20Sopenharmony_ci
/*
 * Validate a large (page-backed) kfree: the pointer must be the start of
 * its compound page, otherwise report an invalid free.
 */
void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}
5438c2ecf20Sopenharmony_ci
5448c2ecf20Sopenharmony_ci#ifndef CONFIG_KASAN_VMALLOC
/*
 * Allocate and initialize shadow memory for a module mapping at @addr of
 * @size bytes (only needed when CONFIG_KASAN_VMALLOC doesn't provide
 * shadow for vmalloc space).  Returns 0 on success, -EINVAL for an
 * unaligned shadow start, -ENOMEM on allocation failure.
 */
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	/* One shadow byte per KASAN_SHADOW_SCALE_SIZE bytes, page-rounded. */
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		/* Mark the fresh shadow poisoned/uninitialized. */
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		/* Flag the module area so kasan_free_shadow() frees it. */
		find_vm_area(addr)->flags |= VM_KASAN;
		/* Shadow isn't referenced by pointers kmemleak can see. */
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
5748c2ecf20Sopenharmony_ci
/*
 * Free the shadow backing a vm area, if kasan_module_alloc() attached
 * one (marked via VM_KASAN).
 */
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}
5808c2ecf20Sopenharmony_ci#endif
5818c2ecf20Sopenharmony_ci
5828c2ecf20Sopenharmony_ci#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Walk the kernel page tables to determine whether shadow memory is
 * already mapped at @addr.  Huge (pud/pmd-level) mappings count as
 * mapped; see the pud_bad()/pmd_bad() trick below.
 */
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(), the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE.  So let's abuse
	 * pud_bad(), if pud is bad then it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	/* Same trick as above: a "bad" pmd is a huge mapping. */
	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}
6168c2ecf20Sopenharmony_ci
6178c2ecf20Sopenharmony_cistatic int __meminit kasan_mem_notifier(struct notifier_block *nb,
6188c2ecf20Sopenharmony_ci			unsigned long action, void *data)
6198c2ecf20Sopenharmony_ci{
6208c2ecf20Sopenharmony_ci	struct memory_notify *mem_data = data;
6218c2ecf20Sopenharmony_ci	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
6228c2ecf20Sopenharmony_ci	unsigned long shadow_end, shadow_size;
6238c2ecf20Sopenharmony_ci
6248c2ecf20Sopenharmony_ci	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
6258c2ecf20Sopenharmony_ci	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
6268c2ecf20Sopenharmony_ci	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
6278c2ecf20Sopenharmony_ci	shadow_size = nr_shadow_pages << PAGE_SHIFT;
6288c2ecf20Sopenharmony_ci	shadow_end = shadow_start + shadow_size;
6298c2ecf20Sopenharmony_ci
6308c2ecf20Sopenharmony_ci	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
6318c2ecf20Sopenharmony_ci		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
6328c2ecf20Sopenharmony_ci		return NOTIFY_BAD;
6338c2ecf20Sopenharmony_ci
6348c2ecf20Sopenharmony_ci	switch (action) {
6358c2ecf20Sopenharmony_ci	case MEM_GOING_ONLINE: {
6368c2ecf20Sopenharmony_ci		void *ret;
6378c2ecf20Sopenharmony_ci
6388c2ecf20Sopenharmony_ci		/*
6398c2ecf20Sopenharmony_ci		 * If shadow is mapped already than it must have been mapped
6408c2ecf20Sopenharmony_ci		 * during the boot. This could happen if we onlining previously
6418c2ecf20Sopenharmony_ci		 * offlined memory.
6428c2ecf20Sopenharmony_ci		 */
6438c2ecf20Sopenharmony_ci		if (shadow_mapped(shadow_start))
6448c2ecf20Sopenharmony_ci			return NOTIFY_OK;
6458c2ecf20Sopenharmony_ci
6468c2ecf20Sopenharmony_ci		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
6478c2ecf20Sopenharmony_ci					shadow_end, GFP_KERNEL,
6488c2ecf20Sopenharmony_ci					PAGE_KERNEL, VM_NO_GUARD,
6498c2ecf20Sopenharmony_ci					pfn_to_nid(mem_data->start_pfn),
6508c2ecf20Sopenharmony_ci					__builtin_return_address(0));
6518c2ecf20Sopenharmony_ci		if (!ret)
6528c2ecf20Sopenharmony_ci			return NOTIFY_BAD;
6538c2ecf20Sopenharmony_ci
6548c2ecf20Sopenharmony_ci		kmemleak_ignore(ret);
6558c2ecf20Sopenharmony_ci		return NOTIFY_OK;
6568c2ecf20Sopenharmony_ci	}
6578c2ecf20Sopenharmony_ci	case MEM_CANCEL_ONLINE:
6588c2ecf20Sopenharmony_ci	case MEM_OFFLINE: {
6598c2ecf20Sopenharmony_ci		struct vm_struct *vm;
6608c2ecf20Sopenharmony_ci
6618c2ecf20Sopenharmony_ci		/*
6628c2ecf20Sopenharmony_ci		 * shadow_start was either mapped during boot by kasan_init()
6638c2ecf20Sopenharmony_ci		 * or during memory online by __vmalloc_node_range().
6648c2ecf20Sopenharmony_ci		 * In the latter case we can use vfree() to free shadow.
6658c2ecf20Sopenharmony_ci		 * Non-NULL result of the find_vm_area() will tell us if
6668c2ecf20Sopenharmony_ci		 * that was the second case.
6678c2ecf20Sopenharmony_ci		 *
6688c2ecf20Sopenharmony_ci		 * Currently it's not possible to free shadow mapped
6698c2ecf20Sopenharmony_ci		 * during boot by kasan_init(). It's because the code
6708c2ecf20Sopenharmony_ci		 * to do that hasn't been written yet. So we'll just
6718c2ecf20Sopenharmony_ci		 * leak the memory.
6728c2ecf20Sopenharmony_ci		 */
6738c2ecf20Sopenharmony_ci		vm = find_vm_area((void *)shadow_start);
6748c2ecf20Sopenharmony_ci		if (vm)
6758c2ecf20Sopenharmony_ci			vfree((void *)shadow_start);
6768c2ecf20Sopenharmony_ci	}
6778c2ecf20Sopenharmony_ci	}
6788c2ecf20Sopenharmony_ci
6798c2ecf20Sopenharmony_ci	return NOTIFY_OK;
6808c2ecf20Sopenharmony_ci}
6818c2ecf20Sopenharmony_ci
6828c2ecf20Sopenharmony_cistatic int __init kasan_memhotplug_init(void)
6838c2ecf20Sopenharmony_ci{
6848c2ecf20Sopenharmony_ci	hotplug_memory_notifier(kasan_mem_notifier, 0);
6858c2ecf20Sopenharmony_ci
6868c2ecf20Sopenharmony_ci	return 0;
6878c2ecf20Sopenharmony_ci}
6888c2ecf20Sopenharmony_ci
6898c2ecf20Sopenharmony_cicore_initcall(kasan_memhotplug_init);
6908c2ecf20Sopenharmony_ci#endif
6918c2ecf20Sopenharmony_ci
6928c2ecf20Sopenharmony_ci#ifdef CONFIG_KASAN_VMALLOC
6938c2ecf20Sopenharmony_cistatic int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
6948c2ecf20Sopenharmony_ci				      void *unused)
6958c2ecf20Sopenharmony_ci{
6968c2ecf20Sopenharmony_ci	unsigned long page;
6978c2ecf20Sopenharmony_ci	pte_t pte;
6988c2ecf20Sopenharmony_ci
6998c2ecf20Sopenharmony_ci	if (likely(!pte_none(*ptep)))
7008c2ecf20Sopenharmony_ci		return 0;
7018c2ecf20Sopenharmony_ci
7028c2ecf20Sopenharmony_ci	page = __get_free_page(GFP_KERNEL);
7038c2ecf20Sopenharmony_ci	if (!page)
7048c2ecf20Sopenharmony_ci		return -ENOMEM;
7058c2ecf20Sopenharmony_ci
7068c2ecf20Sopenharmony_ci	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
7078c2ecf20Sopenharmony_ci	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
7088c2ecf20Sopenharmony_ci
7098c2ecf20Sopenharmony_ci	spin_lock(&init_mm.page_table_lock);
7108c2ecf20Sopenharmony_ci	if (likely(pte_none(*ptep))) {
7118c2ecf20Sopenharmony_ci		set_pte_at(&init_mm, addr, ptep, pte);
7128c2ecf20Sopenharmony_ci		page = 0;
7138c2ecf20Sopenharmony_ci	}
7148c2ecf20Sopenharmony_ci	spin_unlock(&init_mm.page_table_lock);
7158c2ecf20Sopenharmony_ci	if (page)
7168c2ecf20Sopenharmony_ci		free_page(page);
7178c2ecf20Sopenharmony_ci	return 0;
7188c2ecf20Sopenharmony_ci}
7198c2ecf20Sopenharmony_ci
7208c2ecf20Sopenharmony_ciint kasan_populate_vmalloc(unsigned long addr, unsigned long size)
7218c2ecf20Sopenharmony_ci{
7228c2ecf20Sopenharmony_ci	unsigned long shadow_start, shadow_end;
7238c2ecf20Sopenharmony_ci	int ret;
7248c2ecf20Sopenharmony_ci
7258c2ecf20Sopenharmony_ci	if (!is_vmalloc_or_module_addr((void *)addr))
7268c2ecf20Sopenharmony_ci		return 0;
7278c2ecf20Sopenharmony_ci
7288c2ecf20Sopenharmony_ci	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
7298c2ecf20Sopenharmony_ci	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
7308c2ecf20Sopenharmony_ci	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
7318c2ecf20Sopenharmony_ci	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
7328c2ecf20Sopenharmony_ci
7338c2ecf20Sopenharmony_ci	ret = apply_to_page_range(&init_mm, shadow_start,
7348c2ecf20Sopenharmony_ci				  shadow_end - shadow_start,
7358c2ecf20Sopenharmony_ci				  kasan_populate_vmalloc_pte, NULL);
7368c2ecf20Sopenharmony_ci	if (ret)
7378c2ecf20Sopenharmony_ci		return ret;
7388c2ecf20Sopenharmony_ci
7398c2ecf20Sopenharmony_ci	flush_cache_vmap(shadow_start, shadow_end);
7408c2ecf20Sopenharmony_ci
7418c2ecf20Sopenharmony_ci	/*
7428c2ecf20Sopenharmony_ci	 * We need to be careful about inter-cpu effects here. Consider:
7438c2ecf20Sopenharmony_ci	 *
7448c2ecf20Sopenharmony_ci	 *   CPU#0				  CPU#1
7458c2ecf20Sopenharmony_ci	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
7468c2ecf20Sopenharmony_ci	 *					p[99] = 1;
7478c2ecf20Sopenharmony_ci	 *
7488c2ecf20Sopenharmony_ci	 * With compiler instrumentation, that ends up looking like this:
7498c2ecf20Sopenharmony_ci	 *
7508c2ecf20Sopenharmony_ci	 *   CPU#0				  CPU#1
7518c2ecf20Sopenharmony_ci	 * // vmalloc() allocates memory
7528c2ecf20Sopenharmony_ci	 * // let a = area->addr
7538c2ecf20Sopenharmony_ci	 * // we reach kasan_populate_vmalloc
7548c2ecf20Sopenharmony_ci	 * // and call kasan_unpoison_shadow:
7558c2ecf20Sopenharmony_ci	 * STORE shadow(a), unpoison_val
7568c2ecf20Sopenharmony_ci	 * ...
7578c2ecf20Sopenharmony_ci	 * STORE shadow(a+99), unpoison_val	x = LOAD p
7588c2ecf20Sopenharmony_ci	 * // rest of vmalloc process		<data dependency>
7598c2ecf20Sopenharmony_ci	 * STORE p, a				LOAD shadow(x+99)
7608c2ecf20Sopenharmony_ci	 *
7618c2ecf20Sopenharmony_ci	 * If there is no barrier between the end of unpoisioning the shadow
7628c2ecf20Sopenharmony_ci	 * and the store of the result to p, the stores could be committed
7638c2ecf20Sopenharmony_ci	 * in a different order by CPU#0, and CPU#1 could erroneously observe
7648c2ecf20Sopenharmony_ci	 * poison in the shadow.
7658c2ecf20Sopenharmony_ci	 *
7668c2ecf20Sopenharmony_ci	 * We need some sort of barrier between the stores.
7678c2ecf20Sopenharmony_ci	 *
7688c2ecf20Sopenharmony_ci	 * In the vmalloc() case, this is provided by a smp_wmb() in
7698c2ecf20Sopenharmony_ci	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
7708c2ecf20Sopenharmony_ci	 * get_vm_area() and friends, the caller gets shadow allocated but
7718c2ecf20Sopenharmony_ci	 * doesn't have any pages mapped into the virtual address space that
7728c2ecf20Sopenharmony_ci	 * has been reserved. Mapping those pages in will involve taking and
7738c2ecf20Sopenharmony_ci	 * releasing a page-table lock, which will provide the barrier.
7748c2ecf20Sopenharmony_ci	 */
7758c2ecf20Sopenharmony_ci
7768c2ecf20Sopenharmony_ci	return 0;
7778c2ecf20Sopenharmony_ci}
7788c2ecf20Sopenharmony_ci
7798c2ecf20Sopenharmony_ci/*
7808c2ecf20Sopenharmony_ci * Poison the shadow for a vmalloc region. Called as part of the
7818c2ecf20Sopenharmony_ci * freeing process at the time the region is freed.
7828c2ecf20Sopenharmony_ci */
7838c2ecf20Sopenharmony_civoid kasan_poison_vmalloc(const void *start, unsigned long size)
7848c2ecf20Sopenharmony_ci{
7858c2ecf20Sopenharmony_ci	if (!is_vmalloc_or_module_addr(start))
7868c2ecf20Sopenharmony_ci		return;
7878c2ecf20Sopenharmony_ci
7888c2ecf20Sopenharmony_ci	size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
7898c2ecf20Sopenharmony_ci	kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
7908c2ecf20Sopenharmony_ci}
7918c2ecf20Sopenharmony_ci
7928c2ecf20Sopenharmony_civoid kasan_unpoison_vmalloc(const void *start, unsigned long size)
7938c2ecf20Sopenharmony_ci{
7948c2ecf20Sopenharmony_ci	if (!is_vmalloc_or_module_addr(start))
7958c2ecf20Sopenharmony_ci		return;
7968c2ecf20Sopenharmony_ci
7978c2ecf20Sopenharmony_ci	kasan_unpoison_shadow(start, size);
7988c2ecf20Sopenharmony_ci}
7998c2ecf20Sopenharmony_ci
8008c2ecf20Sopenharmony_cistatic int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
8018c2ecf20Sopenharmony_ci					void *unused)
8028c2ecf20Sopenharmony_ci{
8038c2ecf20Sopenharmony_ci	unsigned long page;
8048c2ecf20Sopenharmony_ci
8058c2ecf20Sopenharmony_ci	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);
8068c2ecf20Sopenharmony_ci
8078c2ecf20Sopenharmony_ci	spin_lock(&init_mm.page_table_lock);
8088c2ecf20Sopenharmony_ci
8098c2ecf20Sopenharmony_ci	if (likely(!pte_none(*ptep))) {
8108c2ecf20Sopenharmony_ci		pte_clear(&init_mm, addr, ptep);
8118c2ecf20Sopenharmony_ci		free_page(page);
8128c2ecf20Sopenharmony_ci	}
8138c2ecf20Sopenharmony_ci	spin_unlock(&init_mm.page_table_lock);
8148c2ecf20Sopenharmony_ci
8158c2ecf20Sopenharmony_ci	return 0;
8168c2ecf20Sopenharmony_ci}
8178c2ecf20Sopenharmony_ci
8188c2ecf20Sopenharmony_ci/*
8198c2ecf20Sopenharmony_ci * Release the backing for the vmalloc region [start, end), which
8208c2ecf20Sopenharmony_ci * lies within the free region [free_region_start, free_region_end).
8218c2ecf20Sopenharmony_ci *
8228c2ecf20Sopenharmony_ci * This can be run lazily, long after the region was freed. It runs
8238c2ecf20Sopenharmony_ci * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
8248c2ecf20Sopenharmony_ci * infrastructure.
8258c2ecf20Sopenharmony_ci *
8268c2ecf20Sopenharmony_ci * How does this work?
8278c2ecf20Sopenharmony_ci * -------------------
8288c2ecf20Sopenharmony_ci *
8298c2ecf20Sopenharmony_ci * We have a region that is page aligned, labelled as A.
8308c2ecf20Sopenharmony_ci * That might not map onto the shadow in a way that is page-aligned:
8318c2ecf20Sopenharmony_ci *
8328c2ecf20Sopenharmony_ci *                    start                     end
8338c2ecf20Sopenharmony_ci *                    v                         v
8348c2ecf20Sopenharmony_ci * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
8358c2ecf20Sopenharmony_ci *  -------- -------- --------          -------- --------
8368c2ecf20Sopenharmony_ci *      |        |       |                 |        |
8378c2ecf20Sopenharmony_ci *      |        |       |         /-------/        |
8388c2ecf20Sopenharmony_ci *      \-------\|/------/         |/---------------/
8398c2ecf20Sopenharmony_ci *              |||                ||
8408c2ecf20Sopenharmony_ci *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
8418c2ecf20Sopenharmony_ci *                 (1)      (2)      (3)
8428c2ecf20Sopenharmony_ci *
8438c2ecf20Sopenharmony_ci * First we align the start upwards and the end downwards, so that the
8448c2ecf20Sopenharmony_ci * shadow of the region aligns with shadow page boundaries. In the
8458c2ecf20Sopenharmony_ci * example, this gives us the shadow page (2). This is the shadow entirely
8468c2ecf20Sopenharmony_ci * covered by this allocation.
8478c2ecf20Sopenharmony_ci *
8488c2ecf20Sopenharmony_ci * Then we have the tricky bits. We want to know if we can free the
8498c2ecf20Sopenharmony_ci * partially covered shadow pages - (1) and (3) in the example. For this,
8508c2ecf20Sopenharmony_ci * we are given the start and end of the free region that contains this
8518c2ecf20Sopenharmony_ci * allocation. Extending our previous example, we could have:
8528c2ecf20Sopenharmony_ci *
8538c2ecf20Sopenharmony_ci *  free_region_start                                    free_region_end
8548c2ecf20Sopenharmony_ci *  |                 start                     end      |
8558c2ecf20Sopenharmony_ci *  v                 v                         v        v
8568c2ecf20Sopenharmony_ci * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
8578c2ecf20Sopenharmony_ci *  -------- -------- --------          -------- --------
8588c2ecf20Sopenharmony_ci *      |        |       |                 |        |
8598c2ecf20Sopenharmony_ci *      |        |       |         /-------/        |
8608c2ecf20Sopenharmony_ci *      \-------\|/------/         |/---------------/
8618c2ecf20Sopenharmony_ci *              |||                ||
8628c2ecf20Sopenharmony_ci *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
8638c2ecf20Sopenharmony_ci *                 (1)      (2)      (3)
8648c2ecf20Sopenharmony_ci *
8658c2ecf20Sopenharmony_ci * Once again, we align the start of the free region up, and the end of
8668c2ecf20Sopenharmony_ci * the free region down so that the shadow is page aligned. So we can free
8678c2ecf20Sopenharmony_ci * page (1) - we know no allocation currently uses anything in that page,
8688c2ecf20Sopenharmony_ci * because all of it is in the vmalloc free region. But we cannot free
8698c2ecf20Sopenharmony_ci * page (3), because we can't be sure that the rest of it is unused.
8708c2ecf20Sopenharmony_ci *
8718c2ecf20Sopenharmony_ci * We only consider pages that contain part of the original region for
8728c2ecf20Sopenharmony_ci * freeing: we don't try to free other pages from the free region or we'd
8738c2ecf20Sopenharmony_ci * end up trying to free huge chunks of virtual address space.
8748c2ecf20Sopenharmony_ci *
8758c2ecf20Sopenharmony_ci * Concurrency
8768c2ecf20Sopenharmony_ci * -----------
8778c2ecf20Sopenharmony_ci *
8788c2ecf20Sopenharmony_ci * How do we know that we're not freeing a page that is simultaneously
8798c2ecf20Sopenharmony_ci * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
8808c2ecf20Sopenharmony_ci *
8818c2ecf20Sopenharmony_ci * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
8828c2ecf20Sopenharmony_ci * at the same time. While we run under free_vmap_area_lock, the population
8838c2ecf20Sopenharmony_ci * code does not.
8848c2ecf20Sopenharmony_ci *
8858c2ecf20Sopenharmony_ci * free_vmap_area_lock instead operates to ensure that the larger range
8868c2ecf20Sopenharmony_ci * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
8878c2ecf20Sopenharmony_ci * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
8888c2ecf20Sopenharmony_ci * no space identified as free will become used while we are running. This
8898c2ecf20Sopenharmony_ci * means that so long as we are careful with alignment and only free shadow
8908c2ecf20Sopenharmony_ci * pages entirely covered by the free region, we will not run in to any
8918c2ecf20Sopenharmony_ci * trouble - any simultaneous allocations will be for disjoint regions.
8928c2ecf20Sopenharmony_ci */
8938c2ecf20Sopenharmony_civoid kasan_release_vmalloc(unsigned long start, unsigned long end,
8948c2ecf20Sopenharmony_ci			   unsigned long free_region_start,
8958c2ecf20Sopenharmony_ci			   unsigned long free_region_end)
8968c2ecf20Sopenharmony_ci{
8978c2ecf20Sopenharmony_ci	void *shadow_start, *shadow_end;
8988c2ecf20Sopenharmony_ci	unsigned long region_start, region_end;
8998c2ecf20Sopenharmony_ci	unsigned long size;
9008c2ecf20Sopenharmony_ci
9018c2ecf20Sopenharmony_ci	region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
9028c2ecf20Sopenharmony_ci	region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
9038c2ecf20Sopenharmony_ci
9048c2ecf20Sopenharmony_ci	free_region_start = ALIGN(free_region_start,
9058c2ecf20Sopenharmony_ci				  PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
9068c2ecf20Sopenharmony_ci
9078c2ecf20Sopenharmony_ci	if (start != region_start &&
9088c2ecf20Sopenharmony_ci	    free_region_start < region_start)
9098c2ecf20Sopenharmony_ci		region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
9108c2ecf20Sopenharmony_ci
9118c2ecf20Sopenharmony_ci	free_region_end = ALIGN_DOWN(free_region_end,
9128c2ecf20Sopenharmony_ci				     PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
9138c2ecf20Sopenharmony_ci
9148c2ecf20Sopenharmony_ci	if (end != region_end &&
9158c2ecf20Sopenharmony_ci	    free_region_end > region_end)
9168c2ecf20Sopenharmony_ci		region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
9178c2ecf20Sopenharmony_ci
9188c2ecf20Sopenharmony_ci	shadow_start = kasan_mem_to_shadow((void *)region_start);
9198c2ecf20Sopenharmony_ci	shadow_end = kasan_mem_to_shadow((void *)region_end);
9208c2ecf20Sopenharmony_ci
9218c2ecf20Sopenharmony_ci	if (shadow_end > shadow_start) {
9228c2ecf20Sopenharmony_ci		size = shadow_end - shadow_start;
9238c2ecf20Sopenharmony_ci		apply_to_existing_page_range(&init_mm,
9248c2ecf20Sopenharmony_ci					     (unsigned long)shadow_start,
9258c2ecf20Sopenharmony_ci					     size, kasan_depopulate_vmalloc_pte,
9268c2ecf20Sopenharmony_ci					     NULL);
9278c2ecf20Sopenharmony_ci		flush_tlb_kernel_range((unsigned long)shadow_start,
9288c2ecf20Sopenharmony_ci				       (unsigned long)shadow_end);
9298c2ecf20Sopenharmony_ci	}
9308c2ecf20Sopenharmony_ci}
9318c2ecf20Sopenharmony_ci#endif
932