// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

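/*
 * Reminder of the generic KASAN shadow encoding these checks rely on:
 * every KASAN_SHADOW_SCALE_SIZE (8) bytes of kernel memory map to one
 * shadow byte.  A shadow value of 0 means the whole 8-byte granule is
 * accessible, a value N in 1..7 means only the first N bytes of the
 * granule are accessible, and a negative value marks the granule as
 * poisoned (redzone, freed object, etc.).
 *
 * Illustrative example (values made up for this comment): a 13-byte
 * object spans two granules with shadow bytes {0x00, 0x05}; a 1-byte
 * access at offset 12 (offset 4 within the second granule, 4 < 5) is
 * not flagged by memory_is_poisoned_1(), while an access at offset 13
 * (offset 5, 5 >= 5) is reported.
 */
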
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
                                                unsigned long size)
{
        u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

        /*
         * The access crosses an 8-byte (shadow granule) boundary. Such an
         * access maps into 2 shadow bytes, so we need to check them both.
         */
        if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
                return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

        return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        /* An unaligned 16-byte access maps into 3 shadow bytes. */
        if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                return *shadow_addr || memory_is_poisoned_1(addr + 15);

        return *shadow_addr;
}

/*
 * Return the address of the first nonzero byte in [start, start + size),
 * or 0 if all bytes are zero.
 */
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

/* Same as bytes_is_nonzero(), but scans [start, end) a word at a time. */
static __always_inline unsigned long memory_is_nonzero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_nonzero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_nonzero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_nonzero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_nonzero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                case 4:
                case 8:
                        return memory_is_poisoned_2_4_8(addr, size);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}

static __always_inline bool check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (unlikely(size == 0))
                return true;

        if (unlikely(addr + size < addr))
                return !kasan_report(addr, size, write, ret_ip);

#ifndef __HAVE_ARCH_SHADOW_MAP
        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                return !kasan_report(addr, size, write, ret_ip);
        }
#else
        if (unlikely(kasan_mem_to_shadow((void *)addr) == NULL)) {
                return !kasan_report(addr, size, write, ret_ip);
        }
#endif

        if (likely(!memory_is_poisoned(addr, size)))
                return true;

        return !kasan_report(addr, size, write, ret_ip);
}

bool check_memory_region(unsigned long addr, size_t size, bool write,
                                unsigned long ret_ip)
{
        return check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
        if (!__kmem_cache_empty(cache))
                quarantine_remove_cache(cache);
}

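/*
 * Global variable redzoning: for each instrumented translation unit the
 * compiler emits a constructor that calls __asan_register_globals() with
 * an array of struct kasan_global descriptors (see kasan.h).
 * register_global() below unpoisons the variable itself and poisons the
 * compiler-added redzone that follows it, so out-of-bounds accesses to
 * globals land on KASAN_GLOBAL_REDZONE shadow bytes.
 */
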
static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

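/*
 * Outline instrumentation: instead of inlining the shadow check, the
 * compiler may emit a call to one of the helpers defined below before
 * each memory access.  A rough sketch of what instrumented code looks
 * like for a 4-byte load from p (illustration only, not emitted by this
 * file):
 *
 *        __asan_load4((unsigned long)p);
 *        val = *p;
 *
 * The *_noabort aliases cover compilers that emit the "noabort" flavour
 * of these calls; a KASAN report never aborts the kernel, so both names
 * map to the same reporting path.
 */
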
#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_memory_region_inline(addr, size, false, _RET_IP_);\
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_memory_region_inline(addr, size, true, _RET_IP_); \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by the compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
        size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
        size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
                        rounded_up_size;
        size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

        const void *left_redzone = (const void *)(addr -
                        KASAN_ALLOCA_REDZONE_SIZE);
        const void *right_redzone = (const void *)(addr + rounded_up_size);

        WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

        kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
                              size - rounded_down_size);
        kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
                        KASAN_ALLOCA_LEFT);
        kasan_poison_shadow(right_redzone,
                        padding_size + KASAN_ALLOCA_REDZONE_SIZE,
                        KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by the compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
        if (unlikely(!stack_top || stack_top > stack_bottom))
                return;

        kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)                                    \
        void __asan_set_shadow_##byte(const void *addr, size_t size)    \
        {                                                               \
                __memset((void *)addr, 0x##byte, size);                 \
        }                                                               \
        EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

void kasan_record_aux_stack(void *addr)
{
        struct page *page = kasan_addr_to_page(addr);
        struct kmem_cache *cache;
        struct kasan_alloc_meta *alloc_info;
        void *object;

        if (!(page && PageSlab(page)))
                return;

        cache = page->slab_cache;
        object = nearest_obj(cache, page, addr);
        alloc_info = get_alloc_info(cache, object);
        if (!alloc_info)
                return;

        /*
         * Record the last two call_rcu() call stacks.
         */
        alloc_info->aux_stack[1] = alloc_info->aux_stack[0];
        alloc_info->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);
}

void kasan_set_free_info(struct kmem_cache *cache,
                                void *object, u8 tag)
{
        struct kasan_free_meta *free_meta;

        free_meta = get_free_info(cache, object);
        kasan_set_track(&free_meta->free_track, GFP_NOWAIT);

        /*
         * The object was freed and has its free track set.
         */
        *(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREETRACK;
}

struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
                                void *object, u8 tag)
{
        if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_KMALLOC_FREETRACK)
                return NULL;
        return &get_free_info(cache, object)->free_track;
}