Lines Matching defs:cache
224 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
232 cache->kasan_info.alloc_meta_offset = *size;
237 (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
238 cache->object_size < sizeof(struct kasan_free_meta))) {
239 cache->kasan_info.free_meta_offset = *size;
243 redzone_size = optimal_redzone(cache->object_size);
244 redzone_adjust = redzone_size - (*size - cache->object_size);
249 max(*size, cache->object_size + redzone_size));
254 if (*size <= cache->kasan_info.alloc_meta_offset ||
255 *size <= cache->kasan_info.free_meta_offset) {
256 cache->kasan_info.alloc_meta_offset = 0;
257 cache->kasan_info.free_meta_offset = 0;
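
These matches appear to come from the Linux kernel's KASAN slab hooks. kasan_cache_create() carves per-object KASAN metadata and a redzone out of the slab object size: it records alloc_meta_offset (and, for RCU-typesafe caches, caches with a constructor, or objects smaller than the free metadata, also free_meta_offset), pads the size until at least optimal_redzone() bytes of redzone follow the object, and if the final size cannot hold the metadata it resets both offsets to 0 so metadata is simply not used for that cache. Below is a minimal standalone model of that layout arithmetic; ALLOC_META_SIZE, FREE_META_SIZE, REDZONE_SIZE and SIZE_CAP are invented placeholders for sizeof(struct kasan_alloc_meta), sizeof(struct kasan_free_meta), optimal_redzone() and the size cap assumed to appear on the elided lines:

#include <stdio.h>

/* Placeholder sizes: the real values come from the kernel structs,
 * optimal_redzone() and the kmalloc size cap. */
#define ALLOC_META_SIZE	16u
#define FREE_META_SIZE	16u
#define REDZONE_SIZE	32u
#define SIZE_CAP	(1u << 22)

struct layout { unsigned int alloc_off, free_off, total; };

/* Model of the layout arithmetic in the matched lines: metadata is appended
 * after the object, the size is padded until at least REDZONE_SIZE bytes of
 * redzone follow the object, and the result is capped. */
static struct layout kasan_layout(unsigned int object_size, int need_free_meta)
{
	struct layout l = { 0, 0, object_size };
	unsigned int meta_bytes, want;

	l.alloc_off = l.total;
	l.total += ALLOC_META_SIZE;

	if (need_free_meta) {		/* RCU cache, ctor, or tiny object */
		l.free_off = l.total;
		l.total += FREE_META_SIZE;
	}

	meta_bytes = l.total - object_size;
	if (meta_bytes < REDZONE_SIZE)
		l.total += REDZONE_SIZE - meta_bytes;

	want = object_size + REDZONE_SIZE;
	if (l.total < want)
		l.total = want;
	if (l.total > SIZE_CAP)
		l.total = SIZE_CAP;

	/* If the cap cut into the metadata, disable it (offsets back to 0). */
	if (l.total <= l.alloc_off || (l.free_off && l.total <= l.free_off))
		l.alloc_off = l.free_off = 0;

	return l;
}

int main(void)
{
	struct layout l = kasan_layout(40, 1);
	printf("alloc_off=%u free_off=%u total=%u\n",
	       l.alloc_off, l.free_off, l.total);
	return 0;
}
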
265 size_t kasan_metadata_size(struct kmem_cache *cache)
267 return (cache->kasan_info.alloc_meta_offset ?
269 (cache->kasan_info.free_meta_offset ?
273 struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
276 return (void *)object + cache->kasan_info.alloc_meta_offset;
279 struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
283 return (void *)object + cache->kasan_info.free_meta_offset;
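
kasan_metadata_size() and the two lookup helpers show how that layout is consumed: the reported metadata size is the sum of the two structs whose offsets were actually reserved, and get_alloc_info()/get_free_info() are plain pointer arithmetic from the start of the object. A self-contained sketch of the same pattern, with made-up stand-ins (alloc_meta, free_meta, cache_info) for the kernel's kasan_alloc_meta, kasan_free_meta and kasan_info:

#include <stddef.h>

/* Made-up stand-ins; the real structs live in the KASAN headers. */
struct alloc_meta { unsigned long alloc_stack; };
struct free_meta  { unsigned long free_stack; };

struct cache_info {
	size_t alloc_meta_offset;	/* 0 means "not reserved" */
	size_t free_meta_offset;
};

/* Sum of per-object metadata, mirroring kasan_metadata_size(). */
size_t metadata_size(const struct cache_info *info)
{
	return (info->alloc_meta_offset ? sizeof(struct alloc_meta) : 0) +
	       (info->free_meta_offset  ? sizeof(struct free_meta)  : 0);
}

/* Metadata sits right after the object, so lookup is plain offset
 * arithmetic, as in get_alloc_info()/get_free_info(). */
struct alloc_meta *get_alloc(const struct cache_info *info, void *object)
{
	return (struct alloc_meta *)((char *)object + info->alloc_meta_offset);
}

struct free_meta *get_free(const struct cache_info *info, void *object)
{
	return (struct free_meta *)((char *)object + info->free_meta_offset);
}
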
296 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
298 kasan_unpoison_shadow(object, cache->object_size);
301 void kasan_poison_object_data(struct kmem_cache *cache, void *object)
304 round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
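
kasan_unpoison_object_data() and kasan_poison_object_data() flip the shadow state for the object itself: exactly object_size bytes are unpoisoned when the object goes live, and the object rounded up to the shadow granule (KASAN_SHADOW_SCALE_SIZE, 8 bytes in the generic mode) is poisoned while it sits on a freelist. A tiny model of that granule rounding; round_up_to() and the hard-coded 8-byte granule are illustrative assumptions:

#include <stdio.h>

#define SHADOW_SCALE_SIZE 8UL	/* assumption: generic-mode granule */

/* Same effect as the kernel's round_up() for power-of-two granules. */
static unsigned long round_up_to(unsigned long x, unsigned long gran)
{
	return (x + gran - 1) & ~(gran - 1);
}

int main(void)
{
	unsigned long object_size = 42;

	/* kasan_unpoison_object_data(): exactly object_size bytes become valid. */
	printf("unpoison %lu bytes\n", object_size);

	/* kasan_poison_object_data(): the object is poisoned rounded up to a
	 * whole shadow granule, so a trailing partial granule is covered too. */
	printf("poison %lu bytes\n", round_up_to(object_size, SHADOW_SCALE_SIZE));
	return 0;
}
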
310 * 1. A cache might have a constructor, which might save a pointer to a slab
314 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
322 static u8 assign_tag(struct kmem_cache *cache, const void *object,
335 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
338 if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
344 return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
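
The comment fragments and the obj_to_index() line belong to assign_tag(), which picks the pointer tag used by software tag-based KASAN. Caches with a constructor (which may stash a pointer to the object inside it) and SLAB_TYPESAFE_BY_RCU caches (whose objects may legally be accessed after free) need a tag that stays stable across reallocations, so for them the tag is derived from the object's position in its slab page rather than re-randomized. A condensed sketch of that decision; random_tag(), get_tag(), object_index_tag() and the 0xff "kernel" tag are stand-ins for helpers that do not appear in the matched lines:

#include <stdint.h>
#include <stdbool.h>

/* Stand-ins (declarations only) for helpers used around the matched lines. */
uint8_t random_tag(void);			/* fresh pseudo-random tag */
uint8_t get_tag(const void *object);		/* tag already encoded in the pointer */
uint8_t object_index_tag(const void *object);	/* stand-in for the obj_to_index() path */

/*
 * Condensed decision, as implied by the matched comment and code lines:
 * keep_tag reuses the tag on the second hook of a kmalloc/krealloc path,
 * plain caches get a random tag at allocation time, and ctor /
 * SLAB_TYPESAFE_BY_RCU caches get a stable, index-derived tag.
 */
uint8_t assign_tag_sketch(bool has_ctor, bool typesafe_by_rcu,
			  const void *object, bool init, bool keep_tag)
{
	if (keep_tag)
		return get_tag(object);

	if (!has_ctor && !typesafe_by_rcu)
		return init ? 0xff /* assumed "kernel" tag */ : random_tag();

	return object_index_tag(object);
}
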
354 void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
359 if (!(cache->flags & SLAB_KASAN))
362 alloc_info = get_alloc_info(cache, object);
367 assign_tag(cache, object, true, false));
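
kasan_init_slab_obj() runs once per object when a slab page is created: if the cache carries KASAN metadata (SLAB_KASAN), it clears the allocation-tracking struct and, under the software tag-based mode, returns the pointer re-tagged with assign_tag(..., init == true, keep_tag == false). A hedged reconstruction follows; the __memset() of alloc_info and the IS_ENABLED(CONFIG_KASAN_SW_TAGS)/set_tag() step are inferred from context, not shown in the matches:

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	/* Start every object with clean allocation-tracking metadata. */
	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));	/* inferred, not in the matches */

	/* Tag-based mode: encode the initial tag into the returned pointer. */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				 assign_tag(cache, object, true, false));

	return (void *)object;
}
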
387 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
399 if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
406 if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
415 rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
419 unlikely(!(cache->flags & SLAB_KASAN)))
422 kasan_set_free_info(cache, object, tag);
424 quarantine_put(get_free_info(cache, object), cache);
429 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
431 return __kasan_slab_free(cache, object, ip, true);
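
__kasan_slab_free() is the free-side hook, and kasan_slab_free() is the wrapper the slab allocator calls with quarantining enabled. The hook strips the pointer tag, reports frees that do not point at the start of an object (the nearest_obj() check), leaves SLAB_TYPESAFE_BY_RCU objects alone (they may legally be read until the RCU grace period ends), poisons the object's shadow rounded up to the shadow granule, records free-tracking information and parks the object in the quarantine instead of returning it to the allocator. A condensed sketch of that ordering; the double-free check, report details and the generic-vs-tag-mode return values are simplified, and helpers such as reset_tag(), kasan_report_invalid_free(), kasan_poison_shadow() and KASAN_KMALLOC_FREE come from the surrounding file rather than the matched lines:

static bool __kasan_slab_free_sketch(struct kmem_cache *cache, void *object,
				     unsigned long ip, bool quarantine)
{
	u8 tag = get_tag(object);
	void *tagged_object = object;

	object = reset_tag(object);

	/* Freeing a pointer that is not the start of an object is a bug. */
	if (nearest_obj(cache, virt_to_head_page(object), object) != object) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU-typesafe objects may legally be used until the grace period ends. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU)
		return false;

	/* Poison the whole object, rounded up to the shadow granule. */
	kasan_poison_shadow(object,
			    round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_KMALLOC_FREE);

	if (!quarantine || !(cache->flags & SLAB_KASAN))
		return false;

	/* Remember who freed the object and park it in the quarantine. */
	kasan_set_free_info(cache, object, tag);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}
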
434 static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
449 redzone_end = round_up((unsigned long)object + cache->object_size,
453 tag = assign_tag(cache, object, false, keep_tag);
460 if (cache->flags & SLAB_KASAN)
461 kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);
466 void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
469 return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
472 void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
475 return __kasan_kmalloc(cache, object, size, flags, true);
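
__kasan_kmalloc() is the allocation-side counterpart: it unpoisons size bytes at the freshly tagged pointer, poisons the rest of the object up to object_size as a redzone (both boundaries rounded to the shadow granule), and records the allocation stack in alloc_track when the cache has SLAB_KASAN set. kasan_slab_alloc() passes the full object_size with keep_tag == false (assign a fresh tag), while kasan_kmalloc() passes the caller's requested size with keep_tag == true so the tag chosen by the earlier hook is preserved. A condensed sketch of the redzone arithmetic; the quarantine_reduce() call and the tags-only guard around assign_tag() are omitted, and names such as kasan_poison_shadow(), set_tag() and KASAN_KMALLOC_REDZONE come from the surrounding file rather than the matched lines:

static void *__kasan_kmalloc_sketch(struct kmem_cache *cache, const void *object,
				    size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start, redzone_end;
	u8 tag;

	if (!object)
		return NULL;

	/* The redzone is everything between the requested size and the full
	 * object size, both ends rounded out to whole shadow granules. */
	redzone_start = round_up((unsigned long)object + size,
				 KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
			       KASAN_SHADOW_SCALE_SIZE);

	tag = assign_tag(cache, object, false, keep_tag);

	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
			    KASAN_KMALLOC_REDZONE);

	/* Record the allocation stack for later bug reports. */
	if (cache->flags & SLAB_KASAN)
		kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}
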