Lines matching defs: object

274 					const void *object)
276 	return (void *)object + cache->kasan_info.alloc_meta_offset;
280 				      const void *object)
283 	return (void *)object + cache->kasan_info.free_meta_offset;
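
For context: the two returns above are the tails of KASAN's per-object metadata accessors. A minimal reconstruction of the full functions, assuming this listing comes from mm/kasan/common.c of a ~v5.6-v5.9 kernel (the declarations and braces are inferred; only the matched lines are attested here):

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	/* Alloc metadata sits at a per-cache offset inside the object. */
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	/* Free metadata has its own per-cache offset. */
	return (void *)object + cache->kasan_info.free_meta_offset;
}
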
296 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
298 	kasan_unpoison_shadow(object, cache->object_size);
301 void kasan_poison_object_data(struct kmem_cache *cache, void *object)
303 	kasan_poison_shadow(object,
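
These are the helpers slab code calls when handing an object out and taking it back. A sketch of the full bodies; the round_up() granularity and the KASAN_KMALLOC_REDZONE poison value in kasan_poison_object_data() are inferred from the era's source, not shown by the matches:

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	/* Make the object's payload accessible in shadow memory. */
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	/* Poison the whole object, rounded up to the shadow granule. */
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}
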
309  * This function assigns a tag to an object considering the following:
310  * 1. A cache might have a constructor, which might save a pointer to a slab
311  *    object somewhere (e.g. in the object itself). We preassign a tag for
312  *    each object in caches with constructors during slab creation and reuse
313  *    the same tag each time a particular object is allocated.
322 static u8 assign_tag(struct kmem_cache *cache, const void *object,
326 	 * 1. When an object is kmalloc()'ed, two hooks are called:
332 		return get_tag(object);
336 	 * set, assign a tag when the object is being allocated (init == false).
343 	/* For SLAB assign tags based on the object index in the freelist. */
344 	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
350 	return init ? random_tag() : get_tag(object);
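
Assembling the assign_tag() matches, the tag policy reads roughly as below. The keep_tag early return, the KASAN_TAG_KERNEL fallback, and the CONFIG_SLAB/#else split are reconstructions consistent with, but not proven by, the matched lines:

static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * kasan_slab_alloc() runs first and already picked a tag;
	 * kasan_kmalloc() then asks to keep it (keep_tag == true).
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * No constructor and no SLAB_TYPESAFE_BY_RCU: nothing can hold a
	 * stale pointer, so assign the tag at allocation (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}
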
355 						const void *object)
360 		return (void *)object;
362 	alloc_info = get_alloc_info(cache, object);
366 		object = set_tag(object,
367 				assign_tag(cache, object, true, false));
369 	return (void *)object;
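
Lines 355-369 belong to kasan_init_slab_obj(), which zeroes the alloc metadata and preassigns a tag when a slab is created. Reconstructed sketch; the SLAB_KASAN guard, the __memset() of the metadata, and the IS_ENABLED(CONFIG_KASAN_SW_TAGS) test are inferred:

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	/* Clear the per-object alloc metadata for the new slab object. */
	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}
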
387 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
395 	tag = get_tag(object);
396 	tagged_object = object;
397 	object = reset_tag(object);
399 	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
400 	    object)) {
409 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
416 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
422 	kasan_set_free_info(cache, object, tag);
424 	quarantine_put(get_free_info(cache, object), cache);
429 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
431 	return __kasan_slab_free(cache, object, ip, true);
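
Lines 387-431 are the free path: validate the pointer, check the shadow, poison the object, then record and quarantine. A reconstruction; shadow_invalid(), the SLAB_TYPESAFE_BY_RCU early return, and the CONFIG_KASAN_GENERIC quarantine condition come from the surrounding source of that era, not from the matches themselves:

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	/* Freeing a pointer that is not the start of an object is a bug. */
	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	/* A mismatching shadow byte signals a double-free or invalid-free. */
	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	/* Record the free stack, then defer the object via the quarantine. */
	kasan_set_free_info(cache, object, tag);
	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}
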
434 static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
444 	if (unlikely(object == NULL))
447 	redzone_start = round_up((unsigned long)(object + size),
449 	redzone_end = round_up((unsigned long)object + cache->object_size,
453 		tag = assign_tag(cache, object, false, keep_tag);
456 	kasan_unpoison_shadow(set_tag(object, tag), size);
461 		kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);
463 	return set_tag(object, tag);
466 void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
469 	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
472 void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
475 	return __kasan_kmalloc(cache, object, size, flags, true);
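
Lines 434-475 are the allocation path and its two entry points: kasan_slab_alloc() assigns a fresh tag, while kasan_kmalloc() keeps the existing one and only moves the redzone to match the requested size. Sketch below; quarantine_reduce(), the 0xff default tag, and the IS_ENABLED() guards are inferred:

static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	/* Everything past the requested size becomes a right redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* set_tag() ignores the tag when CONFIG_KASAN_SW_TAGS is off. */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		kasan_set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
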
504 void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
508 	if (unlikely(object == ZERO_SIZE_PTR))
509 		return (void *)object;
511 	page = virt_to_head_page(object);
514 		return kasan_kmalloc_large(object, size, flags);
516 		return __kasan_kmalloc(page->slab_cache, object, size,
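
Lines 504-516 dispatch krealloc() instrumentation on the backing store. Reconstructed; the PageSlab() test separating the two return branches is inferred:

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	/* Large (page_alloc-backed) allocations take a separate path. */
	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
					flags, true);
}
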
541 	/* The object will be poisoned by page_alloc. */
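
The final match, line 541, is the closing comment of kasan_kfree_large(), which (reconstructed below) only validates the pointer and leaves the poisoning to the page allocator's own free path:

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	/* A large free must pass the exact start of its backing pages. */
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}
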