Lines Matching defs:object
139 * operations no list for full slabs is used. If an object in a full slab is
292 /* Poison object */
400 static inline void *get_freepointer(struct kmem_cache *s, void *object)
405 object = kasan_reset_tag(object);
406 ptr_addr = (unsigned long)object + s->offset;
412 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
414 prefetchw(object + s->offset);
429 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
435 return get_freepointer(s, object);
437 object = kasan_reset_tag(object);
438 freepointer_addr = (unsigned long)object + s->offset;
443 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
445 unsigned long freeptr_addr = (unsigned long)object + s->offset;
448 BUG_ON(object == fp); /* naive detection of double free or corruption */
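The four helpers above (get_freepointer, prefetch_freepointer, get_freepointer_safe, set_freepointer) all operate on the pointer SLUB embeds at object + s->offset to chain free objects into a freelist. A minimal user-space model of that idea, assuming a toy_cache stand-in instead of the real struct kmem_cache and leaving out the pointer obfuscation added by CONFIG_SLAB_FREELIST_HARDENED:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct toy_cache { size_t offset; };	/* stand-in for struct kmem_cache */

/* Read the "next free" pointer stored inside a free object. */
static void *toy_get_freepointer(const struct toy_cache *s, void *object)
{
	void *next;

	memcpy(&next, (char *)object + s->offset, sizeof(next));
	return next;
}

/* Store fp inside object, linking it into the freelist. */
static void toy_set_freepointer(const struct toy_cache *s, void *object, void *fp)
{
	memcpy((char *)object + s->offset, &fp, sizeof(fp));
}

int main(void)
{
	struct toy_cache s = { .offset = 0 };
	char a[64], b[64];

	toy_set_freepointer(&s, b, NULL);	/* b is the last free object */
	toy_set_freepointer(&s, a, b);		/* freelist: a -> b -> NULL */
	printf("a's next: %p (b is at %p)\n", toy_get_freepointer(&s, a), (void *)b);
	return 0;
}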
695 * slub is about to manipulate internal object metadata. This memory lies
696 * outside the range of the allocated object, so accessing it would normally
716 struct slab *slab, void *object)
720 if (!object)
724 object = kasan_reset_tag(object);
725 object = restore_red_left(s, object);
726 if (object < base || object >= base + slab->objects * s->size ||
727 (object - base) % s->size) {
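check_valid_pointer() (lines 716-727 above) accepts an object pointer only if it falls inside the slab and lands exactly on an object boundary. A standalone restatement of that arithmetic, with illustrative parameter names rather than the kernel's types:

#include <stdbool.h>
#include <stdint.h>

/* base: first object in the slab; nr_objects, size: object count and stride */
static bool pointer_is_valid_object(uintptr_t base, unsigned int nr_objects,
				    unsigned int size, uintptr_t object)
{
	if (object < base || object >= base + (uintptr_t)nr_objects * size)
		return false;				/* outside the slab */
	return (object - base) % size == 0;		/* on an object boundary */
}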
753 * not overlapping with object.
763 static struct track *get_track(struct kmem_cache *s, void *object,
768 p = object + get_info_end(s);
792 static void set_track_update(struct kmem_cache *s, void *object,
796 struct track *p = get_track(s, object, alloc);
807 static __always_inline void set_track(struct kmem_cache *s, void *object,
812 set_track_update(s, object, alloc, addr, handle);
815 static void init_tracking(struct kmem_cache *s, void *object)
822 p = get_track(s, object, TRACK_ALLOC);
840 pr_err("object allocation/free stack trace missing\n");
844 void print_tracking(struct kmem_cache *s, void *object)
850 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
851 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
870 void *object, unsigned int orig_size)
872 void *p = kasan_reset_tag(object);
879 * KASAN could save its free meta data in object's data area at
894 static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
896 void *p = kasan_reset_tag(object);
907 void skip_orig_size_check(struct kmem_cache *s, const void *object)
909 set_orig_size(s, (void *)object, s->object_size);
985 u8 *object, char *reason)
991 print_trailer(s, slab, object);
1027 static void init_object(struct kmem_cache *s, void *object, u8 val)
1029 u8 *p = kasan_reset_tag(object);
1041 poison_size = get_orig_size(s, object);
1062 u8 *object, char *what,
1086 print_trailer(s, slab, object);
1097 * object address
1098 * Bytes of the object to be managed.
1099 * If the freepointer may overlay the object then the free
1100 * pointer is at the middle of the object.
1105 * object + s->object_size
1113 * object + s->inuse
1116 * A. Free pointer (if we cannot overwrite object on free)
1118 * C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
1125 * object + s->size
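Pieced together, lines 1097-1125 describe the per-object layout: the payload runs from the object address to object + s->object_size, debug filler and the right red zone run to object + s->inuse, the out-of-line free pointer, tracking data and the original kmalloc request size follow, and the next object begins at object + s->size. A toy calculation with made-up numbers (ignoring the left red zone and the stored request size) makes those offsets concrete:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* illustrative values only; the real ones come from calculate_sizes() */
	size_t object_size = 40;		/* payload the cache was created for */
	size_t red_right   = 8;			/* right red zone (debug builds) */
	size_t inuse       = object_size + red_right;
	size_t fp_offset   = inuse;		/* out-of-line free pointer */
	size_t tracking    = 2 * 24;		/* alloc + free track records (debug) */
	size_t size        = inuse + sizeof(void *) + tracking;

	printf("payload         [0, %zu)\n", object_size);
	printf("right red zone  [%zu, %zu)\n", object_size, inuse);
	printf("free pointer    [%zu, %zu)\n", fp_offset, fp_offset + sizeof(void *));
	printf("tracking        [%zu, %zu)\n", fp_offset + sizeof(void *), size);
	printf("next object starts at %zu (s->size)\n", size);
	return 0;
}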
1191 void *object, u8 val)
1193 u8 *p = object;
1194 u8 *endobject = object + s->object_size;
1198 if (!check_bytes_and_report(s, slab, object, "Left Redzone",
1199 object - s->red_left_pad, val, s->red_left_pad))
1202 if (!check_bytes_and_report(s, slab, object, "Right Redzone",
1207 orig_size = get_orig_size(s, object);
1210 !check_bytes_and_report(s, slab, object,
1240 * freepointer while object is allocated.
1250 * another error because the object count is now wrong.
1284 * Determine if a certain object in a slab is on the freelist. Must hold the
1291 void *object = NULL;
1299 if (object) {
1300 object_err(s, slab, object,
1302 set_freepointer(s, object, NULL);
1312 object = fp;
1313 fp = get_freepointer(s, object);
1328 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
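on_freelist() (lines 1284-1328) answers two things at once while holding the slab lock: whether the given object is currently on the freelist, and whether the list itself is sane, reporting and truncating a corrupted link (lines 1299-1302) and complaining when the walked length disagrees with the object count (line 1328). A compact, standalone model of the walk itself:

#include <stdbool.h>
#include <stddef.h>

struct toy_object { struct toy_object *next; };

/*
 * Walk a freelist of at most max nodes; report whether needle is on it and,
 * via *chain_ok, whether the chain terminated within the expected length.
 */
static bool toy_on_freelist(struct toy_object *freelist, unsigned int max,
			    const struct toy_object *needle, bool *chain_ok)
{
	bool found = false;
	unsigned int nr = 0;

	for (struct toy_object *p = freelist; p && nr <= max; p = p->next, nr++)
		if (p == needle)
			found = true;

	*chain_ok = (nr <= max);	/* a longer chain indicates corruption */
	return found;
}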
1336 static void trace(struct kmem_cache *s, struct slab *slab, void *object,
1343 object, slab->inuse,
1347 print_section(KERN_INFO, "Object ", (void *)object,
1405 static void setup_object_debug(struct kmem_cache *s, void *object)
1410 init_object(s, object, SLUB_RED_INACTIVE);
1411 init_tracking(s, object);
1426 struct slab *slab, void *object)
1431 if (!check_valid_pointer(s, slab, object)) {
1432 object_err(s, slab, object, "Freelist Pointer check fails");
1436 if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
1443 struct slab *slab, void *object, int orig_size)
1446 if (!alloc_consistency_checks(s, slab, object))
1451 trace(s, slab, object, 1);
1452 set_orig_size(s, object, orig_size);
1453 init_object(s, object, SLUB_RED_ACTIVE);
1471 struct slab *slab, void *object, unsigned long addr)
1473 if (!check_valid_pointer(s, slab, object)) {
1474 slab_err(s, slab, "Invalid object pointer 0x%p", object);
1478 if (on_freelist(s, slab, object)) {
1479 object_err(s, slab, object, "Object already free");
1483 if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
1488 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
1489 object);
1491 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1492 object);
1495 object_err(s, slab, object,
1651 * @object_size: the size of an object without meta data
1716 static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
1721 struct slab *slab, void *object, int orig_size) { return true; }
1729 void *object, u8 val) { return 1; }
1731 static inline void set_track(struct kmem_cache *s, void *object,
1787 * The initialization memsets clear the object and the metadata,
1808 void *object;
1822 object = next;
1823 next = get_freepointer(s, object);
1825 /* If object's reuse doesn't have to be delayed */
1826 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
1827 /* Move object to the new freelist */
1828 set_freepointer(s, object, *head);
1829 *head = object;
1831 *tail = object;
1835 * accordingly if object's reuse is delayed.
1839 } while (object != old_tail);
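The loop at lines 1808-1839 rebuilds the chain of objects being freed: each object whose reuse is not delayed by the free hooks (for example KASAN's quarantine) is pushed onto a new head, and the first kept object is remembered as the tail for later splicing. The filtering pattern in isolation, with a plain linked list and a hook that never delays anything:

#include <stdbool.h>
#include <stddef.h>

struct toy_object { struct toy_object *next; };

/* Returns true if reuse of the object must be delayed (never, in this toy). */
static bool toy_reuse_delayed(struct toy_object *object)
{
	(void)object;
	return false;
}

/* Keep only immediately reusable objects; return the new head, store the tail. */
static struct toy_object *toy_filter_freelist(struct toy_object *list,
					      struct toy_object **tail)
{
	struct toy_object *head = NULL;

	*tail = NULL;
	while (list) {
		struct toy_object *next = list->next;

		if (!toy_reuse_delayed(list)) {
			list->next = head;		/* move to the new freelist */
			head = list;
			if (!*tail)
				*tail = list;		/* first kept object is the tail */
		}
		list = next;
	}
	return head;
}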
1847 static void *setup_object(struct kmem_cache *s, void *object)
1849 setup_object_debug(s, object);
1850 object = kasan_init_slab_obj(s, object);
1852 kasan_unpoison_object_data(s, object);
1853 s->ctor(object);
1854 kasan_poison_object_data(s, object);
1856 return object;
2149 * slab from the n->partial list. Remove only a single object from the slab, do
2151 * it to full list if it was the last free object.
2156 void *object;
2160 object = slab->freelist;
2161 slab->freelist = get_freepointer(s, object);
2164 if (!alloc_debug_processing(s, slab, object, orig_size)) {
2174 return object;
2179 * allocated slab. Allocate a single object instead of whole freelist
2188 void *object;
2191 object = slab->freelist;
2192 slab->freelist = get_freepointer(s, object);
2195 if (!alloc_debug_processing(s, slab, object, orig_size))
2213 return object;
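alloc_single_from_partial() and alloc_single_from_new_slab() (lines 2149-2213) are used for debug caches: they take exactly one object off slab->freelist and run alloc_debug_processing() on it, instead of grabbing the whole freelist the way the fast path does. The pop-one-object core, stripped of locking, counters and debug checks:

#include <stddef.h>

struct toy_object { struct toy_object *next; };
struct toy_slab   { struct toy_object *freelist; unsigned int inuse; };

/* Detach a single object from the slab's freelist, or NULL if it is empty. */
static struct toy_object *toy_alloc_single(struct toy_slab *slab)
{
	struct toy_object *object = slab->freelist;

	if (!object)
		return NULL;
	slab->freelist = object->next;
	slab->inuse++;
	return object;
}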
2276 void *object = NULL;
2297 object = alloc_single_from_partial(s, n, slab,
2299 if (object)
2304 t = acquire_slab(s, n, slab, object == NULL);
2308 if (!object) {
2311 object = t;
2327 return object;
2340 void *object;
2375 object = get_partial_node(s, n, pc);
2376 if (object) {
2384 return object;
2398 void *object;
2404 object = get_partial_node(s, get_node(s, searchnode), pc);
2405 if (object || node != NUMA_NO_NODE)
2406 return object;
2512 * remember the last object in freelist_tail for later splicing.
2520 * If 'nextfree' is invalid, it is possible that the object at
2912 void *object = head;
2932 if (!free_consistency_checks(s, slab, object, addr))
2937 set_track_update(s, object, TRACK_FREE, addr, handle);
2938 trace(s, slab, object, 0);
2940 init_object(s, object, SLUB_RED_INACTIVE);
2943 if (object != tail) {
2944 object = get_freepointer(s, object);
2959 slab_fix(s, "Object at 0x%p not freed", object);
2995 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
3085 * first element of the freelist as the object to allocate now and move the
3262 * and return the object
3335 void *object;
3354 * Irqless object alloc/free algorithm used here depends on sequence
3356 * on c to guarantee that object and slab associated with previous tid
3357 * won't be used with current tid. If we fetch tid first, object and
3370 object = c->freelist;
3374 unlikely(!object || !slab || !node_match(slab, node))) {
3375 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
3377 void *next_object = get_freepointer_safe(s, object);
3393 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) {
3401 return object;
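The fast path at lines 3335-3401 reads the per-cpu freelist together with a transaction id (tid), computes the next free object with get_freepointer_safe(), and commits both with a single per-cpu cmpxchg (__update_cpu_freelist_fast), retrying on failure; as the comment at lines 3354-3357 explains, fetching tid first is what keeps a stale object and slab from being committed under the current tid. A portable sketch of the same pop-then-compare-and-swap shape, without the tid pairing or per-cpu machinery the kernel depends on:

#include <stdatomic.h>
#include <stddef.h>

struct toy_object { struct toy_object *next; };

/* Lock-free pop from a shared freelist head (no tid, so no ABA protection). */
static struct toy_object *toy_pop(_Atomic(struct toy_object *) *freelist)
{
	struct toy_object *object, *next;

	do {
		object = atomic_load(freelist);
		if (!object)
			return NULL;		/* caller falls back to a slow path */
		next = object->next;
	} while (!atomic_compare_exchange_weak(freelist, &object, next));

	return object;
}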
3409 void *object;
3414 object = get_partial(s, node, &pc);
3416 if (object)
3417 return object;
3425 object = alloc_single_from_new_slab(s, slab, orig_size);
3427 return object;
3432 * If the object has been wiped upon free, make sure it's fully initialized by
3451 * Otherwise we can simply pick the next object from the lockless free list.
3456 void *object;
3464 object = kfence_alloc(s, orig_size, gfpflags);
3465 if (unlikely(object))
3468 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
3470 maybe_wipe_obj_freeptr(s, object);
3478 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
3480 return object;
3860 void *object;
3864 object = p[--size];
3865 folio = virt_to_folio(object);
3869 free_large_kmalloc(folio, object);
3873 /* Derive kmem_cache from object */
3878 df->s = cache_from_obj(s, object); /* Support for memcg */
3882 df->tail = object;
3883 df->freelist = object;
3886 if (is_kfence_address(object))
3889 set_freepointer(df->s, object, NULL);
3893 object = p[--size];
3895 if (df->slab == virt_to_slab(object)) {
3897 set_freepointer(df->s, object, df->freelist);
3898 df->freelist = object;
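The bulk-free path at lines 3860-3898 scans the array from the end, derives the kmem_cache and slab from the last object, and then chains onto that detached freelist every following object that maps to the same slab; whatever it could not group is handled on a later pass. A much-reduced model of the grouping step that simply stops at the first mismatch (the kernel also swaps mismatches aside and limits its look-ahead); slab_of[i] stands in for virt_to_slab(p[i]):

#include <stddef.h>

/* Group trailing entries of the array that share a slab id; return the count. */
static size_t toy_build_group(const int *slab_of, size_t *size)
{
	size_t cnt;
	int slab;

	if (!*size)
		return 0;

	slab = slab_of[--(*size)];	/* last object starts the detached list */
	cnt = 1;
	while (*size && slab_of[*size - 1] == slab) {
		(*size)--;		/* same slab: chain it onto the list */
		cnt++;
	}
	return cnt;
}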
3950 void *object = kfence_alloc(s, s->object_size, flags);
3952 if (unlikely(object)) {
3953 p[i] = object;
3957 object = c->freelist;
3958 if (unlikely(!object)) {
3960 * We may have removed an object from c->freelist using
3986 c->freelist = get_freepointer(s, object);
3987 p[i] = object;
4010 void *object = kfence_alloc(s, s->object_size, flags);
4012 if (unlikely(object)) {
4013 p[i] = object;
4065 * offset 0. If we tune the size of the object to the alignment then we can
4066 * get the required alignment by putting one properly sized object after
4088 * Calculate the order of allocation given a slab object size.
4108 * slab and thereby reduce object handling overhead. If the user has
4110 * the smallest order which will fit the object.
4187 * let's see if we can place a single object there.
4345 * object freed. If they are used for allocation then they can be
4370 * a slab object.
4379 * Round up object size to the next word boundary. We can only
4387 * Determine if we can poison the object itself. If the user of
4388 * the slab may touch the object after free or before allocation
4389 * then we should never poison the object itself.
4400 * end of the object and the free pointer. If not then add an
4409 * by the object and redzoning.
4418 * Relocate free pointer after the object if it is not
4419 * permitted to overwrite the first word of the object on
4424 * redzoning an object smaller than sizeof(void *).
4427 * pointer is outside of the object is used in the
4435 * Store freelist pointer near middle of object to keep
4436 * it away from the edges of the object to avoid small
4446 * the object.
4464 * of the object.
4475 * SLUB stores one object immediately after another beginning from
4477 * each object to conform to the alignment.
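Taken together, the calculate_sizes() fragments at lines 4379-4446 make three decisions: round the object size up to a word boundary, decide whether the object itself may be poisoned, and place the free pointer: after the object when its first word must not be overwritten on free (constructor, SLAB_TYPESAFE_BY_RCU, poisoning, tiny red-zoned objects), otherwise inside the object near its middle so small overflows from either neighbour miss it. A rough user-space restatement of just that placement logic (the real function also handles red zones, tracking data, the original request size and final alignment):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define WORD		sizeof(void *)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

struct toy_sizes { size_t inuse, offset, size; };

static struct toy_sizes toy_calculate_sizes(size_t object_size, bool fp_outside)
{
	struct toy_sizes s;
	size_t size = ALIGN_UP(object_size, WORD);	/* word-align the payload */

	s.inuse = size;			/* bytes the allocator treats as the object */
	if (fp_outside) {
		s.offset = size;	/* free pointer relocated after the object */
		size += WORD;
	} else {
		/* free pointer overlays the object, kept away from both edges */
		s.offset = object_size / 2 / WORD * WORD;
	}
	s.size = size;
	return s;
}

int main(void)
{
	struct toy_sizes s = toy_calculate_sizes(100, true);

	printf("inuse=%zu offset=%zu size=%zu\n", s.inuse, s.offset, s.size);
	return 0;
}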
4539 * The larger the object size is, the more slabs we want on the partial
4648 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
4658 kpp->kp_ptr = object;
4662 objp0 = kasan_reset_tag(object);
4756 /* Find object and usable object size. */
4761 usercopy_abort("SLUB object not in SLUB page?!", NULL,
4764 /* Find offset within object. */
4773 usercopy_abort("SLUB object in left red zone",
4784 usercopy_abort("SLUB object", s->name, to_user, offset, n);
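__check_heap_object() (lines 4756-4784) validates a hardened usercopy against the slab layout: find the object the pointer falls in, abort if it points into the left red zone, then require that offset plus length stays within the object (or within the cache's whitelisted usercopy region). A condensed model of those checks with illustrative names; the real code also accounts for kfence objects and the stored kmalloc request size:

#include <stdbool.h>
#include <stddef.h>

/*
 * object_offset: byte offset of the copy inside its object (from the red zone)
 * usable:        bytes of the object a usercopy may legitimately touch
 * red_left_pad:  size of the left red zone preceding the payload
 */
static bool toy_usercopy_ok(size_t object_offset, size_t n,
			    size_t usable, size_t red_left_pad)
{
	if (object_offset < red_left_pad)
		return false;			/* inside the left red zone */
	object_offset -= red_left_pad;
	return n <= usable && object_offset <= usable - n;	/* stays in bounds */
}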
5099 * Adjust the object sizes so that we clear
5100 * the complete object on kzalloc.
5397 SL_TOTAL /* Determine object capacity not slabs */