Lines Matching defs:object
56 * A. page->freelist -> List of objects free in a page
88 * operations no list for full slabs is used. If an object in a full slab is
198 /* Poison object */
277 static inline void *get_freepointer(struct kmem_cache *s, void *object)
279 return freelist_dereference(s, object + s->offset);
282 static void prefetch_freepointer(const struct kmem_cache *s, void *object)
284 prefetch(object + s->offset);
287 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
293 return get_freepointer(s, object);
295 freepointer_addr = (unsigned long)object + s->offset;
300 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
302 unsigned long freeptr_addr = (unsigned long)object + s->offset;
305 BUG_ON(object == fp); /* naive detection of double free or corruption */
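The accessors listed above (slub.c lines 277-305) read and write the free pointer that SLUB embeds inside each free object at byte offset s->offset. The stand-alone sketch below models that intrusive freelist in user space; the toy_* names, sizes and offsets are invented for illustration and are not the kernel API.

	#include <stdio.h>
	#include <string.h>

	/* Toy model: each free object stores the address of the next free
	 * object at byte offset `offset` inside itself, as SLUB does. */
	struct toy_cache { size_t size; size_t offset; };

	static void *toy_get_freepointer(const struct toy_cache *s, void *object)
	{
		void *fp;
		memcpy(&fp, (char *)object + s->offset, sizeof(fp));
		return fp;
	}

	static void toy_set_freepointer(const struct toy_cache *s, void *object, void *fp)
	{
		memcpy((char *)object + s->offset, &fp, sizeof(fp));
	}

	int main(void)
	{
		struct toy_cache s = { .size = 64, .offset = 32 };
		static char slab[3 * 64];
		void *head = NULL;

		/* Thread all three objects onto a freelist, newest first. */
		for (int i = 0; i < 3; i++) {
			void *obj = slab + (size_t)i * s.size;
			toy_set_freepointer(&s, obj, head);
			head = obj;
		}

		/* Pop objects by following the embedded free pointers. */
		while (head) {
			printf("allocating %p\n", head);
			head = toy_get_freepointer(&s, head);
		}
		return 0;
	}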
441 /* Determine a map of objects in use on a page.
499 * slub is about to manipulate internal object metadata. This memory lies
500 * outside the range of the allocated object, so accessing it would normally
520 struct page *page, void *object)
524 if (!object)
528 object = kasan_reset_tag(object);
529 object = restore_red_left(s, object);
530 if (object < base || object >= base + page->objects * s->size ||
531 (object - base) % s->size) {
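The bounds and modulus test above is what lets check_valid_pointer reject addresses that do not start an object: the pointer must lie inside the slab and be a whole multiple of s->size from the slab base. A hedged stand-alone illustration of the same arithmetic, with made-up sizes and without the red-zone adjustment (restore_red_left) or KASAN tag handling:

	#include <stdbool.h>
	#include <stdio.h>

	/* `base` is the slab start; `nr_objects` and `size` describe its layout. */
	static bool valid_object_pointer(char *base, unsigned int nr_objects,
					 size_t size, char *object)
	{
		if (object < base || object >= base + (size_t)nr_objects * size)
			return false;
		return (size_t)(object - base) % size == 0;
	}

	int main(void)
	{
		static char slab[4 * 128];

		printf("%d\n", valid_object_pointer(slab, 4, 128, slab + 256)); /* 1 */
		printf("%d\n", valid_object_pointer(slab, 4, 128, slab + 200)); /* 0: mid-object */
		printf("%d\n", valid_object_pointer(slab, 4, 128, slab + 512)); /* 0: past the end */
		return 0;
	}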
557 * not overlapping with object.
567 static struct track *get_track(struct kmem_cache *s, void *object,
572 p = object + get_info_end(s);
577 static void set_track(struct kmem_cache *s, void *object,
580 struct track *p = get_track(s, object, alloc);
602 static void init_tracking(struct kmem_cache *s, void *object)
607 set_track(s, object, TRACK_FREE, 0UL);
608 set_track(s, object, TRACK_ALLOC, 0UL);
630 void print_tracking(struct kmem_cache *s, void *object)
636 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
637 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
729 u8 *object, char *reason)
732 print_trailer(s, page, object);
749 static void init_object(struct kmem_cache *s, void *object, u8 val)
751 u8 *p = object;
773 u8 *object, char *what,
794 print_trailer(s, page, object);
803 * object address
804 * Bytes of the object to be managed.
805 * If the freepointer may overlay the object then the free
806 * pointer is at the middle of the object.
811 * object + s->object_size
819 * object + s->inuse
822 * A. Free pointer (if we cannot overwrite object on free)
830 * object + s->size
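Read together, the layout comment above says every object occupies s->size bytes: an optional left red zone, the s->object_size payload, padding/red zone up to s->inuse, then the out-of-line free pointer and tracking data, with nothing used beyond s->size. Purely as an illustration of those offsets, the numbers below are invented for one hypothetical debug-enabled cache, not read from a real one:

	object - 16	left red zone (s->red_left_pad = 16)
	object + 0	start of the payload (s->object_size = 24)
	object + 24	padding / right red zone up to s->inuse
	object + 32	free pointer and tracking data (s->inuse = 32)
	object + 64	start of the next object (s->size = 64)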
893 void *object, u8 val)
895 u8 *p = object;
896 u8 *endobject = object + s->object_size;
899 if (!check_bytes_and_report(s, page, object, "Left Redzone",
900 object - s->red_left_pad, val, s->red_left_pad))
903 if (!check_bytes_and_report(s, page, object, "Right Redzone",
930 * freepointer while object is allocated.
940 * another error because the object count is now wrong.
976 * Determine if a certain object on a page is on the freelist. Must hold the
983 void *object = NULL;
991 if (object) {
992 object_err(s, page, object,
994 set_freepointer(s, object, NULL);
1004 object = fp;
1005 fp = get_freepointer(s, object);
1020 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
1028 static void trace(struct kmem_cache *s, struct page *page, void *object,
1035 object, page->inuse,
1039 print_section(KERN_INFO, "Object ", (void *)object,
1106 void *object)
1111 init_object(s, object, SLUB_RED_INACTIVE);
1112 init_tracking(s, object);
1127 struct page *page, void *object)
1132 if (!check_valid_pointer(s, page, object)) {
1133 object_err(s, page, object, "Freelist Pointer check fails");
1137 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1145 void *object, unsigned long addr)
1148 if (!alloc_consistency_checks(s, page, object))
1154 set_track(s, object, TRACK_ALLOC, addr);
1155 trace(s, page, object, 1);
1156 init_object(s, object, SLUB_RED_ACTIVE);
1174 struct page *page, void *object, unsigned long addr)
1176 if (!check_valid_pointer(s, page, object)) {
1177 slab_err(s, page, "Invalid object pointer 0x%p", object);
1181 if (on_freelist(s, page, object)) {
1182 object_err(s, page, object, "Object already free");
1186 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1191 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1192 object);
1194 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1195 object);
1198 object_err(s, page, object,
1212 void *object = head;
1229 if (!free_consistency_checks(s, page, object, addr))
1234 set_track(s, object, TRACK_FREE, addr);
1235 trace(s, page, object, 0);
1237 init_object(s, object, SLUB_RED_INACTIVE);
1240 if (object != tail) {
1241 object = get_freepointer(s, object);
1254 slab_fix(s, "Object at 0x%p not freed", object);
1401 * @object_size: the size of an object without meta data
1455 struct page *page, void *object) {}
1460 struct page *page, void *object, unsigned long addr) { return 0; }
1470 void *object, u8 val) { return 1; }
1553 void *object;
1563 object = next;
1564 next = get_freepointer(s, object);
1568 * Clear the object and the metadata, but don't touch
1571 memset(object, 0, s->object_size);
1574 memset((char *)object + s->inuse, 0,
1578 /* If object's reuse doesn't have to be delayed */
1579 if (!slab_free_hook(s, object)) {
1580 /* Move object to the new freelist */
1581 set_freepointer(s, object, *head);
1582 *head = object;
1584 *tail = object;
1588 * accordingly if object's reuse is delayed.
1592 } while (object != old_tail);
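The loop excerpted above (slub.c lines 1553-1592) walks the objects being freed and relinks only those whose reuse is not delayed onto a new freelist head/tail. The sketch below shows just that relinking idea; reuse_delayed() is a stand-in for slab_free_hook()'s decision (e.g. KASAN quarantine), the free pointer lives at offset 0 for simplicity, and the wiping of object data is omitted.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct node { struct node *next; };

	static bool reuse_delayed(struct node *n)
	{
		static int i;
		(void)n;
		return i++ % 2;		/* pretend every other object is quarantined */
	}

	/* Build a new freelist that skips any object whose reuse is delayed. */
	static struct node *filter_freelist(struct node *old_head, struct node **tail)
	{
		struct node *head = NULL;

		*tail = NULL;
		for (struct node *n = old_head, *next; n; n = next) {
			next = n->next;
			if (reuse_delayed(n))
				continue;	/* leave it out of the new list */
			n->next = head;		/* move object to the new freelist */
			head = n;
			if (!*tail)
				*tail = n;	/* first kept object becomes the tail */
		}
		return head;
	}

	int main(void)
	{
		static struct node objs[5];
		struct node *head = NULL, *tail;

		for (int i = 0; i < 5; i++) {
			objs[i].next = head;
			head = &objs[i];
		}
		head = filter_freelist(head, &tail);
		for (struct node *n = head; n; n = n->next)
			printf("kept %p\n", (void *)n);
		return 0;
	}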
1601 void *object)
1603 setup_object_debug(s, page, object);
1604 object = kasan_init_slab_obj(s, object);
1606 kasan_unpoison_object_data(s, object);
1607 s->ctor(object);
1608 kasan_poison_object_data(s, object);
1610 return object;
1960 void *object = NULL;
1980 t = acquire_slab(s, n, page, object == NULL, &objects);
1985 if (!object) {
1988 object = t;
1999 return object;
2013 void *object;
2048 object = get_partial_node(s, n, c, flags);
2049 if (object) {
2057 return object;
2072 void *object;
2078 object = get_partial_node(s, get_node(s, searchnode), c, flags);
2079 if (object || node != NUMA_NO_NODE)
2080 return object;
2187 * If 'nextfree' is invalid, it is possible that the object at
2548 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2657 * first element of the freelist as the object to allocate now and move the
2795 * If the object has been wiped upon free, make sure it's fully initialized by
2813 * Otherwise we can simply pick the next object from the lockless free list.
2818 void *object;
2845 * Irqless object alloc/free algorithm used here depends on sequence
2847 * on c to guarantee that object and page associated with previous tid
2848 * won't be used with current tid. If we fetch tid first, object and
2861 object = c->freelist;
2863 if (unlikely(!object || !page || !node_match(page, node))) {
2864 object = __slab_alloc(s, gfpflags, node, addr, c);
2866 void *next_object = get_freepointer_safe(s, object);
2884 object, tid,
2894 maybe_wipe_obj_freeptr(s, object);
2896 if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
2897 memset(object, 0, s->object_size);
2899 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
2901 return object;
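Lines 2845-2899 above are the heart of the irqless fastpath: read the per-cpu tid and freelist head, fetch the next free pointer, then commit the new head together with a bumped tid in one double-word cmpxchg so a stale snapshot can never be committed (the ABA problem). The user-space sketch below packs a small freelist index and a transaction counter into one 64-bit word to show the same idea; it is an analogue, not the kernel's this_cpu_cmpxchg_double path, and the slot/index scheme is invented.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NIL 0xffffffffu

	static uint32_t next_slot[8];		/* embedded "free pointers" (indices) */
	static _Atomic uint64_t head_and_tid;	/* low 32 bits: head slot, high 32: tid */

	static uint32_t pop_slot(void)
	{
		uint64_t old = atomic_load(&head_and_tid);

		for (;;) {
			uint32_t head = (uint32_t)old;
			uint32_t tid = (uint32_t)(old >> 32);
			uint64_t new;

			if (head == NIL)
				return NIL;	/* a slowpath would take over here */
			/* Commit new head and incremented tid together. */
			new = ((uint64_t)(tid + 1) << 32) | next_slot[head];
			if (atomic_compare_exchange_weak(&head_and_tid, &old, new))
				return head;	/* on failure `old` is refreshed; retry */
		}
	}

	int main(void)
	{
		for (uint32_t i = 0; i < 8; i++)
			next_slot[i] = (i + 1 < 8) ? i + 1 : NIL;
		atomic_store(&head_and_tid, 0);	/* head = slot 0, tid = 0 */

		for (uint32_t s; (s = pop_slot()) != NIL; )
			printf("allocated slot %u\n", s);
		return 0;
	}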
3199 void *object;
3206 object = p[--size];
3207 /* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3208 } while (!object && size);
3210 if (!object)
3213 page = virt_to_head_page(object);
3218 kfree_hook(object);
3220 p[size] = NULL; /* mark object processed */
3223 /* Derive kmem_cache from object */
3226 df->s = cache_from_obj(s, object); /* Support for memcg */
3231 set_freepointer(df->s, object, NULL);
3232 df->tail = object;
3233 df->freelist = object;
3234 p[size] = NULL; /* mark object processed */
3238 object = p[--size];
3239 if (!object)
3243 if (df->page == virt_to_head_page(object)) {
3245 set_freepointer(df->s, object, df->freelist);
3246 df->freelist = object;
3248 p[size] = NULL; /* mark object processed */
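The bulk-free lines above (3199-3248) group the incoming pointer array into per-slab "detached freelists": scan from the end, take the first unprocessed object's page as the current slab, chain every other object from the same page onto that freelist, and NULL out processed slots so later passes skip them. A rough user-space sketch of that grouping; page_of() stands in for virt_to_head_page(), the free pointer sits at offset 0, and the sizes are invented.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	static uintptr_t page_of(void *p) { return (uintptr_t)p >> 12; }

	struct detached { void *freelist; size_t cnt; uintptr_t page; };

	/* Build one detached freelist from the tail of p[]; return nonzero
	 * while unprocessed pointers remain. */
	static size_t build_detached_freelist(void **p, size_t size, struct detached *df)
	{
		df->freelist = NULL;
		df->cnt = 0;

		while (size && !p[size - 1])
			size--;			/* skip already-processed slots */
		if (!size)
			return 0;

		df->page = page_of(p[size - 1]);
		for (size_t i = size; i-- > 0; ) {
			void *object = p[i];

			if (!object || page_of(object) != df->page)
				continue;	/* different slab: handled next round */
			*(void **)object = df->freelist;	/* link into the list */
			df->freelist = object;
			df->cnt++;
			p[i] = NULL;		/* mark object processed */
		}
		return size;
	}

	int main(void)
	{
		static char pg_a[4096] __attribute__((aligned(4096)));
		static char pg_b[4096] __attribute__((aligned(4096)));
		void *p[] = { pg_a, pg_b, pg_a + 64, pg_b + 64, pg_a + 128 };
		struct detached df;

		while (build_detached_freelist(p, sizeof(p) / sizeof(p[0]), &df))
			printf("freeing %zu object(s) from page %#lx\n",
			       df.cnt, (unsigned long)df.page);
		return 0;
	}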
3304 void *object = c->freelist;
3306 if (unlikely(!object)) {
3308 * We may have removed an object from c->freelist using
3330 c->freelist = get_freepointer(s, object);
3331 p[i] = object;
3359 * offset 0. If we tune the size of the object to the alignment then we can
3360 * get the required alignment by putting one properly sized object after
3381 * Calculate the order of allocation given a slab object size.
3401 * slab and thereby reduce object handling overhead. If the user has
3403 * the smallest order which will fit the object.
3466 * let's see if we can place a single object there.
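The order-calculation comments above (3381-3466) come down to picking the smallest page order whose remainder after packing whole objects is acceptably small. A hedged worked example of that waste computation, not the kernel's exact heuristic (which also weighs min_objects and a fraction limit); the 700-byte object size is invented:

	#include <stdio.h>

	/* For each order, a slab holds slab_size / size objects and wastes the
	 * remainder; e.g. a 700-byte object wastes 596 of 4096 bytes at
	 * order 0 but only 284 of 16384 bytes at order 2. */
	int main(void)
	{
		unsigned int size = 700;	/* hypothetical object size */

		for (unsigned int order = 0; order <= 3; order++) {
			unsigned long slab_size = 4096UL << order;
			unsigned long objects = slab_size / size;
			unsigned long waste = slab_size - objects * size;

			printf("order %u: %lu objects, %lu bytes wasted (%.1f%%)\n",
			       order, objects, waste, 100.0 * waste / slab_size);
		}
		return 0;
	}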
3622 * object freed. If they are used for allocation then they can be
3649 * a slab object.
3658 * Round up object size to the next word boundary. We can only
3666 * Determine if we can poison the object itself. If the user of
3667 * the slab may touch the object after free or before allocation
3668 * then we should never poison the object itself.
3679 * end of the object and the free pointer. If not then add an
3688 * by the object and redzoning.
3696 * Relocate free pointer after the object if it is not
3697 * permitted to overwrite the first word of the object on
3702 * redzoning an object smaller than sizeof(void *).
3705 * pointer is outside of the object is used in the
3713 * Store freelist pointer near middle of object to keep
3714 * it away from the edges of the object to avoid small
3724 * the object.
3737 * of the object.
3748 * SLUB stores one object immediately after another beginning from
3750 * each object to conform to the alignment.
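The comments above (3649-3750) describe how calculate_sizes() settles the final layout: round the object size up to a word boundary, then decide whether the free pointer may overlay the payload or must be relocated after it (a ctor, poisoning, or RCU forbid overwriting the object on free), keeping an overlaid pointer near the middle of the object, away from the edges. The snippet below is a deliberately simplified model of that offset arithmetic; it ignores red zones, tracking data and cache-line alignment, and its numbers are illustrative, not what the kernel would compute.

	#include <stdio.h>

	#define WORD		sizeof(void *)
	#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))
	#define ALIGN_DOWN(x, a)	((x) / (a) * (a))

	struct layout { unsigned int object_size, inuse, offset, size; };

	/* If the object must not be overwritten on free, relocate the free
	 * pointer after the payload and grow the object by one word;
	 * otherwise overlay it on the payload near its middle. */
	static struct layout compute_layout(unsigned int object_size, int fp_outside_object)
	{
		struct layout l = { .object_size = object_size };

		l.size = ALIGN_UP(object_size, WORD);
		l.inuse = l.size;
		if (fp_outside_object) {
			l.offset = l.size;
			l.size += WORD;
		} else {
			l.offset = ALIGN_DOWN(l.object_size / 2, WORD);
		}
		return l;
	}

	int main(void)
	{
		struct layout a = compute_layout(24, 0);
		struct layout b = compute_layout(24, 1);

		printf("fp overlays payload: inuse=%u offset=%u size=%u\n", a.inuse, a.offset, a.size);
		printf("fp after payload:    inuse=%u offset=%u size=%u\n", b.inuse, b.offset, b.size);
		return 0;
	}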
3817 * The larger the object size is, the more pages we want on the partial
4047 /* Find object and usable object size. */
4052 usercopy_abort("SLUB object not in SLUB page?!", NULL,
4055 /* Find offset within object. */
4061 usercopy_abort("SLUB object in left red zone",
4073 * If the copy is still within the allocated object, produce
4081 usercopy_warn("SLUB object", s->name, to_user, offset, n);
4085 usercopy_abort("SLUB object", s->name, to_user, offset, n);
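The usercopy-hardening lines above (4047-4085) reduce to simple arithmetic: locate the object containing the pointer, reject copies that start in the left red zone, and allow the copy only if the requested range stays inside the object. A hedged sketch of that bounds check with invented sizes, omitting the kmalloc usable-size fallback that distinguishes usercopy_warn from usercopy_abort:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* `ptr` points somewhere inside a slab starting at `base`; each object
	 * is `size` bytes with a `red_left_pad`-byte left red zone and an
	 * `object_size`-byte usable payload. Return true if copying `n` bytes
	 * starting at `ptr` stays inside one object's payload. */
	static bool usercopy_ok(const char *base, const char *ptr, size_t n,
				size_t size, size_t red_left_pad, size_t object_size)
	{
		size_t offset = (size_t)(ptr - base) % size;	/* offset within object */

		if (offset < red_left_pad)
			return false;			/* starts in the left red zone */
		offset -= red_left_pad;
		return offset < object_size && n <= object_size - offset;
	}

	int main(void)
	{
		static char slab[4 * 128];

		/* size = 128, red_left_pad = 16, object_size = 100 (invented) */
		printf("%d\n", usercopy_ok(slab, slab + 128 + 16, 100, 128, 16, 100)); /* 1 */
		printf("%d\n", usercopy_ok(slab, slab + 128 + 16, 101, 128, 16, 100)); /* 0: overruns */
		printf("%d\n", usercopy_ok(slab, slab + 128 + 4, 8, 128, 16, 100));    /* 0: red zone */
		return 0;
	}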
4089 size_t __ksize(const void *object)
4093 if (unlikely(object == ZERO_SIZE_PTR))
4096 page = virt_to_head_page(object);
4110 void *object = (void *)x;
4122 kfree_hook(object);
4128 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
4421 * Adjust the object sizes so that we clear
4422 * the complete object on kzalloc.
4838 pr_err("If allocated object is overwritten then not detectable\n\n");
4846 pr_err("If allocated object is overwritten then not detectable\n\n");
4880 SL_TOTAL /* Determine object capacity not slabs */