Lines matching refs:size (mm/slub.c, Linux SLUB allocator)

314 		__p < (__addr) + (__objects) * (__s)->size; \
315 		__p += (__s)->size)
317 static inline unsigned int order_objects(unsigned int order, unsigned int size)
319 return ((unsigned int)PAGE_SIZE << order) / size;
323 unsigned int size)
326 (order << OO_SHIFT) + order_objects(order, size)
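
order_objects() and oo_make() above pack a slab's page order and its object capacity into a single word. A minimal userspace sketch of the same arithmetic, assuming 4 KiB pages and the in-tree OO_SHIFT of 16 (the values fed to main() are illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define OO_SHIFT  16
#define OO_MASK   ((1u << OO_SHIFT) - 1)

static unsigned int order_objects(unsigned int order, unsigned int size)
{
	/* objects that fit in a slab of 2^order contiguous pages */
	return (PAGE_SIZE << order) / size;
}

static unsigned int oo_make(unsigned int order, unsigned int size)
{
	/* pack order and object count into one word, as lines 317-326 do */
	return (order << OO_SHIFT) + order_objects(order, size);
}

int main(void)
{
	unsigned int oo = oo_make(1, 192);

	printf("order=%u objects=%u\n", oo >> OO_SHIFT, oo & OO_MASK);
	return 0;
}
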
473 return s->size - s->red_left_pad;
475 return s->size;
530 if (object < base || object >= base + page->objects * s->size ||
531 (object - base) % s->size) {
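
Lines 530-531 are the free-pointer sanity check: a pointer is accepted only if it falls inside the slab and lands exactly on an object boundary. A hedged userspace model of that test (function and variable names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool valid_object_pointer(const char *object, const char *base,
				 unsigned int objects, unsigned int size)
{
	if (object < base || object >= base + (size_t)objects * size)
		return false;                 /* outside the slab */
	return (object - base) % size == 0;   /* must sit on an object boundary */
}

int main(void)
{
	static char slab[4096];

	/* slab + 384 starts object 2 of a 192-byte cache; slab + 100 is bogus */
	printf("%d %d\n", valid_object_pointer(slab + 384, slab, 21, 192),
	       valid_object_pointer(slab + 100, slab, 21, 192));
	return 0;
}
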
830 * object + s->size
831 * Nothing is used beyond s->size.
871 remainder = length % s->size;
959 maxobj = order_objects(compound_order(page), s->size);
1009 max_objects = order_objects(compound_order(page), s->size);
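
Lines 830-871 concern the slab tail: nothing beyond object + s->size is used, and the remainder that cannot hold a whole object stays poisoned for the padding check to verify. The arithmetic, modeled in userspace with an assumed 4 KiB page size and a made-up object size:

#include <stdio.h>

int main(void)
{
	unsigned int length = 4096u << 1; /* an order-1 slab */
	unsigned int size = 360;          /* illustrative s->size */
	unsigned int remainder = length % size;

	/* 22 objects fit; the 272 trailing bytes are the poisoned padding */
	printf("%u objects, %u pad bytes\n", length / size, remainder);
	return 0;
}
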
1401 * @object_size: the size of an object without meta data
1504 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1506 ptr = kasan_kmalloc_large(ptr, size, flags);
1508 kmemleak_alloc(ptr, size, 1, flags);
1575 s->size - s->inuse - rsize);
1656 s->random_seq[i] *= s->size;
1684 * page might be smaller than the usual size defined by the cache.
1710 page_limit = page->objects * s->size;
1800 next = p + s->size;
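
Lines 1656-1800 belong to freelist randomization: s->random_seq starts life as a permutation of object indices and is scaled by s->size so each entry becomes a byte offset, after which the shuffled freelist is walked in s->size strides up to page_limit. A small sketch of the scaling step (the sequence and size are made up):

#include <stdio.h>

int main(void)
{
	unsigned int random_seq[] = { 3, 0, 2, 1 }; /* pre-shuffled object indices */
	unsigned int size = 256;                    /* stand-in for s->size */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		random_seq[i] *= size;              /* index -> byte offset, as at line 1656 */
		printf("freelist slot %u -> offset %u\n", i, random_seq[i]);
	}
	return 0;
}
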
2548 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
2549 s->name, s->object_size, s->size, oo_order(s->oo),
2915 s->size, gfpflags);
2922 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2925 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2926 ret = kasan_kmalloc(s, ret, size, gfpflags);
2938 s->object_size, s->size, gfpflags, node);
2947 int node, size_t size)
2952 size, s->size, gfpflags, node);
2954 ret = kasan_kmalloc(s, ret, size, gfpflags);
3194 int build_detached_freelist(struct kmem_cache *s, size_t size,
3206 object = p[--size];
3208 } while (!object && size);
3220 p[size] = NULL; /* mark object processed */
3221 return size;
3234 p[size] = NULL; /* mark object processed */
3237 while (size) {
3238 object = p[--size];
3248 p[size] = NULL; /* mark object processed */
3258 first_skipped_index = size + 1;
3265 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3267 if (WARN_ON(!size))
3270 memcg_slab_free_hook(s, p, size);
3274 size = build_detached_freelist(s, size, p, &df);
3279 } while (likely(size));
3284 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3292 s = slab_pre_alloc_hook(s, &objcg, size, flags);
3303 for (i = 0; i < size; i++) {
3346 slab_post_alloc_hook(s, objcg, flags, size, p);
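
For callers, the bulk API at lines 3265 and 3284 pairs up as below; a kernel-style sketch with error handling reduced to the minimum (demo_bulk is a made-up name, the two kmem_cache_*_bulk calls are the real exported interface):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int demo_bulk(struct kmem_cache *cache)
{
	void *objs[16];

	/* returns the number allocated (all 16 here) or 0 on failure */
	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return -ENOMEM;

	/* ... use objs[0..15] ... */

	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
	return 0;
}
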
3359 * offset 0. If we tune the size of the object to the alignment then we can
3381 * Calculate the order of allocation given a slab object size.
3405 static inline unsigned int slab_order(unsigned int size,
3412 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
3413 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3415 for (order = max(min_order, (unsigned int)get_order(min_objects * size));
3421 rem = slab_size % size;
3430 static inline int calculate_order(unsigned int size)
3447 max_objects = order_objects(slub_max_order, size);
3455 order = slab_order(size, min_objects,
3468 order = slab_order(size, 1, slub_max_order, 1);
3475 order = slab_order(size, 1, MAX_ORDER, 1);
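
slab_order() and calculate_order() (lines 3405-3475) search for the smallest page order whose tail waste is acceptable. A simplified userspace model, assuming 4 KiB pages; the kernel version additionally starts the search at get_order(min_objects * size) and caps the object count at MAX_OBJS_PER_PAGE:

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int slab_order(unsigned int size, unsigned int min_objects,
			       unsigned int max_order, unsigned int fract_leftover)
{
	unsigned int order;

	for (order = 0; order <= max_order; order++) {
		unsigned int slab_size = PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;                /* too few objects fit */
		if (slab_size % size <= slab_size / fract_leftover)
			break;                   /* tail waste acceptable */
	}
	return order;
}

int main(void)
{
	/* 700-byte objects: order 0 wastes 596 of 4096 bytes, order 1 only 492 of 8192 */
	printf("chosen order: %u\n", slab_order(700, 4, 3, 16));
	return 0;
}
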
3530 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3636 else if (s->size >= PAGE_SIZE)
3638 else if (s->size >= 1024)
3640 else if (s->size >= 256)
3654 unsigned int size = s->object_size;
3658 * Round up object size to the next word boundary. We can only
3662 size = ALIGN(size, sizeof(void *));
3682 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3683 size += sizeof(void *);
3690 s->inuse = size;
3709 s->offset = size;
3710 size += sizeof(void *);
3726 size += 2 * sizeof(struct track);
3729 kasan_cache_create(s, &size, &s->flags);
3739 size += sizeof(void *);
3743 size += s->red_left_pad;
3749 * offset 0. In order to align the objects we have to simply size
3752 size = ALIGN(size, s->align);
3753 s->size = size;
3754 s->reciprocal_size = reciprocal_value(size);
3758 order = calculate_order(size);
3779 s->oo = oo_make(order, size);
3780 s->min = oo_make(get_order(size), size);
3789 s->flags = kmem_cache_flags(s->size, flags, s->name);
3801 if (get_order(s->size) > get_order(s->object_size)) {
3817 * The larger the object size is, the more pages we want on the partial
3820 set_min_partial(s, ilog2(s->size) / 2);
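
calculate_sizes() (lines 3654-3790) grows the stored object from object_size to the final s->size by appending metadata. A rough userspace walk-through of that accumulation for a 52-byte object with red zoning and user tracking enabled; the 24-byte struct track is a stand-in, and the sequence of additions is simplified from lines 3662-3753:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int align = 8, track = 24;  /* track size is a stand-in */
	unsigned int size = ALIGN_UP(52, 8); /* word-align the payload: 56 */

	size += 8;          /* right red zone word (line 3683) */
	/* s->inuse = size at this point (line 3690) */
	size += 8;          /* out-of-line free pointer (lines 3709-3710) */
	size += 2 * track;  /* alloc + free tracking (line 3726) */
	size += 8;          /* padding before the object (line 3739) */
	size += 8;          /* red_left_pad (line 3743) */
	size = ALIGN_UP(size, align); /* final alignment (line 3752) */

	printf("debug footprint of a 52-byte object: %u bytes\n", size);
	return 0;
}
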
3957 void *__kmalloc(size_t size, gfp_t flags)
3962 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3963 return kmalloc_large(size, flags);
3965 s = kmalloc_slab(size, flags);
3972 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3974 ret = kasan_kmalloc(s, ret, size, flags);
3981 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3985 unsigned int order = get_order(size);
3995 return kmalloc_large_node_hook(ptr, size, flags);
3998 void *__kmalloc_node(size_t size, gfp_t flags, int node)
4003 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4004 ret = kmalloc_large_node(size, flags, node);
4007 size, PAGE_SIZE << get_order(size),
4013 s = kmalloc_slab(size, flags);
4020 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
4022 ret = kasan_kmalloc(s, ret, size, flags);
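
Lines 3962-3963 and 4003-4004 show the large-allocation dispatch: anything above KMALLOC_MAX_CACHE_SIZE skips the slab caches entirely and goes to the page allocator at get_order(size) granularity. A simplified userspace model of that order computation, assuming 4 KiB pages:

#include <stdio.h>

static unsigned int get_order(unsigned long size) /* simplified model */
{
	unsigned int order = 0;

	size = (size - 1) >> 12;  /* 4 KiB pages assumed */
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* 10000 bytes needs an order-2 (four-page) allocation */
	printf("order for 10000 bytes: %u\n", get_order(10000));
	return 0;
}
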
4047 /* Find object and usable object size. */
4056 offset = (ptr - page_address(page)) % s->size;
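
Line 4056 is the usercopy hardening path: the modulo reduces an arbitrary pointer to its offset within the containing object (with red zoning active, the kernel then also backs out red_left_pad, cf. the size_from_object() logic at line 473). Modeled in userspace with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t page_addr = 0x1000;              /* stand-in for page_address(page) */
	uintptr_t ptr = page_addr + 3 * 192 + 40;  /* 40 bytes into object 3 */
	unsigned int size = 192;                   /* s->size */

	printf("offset within object: %lu\n",
	       (unsigned long)((ptr - page_addr) % size));
	return 0;
}
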
4411 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
4416 s = find_mergeable(size, align, flags, name, ctor);
4424 s->object_size = max(s->object_size, size);
4425 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
4455 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4460 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4461 return kmalloc_large(size, gfpflags);
4463 s = kmalloc_slab(size, gfpflags);
4471 trace_kmalloc(caller, ret, size, s->size, gfpflags);
4478 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4484 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4485 ret = kmalloc_large_node(size, gfpflags, node);
4488 size, PAGE_SIZE << get_order(size),
4494 s = kmalloc_slab(size, gfpflags);
4502 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
5030 return sprintf(buf, "%u\n", s->size);
5558 * Format :[flags-]size
5588 p += sprintf(p, "%07u", s->size);
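
Lines 5558-5588 build the sysfs alias name in the ":[flags-]size" format, zero-padding the size to seven digits. A userspace sketch of that formatting (the 'a' flag character is illustrative; the real code picks flag letters from the cache's SLAB_* bits):

#include <stdio.h>

int main(void)
{
	char name[32];
	char *p = name;

	*p++ = ':';
	*p++ = 'a';                   /* illustrative flag character */
	*p++ = '-';
	p += sprintf(p, "%07u", 192); /* s->size, zero-padded as at line 5588 */
	*p = '\0';
	printf("%s\n", name);         /* prints ":a-0000192" */
	return 0;
}
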