Lines Matching defs:size in mm/slob.c (the SLOB allocator)
29 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
40 * alignment. Again, objects of page-size or greater are allocated by
41 * calling alloc_pages(). As SLAB objects know their size, no separate
42 * size bookkeeping is necessary and there is essentially no allocation
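The header comments above (lines 29 and 40-42) describe two bookkeeping schemes: kmalloc() objects carry a small prepended header holding the request size, while kmem_cache objects need no header because the cache already records its object size. Below is a minimal userspace sketch of the header idea only; MINALIGN, hdr_alloc() and hdr_free() are invented names for illustration, not kernel API.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define MINALIGN 8	/* stands in for ARCH_KMALLOC_MINALIGN */

/* Reserve MINALIGN extra bytes and record the request size in them. */
static void *hdr_alloc(size_t size)
{
	unsigned int *m = malloc(size + MINALIGN);

	if (!m)
		return NULL;
	*m = (unsigned int)size;	/* the prepended size header */
	return (void *)m + MINALIGN;	/* the caller only ever sees the object */
}

/* Step back over the header to recover the size and the real block start. */
static void hdr_free(void *obj)
{
	unsigned int *m = (void *)obj - MINALIGN;

	/* *m is the original request size; the kernel uses it to free the
	 * whole (*m + MINALIGN)-byte block */
	free(m);
}

int main(void)
{
	char *p = hdr_alloc(100);

	assert(p);
	memset(p, 0, 100);
	hdr_free(p);
	return 0;
}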
78 * slob_block has a field 'units', which indicates size of block if +ve,
81 * Free blocks of size 1 unit simply contain the offset of the next block.
82 * Those with larger size contain their size in the first SLOB_UNIT of
126 #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
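Lines 78-82 state the free-block convention and line 126 the unit arithmetic built on it. A short sketch, assuming slobidx_t is a 16-bit type (the common 4 KB page configuration), so one SLOB unit is 2 bytes:

#include <assert.h>
#include <stdint.h>

typedef int16_t slobidx_t;		/* 16 bits when PAGE_SIZE is 4 KB */

typedef struct slob_block {
	slobidx_t units;		/* +ve: block size, -ve: next offset */
} slob_t;

#define SLOB_UNIT		sizeof(slob_t)		/* 2 bytes here */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SLOB_UNITS(size)	DIV_ROUND_UP(size, SLOB_UNIT)

int main(void)
{
	assert(SLOB_UNITS(1) == 1);	/* anything up to 2 bytes is one unit */
	assert(SLOB_UNITS(100) == 50);	/* 100 bytes -> 50 two-byte units */
	assert(SLOB_UNITS(101) == 51);	/* partial units round up */
	return 0;
}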
135 int size;
144 * Encode the given size and next info into a free slob block s.
146 static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
151 if (size > 1) {
152 s[0].units = size;
159 * Return the size of a slob block.
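set_slob() (lines 146-152) encodes a free block's size and the offset of the next free block using the sign convention from lines 78-82, and the comment at line 159 marks the matching size accessor. Below is a userspace round-trip of that encoding; slob_base(), slob_units() and slob_next() are written here from the convention described above rather than copied from the kernel, and a page-aligned buffer stands in for the PAGE_MASK lookup on a real page.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

typedef int16_t slobidx_t;
typedef struct slob_block { slobidx_t units; } slob_t;

/* Page base of a block, standing in for the kernel's PAGE_MASK trick. */
static slob_t *slob_base(slob_t *s)
{
	return (slob_t *)((uintptr_t)s & ~(PAGE_SIZE - 1));
}

/* Encode size and next-block offset into a free block, as described above. */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slobidx_t offset = (slobidx_t)(next - slob_base(s));

	if (size > 1) {
		s[0].units = size;	/* first unit holds the size */
		s[1].units = offset;	/* second unit holds the next offset */
	} else {
		s[0].units = -offset;	/* one-unit block: negated offset only */
	}
}

/* Size of a free block: the stored size, or 1 if only an offset is stored. */
static slobidx_t slob_units(slob_t *s)
{
	return s->units > 0 ? s->units : 1;
}

/* Next free block, decoded from whichever encoding set_slob() used. */
static slob_t *slob_next(slob_t *s)
{
	slobidx_t next = s[0].units < 0 ? -s[0].units : s[1].units;

	return slob_base(s) + next;
}

int main(void)
{
	slob_t *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	slob_t *a, *b;

	assert(page);
	a = page;		/* 4-unit free block at the start of the page */
	b = page + 10;		/* 1-unit free block further in */

	set_slob(a, 4, b);
	set_slob(b, 1, page + 20);

	assert(slob_units(a) == 4 && slob_next(a) == b);
	assert(slob_units(b) == 1 && slob_next(b) == page + 20);

	free(page);
	return 0;
}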
225 * @size: Size of the allocation.
230 * Tries to find a chunk of memory at least @size bytes big within @page.
237 static void *slob_page_alloc(struct page *sp, size_t size, int align,
241 int delta = 0, units = SLOB_UNITS(size);
251 * allocated block with its size, so that the block itself is
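The kerneldoc and the comment at lines 230-251 explain that slob_page_alloc() does not align the block itself; it aligns block + align_offset, so that kmalloc()'s prepended header falls into the padding and the object behind it comes out aligned. A worked example of that arithmetic, using the usual ALIGN() rounding and made-up addresses:

#include <assert.h>
#include <stdint.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t cur = 0x1004;		/* candidate free block inside a page */
	uintptr_t align = 64;		/* alignment wanted for the object */
	uintptr_t align_offset = 8;	/* room for the kmalloc header (minalign) */

	/* choose the block so that block + align_offset is 64-byte aligned */
	uintptr_t aligned = ALIGN(cur + align_offset, align) - align_offset;

	assert((aligned + align_offset) % align == 0);	/* the object is aligned */
	assert(aligned % align != 0);	/* the block start itself need not be */
	return 0;
}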
301 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
310 if (size < SLOB_BREAK1)
312 else if (size < SLOB_BREAK2)
330 if (sp->units < SLOB_UNITS(size))
333 b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
370 b = slob_page_alloc(sp, size, align, align_offset, &_unused);
375 memset(b, 0, size);
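slob_alloc() (lines 310-333) first picks one of three page lists by request size, skips any page whose free-unit total is already smaller than SLOB_UNITS(size), and zeroes the result only when __GFP_ZERO was passed; the same size thresholds reappear in slob_free() further down (lines 417-419). A sketch of the list selection, assuming the usual break points of 256 and 1024 bytes:

#include <assert.h>
#include <stddef.h>

#define SLOB_BREAK1	256
#define SLOB_BREAK2	1024

enum slob_list { LIST_SMALL, LIST_MEDIUM, LIST_LARGE };

/* Mirror of the threshold checks at lines 310-312 and 417-419. */
static enum slob_list slob_list_for(size_t size)
{
	if (size < SLOB_BREAK1)
		return LIST_SMALL;	/* objects under 256 bytes */
	else if (size < SLOB_BREAK2)
		return LIST_MEDIUM;	/* 256 to 1023 bytes */
	return LIST_LARGE;		/* the rest, up to a page */
}

int main(void)
{
	assert(slob_list_for(32) == LIST_SMALL);
	assert(slob_list_for(256) == LIST_MEDIUM);
	assert(slob_list_for(2000) == LIST_LARGE);
	return 0;
}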
382 static void slob_free(void *block, int size)
392 BUG_ON(!size);
395 units = SLOB_UNITS(size);
417 if (size < SLOB_BREAK1)
419 else if (size < SLOB_BREAK2)
469 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
480 if (size < PAGE_SIZE - minalign) {
487 if (is_power_of_2(size))
488 align = max(minalign, (int) size);
490 if (!size)
493 m = slob_alloc(size + minalign, gfp, align, node, minalign);
497 *m = size;
501 size, size + minalign, gfp, node);
503 unsigned int order = get_order(size);
510 size, PAGE_SIZE << order, gfp, node);
513 kmemleak_alloc(ret, size, 1, gfp);
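__do_kmalloc_node() (lines 480-513) asks slob_alloc() for size + minalign bytes, stores the request size in the first minalign-sized slot and returns the address just past it; for power-of-two sizes (lines 487-488) the alignment is raised to the size itself so kmalloc() objects come out naturally aligned, and requests that will not fit go to the page allocator with order = get_order(size) (line 503). A worked example of that bookkeeping, assuming minalign is 8 on this architecture:

#include <assert.h>
#include <stddef.h>

static int is_power_of_2(size_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	size_t size = 512;		/* kmalloc(512, ...) */
	size_t minalign = 8;		/* stand-in for ARCH_KMALLOC_MINALIGN */
	size_t align = minalign;
	size_t request;

	/* power-of-two sizes get natural alignment (lines 487-488) */
	if (is_power_of_2(size))
		align = size > minalign ? size : minalign;

	/* slob_alloc() is asked for the object plus header room (line 493),
	 * with align_offset = minalign so the object, not the header, is
	 * what ends up aligned */
	request = size + minalign;

	assert(align == 512);		/* kmalloc(512) is 512-byte aligned */
	assert(request == 520);		/* 4-byte size header padded to minalign */
	return 0;
}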
517 void *__kmalloc(size_t size, gfp_t gfp)
519 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
523 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
525 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
530 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
533 return __do_kmalloc_node(size, gfp, node, caller);
588 c->size += sizeof(struct slob_rcu);
603 if (c->size < PAGE_SIZE) {
604 b = slob_alloc(c->size, flags, c->align, node, 0);
606 SLOB_UNITS(c->size) * SLOB_UNIT,
609 b = slob_new_pages(flags, get_order(c->size), node);
611 PAGE_SIZE << get_order(c->size),
620 kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
631 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
633 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
644 static void __kmem_cache_free(void *b, int size)
646 if (size < PAGE_SIZE)
647 slob_free(b, size);
649 slob_free_pages(b, get_order(size));
655 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
657 __kmem_cache_free(b, slob_rcu->size);
665 slob_rcu = b + (c->size - sizeof(struct slob_rcu));
666 slob_rcu->size = c->size;
669 __kmem_cache_free(b, c->size);
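For SLAB_TYPESAFE_BY_RCU caches, line 588 grows c->size by sizeof(struct slob_rcu) at cache creation, lines 665-666 place that struct in the reserved tail of the object when it is freed, and line 655 shows the RCU callback recomputing the object's start from it before calling __kmem_cache_free(). A sketch of just that pointer arithmetic; the payload size and the fake rcu_head layout are invented for illustration.

#include <assert.h>
#include <stdlib.h>

/* Stand-in for struct rcu_head; only its size matters for this sketch. */
struct fake_rcu_head { void *next; void (*func)(void *); };

struct slob_rcu {
	struct fake_rcu_head head;
	int size;
};

int main(void)
{
	int object_size = 48;		/* invented payload size */
	int c_size = object_size + (int)sizeof(struct slob_rcu);	/* line 588 */
	char *b = malloc(c_size);	/* the object handed out by the cache */
	struct slob_rcu *slob_rcu;
	char *start;

	assert(b);

	/* on free, the trailer overlays the tail of the dead object
	 * (lines 665-666), which is why c->size was grown at create time */
	slob_rcu = (struct slob_rcu *)(b + c_size - sizeof(struct slob_rcu));
	slob_rcu->size = c_size;

	/* after the grace period, the callback recovers the block start
	 * from the embedded head (line 655) */
	start = (char *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
	assert(start == b);

	free(b);
	return 0;
}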
676 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
678 __kmem_cache_free_bulk(s, size, p);
682 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
685 return __kmem_cache_alloc_bulk(s, flags, size, p);
706 .size = sizeof(struct kmem_cache),