Lines Matching defs:cachep (mm/slab.c, Linux SLAB allocator)

211 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
213 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
214 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
217 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
219 static inline void fixup_slab_list(struct kmem_cache *cachep,
241 #define MAKE_LIST(cachep, listp, slab, nodeid) \
244 list_splice(&get_node(cachep, nodeid)->slab, listp); \
247 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
249 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
250 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
251 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
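
MAKE_ALL_LISTS() splices a node's full, partial and free slab lists into a bootstrap copy of the per-node structure. For orientation, a trimmed, illustrative sketch of the fields involved (not the full kmem_cache_node definition; field names follow mm/slab.h in the kernel version these line numbers appear to come from):

  #include <linux/list.h>
  #include <linux/spinlock.h>

  struct kmem_cache_node_sketch {          /* illustrative only */
          spinlock_t       list_lock;      /* protects the three lists below */
          struct list_head slabs_partial;  /* slabs with some objects in use */
          struct list_head slabs_full;     /* slabs with every object in use */
          struct list_head slabs_free;     /* slabs with no objects in use   */
          unsigned long    free_objects;   /* free objects across this node  */
          unsigned int     free_limit;     /* cap before free slabs are returned */
  };
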
317 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
320 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
322 * cachep->obj_offset: The real object.
323 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
324 * cachep->size - 1* BYTES_PER_WORD: last caller address
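
As a worked example of that layout: on a 64-bit build (BYTES_PER_WORD == 8) with SLAB_RED_ZONE | SLAB_STORE_USER and a hypothetical 40-byte object that needs no extra alignment padding, obj_offset becomes 8 and cachep->size becomes 64, giving:

  bytes  0 ..  7   leading red zone word (RED_ACTIVE while allocated)
  bytes  8 .. 47   the object itself (cachep->obj_offset == 8, object_size == 40)
  bytes 48 .. 55   trailing red zone word (cachep->size - 2*BYTES_PER_WORD)
  bytes 56 .. 63   last caller address    (cachep->size - 1*BYTES_PER_WORD)

The exact numbers are assumptions for illustration; alignment constraints can add further padding.
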
327 static int obj_offset(struct kmem_cache *cachep)
329 return cachep->obj_offset;
332 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
335 return (unsigned long long*) (objp + obj_offset(cachep) -
339 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
342 if (cachep->flags & SLAB_STORE_USER)
343 return (unsigned long long *)(objp + cachep->size -
346 return (unsigned long long *) (objp + cachep->size -
350 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
352 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
353 return (void **)(objp + cachep->size - BYTES_PER_WORD);
359 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
360 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
361 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
392 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
394 return this_cpu_ptr(cachep->cpu_cache);
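
cpu_cache_get() returns the calling CPU's object cache, a small LIFO stack of recently freed object pointers. Roughly, the structure behind it looks like this (an illustrative excerpt of struct array_cache from mm/slab.c, not a verbatim copy):

  struct array_cache_sketch {
          unsigned int avail;       /* objects currently sitting in entry[] */
          unsigned int limit;       /* flush threshold (tunable)            */
          unsigned int batchcount;  /* objects moved per refill or flush    */
          unsigned int touched;     /* set on use so the reaper skips it    */
          void *entry[];            /* LIFO stack of object pointers        */
  };
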
436 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
438 static void __slab_error(const char *function, struct kmem_cache *cachep,
442 function, cachep->name, msg);
551 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
559 n = get_node(cachep, page_node);
562 free_block(cachep, &objp, 1, page_node, &list);
565 slabs_destroy(cachep, &list);
603 #define drain_alien_cache(cachep, alien) do { } while (0)
604 #define reap_alien(cachep, n) do { } while (0)
616 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
621 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
627 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
694 static void __drain_alien_cache(struct kmem_cache *cachep,
698 struct kmem_cache_node *n = get_node(cachep, node);
710 free_block(cachep, ac->entry, ac->avail, node, list);
719 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
732 __drain_alien_cache(cachep, ac, node, &list);
734 slabs_destroy(cachep, &list);
740 static void drain_alien_cache(struct kmem_cache *cachep,
755 __drain_alien_cache(cachep, ac, i, &list);
757 slabs_destroy(cachep, &list);
762 static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
770 n = get_node(cachep, node);
771 STATS_INC_NODEFREES(cachep);
777 STATS_INC_ACOVERFLOW(cachep);
778 __drain_alien_cache(cachep, ac, page_node, &list);
782 slabs_destroy(cachep, &list);
784 n = get_node(cachep, page_node);
786 free_block(cachep, &objp, 1, page_node, &list);
788 slabs_destroy(cachep, &list);
793 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
804 return __cache_free_alien(cachep, objp, node, page_node);
817 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
826 n = get_node(cachep, node);
829 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
830 cachep->num;
842 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
845 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
852 cachep->node[node] = n;
870 struct kmem_cache *cachep;
872 list_for_each_entry(cachep, &slab_caches, list) {
873 ret = init_cache_node(cachep, node, GFP_KERNEL);
882 static int setup_kmem_cache_node(struct kmem_cache *cachep,
893 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
898 if (cachep->shared) {
900 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
905 ret = init_cache_node(cachep, node, gfp);
909 n = get_node(cachep, node);
912 free_block(cachep, n->shared->entry,
929 slabs_destroy(cachep, &list);
952 struct kmem_cache *cachep;
957 list_for_each_entry(cachep, &slab_caches, list) {
963 n = get_node(cachep, node);
970 n->free_limit -= cachep->batchcount;
973 nc = per_cpu_ptr(cachep->cpu_cache, cpu);
974 free_block(cachep, nc->entry, nc->avail, node, &list);
984 free_block(cachep, shared->entry,
996 drain_alien_cache(cachep, alien);
1001 slabs_destroy(cachep, &list);
1008 list_for_each_entry(cachep, &slab_caches, list) {
1009 n = get_node(cachep, node);
1012 drain_freelist(cachep, n, INT_MAX);
1018 struct kmem_cache *cachep;
1036 list_for_each_entry(cachep, &slab_caches, list) {
1037 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1107 struct kmem_cache *cachep;
1110 list_for_each_entry(cachep, &slab_caches, list) {
1113 n = get_node(cachep, node);
1117 drain_freelist(cachep, n, INT_MAX);
1164 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1178 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1179 cachep->node[nodeid] = ptr;
1186 static void __init set_up_node(struct kmem_cache *cachep, int index)
1191 cachep->node[node] = &init_kmem_cache_node[index + node];
1192 cachep->node[node]->next_reap = jiffies +
1194 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1285 struct kmem_cache *cachep;
1289 list_for_each_entry(cachep, &slab_caches, list)
1290 if (enable_cpucache(cachep, GFP_NOWAIT))
1327 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1342 cachep->name, cachep->size, cachep->gfporder);
1344 for_each_kmem_cache_node(cachep, node, n) {
1355 (total_slabs * cachep->num) - free_objs,
1356 total_slabs * cachep->num);
1369 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1374 flags |= cachep->allocflags;
1376 page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1378 slab_out_of_memory(cachep, flags, nodeid);
1382 account_slab_page(page, cachep->gfporder, cachep);
1394 static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1396 int order = cachep->gfporder;
1406 unaccount_slab_page(page, order, cachep);
1412 struct kmem_cache *cachep;
1416 cachep = page->slab_cache;
1418 kmem_freepages(cachep, page);
1422 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1424 if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
1425 (cachep->size % PAGE_SIZE) == 0)
1432 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
1434 if (!is_debug_pagealloc_cache(cachep))
1437 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1441 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1446 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1448 int size = cachep->object_size;
1449 addr = &((char *)addr)[obj_offset(cachep)];
1487 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1492 if (cachep->flags & SLAB_RED_ZONE) {
1494 *dbg_redzone1(cachep, objp),
1495 *dbg_redzone2(cachep, objp));
1498 if (cachep->flags & SLAB_STORE_USER)
1499 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1500 realobj = (char *)objp + obj_offset(cachep);
1501 size = cachep->object_size;
1511 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1517 if (is_debug_pagealloc_cache(cachep))
1520 realobj = (char *)objp + obj_offset(cachep);
1521 size = cachep->object_size;
1533 print_tainted(), cachep->name,
1535 print_objinfo(cachep, objp, 0);
1557 objnr = obj_to_index(cachep, page, objp);
1559 objp = index_to_obj(cachep, page, objnr - 1);
1560 realobj = (char *)objp + obj_offset(cachep);
1562 print_objinfo(cachep, objp, 2);
1564 if (objnr + 1 < cachep->num) {
1565 objp = index_to_obj(cachep, page, objnr + 1);
1566 realobj = (char *)objp + obj_offset(cachep);
1568 print_objinfo(cachep, objp, 2);
1575 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1580 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1581 poison_obj(cachep, page->freelist - obj_offset(cachep),
1585 for (i = 0; i < cachep->num; i++) {
1586 void *objp = index_to_obj(cachep, page, i);
1588 if (cachep->flags & SLAB_POISON) {
1589 check_poison_obj(cachep, objp);
1590 slab_kernel_map(cachep, objp, 1);
1592 if (cachep->flags & SLAB_RED_ZONE) {
1593 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1594 slab_error(cachep, "start of a freed object was overwritten");
1595 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1596 slab_error(cachep, "end of a freed object was overwritten");
1601 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1609 * @cachep: cache pointer being destroyed
1616 static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1621 slab_destroy_debugcheck(cachep, page);
1622 if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1625 kmem_freepages(cachep, page);
1631 if (OFF_SLAB(cachep))
1632 kmem_cache_free(cachep->freelist_cache, freelist);
1639 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1645 slab_destroy(cachep, page);
1651 * @cachep: pointer to the cache that is being created
1663 static size_t calculate_slab_order(struct kmem_cache *cachep,
1698 if (freelist_cache->size > cachep->size / 2)
1703 cachep->num = num;
1704 cachep->gfporder = gfporder;
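
calculate_slab_order() picks cachep->gfporder and the resulting objects-per-slab count. For the common on-slab freelist case the arithmetic is roughly:

  num       = (PAGE_SIZE << gfporder) / (size + sizeof(freelist_idx_t))
  left_over = (PAGE_SIZE << gfporder) - num * (size + sizeof(freelist_idx_t))

As a hypothetical example with a 4 KiB page, gfporder 0, a 256-byte object and a 2-byte freelist index: num = 4096 / 258 = 15 objects per slab, with 226 bytes left over; that remainder is what later becomes cachep->colour = left / cachep->colour_off for cache colouring. Off-slab and objfreelist caches use a slightly different estimate.
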
1732 struct kmem_cache *cachep, int entries, int batchcount)
1752 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1755 return enable_cpucache(cachep, gfp);
1757 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1758 if (!cachep->cpu_cache)
1766 set_up_node(cachep, SIZE_NODE);
1771 cachep->node[node] = kmalloc_node(
1773 BUG_ON(!cachep->node[node]);
1774 kmem_cache_node_init(cachep->node[node]);
1778 cachep->node[numa_mem_id()]->next_reap =
1780 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1782 cpu_cache_get(cachep)->avail = 0;
1783 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1784 cpu_cache_get(cachep)->batchcount = 1;
1785 cpu_cache_get(cachep)->touched = 0;
1786 cachep->batchcount = 1;
1787 cachep->limit = BOOT_CPUCACHE_ENTRIES;
1801 struct kmem_cache *cachep;
1803 cachep = find_mergeable(size, align, flags, name, ctor);
1804 if (cachep) {
1805 cachep->refcount++;
1811 cachep->object_size = max_t(int, cachep->object_size, size);
1813 return cachep;
1816 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1821 cachep->num = 0;
1828 if (unlikely(slab_want_init_on_free(cachep)))
1831 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1834 left = calculate_slab_order(cachep, size,
1836 if (!cachep->num)
1839 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1842 cachep->colour = left / cachep->colour_off;
1847 static bool set_off_slab_cache(struct kmem_cache *cachep,
1852 cachep->num = 0;
1865 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1866 if (!cachep->num)
1873 if (left >= cachep->num * sizeof(freelist_idx_t))
1876 cachep->colour = left / cachep->colour_off;
1881 static bool set_on_slab_cache(struct kmem_cache *cachep,
1886 cachep->num = 0;
1888 left = calculate_slab_order(cachep, size, flags);
1889 if (!cachep->num)
1892 cachep->colour = left / cachep->colour_off;
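
These three helpers are tried in order by __kmem_cache_create(): store the freelist inside an otherwise unused object (set_objfreelist_slab_cache), allocate it from a separate kmalloc cache (set_off_slab_cache), or keep it at the end of the slab's own pages (set_on_slab_cache). The objfreelist variant is only viable when cachep->num * sizeof(freelist_idx_t) fits inside one object and the cache has no constructor, no SLAB_TYPESAFE_BY_RCU and no init_on_free. Continuing the hypothetical 256-byte example above: 15 objects * 2 bytes = 30 bytes of freelist, which fits comfortably inside a single 256-byte object, so the objfreelist layout would be chosen there.
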
1899 * @cachep: cache management descriptor
1920 int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
1925 unsigned int size = cachep->size;
1958 if (ralign < cachep->align) {
1959 ralign = cachep->align;
1967 cachep->align = ralign;
1968 cachep->colour_off = cache_line_size();
1970 if (cachep->colour_off < cachep->align)
1971 cachep->colour_off = cachep->align;
1986 cachep->obj_offset += sizeof(unsigned long long);
2001 kasan_cache_create(cachep, &size, &flags);
2003 size = ALIGN(size, cachep->align);
2009 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2020 size >= 256 && cachep->object_size > cache_line_size()) {
2024 if (set_off_slab_cache(cachep, tmp_size, flags)) {
2026 cachep->obj_offset += tmp_size - size;
2034 if (set_objfreelist_slab_cache(cachep, size, flags)) {
2039 if (set_off_slab_cache(cachep, size, flags)) {
2044 if (set_on_slab_cache(cachep, size, flags))
2050 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2051 cachep->flags = flags;
2052 cachep->allocflags = __GFP_COMP;
2054 cachep->allocflags |= GFP_DMA;
2056 cachep->allocflags |= GFP_DMA32;
2058 cachep->allocflags |= __GFP_RECLAIMABLE;
2059 cachep->size = size;
2060 cachep->reciprocal_buffer_size = reciprocal_value(size);
2069 (cachep->flags & SLAB_POISON) &&
2070 is_debug_pagealloc_cache(cachep))
2071 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2074 if (OFF_SLAB(cachep)) {
2075 cachep->freelist_cache =
2076 kmalloc_slab(cachep->freelist_size, 0u);
2079 err = setup_cpu_cache(cachep, gfp);
2081 __kmem_cache_release(cachep);
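
__kmem_cache_create() above is reached through the public kmem_cache_create() API. A minimal, hypothetical sketch of a client creating such a cache (the struct and names are illustrative, not from this file):

  #include <linux/init.h>
  #include <linux/list.h>
  #include <linux/slab.h>

  struct foo {                              /* hypothetical client object */
          int id;
          struct list_head link;
  };

  static struct kmem_cache *foo_cachep;

  static int __init foo_cache_init(void)
  {
          foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                         0, SLAB_HWCACHE_ALIGN, NULL);
          if (!foo_cachep)
                  return -ENOMEM;
          return 0;
  }

The KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN) convenience macro expands to a similar call that also passes the struct's natural alignment.
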
2104 static void check_spinlock_acquired(struct kmem_cache *cachep)
2108 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2112 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2116 assert_spin_locked(&get_node(cachep, node)->list_lock);
2128 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2140 free_block(cachep, ac->entry, tofree, node, list);
2147 struct kmem_cache *cachep = arg;
2154 ac = cpu_cache_get(cachep);
2155 n = get_node(cachep, node);
2157 free_block(cachep, ac->entry, ac->avail, node, &list);
2160 slabs_destroy(cachep, &list);
2163 static void drain_cpu_caches(struct kmem_cache *cachep)
2169 on_each_cpu(do_drain, cachep, 1);
2171 for_each_kmem_cache_node(cachep, node, n)
2173 drain_alien_cache(cachep, n->alien);
2175 for_each_kmem_cache_node(cachep, node, n) {
2177 drain_array_locked(cachep, n->shared, node, true, &list);
2180 slabs_destroy(cachep, &list);
2236 int __kmem_cache_shrink(struct kmem_cache *cachep)
2242 drain_cpu_caches(cachep);
2245 for_each_kmem_cache_node(cachep, node, n) {
2246 drain_freelist(cachep, n, INT_MAX);
2254 int __kmem_cache_shutdown(struct kmem_cache *cachep)
2256 return __kmem_cache_shrink(cachep);
2259 void __kmem_cache_release(struct kmem_cache *cachep)
2264 cache_random_seq_destroy(cachep);
2266 free_percpu(cachep->cpu_cache);
2269 for_each_kmem_cache_node(cachep, i, n) {
2273 cachep->node[i] = NULL;
2291 static void *alloc_slabmgmt(struct kmem_cache *cachep,
2301 if (OBJFREELIST_SLAB(cachep))
2303 else if (OFF_SLAB(cachep)) {
2305 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2309 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2310 cachep->freelist_size;
2327 static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2332 for (i = 0; i < cachep->num; i++) {
2333 void *objp = index_to_obj(cachep, page, i);
2335 if (cachep->flags & SLAB_STORE_USER)
2336 *dbg_userword(cachep, objp) = NULL;
2338 if (cachep->flags & SLAB_RED_ZONE) {
2339 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2340 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2347 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2348 kasan_unpoison_object_data(cachep,
2349 objp + obj_offset(cachep));
2350 cachep->ctor(objp + obj_offset(cachep));
2352 cachep, objp + obj_offset(cachep));
2355 if (cachep->flags & SLAB_RED_ZONE) {
2356 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2357 slab_error(cachep, "constructor overwrote the end of an object");
2358 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2359 slab_error(cachep, "constructor overwrote the start of an object");
2362 if (cachep->flags & SLAB_POISON) {
2363 poison_obj(cachep, objp, POISON_FREE);
2364 slab_kernel_map(cachep, objp, 0);
2386 struct kmem_cache *cachep,
2396 if (!cachep->random_seq) {
2400 state->list = cachep->random_seq;
2427 static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2429 unsigned int objfreelist = 0, i, rand, count = cachep->num;
2436 precomputed = freelist_state_initialize(&state, cachep, count);
2439 if (OBJFREELIST_SLAB(cachep)) {
2444 page->freelist = index_to_obj(cachep, page, objfreelist) +
2445 obj_offset(cachep);
2468 if (OBJFREELIST_SLAB(cachep))
2469 set_free_obj(page, cachep->num - 1, objfreelist);
2474 static inline bool shuffle_freelist(struct kmem_cache *cachep,
2481 static void cache_init_objs(struct kmem_cache *cachep,
2488 cache_init_objs_debug(cachep, page);
2491 shuffled = shuffle_freelist(cachep, page);
2493 if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2494 page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2495 obj_offset(cachep);
2498 for (i = 0; i < cachep->num; i++) {
2499 objp = index_to_obj(cachep, page, i);
2500 objp = kasan_init_slab_obj(cachep, objp);
2503 if (DEBUG == 0 && cachep->ctor) {
2504 kasan_unpoison_object_data(cachep, objp);
2505 cachep->ctor(objp);
2506 kasan_poison_object_data(cachep, objp);
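
cache_init_objs() invokes cachep->ctor once per object when a slab is populated, not on every allocation, which is also why __GFP_ZERO is flagged for caches with a constructor. A hedged sketch of a constructor-backed cache (names hypothetical):

  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct bar {
          spinlock_t lock;
          unsigned long state;
  };

  /* Runs when the object first enters a slab (see cache_init_objs above). */
  static void bar_ctor(void *obj)
  {
          struct bar *b = obj;

          spin_lock_init(&b->lock);
          b->state = 0;
  }

  static struct kmem_cache *bar_cachep;

  static int bar_cache_init(void)
  {
          /* callers must not pass __GFP_ZERO to allocations from this cache */
          bar_cachep = kmem_cache_create("bar_cache", sizeof(struct bar),
                                         0, 0, bar_ctor);
          return bar_cachep ? 0 : -ENOMEM;
  }
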
2514 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2518 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2524 static void slab_put_obj(struct kmem_cache *cachep,
2527 unsigned int objnr = obj_to_index(cachep, page, objp);
2532 for (i = page->active; i < cachep->num; i++) {
2535 cachep->name, objp);
2542 page->freelist = objp + obj_offset(cachep);
2563 static struct page *cache_grow_begin(struct kmem_cache *cachep,
2580 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2591 page = kmem_getpages(cachep, local_flags, nodeid);
2596 n = get_node(cachep, page_node);
2600 if (n->colour_next >= cachep->colour)
2604 if (offset >= cachep->colour)
2607 offset *= cachep->colour_off;
2617 freelist = alloc_slabmgmt(cachep, page, offset,
2619 if (OFF_SLAB(cachep) && !freelist)
2622 slab_map_pages(cachep, page, freelist);
2624 cache_init_objs(cachep, page);
2632 kmem_freepages(cachep, page);
2639 static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2650 n = get_node(cachep, page_to_nid(page));
2658 fixup_slab_list(cachep, n, page, &list);
2660 STATS_INC_GROWN(cachep);
2661 n->free_objects += cachep->num - page->active;
2664 fixup_objfreelist_debug(cachep, &list);
2705 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2711 BUG_ON(virt_to_cache(objp) != cachep);
2713 objp -= obj_offset(cachep);
2717 if (cachep->flags & SLAB_RED_ZONE) {
2718 verify_redzone_free(cachep, objp);
2719 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2720 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2722 if (cachep->flags & SLAB_STORE_USER)
2723 *dbg_userword(cachep, objp) = (void *)caller;
2725 objnr = obj_to_index(cachep, page, objp);
2727 BUG_ON(objnr >= cachep->num);
2728 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2730 if (cachep->flags & SLAB_POISON) {
2731 poison_obj(cachep, objp, POISON_FREE);
2732 slab_kernel_map(cachep, objp, 0);
2742 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2750 objp = next - obj_offset(cachep);
2752 poison_obj(cachep, objp, POISON_FREE);
2757 static inline void fixup_slab_list(struct kmem_cache *cachep,
2763 if (page->active == cachep->num) {
2765 if (OBJFREELIST_SLAB(cachep)) {
2768 if (cachep->flags & SLAB_POISON) {
2845 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2862 obj = slab_get_obj(cachep, page);
2865 fixup_slab_list(cachep, n, page, &list);
2868 fixup_objfreelist_debug(cachep, &list);
2877 static __always_inline int alloc_block(struct kmem_cache *cachep,
2884 BUG_ON(page->active >= cachep->num);
2886 while (page->active < cachep->num && batchcount--) {
2887 STATS_INC_ALLOCED(cachep);
2888 STATS_INC_ACTIVE(cachep);
2889 STATS_SET_HIGH(cachep);
2891 ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2897 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2909 ac = cpu_cache_get(cachep);
2919 n = get_node(cachep, node);
2941 check_spinlock_acquired(cachep);
2943 batchcount = alloc_block(cachep, ac, page, batchcount);
2944 fixup_slab_list(cachep, n, page, &list);
2951 fixup_objfreelist_debug(cachep, &list);
2957 void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
2963 page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
2969 ac = cpu_cache_get(cachep);
2971 alloc_block(cachep, ac, page, batchcount);
2972 cache_grow_end(cachep, page);
2982 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2989 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2992 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2995 if (cachep->flags & SLAB_POISON) {
2996 check_poison_obj(cachep, objp);
2997 slab_kernel_map(cachep, objp, 1);
2998 poison_obj(cachep, objp, POISON_INUSE);
3000 if (cachep->flags & SLAB_STORE_USER)
3001 *dbg_userword(cachep, objp) = (void *)caller;
3003 if (cachep->flags & SLAB_RED_ZONE) {
3004 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3005 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3006 slab_error(cachep, "double free, or memory outside object was overwritten");
3008 objp, *dbg_redzone1(cachep, objp),
3009 *dbg_redzone2(cachep, objp));
3011 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3012 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3015 objp += obj_offset(cachep);
3016 if (cachep->ctor && cachep->flags & SLAB_POISON)
3017 cachep->ctor(objp);
3029 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3036 ac = cpu_cache_get(cachep);
3041 STATS_INC_ALLOCHIT(cachep);
3045 STATS_INC_ALLOCMISS(cachep);
3046 objp = cache_alloc_refill(cachep, flags);
3051 ac = cpu_cache_get(cachep);
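
Taken together, the ____cache_alloc() lines above amount to popping the top of the per-cpu LIFO and falling back to cache_alloc_refill() only when it is empty. A simplified paraphrase (statistics, prefetching and pfmemalloc handling omitted; not meant to build outside mm/slab.c):

  static void *cache_alloc_fastpath_sketch(struct kmem_cache *cachep, gfp_t flags)
  {
          struct array_cache *ac = cpu_cache_get(cachep);
          void *objp;

          if (likely(ac->avail)) {
                  ac->touched = 1;                /* keep the periodic reaper away */
                  objp = ac->entry[--ac->avail];  /* most recently freed object    */
          } else {
                  objp = cache_alloc_refill(cachep, flags);
                  /* the refill path may replace the per-cpu cache, so re-read it */
                  ac = cpu_cache_get(cachep);
          }
          return objp;
  }
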
3071 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3078 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3083 return ____cache_alloc_node(cachep, flags, nid_alloc);
3162 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3171 n = get_node(cachep, nodeid);
3180 check_spinlock_acquired_node(cachep, nodeid);
3182 STATS_INC_NODEALLOCS(cachep);
3183 STATS_INC_ACTIVE(cachep);
3184 STATS_SET_HIGH(cachep);
3186 BUG_ON(page->active == cachep->num);
3188 obj = slab_get_obj(cachep, page);
3191 fixup_slab_list(cachep, n, page, &list);
3194 fixup_objfreelist_debug(cachep, &list);
3199 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3202 obj = slab_get_obj(cachep, page);
3204 cache_grow_end(cachep, page);
3206 return obj ? obj : fallback_alloc(cachep, flags);
3210 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3219 cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3220 if (unlikely(!cachep))
3223 cache_alloc_debugcheck_before(cachep, flags);
3229 if (unlikely(!get_node(cachep, nodeid))) {
3231 ptr = fallback_alloc(cachep, flags);
3242 ptr = ____cache_alloc(cachep, flags);
3247 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3250 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3252 if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
3253 memset(ptr, 0, cachep->object_size);
3255 slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
3284 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3286 return ____cache_alloc(cachep, flags);
3292 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3299 cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3300 if (unlikely(!cachep))
3303 cache_alloc_debugcheck_before(cachep, flags);
3305 objp = __do_cache_alloc(cachep, flags);
3307 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3310 if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
3311 memset(objp, 0, cachep->object_size);
3313 slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
3321 static void free_block(struct kmem_cache *cachep, void **objpp,
3325 struct kmem_cache_node *n = get_node(cachep, node);
3338 check_spinlock_acquired_node(cachep, node);
3339 slab_put_obj(cachep, page, objp);
3340 STATS_DEC_ACTIVE(cachep);
3356 n->free_objects -= cachep->num;
3365 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3375 n = get_node(cachep, node);
3390 free_block(cachep, ac->entry, batchcount, node, &list);
3402 STATS_SET_FREEABLE(cachep, i);
3408 slabs_destroy(cachep, &list);
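
To make the free-side batching concrete: with hypothetical tunables limit = 120 and batchcount = 60, the per-cpu array absorbs frees until avail reaches the limit; the next free calls cache_flusharray(), which hands the oldest 60 pointers to free_block() under the node's list_lock (possibly queueing now-empty slabs for slabs_destroy()) and keeps the newest 60 in the per-cpu array. These numbers are illustrative only; the real values come from enable_cpucache() or /proc/slabinfo tuning.
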
3415 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
3419 if (kasan_slab_free(cachep, objp, _RET_IP_))
3423 if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
3424 __kcsan_check_access(objp, cachep->object_size,
3427 ___cache_free(cachep, objp, caller);
3430 void ___cache_free(struct kmem_cache *cachep, void *objp,
3433 struct array_cache *ac = cpu_cache_get(cachep);
3436 if (unlikely(slab_want_init_on_free(cachep)))
3437 memset(objp, 0, cachep->object_size);
3438 kmemleak_free_recursive(objp, cachep->flags);
3439 objp = cache_free_debugcheck(cachep, objp, caller);
3440 memcg_slab_free_hook(cachep, &objp, 1);
3449 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3453 STATS_INC_FREEHIT(cachep);
3455 STATS_INC_FREEMISS(cachep);
3456 cache_flusharray(cachep, ac);
3463 cache_free_pfmemalloc(cachep, page, objp);
3473 * @cachep: The cache to allocate from.
3481 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3483 void *ret = slab_alloc(cachep, flags, _RET_IP_);
3486 cachep->object_size, cachep->size, flags);
3545 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3549 ret = slab_alloc(cachep, flags, _RET_IP_);
3551 ret = kasan_kmalloc(cachep, ret, size, flags);
3553 size, cachep->size, flags);
3562 * @cachep: The cache to allocate from.
3573 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3575 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3578 cachep->object_size, cachep->size,
3586 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3593 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3595 ret = kasan_kmalloc(cachep, ret, size, flags);
3597 size, cachep->size,
3607 struct kmem_cache *cachep;
3612 cachep = kmalloc_slab(size, flags);
3613 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3614 return cachep;
3615 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3616 ret = kasan_kmalloc(cachep, ret, size, flags);
3646 struct kmem_cache *cachep;
3651 cachep = kmalloc_slab(size, flags);
3652 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3653 return cachep;
3654 ret = slab_alloc(cachep, flags, caller);
3656 ret = kasan_kmalloc(cachep, ret, size, flags);
3658 size, cachep->size, flags);
3677 * @cachep: The cache the allocation was from.
3683 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3686 cachep = cache_from_obj(cachep, objp);
3687 if (!cachep)
3691 debug_check_no_locks_freed(objp, cachep->object_size);
3692 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3693 debug_check_no_obj_freed(objp, cachep->object_size);
3694 __cache_free(cachep, objp, _RET_IP_);
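
kmem_cache_alloc(), kmem_cache_alloc_node() and kmem_cache_free() are the public entry points into the paths listed above. Reusing the hypothetical foo_cachep from the earlier creation sketch:

  static int foo_round_trip(void)
  {
          struct foo *f;

          /* goes through slab_alloc() -> ____cache_alloc() */
          f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
          if (!f)
                  return -ENOMEM;

          /* ... use f ... */

          /* goes through __cache_free(); excess is drained by cache_flusharray() */
          kmem_cache_free(foo_cachep, f);

          /*
           * A node-pinned allocation would instead use
           * kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, numa_node_id())
           * or an explicit node id.
           */
          return 0;
  }
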
3765 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3772 ret = setup_kmem_cache_node(cachep, node, gfp, true);
3781 if (!cachep->list.next) {
3785 n = get_node(cachep, node);
3790 cachep->node[node] = NULL;
3799 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3805 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3809 prev = cachep->cpu_cache;
3810 cachep->cpu_cache = cpu_cache;
3819 cachep->batchcount = batchcount;
3820 cachep->limit = limit;
3821 cachep->shared = shared;
3833 n = get_node(cachep, node);
3835 free_block(cachep, ac->entry, ac->avail, node, &list);
3837 slabs_destroy(cachep, &list);
3842 return setup_kmem_cache_nodes(cachep, gfp);
3846 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3853 err = cache_random_seq_create(cachep, cachep->num, gfp);
3868 if (cachep->size > 131072)
3870 else if (cachep->size > PAGE_SIZE)
3872 else if (cachep->size > 1024)
3874 else if (cachep->size > 256)
3889 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3902 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3906 cachep->name, -err);
3915 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3932 drain_array_locked(cachep, ac, node, false, &list);
3935 slabs_destroy(cachep, &list);
4007 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4015 for_each_kmem_cache_node(cachep, node, n) {
4028 num_objs = total_slabs * cachep->num;
4037 sinfo->limit = cachep->limit;
4038 sinfo->batchcount = cachep->batchcount;
4039 sinfo->shared = cachep->shared;
4040 sinfo->objects_per_slab = cachep->num;
4041 sinfo->cache_order = cachep->gfporder;
4044 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4048 unsigned long high = cachep->high_mark;
4049 unsigned long allocs = cachep->num_allocations;
4050 unsigned long grown = cachep->grown;
4051 unsigned long reaped = cachep->reaped;
4052 unsigned long errors = cachep->errors;
4053 unsigned long max_freeable = cachep->max_freeable;
4054 unsigned long node_allocs = cachep->node_allocs;
4055 unsigned long node_frees = cachep->node_frees;
4056 unsigned long overflows = cachep->node_overflow;
4065 unsigned long allochit = atomic_read(&cachep->allochit);
4066 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4067 unsigned long freehit = atomic_read(&cachep->freehit);
4068 unsigned long freemiss = atomic_read(&cachep->freemiss);
4091 struct kmem_cache *cachep;
4110 list_for_each_entry(cachep, &slab_caches, list) {
4111 if (!strcmp(cachep->name, kbuf)) {
4116 res = do_tune_cpucache(cachep, limit,
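
slabinfo_write() is what makes the per-cpu tunables adjustable at runtime: a privileged write of "<cache name> <limit> <batchcount> <shared>" to /proc/slabinfo, for example "echo 'dentry 256 128 8' > /proc/slabinfo", ends up in do_tune_cpucache() and overrides the defaults that enable_cpucache() derived from the object size. The example values here are arbitrary, not recommendations.
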
4141 struct kmem_cache *cachep;
4148 cachep = page->slab_cache;
4149 objnr = obj_to_index(cachep, page, (void *)ptr);
4150 BUG_ON(objnr >= cachep->num);
4153 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4156 if (offset >= cachep->useroffset &&
4157 offset - cachep->useroffset <= cachep->usersize &&
4158 n <= cachep->useroffset - offset + cachep->usersize)
4168 offset <= cachep->object_size &&
4169 n <= cachep->object_size - offset) {
4170 usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
4174 usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
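
__check_heap_object() enforces the usercopy window (useroffset/usersize) that a cache declares at creation time. A hedged sketch of declaring such a window through the public API (struct and names hypothetical):

  #include <linux/slab.h>
  #include <linux/stddef.h>
  #include <linux/types.h>

  struct session {
          u32  seq;          /* kernel-only                             */
          char token[64];    /* the only bytes ever copied to userspace */
          void *private;     /* kernel-only                             */
  };

  static struct kmem_cache *session_cachep;

  static int session_cache_init(void)
  {
          session_cachep = kmem_cache_create_usercopy("session_cache",
                          sizeof(struct session), 0, SLAB_HWCACHE_ALIGN,
                          offsetof(struct session, token),      /* useroffset */
                          sizeof_field(struct session, token),  /* usersize   */
                          NULL);
          return session_cachep ? 0 : -ENOMEM;
  }

Copies that fall outside that window trip usercopy_warn()/usercopy_abort() as in the lines above.
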