Lines Matching defs:cachep
212 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
214 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
215 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
218 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
220 static inline void fixup_slab_list(struct kmem_cache *cachep,
241 #define MAKE_LIST(cachep, listp, slab, nodeid) \
244 list_splice(&get_node(cachep, nodeid)->slab, listp); \
247 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
249 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
250 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
251 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
317 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
320 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
322 * cachep->obj_offset: The real object.
323 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
324 * cachep->size - 1* BYTES_PER_WORD: last caller address
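The layout comment above (file lines 317-324) is the map for the dbg_* accessors listed next. As a standalone sketch only, assuming a 64-bit build (BYTES_PER_WORD == 8), an invented 64-byte object, and ignoring the SLAB_STORE_USER alignment details handled by the real dbg_redzone2():

/*
 * Illustration (not kernel code) of where the debug words sit around an
 * object when SLAB_RED_ZONE and SLAB_STORE_USER are enabled, following
 * the layout comment above.  obj_offset and object_size are example values.
 */
#include <stdio.h>

int main(void)
{
    unsigned long word = sizeof(void *);       /* BYTES_PER_WORD on 64-bit */
    unsigned long obj_offset = 2 * word;       /* padding + first redzone  */
    unsigned long object_size = 64;
    unsigned long size = obj_offset + object_size + 2 * word;

    printf("redzone1:    bytes %lu..%lu\n", obj_offset - word, obj_offset - 1);
    printf("real object: bytes %lu..%lu\n", obj_offset, obj_offset + object_size - 1);
    printf("redzone2:    byte  %lu\n", size - 2 * word);
    printf("last caller: byte  %lu\n", size - 1 * word);
    return 0;
}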
327 static int obj_offset(struct kmem_cache *cachep)
329 return cachep->obj_offset;
332 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
335 return (unsigned long long *) (objp + obj_offset(cachep) -
339 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
342 if (cachep->flags & SLAB_STORE_USER)
343 return (unsigned long long *)(objp + cachep->size -
346 return (unsigned long long *) (objp + cachep->size -
350 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
352 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
353 return (void **)(objp + cachep->size - BYTES_PER_WORD);
359 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
360 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
361 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
392 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
394 return this_cpu_ptr(cachep->cpu_cache);
436 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
438 static void __slab_error(const char *function, struct kmem_cache *cachep,
442 function, cachep->name, msg);
551 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
559 n = get_node(cachep, slab_node);
562 free_block(cachep, &objp, 1, slab_node, &list);
565 slabs_destroy(cachep, &list);
603 #define drain_alien_cache(cachep, alien) do { } while (0)
604 #define reap_alien(cachep, n) do { } while (0)
616 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
679 static void __drain_alien_cache(struct kmem_cache *cachep,
683 struct kmem_cache_node *n = get_node(cachep, node);
695 free_block(cachep, ac->entry, ac->avail, node, list);
704 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
717 __drain_alien_cache(cachep, ac, node, &list);
719 slabs_destroy(cachep, &list);
725 static void drain_alien_cache(struct kmem_cache *cachep,
740 __drain_alien_cache(cachep, ac, i, &list);
742 slabs_destroy(cachep, &list);
747 static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
755 n = get_node(cachep, node);
756 STATS_INC_NODEFREES(cachep);
762 STATS_INC_ACOVERFLOW(cachep);
763 __drain_alien_cache(cachep, ac, slab_node, &list);
767 slabs_destroy(cachep, &list);
769 n = get_node(cachep, slab_node);
771 free_block(cachep, &objp, 1, slab_node, &list);
773 slabs_destroy(cachep, &list);
778 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
789 return __cache_free_alien(cachep, objp, node, slab_node);
802 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
811 n = get_node(cachep, node);
814 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
815 cachep->num;
827 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
830 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
837 cachep->node[node] = n;
855 struct kmem_cache *cachep;
857 list_for_each_entry(cachep, &slab_caches, list) {
858 ret = init_cache_node(cachep, node, GFP_KERNEL);
867 static int setup_kmem_cache_node(struct kmem_cache *cachep,
878 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
883 if (cachep->shared) {
885 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
890 ret = init_cache_node(cachep, node, gfp);
894 n = get_node(cachep, node);
897 free_block(cachep, n->shared->entry,
914 slabs_destroy(cachep, &list);
937 struct kmem_cache *cachep;
942 list_for_each_entry(cachep, &slab_caches, list) {
948 n = get_node(cachep, node);
955 n->free_limit -= cachep->batchcount;
958 nc = per_cpu_ptr(cachep->cpu_cache, cpu);
959 free_block(cachep, nc->entry, nc->avail, node, &list);
969 free_block(cachep, shared->entry,
981 drain_alien_cache(cachep, alien);
986 slabs_destroy(cachep, &list);
993 list_for_each_entry(cachep, &slab_caches, list) {
994 n = get_node(cachep, node);
997 drain_freelist(cachep, n, INT_MAX);
1003 struct kmem_cache *cachep;
1021 list_for_each_entry(cachep, &slab_caches, list) {
1022 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1092 struct kmem_cache *cachep;
1095 list_for_each_entry(cachep, &slab_caches, list) {
1098 n = get_node(cachep, node);
1102 drain_freelist(cachep, n, INT_MAX);
1149 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1163 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1164 cachep->node[nodeid] = ptr;
1171 static void __init set_up_node(struct kmem_cache *cachep, int index)
1176 cachep->node[node] = &init_kmem_cache_node[index + node];
1177 cachep->node[node]->next_reap = jiffies +
1179 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1264 struct kmem_cache *cachep;
1268 list_for_each_entry(cachep, &slab_caches, list)
1269 if (enable_cpucache(cachep, GFP_NOWAIT))
1306 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1321 cachep->name, cachep->size, cachep->gfporder);
1323 for_each_kmem_cache_node(cachep, node, n) {
1334 (total_slabs * cachep->num) - free_objs,
1335 total_slabs * cachep->num);
1348 static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1354 flags |= cachep->allocflags;
1356 folio = (struct folio *) __alloc_pages_node(nodeid, flags, cachep->gfporder);
1358 slab_out_of_memory(cachep, flags, nodeid);
1364 account_slab(slab, cachep->gfporder, cachep, flags);
1378 static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
1380 int order = cachep->gfporder;
1392 unaccount_slab(slab, order, cachep);
1398 struct kmem_cache *cachep;
1402 cachep = slab->slab_cache;
1404 kmem_freepages(cachep, slab);
1408 static inline bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1410 return debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
1411 ((cachep->size % PAGE_SIZE) == 0);
1415 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
1417 if (!is_debug_pagealloc_cache(cachep))
1420 __kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1424 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1429 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1431 int size = cachep->object_size;
1432 addr = &((char *)addr)[obj_offset(cachep)];
1470 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1475 if (cachep->flags & SLAB_RED_ZONE) {
1477 *dbg_redzone1(cachep, objp),
1478 *dbg_redzone2(cachep, objp));
1481 if (cachep->flags & SLAB_STORE_USER)
1482 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1483 realobj = (char *)objp + obj_offset(cachep);
1484 size = cachep->object_size;
1494 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1500 if (is_debug_pagealloc_cache(cachep))
1503 realobj = (char *)objp + obj_offset(cachep);
1504 size = cachep->object_size;
1516 print_tainted(), cachep->name,
1518 print_objinfo(cachep, objp, 0);
1540 objnr = obj_to_index(cachep, slab, objp);
1542 objp = index_to_obj(cachep, slab, objnr - 1);
1543 realobj = (char *)objp + obj_offset(cachep);
1545 print_objinfo(cachep, objp, 2);
1547 if (objnr + 1 < cachep->num) {
1548 objp = index_to_obj(cachep, slab, objnr + 1);
1549 realobj = (char *)objp + obj_offset(cachep);
1551 print_objinfo(cachep, objp, 2);
1558 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1563 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1564 poison_obj(cachep, slab->freelist - obj_offset(cachep),
1568 for (i = 0; i < cachep->num; i++) {
1569 void *objp = index_to_obj(cachep, slab, i);
1571 if (cachep->flags & SLAB_POISON) {
1572 check_poison_obj(cachep, objp);
1573 slab_kernel_map(cachep, objp, 1);
1575 if (cachep->flags & SLAB_RED_ZONE) {
1576 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1577 slab_error(cachep, "start of a freed object was overwritten");
1578 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1579 slab_error(cachep, "end of a freed object was overwritten");
1584 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1592 * @cachep: cache pointer being destroyed
1599 static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
1604 slab_destroy_debugcheck(cachep, slab);
1605 if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1608 kmem_freepages(cachep, slab);
1614 if (OFF_SLAB(cachep))
1622 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1628 slab_destroy(cachep, slab);
1634 * @cachep: pointer to the cache that is being created
1646 static size_t calculate_slab_order(struct kmem_cache *cachep,
1687 if (freelist_cache_size > cachep->size / 2)
1692 cachep->num = num;
1693 cachep->gfporder = gfporder;
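calculate_slab_order() (file line 1646 onward) settles cachep->num and cachep->gfporder by asking how many objects fit into a slab of a given page order. For a conventional on-slab freelist the per-order arithmetic, done by cache_estimate() in the same file, reduces to the division below; the 4 KiB page, 256-byte object and 1-byte freelist index are assumptions for illustration:

/*
 * Sketch (not kernel code) of the objects-per-slab estimate for an on-slab
 * freelist: each object costs its size plus one freelist index, and the
 * remainder becomes the colour space later divided by cachep->colour_off.
 */
#include <stdio.h>

int main(void)
{
    unsigned long gfporder = 0;
    unsigned long slab_size = 4096UL << gfporder;   /* PAGE_SIZE << order */
    unsigned long obj_size = 256, idx_size = 1;     /* freelist_idx_t assumed 1 byte */

    unsigned long num  = slab_size / (obj_size + idx_size);
    unsigned long left = slab_size % (obj_size + idx_size);

    printf("objects per slab = %lu, leftover (colour space) = %lu\n", num, left);
    return 0;
}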
1721 struct kmem_cache *cachep, int entries, int batchcount)
1741 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1744 return enable_cpucache(cachep, gfp);
1746 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1747 if (!cachep->cpu_cache)
1755 set_up_node(cachep, SIZE_NODE);
1760 cachep->node[node] = kmalloc_node(
1762 BUG_ON(!cachep->node[node]);
1763 kmem_cache_node_init(cachep->node[node]);
1767 cachep->node[numa_mem_id()]->next_reap =
1769 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1771 cpu_cache_get(cachep)->avail = 0;
1772 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1773 cpu_cache_get(cachep)->batchcount = 1;
1774 cpu_cache_get(cachep)->touched = 0;
1775 cachep->batchcount = 1;
1776 cachep->limit = BOOT_CPUCACHE_ENTRIES;
1790 struct kmem_cache *cachep;
1792 cachep = find_mergeable(size, align, flags, name, ctor);
1793 if (cachep) {
1794 cachep->refcount++;
1800 cachep->object_size = max_t(int, cachep->object_size, size);
1802 return cachep;
1805 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1810 cachep->num = 0;
1817 if (unlikely(slab_want_init_on_free(cachep)))
1820 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1823 left = calculate_slab_order(cachep, size,
1825 if (!cachep->num)
1828 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1831 cachep->colour = left / cachep->colour_off;
1836 static bool set_off_slab_cache(struct kmem_cache *cachep,
1841 cachep->num = 0;
1854 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1855 if (!cachep->num)
1862 if (left >= cachep->num * sizeof(freelist_idx_t))
1865 cachep->colour = left / cachep->colour_off;
1870 static bool set_on_slab_cache(struct kmem_cache *cachep,
1875 cachep->num = 0;
1877 left = calculate_slab_order(cachep, size, flags);
1878 if (!cachep->num)
1881 cachep->colour = left / cachep->colour_off;
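All three set_*_slab_cache() helpers convert the leftover bytes into cachep->colour, and cache_grow_begin() (file lines 2559-2566 below) rotates n->colour_next through that range so successive slabs start their objects at different offsets. A minimal sketch of that rotation with example values (64-byte colour_off, 241 leftover bytes):

/*
 * Sketch of slab colouring: each new slab shifts its first object by a
 * multiple of colour_off so objects from different slabs do not all land
 * on the same cache lines.  Values are examples, not kernel defaults.
 */
#include <stdio.h>

int main(void)
{
    unsigned int colour_off = 64;            /* typically cache_line_size() */
    unsigned int colour = 241 / colour_off;  /* leftover / colour_off = 3   */
    unsigned int colour_next = 0;

    for (int s = 0; s < 6; s++) {
        if (colour_next >= colour)           /* wrap, as cache_grow_begin() does */
            colour_next = 0;
        printf("slab %d: first object at offset %u\n",
               s, colour_next * colour_off);
        colour_next++;
    }
    return 0;
}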
1888 * @cachep: cache management descriptor
1905 int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
1910 unsigned int size = cachep->size;
1943 if (ralign < cachep->align) {
1944 ralign = cachep->align;
1952 cachep->align = ralign;
1953 cachep->colour_off = cache_line_size();
1955 if (cachep->colour_off < cachep->align)
1956 cachep->colour_off = cachep->align;
1971 cachep->obj_offset += sizeof(unsigned long long);
1986 kasan_cache_create(cachep, &size, &flags);
1988 size = ALIGN(size, cachep->align);
1994 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2005 size >= 256 && cachep->object_size > cache_line_size()) {
2009 if (set_off_slab_cache(cachep, tmp_size, flags)) {
2011 cachep->obj_offset += tmp_size - size;
2019 if (set_objfreelist_slab_cache(cachep, size, flags)) {
2024 if (set_off_slab_cache(cachep, size, flags)) {
2029 if (set_on_slab_cache(cachep, size, flags))
2035 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2036 cachep->flags = flags;
2037 cachep->allocflags = __GFP_COMP;
2039 cachep->allocflags |= GFP_DMA;
2041 cachep->allocflags |= GFP_DMA32;
2043 cachep->allocflags |= __GFP_RECLAIMABLE;
2044 cachep->size = size;
2045 cachep->reciprocal_buffer_size = reciprocal_value(size);
2054 (cachep->flags & SLAB_POISON) &&
2055 is_debug_pagealloc_cache(cachep))
2056 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2059 err = setup_cpu_cache(cachep, gfp);
2061 __kmem_cache_release(cachep);
2084 static void check_spinlock_acquired(struct kmem_cache *cachep)
2088 assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2092 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2096 assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
2108 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2120 free_block(cachep, ac->entry, tofree, node, list);
2127 struct kmem_cache *cachep = arg;
2134 ac = cpu_cache_get(cachep);
2135 n = get_node(cachep, node);
2137 free_block(cachep, ac->entry, ac->avail, node, &list);
2140 slabs_destroy(cachep, &list);
2143 static void drain_cpu_caches(struct kmem_cache *cachep)
2149 on_each_cpu(do_drain, cachep, 1);
2151 for_each_kmem_cache_node(cachep, node, n)
2153 drain_alien_cache(cachep, n->alien);
2155 for_each_kmem_cache_node(cachep, node, n) {
2157 drain_array_locked(cachep, n->shared, node, true, &list);
2160 slabs_destroy(cachep, &list);
2218 int __kmem_cache_shrink(struct kmem_cache *cachep)
2224 drain_cpu_caches(cachep);
2227 for_each_kmem_cache_node(cachep, node, n) {
2228 drain_freelist(cachep, n, INT_MAX);
2236 int __kmem_cache_shutdown(struct kmem_cache *cachep)
2238 return __kmem_cache_shrink(cachep);
2241 void __kmem_cache_release(struct kmem_cache *cachep)
2246 cache_random_seq_destroy(cachep);
2248 free_percpu(cachep->cpu_cache);
2251 for_each_kmem_cache_node(cachep, i, n) {
2255 cachep->node[i] = NULL;
2273 static void *alloc_slabmgmt(struct kmem_cache *cachep,
2283 if (OBJFREELIST_SLAB(cachep))
2285 else if (OFF_SLAB(cachep)) {
2287 freelist = kmalloc_node(cachep->freelist_size,
2291 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2292 cachep->freelist_size;
2309 static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab)
2314 for (i = 0; i < cachep->num; i++) {
2315 void *objp = index_to_obj(cachep, slab, i);
2317 if (cachep->flags & SLAB_STORE_USER)
2318 *dbg_userword(cachep, objp) = NULL;
2320 if (cachep->flags & SLAB_RED_ZONE) {
2321 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2322 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2329 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2330 kasan_unpoison_object_data(cachep,
2331 objp + obj_offset(cachep));
2332 cachep->ctor(objp + obj_offset(cachep));
2334 cachep, objp + obj_offset(cachep));
2337 if (cachep->flags & SLAB_RED_ZONE) {
2338 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2339 slab_error(cachep, "constructor overwrote the end of an object");
2340 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2341 slab_error(cachep, "constructor overwrote the start of an object");
2344 if (cachep->flags & SLAB_POISON) {
2345 poison_obj(cachep, objp, POISON_FREE);
2346 slab_kernel_map(cachep, objp, 0);
2365 struct kmem_cache *cachep,
2369 if (!cachep->random_seq) {
2372 state->list = cachep->random_seq;
2399 static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab)
2401 unsigned int objfreelist = 0, i, rand, count = cachep->num;
2408 precomputed = freelist_state_initialize(&state, cachep, count);
2411 if (OBJFREELIST_SLAB(cachep)) {
2416 slab->freelist = index_to_obj(cachep, slab, objfreelist) +
2417 obj_offset(cachep);
2439 if (OBJFREELIST_SLAB(cachep))
2440 set_free_obj(slab, cachep->num - 1, objfreelist);
2445 static inline bool shuffle_freelist(struct kmem_cache *cachep,
2452 static void cache_init_objs(struct kmem_cache *cachep,
2459 cache_init_objs_debug(cachep, slab);
2462 shuffled = shuffle_freelist(cachep, slab);
2464 if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2465 slab->freelist = index_to_obj(cachep, slab, cachep->num - 1) +
2466 obj_offset(cachep);
2469 for (i = 0; i < cachep->num; i++) {
2470 objp = index_to_obj(cachep, slab, i);
2471 objp = kasan_init_slab_obj(cachep, objp);
2474 if (DEBUG == 0 && cachep->ctor) {
2475 kasan_unpoison_object_data(cachep, objp);
2476 cachep->ctor(objp);
2477 kasan_poison_object_data(cachep, objp);
2485 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slab)
2489 objp = index_to_obj(cachep, slab, get_free_obj(slab, slab->active));
2495 static void slab_put_obj(struct kmem_cache *cachep,
2498 unsigned int objnr = obj_to_index(cachep, slab, objp);
2503 for (i = slab->active; i < cachep->num; i++) {
2506 cachep->name, objp);
2513 slab->freelist = objp + obj_offset(cachep);
2522 static struct slab *cache_grow_begin(struct kmem_cache *cachep,
2539 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2550 slab = kmem_getpages(cachep, local_flags, nodeid);
2555 n = get_node(cachep, slab_node);
2559 if (n->colour_next >= cachep->colour)
2563 if (offset >= cachep->colour)
2566 offset *= cachep->colour_off;
2576 freelist = alloc_slabmgmt(cachep, slab, offset,
2578 if (OFF_SLAB(cachep) && !freelist)
2581 slab->slab_cache = cachep;
2584 cache_init_objs(cachep, slab);
2592 kmem_freepages(cachep, slab);
2599 static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
2610 n = get_node(cachep, slab_nid(slab));
2618 fixup_slab_list(cachep, n, slab, &list);
2620 STATS_INC_GROWN(cachep);
2621 n->free_objects += cachep->num - slab->active;
2624 fixup_objfreelist_debug(cachep, &list);
2665 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2671 BUG_ON(virt_to_cache(objp) != cachep);
2673 objp -= obj_offset(cachep);
2677 if (cachep->flags & SLAB_RED_ZONE) {
2678 verify_redzone_free(cachep, objp);
2679 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2680 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2682 if (cachep->flags & SLAB_STORE_USER)
2683 *dbg_userword(cachep, objp) = (void *)caller;
2685 objnr = obj_to_index(cachep, slab, objp);
2687 BUG_ON(objnr >= cachep->num);
2688 BUG_ON(objp != index_to_obj(cachep, slab, objnr));
2690 if (cachep->flags & SLAB_POISON) {
2691 poison_obj(cachep, objp, POISON_FREE);
2692 slab_kernel_map(cachep, objp, 0);
2702 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2710 objp = next - obj_offset(cachep);
2712 poison_obj(cachep, objp, POISON_FREE);
2717 static inline void fixup_slab_list(struct kmem_cache *cachep,
2723 if (slab->active == cachep->num) {
2725 if (OBJFREELIST_SLAB(cachep)) {
2728 if (cachep->flags & SLAB_POISON) {
2805 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2822 obj = slab_get_obj(cachep, slab);
2825 fixup_slab_list(cachep, n, slab, &list);
2828 fixup_objfreelist_debug(cachep, &list);
2837 static __always_inline int alloc_block(struct kmem_cache *cachep,
2844 BUG_ON(slab->active >= cachep->num);
2846 while (slab->active < cachep->num && batchcount--) {
2847 STATS_INC_ALLOCED(cachep);
2848 STATS_INC_ACTIVE(cachep);
2849 STATS_SET_HIGH(cachep);
2851 ac->entry[ac->avail++] = slab_get_obj(cachep, slab);
2857 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2869 ac = cpu_cache_get(cachep);
2879 n = get_node(cachep, node);
2901 check_spinlock_acquired(cachep);
2903 batchcount = alloc_block(cachep, ac, slab, batchcount);
2904 fixup_slab_list(cachep, n, slab, &list);
2911 fixup_objfreelist_debug(cachep, &list);
2917 void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
2923 slab = cache_grow_begin(cachep, gfp_exact_node(flags), node);
2929 ac = cpu_cache_get(cachep);
2931 alloc_block(cachep, ac, slab, batchcount);
2932 cache_grow_end(cachep, slab);
2943 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2946 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2949 if (cachep->flags & SLAB_POISON) {
2950 check_poison_obj(cachep, objp);
2951 slab_kernel_map(cachep, objp, 1);
2952 poison_obj(cachep, objp, POISON_INUSE);
2954 if (cachep->flags & SLAB_STORE_USER)
2955 *dbg_userword(cachep, objp) = (void *)caller;
2957 if (cachep->flags & SLAB_RED_ZONE) {
2958 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2959 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2960 slab_error(cachep, "double free, or memory outside object was overwritten");
2962 objp, *dbg_redzone1(cachep, objp),
2963 *dbg_redzone2(cachep, objp));
2965 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2966 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2969 objp += obj_offset(cachep);
2970 if (cachep->ctor && cachep->flags & SLAB_POISON)
2971 cachep->ctor(objp);
2982 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
2989 ac = cpu_cache_get(cachep);
2994 STATS_INC_ALLOCHIT(cachep);
2998 STATS_INC_ALLOCMISS(cachep);
2999 objp = cache_alloc_refill(cachep, flags);
3004 ac = cpu_cache_get(cachep);
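____cache_alloc() above is the fast path: on a hit it pops the most recently freed pointer from the per-CPU array cache, on a miss it calls cache_alloc_refill() to pull a batch from the node lists; __cache_free()/cache_flusharray() mirror this on the free side. A toy user-space model of that LIFO batching, borrowing only the field names of struct array_cache (avail, limit, batchcount, touched, entry[]) and standing in malloc()/free() for the slab lists:

/*
 * Toy LIFO cache modelled on struct array_cache.  refill() stands in for
 * cache_alloc_refill(); on overflow a real cache would batch-drain via
 * cache_flusharray() instead of freeing a single object.
 */
#include <stdio.h>
#include <stdlib.h>

#define LIMIT 8
#define BATCH 4

struct toy_ac {
    unsigned int avail, limit, batchcount, touched;
    void *entry[LIMIT];
};

static void refill(struct toy_ac *ac, size_t objsize)
{
    for (unsigned int i = 0; i < ac->batchcount; i++)
        ac->entry[ac->avail++] = malloc(objsize);
}

static void *toy_alloc(struct toy_ac *ac, size_t objsize)
{
    ac->touched = 1;
    if (!ac->avail)                     /* ALLOCMISS path */
        refill(ac, objsize);
    return ac->entry[--ac->avail];      /* ALLOCHIT path: pop most recent */
}

static void toy_free(struct toy_ac *ac, void *obj)
{
    if (ac->avail >= ac->limit) {       /* FREEMISS: would flush a batch */
        free(obj);
        return;
    }
    ac->entry[ac->avail++] = obj;       /* FREEHIT: park for reuse */
}

int main(void)
{
    struct toy_ac ac = { .limit = LIMIT, .batchcount = BATCH };
    void *p = toy_alloc(&ac, 64);
    toy_free(&ac, p);
    printf("avail after one alloc/free cycle: %u\n", ac.avail);
    return 0;
}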
3026 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3033 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3038 return ____cache_alloc_node(cachep, flags, nid_alloc);
3117 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3126 n = get_node(cachep, nodeid);
3135 check_spinlock_acquired_node(cachep, nodeid);
3137 STATS_INC_NODEALLOCS(cachep);
3138 STATS_INC_ACTIVE(cachep);
3139 STATS_SET_HIGH(cachep);
3141 BUG_ON(slab->active == cachep->num);
3143 obj = slab_get_obj(cachep, slab);
3146 fixup_slab_list(cachep, n, slab, &list);
3149 fixup_objfreelist_debug(cachep, &list);
3154 slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3157 obj = slab_get_obj(cachep, slab);
3159 cache_grow_end(cachep, slab);
3161 return obj ? obj : fallback_alloc(cachep, flags);
3165 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3172 objp = alternate_node_alloc(cachep, flags);
3182 objp = ____cache_alloc(cachep, flags);
3185 objp = ____cache_alloc(cachep, flags);
3186 } else if (!get_node(cachep, nodeid)) {
3188 objp = fallback_alloc(cachep, flags);
3197 objp = ____cache_alloc_node(cachep, flags, nodeid);
3204 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused)
3206 return ____cache_alloc(cachep, flags);
3212 slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
3221 cachep = slab_pre_alloc_hook(cachep, lru, &objcg, 1, flags);
3222 if (unlikely(!cachep))
3225 objp = kfence_alloc(cachep, orig_size, flags);
3230 objp = __do_cache_alloc(cachep, flags, nodeid);
3232 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3234 init = slab_want_init_on_alloc(flags, cachep);
3237 slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init,
3238 cachep->object_size);
3243 slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
3246 return slab_alloc_node(cachep, lru, flags, NUMA_NO_NODE, orig_size,
3254 static void free_block(struct kmem_cache *cachep, void **objpp,
3258 struct kmem_cache_node *n = get_node(cachep, node);
3271 check_spinlock_acquired_node(cachep, node);
3272 slab_put_obj(cachep, slab, objp);
3273 STATS_DEC_ACTIVE(cachep);
3289 n->free_objects -= cachep->num;
3298 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3308 n = get_node(cachep, node);
3323 free_block(cachep, ac->entry, batchcount, node, &list);
3335 STATS_SET_FREEABLE(cachep, i);
3341 slabs_destroy(cachep, &list);
3348 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
3353 memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
3356 kmemleak_free_recursive(objp, cachep->flags);
3366 init = slab_want_init_on_free(cachep);
3368 memset(objp, 0, cachep->object_size);
3370 if (kasan_slab_free(cachep, objp, init))
3374 if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
3375 __kcsan_check_access(objp, cachep->object_size,
3378 ___cache_free(cachep, objp, caller);
3381 void ___cache_free(struct kmem_cache *cachep, void *objp,
3384 struct array_cache *ac = cpu_cache_get(cachep);
3387 kmemleak_free_recursive(objp, cachep->flags);
3388 objp = cache_free_debugcheck(cachep, objp, caller);
3397 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3401 STATS_INC_FREEHIT(cachep);
3403 STATS_INC_FREEMISS(cachep);
3404 cache_flusharray(cachep, ac);
3411 cache_free_pfmemalloc(cachep, slab, objp);
3420 void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
3423 void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
3425 trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, NUMA_NO_NODE);
3430 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3432 return __kmem_cache_alloc_lru(cachep, NULL, flags);
3436 void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
3439 return __kmem_cache_alloc_lru(cachep, lru, flags);
3496 * @cachep: The cache to allocate from.
3507 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3509 void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_);
3511 trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, nodeid);
3517 void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3521 return slab_alloc_node(cachep, NULL, flags, nodeid,
3528 struct kmem_cache *cachep;
3534 cachep = slab->slab_cache;
3535 kpp->kp_slab_cache = cachep;
3536 objp = object - obj_offset(cachep);
3537 kpp->kp_data_offset = obj_offset(cachep);
3539 objnr = obj_to_index(cachep, slab, objp);
3540 objp = index_to_obj(cachep, slab, objnr);
3542 if (DEBUG && cachep->flags & SLAB_STORE_USER)
3543 kpp->kp_ret = *dbg_userword(cachep, objp);
3548 void __do_kmem_cache_free(struct kmem_cache *cachep, void *objp,
3554 debug_check_no_locks_freed(objp, cachep->object_size);
3555 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3556 debug_check_no_obj_freed(objp, cachep->object_size);
3557 __cache_free(cachep, objp, caller);
3561 void __kmem_cache_free(struct kmem_cache *cachep, void *objp,
3564 __do_kmem_cache_free(cachep, objp, caller);
3569 * @cachep: The cache the allocation was from.
3575 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3577 cachep = cache_from_obj(cachep, objp);
3578 if (!cachep)
3581 trace_kmem_cache_free(_RET_IP_, objp, cachep);
3582 __do_kmem_cache_free(cachep, objp, _RET_IP_);
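For context, the exported interface these internals implement is consumed from kernel code roughly as follows; struct foo, the "foo" cache name and the foo_* wrappers are hypothetical placeholders (foo_get()/foo_put() are shown without callers):

/*
 * Hypothetical consumer of kmem_cache_create()/kmem_cache_alloc()/
 * kmem_cache_free()/kmem_cache_destroy().
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
    int a;
    void *buf;
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
    foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
                                   SLAB_HWCACHE_ALIGN, NULL);
    return foo_cachep ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
    kmem_cache_destroy(foo_cachep);
}

static struct foo *foo_get(void)
{
    /* GFP_KERNEL may sleep; NUMA-aware callers use kmem_cache_alloc_node() */
    return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}

static void foo_put(struct foo *f)
{
    kmem_cache_free(foo_cachep, f);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");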
3628 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3635 ret = setup_kmem_cache_node(cachep, node, gfp, true);
3644 if (!cachep->list.next) {
3648 n = get_node(cachep, node);
3653 cachep->node[node] = NULL;
3662 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3668 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3672 prev = cachep->cpu_cache;
3673 cachep->cpu_cache = cpu_cache;
3682 cachep->batchcount = batchcount;
3683 cachep->limit = limit;
3684 cachep->shared = shared;
3696 n = get_node(cachep, node);
3698 free_block(cachep, ac->entry, ac->avail, node, &list);
3700 slabs_destroy(cachep, &list);
3705 return setup_kmem_cache_nodes(cachep, gfp);
3709 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3716 err = cache_random_seq_create(cachep, cachep->num, gfp);
3729 if (cachep->size > 131072)
3731 else if (cachep->size > PAGE_SIZE)
3733 else if (cachep->size > 1024)
3735 else if (cachep->size > 256)
3750 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3762 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3766 cachep->name, -err);
3775 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3792 drain_array_locked(cachep, ac, node, false, &list);
3795 slabs_destroy(cachep, &list);
3867 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
3875 for_each_kmem_cache_node(cachep, node, n) {
3888 num_objs = total_slabs * cachep->num;
3897 sinfo->limit = cachep->limit;
3898 sinfo->batchcount = cachep->batchcount;
3899 sinfo->shared = cachep->shared;
3900 sinfo->objects_per_slab = cachep->num;
3901 sinfo->cache_order = cachep->gfporder;
3904 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
3908 unsigned long high = cachep->high_mark;
3909 unsigned long allocs = cachep->num_allocations;
3910 unsigned long grown = cachep->grown;
3911 unsigned long reaped = cachep->reaped;
3912 unsigned long errors = cachep->errors;
3913 unsigned long max_freeable = cachep->max_freeable;
3914 unsigned long node_allocs = cachep->node_allocs;
3915 unsigned long node_frees = cachep->node_frees;
3916 unsigned long overflows = cachep->node_overflow;
3925 unsigned long allochit = atomic_read(&cachep->allochit);
3926 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
3927 unsigned long freehit = atomic_read(&cachep->freehit);
3928 unsigned long freemiss = atomic_read(&cachep->freemiss);
3951 struct kmem_cache *cachep;
3970 list_for_each_entry(cachep, &slab_caches, list) {
3971 if (!strcmp(cachep->name, kbuf)) {
3976 res = do_tune_cpucache(cachep, limit,
4001 struct kmem_cache *cachep;
4008 cachep = slab->slab_cache;
4009 objnr = obj_to_index(cachep, slab, (void *)ptr);
4010 BUG_ON(objnr >= cachep->num);
4016 offset = ptr - index_to_obj(cachep, slab, objnr) - obj_offset(cachep);
4019 if (offset >= cachep->useroffset &&
4020 offset - cachep->useroffset <= cachep->usersize &&
4021 n <= cachep->useroffset - offset + cachep->usersize)
4024 usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
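These last lines come from __check_heap_object(), which rejects a usercopy that strays outside the cache's declared window. The predicate behind file lines 4019-4021 condenses to the sketch below; the concrete window numbers are invented:

/*
 * Sketch of the usercopy window check: the [offset, offset + n) span must
 * fall entirely inside [useroffset, useroffset + usersize) of the object.
 */
#include <stdbool.h>
#include <stdio.h>

static bool copy_allowed(unsigned long useroffset, unsigned long usersize,
                         unsigned long offset, unsigned long n)
{
    return offset >= useroffset &&
           offset - useroffset <= usersize &&
           n <= useroffset - offset + usersize;
}

int main(void)
{
    /* e.g. a 128-byte window starting 32 bytes into the object */
    printf("%d\n", copy_allowed(32, 128, 40, 64));   /* inside window: 1 */
    printf("%d\n", copy_allowed(32, 128, 40, 200));  /* copy too long: 0 */
    return 0;
}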