Lines Matching defs:cache
23 * The memory is organized in caches, one cache for each object type.
25 * Each cache consists of many slabs (they are small (usually one
33 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
35 * cache for that memory type.
45 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
48 * Each cache has a short per-cpu head array, most allocs
50 * of the entries in the array are given back into the global cache.
51 * The head array is strictly LIFO and should improve the cache hit rates.
63 * The non-constant members are protected with a per-cache irq spinlock.
72 * The global cache-chain is protected by the mutex 'slab_mutex'.
73 * The mutex is only needed when accessing/extending the cache-chain, which
77 * At present, each engine can be growing a cache. This should be blocked.
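The header comment excerpted above describes one kmem_cache per object type with a strict create/alloc/free/destroy lifecycle. A minimal usage sketch of that public API (struct my_obj and the cache name are illustrative, not taken from this file):

#include <linux/errno.h>
#include <linux/slab.h>

/* One cache per object type, as the header comment describes. */
struct my_obj {
	unsigned long id;
	char tag[32];
};

static struct kmem_cache *my_obj_cachep;

static int my_obj_cache_init(void)
{
	my_obj_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	return my_obj_cachep ? 0 : -ENOMEM;
}

static void my_obj_cache_exit(void)
{
	/* Every object must already be freed; destroying a live cache can crash. */
	kmem_cache_destroy(my_obj_cachep);
}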
94 #include <linux/cache.h>
176 * - LIFO ordering, to hand out cache-warm objects from _alloc
180 * The limit is stored in the per-cpu structure to reduce the data cache
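The "short per-cpu head array" and the per-cpu "limit" mentioned above live in the allocator's struct array_cache; a sketch of its shape (field layout recalled from mm/slab.c, treat exact details as approximate):

struct array_cache {
	unsigned int avail;		/* number of object pointers currently cached */
	unsigned int limit;		/* kept per-cpu to reduce data cache footprint */
	unsigned int batchcount;	/* objects moved to/from the shared lists at once */
	unsigned int touched;		/* recently-used hint, consulted by the reaper */
	void *entry[];			/* strictly LIFO: entry[avail - 1] is handed out first */
};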
209 static int drain_freelist(struct kmem_cache *cache,
374 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
377 return page->s_mem + cache->size * idx;
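index_to_obj() above maps a slot index to an object address as s_mem + size * idx; the inverse mapping divides the offset from s_mem by the object size. The real code uses a precomputed reciprocal divide for speed; a plain-division sketch:

/* Sketch of the inverse of index_to_obj(); mm/slab.c uses reciprocal_divide(). */
static inline unsigned int obj_to_index_sketch(const struct kmem_cache *cache,
					       const struct page *page,
					       const void *obj)
{
	return (obj - page->s_mem) / cache->size;
}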
381 /* internal cache of cache description objs */
441 pr_err("slab error in %s(): cache `%s': %s\n",
507 * the CPUs getting into lockstep and contending for the global cache chain
542 * cache the pointers are not cleared and they could be counted as
799 * cache on this cpu.
859 * Allocates and initializes the kmem_cache_node for a node on each slab cache, used for
1005 * the respective cache's slabs, now we can go ahead and
1063 * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and
1086 * Shutdown cache reaper. Note that the slab_mutex is held so
1099 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1183 * For setting up all the kmem_cache_node for a cache whose buffer_size is the same as
1224 * 1) initialize the kmem_cache cache: it contains the struct
1230 * 2) Create the first kmalloc cache.
1231 * The struct kmem_cache for the new cache is allocated normally.
1236 * kmalloc cache with kmalloc allocated arrays.
1238 * the other caches with kmalloc allocated memory.
1341 pr_warn(" cache: %s, object size: %d, order: %d\n",
1609 * @cachep: cache pointer being destroyed
1613 * Before calling, the slab page must have been unlinked from the cache. The
1651 * @cachep: pointer to the cache that is being created
1652 * @size: size of objects to be created in this cache.
1762 /* Creation of first cache (kmem_cache). */
1898 * __kmem_cache_create - Create a cache.
1899 * @cachep: cache management descriptor
1902 * Returns a ptr to the cache on success, NULL on failure.
1904 * The @ctor is run when new pages are allocated by the cache.
1914 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1918 * Return: a pointer to the created cache or %NULL in case of error
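A hedged example of the creation API documented above, with a constructor and hardware cacheline alignment; struct foo and the names here are illustrative. Note that the ctor runs when the cache grows by a page, once per object, not on every allocation:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	int state;
};

/* Called once per object when new pages are added to the cache. */
static void foo_ctor(void *ptr)
{
	struct foo *f = ptr;

	spin_lock_init(&f->lock);
	f->state = 0;
}

static struct kmem_cache *foo_cachep;

static int foo_cache_setup(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, foo_ctor);
	return foo_cachep ? 0 : -ENOMEM;
}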
2190 static int drain_freelist(struct kmem_cache *cache,
2213 * to the cache.
2215 n->free_objects -= cache->num;
2217 slab_destroy(cache, page);
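The two fragments above sit in the middle of drain_freelist(): a completely free slab is unlinked from the node's free list under the node lock, the counters are adjusted, and the lock is dropped before slab_destroy() since the slab is no longer reachable through the cache. An approximate reconstruction (list and field names recalled from memory):

/* Approximate shape of drain_freelist(); see mm/slab.c for the real function. */
static int drain_freelist_sketch(struct kmem_cache *cache,
				 struct kmem_cache_node *n, int tofree)
{
	struct page *page;
	int nr_freed = 0;

	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
		spin_lock_irq(&n->list_lock);
		if (list_empty(&n->slabs_free)) {
			spin_unlock_irq(&n->list_lock);
			break;
		}
		page = list_last_entry(&n->slabs_free, struct page, lru);
		list_del(&page->lru);
		n->free_slabs--;
		n->total_slabs--;
		/* Safe to drop the lock: the slab is no longer linked to the cache. */
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
		slab_destroy(cache, page);
		nr_freed++;
	}
	return nr_freed;
}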
2280 * For a slab cache when the slab descriptor is off-slab, the
2281 * slab descriptor can't come from the same cache which is being created,
2344 * cache which they are a constructor for. Otherwise, deadlock.
2534 pr_err("slab: double free detected in cache '%s', objp %px\n",
2548 * Map pages beginning at addr to the given cache and slab. This is required
2549 * for the slab allocator to be able to look up the cache and slab of a
2552 static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2555 page->slab_cache = cache;
2560 * Grow (by 1) the number of slabs within a cache. This is called by
2561 * kmem_cache_alloc() when there are no active objs left in a cache.
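slab_map_pages() above is tiny: besides recording the owning cache in page->slab_cache, it also stashes the freelist pointer in the struct page so a later free can find both the cache and the slab metadata from the page alone. Its likely full body (the second assignment is recalled, not shown in the excerpt):

static void slab_map_pages(struct kmem_cache *cache, struct page *page,
			   void *freelist)
{
	page->slab_cache = cache;
	page->freelist = freelist;
}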
2683 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2687 redzone1 = *dbg_redzone1(cache, obj);
2688 redzone2 = *dbg_redzone2(cache, obj);
2697 slab_error(cache, "double free detected");
2699 slab_error(cache, "memory outside object was overwritten");
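The two error strings above are the two failure cases of the red-zone check on free: both zone words still RED_ACTIVE means the free is fine, both RED_INACTIVE means the object was already freed, anything else means a write outside the object clobbered a zone. A sketch of that decision (RED_ACTIVE/RED_INACTIVE are the canary values from <linux/poison.h>):

/* Sketch of the logic around the fragments above (verify_redzone_free). */
static void check_redzones(struct kmem_cache *cache, void *obj)
{
	unsigned long long redzone1 = *dbg_redzone1(cache, obj);
	unsigned long long redzone2 = *dbg_redzone2(cache, obj);

	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
		return;				/* live and intact: OK to free */

	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
		slab_error(cache, "double free detected");
	else
		slab_error(cache, "memory outside object was overwritten");
}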
2913 * If there was little recent activity on this cache, then
3095 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3122 get_node(cache, nid) &&
3123 get_node(cache, nid)->free_objects) {
3124 obj = ____cache_alloc_node(cache,
3138 page = cache_grow_begin(cache, flags, numa_mem_id());
3139 cache_grow_end(cache, page);
3142 obj = ____cache_alloc_node(cache,
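Read together, the fallback_alloc() fragments above describe a two-stage NUMA fallback: first try every allowed node that still has free objects, and only then grow the cache on the local node and retry. A simplified sketch (the real function walks the zonelist, honours cpusets and loops back after a successful grow; those details are omitted here):

/* Simplified sketch of the fallback order in fallback_alloc(). */
static void *fallback_alloc_sketch(struct kmem_cache *cache, gfp_t flags)
{
	struct page *page;
	void *obj;
	int nid;

	/* Stage 1: take an object from any node that still has some free. */
	for_each_online_node(nid) {
		if (get_node(cache, nid) &&
		    get_node(cache, nid)->free_objects) {
			obj = ____cache_alloc_node(cache,
						   flags | __GFP_THISNODE, nid);
			if (obj)
				return obj;
		}
	}

	/* Stage 2: nothing free anywhere, grow on the local node and retry. */
	page = cache_grow_begin(cache, flags, numa_mem_id());
	cache_grow_end(cache, page);
	if (!page)
		return NULL;

	nid = page_to_nid(page);
	return ____cache_alloc_node(cache, flags | __GFP_THISNODE, nid);
}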
3260 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3265 objp = alternate_node_alloc(cache, flags);
3269 objp = ____cache_alloc(cache, flags);
3276 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3412 * Release an obj back to its cache. If the obj has a constructed state, it must
3444 * This will avoid cache misses that happen while accessing slabp (which
3447 * the cache.
3473 * @cachep: The cache to allocate from.
3476 * Allocate an object from this cache. The flags are only relevant
3477 * if the cache has no available objects.
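As the comment says, the gfp flags only matter on the slow path, when the per-cpu array and the node lists are empty and new pages must be allocated. Illustrative usage (foo_cachep is the hypothetical cache from the sketch further up):

static struct foo *foo_get(gfp_t gfp)
{
	/* gfp (GFP_KERNEL, GFP_ATOMIC, ...) is only consulted if the cache must grow. */
	return kmem_cache_alloc(foo_cachep, gfp);
}

static void foo_put(struct foo *f)
{
	/* Returned objects land back on the per-cpu LIFO head array. */
	kmem_cache_free(foo_cachep, f);
}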
3562 * @cachep: The cache to allocate from.
3677 * @cachep: The cache the allocation was from.
3681 * cache.
3861 * - create a LIFO ordering, i.e. return objects that are cache-warm
3947 * If we cannot acquire the cache chain mutex then just give up - we'll try
4107 /* Find the cache in the chain of caches. */
4133 * cache's usercopy region.
4135 * Returns NULL if check passes, otherwise const char * to name of cache
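The usercopy region checked above is the whitelisted window of each object that copy_to_user()/copy_from_user() may touch; a cache declares it at creation time. A hedged example with kmem_cache_create_usercopy() (struct session and its layout are illustrative):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct session {
	struct list_head node;	/* kernel-internal, never copied to userspace */
	char name[64];		/* the only field exposed to copy_to_user() */
	void *private;
};

static struct kmem_cache *session_cachep;

static int session_cache_init(void)
{
	session_cachep = kmem_cache_create_usercopy("session",
					sizeof(struct session), 0,
					SLAB_HWCACHE_ALIGN,
					offsetof(struct session, name),
					sizeof(((struct session *)0)->name),
					NULL);
	return session_cachep ? 0 : -ENOMEM;
}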