Lines Matching defs:slab
5 * Internal slab definitions
42 struct slab {
67 struct slab *next;
95 #error "Unexpected slab allocator configured"
105 static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
113 static_assert(sizeof(struct slab) <= sizeof(struct page));
115 static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
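The size and offsetof assertions above pin struct slab to the layout of struct page, since both describe the same memory; the offsetof check is one expansion of a SLAB_MATCH-style macro. A minimal sketch of that pattern follows. The exact field pairs checked vary by kernel version, so the two shown here are illustrative only.

    /* Sketch: assert that a struct page field and the struct slab field
     * overlaying it sit at the same offset. Field pairs are illustrative. */
    #define SLAB_MATCH(pg, sl) \
            static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))

    SLAB_MATCH(flags, __page_flags);
    SLAB_MATCH(_refcount, __page_refcount);
    #undef SLAB_MATCH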
119 * folio_slab - Converts from folio to slab.
122 * Currently struct slab is a different representation of a folio where
125 * Return: The slab which contains this folio.
128 const struct folio *: (const struct slab *)(folio), \
129 struct folio *: (struct slab *)(folio)))
132 * slab_folio - The folio allocated for a slab
133 * @slab: The slab.
137 * now accessed by struct slab. It is occasionally necessary to convert back to
143 const struct slab *: (const struct folio *)s, \
144 struct slab *: (struct folio *)s))
147 * page_slab - Converts from first struct page to slab.
148 * @p: The first (either head of compound or single) page of slab.
150 * A temporary wrapper to convert struct page to struct slab in situations where
153 * Long-term ideally everything would work with struct slab directly or go
154 * through folio to struct slab.
156 * Return: The slab which contains this page.
159 const struct page *: (const struct slab *)(p), \
160 struct page *: (struct slab *)(p)))
163 * slab_page - The first struct page allocated for a slab
164 * @slab: The slab.
166 * A convenience wrapper for converting slab to the first struct page of the
168 * struct slab.
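All four wrappers are _Generic-based casts between different views of the same memory, so conversions round-trip losslessly. A small illustrative sketch (the function name slab_view_roundtrip is made up for this example):

    /* Illustrative only: the three views alias the same struct page(s). */
    static void slab_view_roundtrip(struct slab *slab)
    {
            struct folio *folio = slab_folio(slab); /* slab -> folio */
            struct page *page = slab_page(slab);    /* slab -> head page */

            WARN_ON(folio_slab(folio) != slab);
            WARN_ON(page_slab(page) != slab);
    }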
176 static inline bool slab_test_pfmemalloc(const struct slab *slab)
178 return folio_test_active((struct folio *)slab_folio(slab));
181 static inline void slab_set_pfmemalloc(struct slab *slab)
183 folio_set_active(slab_folio(slab));
186 static inline void slab_clear_pfmemalloc(struct slab *slab)
188 folio_clear_active(slab_folio(slab));
191 static inline void __slab_clear_pfmemalloc(struct slab *slab)
193 __folio_clear_active(slab_folio(slab));
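The pfmemalloc helpers reuse the folio's active flag as storage (slab pages are never on the LRU, so the flag is otherwise unused here). A sketch of how an allocation path might record the state when a slab comes from PF_MEMALLOC reserves; page_is_pfmemalloc() is the generic helper from include/linux/mm.h, and the function name note_pfmemalloc is illustrative:

    /* Sketch: remember that this slab was allocated from memory reserves
     * so later alloc/free paths can test slab_test_pfmemalloc(). */
    static void note_pfmemalloc(struct slab *slab)
    {
            if (page_is_pfmemalloc(slab_page(slab)))
                    slab_set_pfmemalloc(slab);
    }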
196 static inline void *slab_address(const struct slab *slab)
198 return folio_address(slab_folio(slab));
201 static inline int slab_nid(const struct slab *slab)
203 return folio_nid(slab_folio(slab));
206 static inline pg_data_t *slab_pgdat(const struct slab *slab)
208 return folio_pgdat(slab_folio(slab));
211 static inline struct slab *virt_to_slab(const void *addr)
221 static inline int slab_order(const struct slab *slab)
223 return folio_order((struct folio *)slab_folio(slab));
226 static inline size_t slab_size(const struct slab *slab)
228 return PAGE_SIZE << slab_order(slab);
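Together these helpers give the base address, NUMA node, node data, order and byte size of a slab, which is enough to locate an object within it. A hedged sketch; obj_size is an assumed parameter for illustration, whereas real callers take the size from the owning kmem_cache and use obj_to_index():

    /* Sketch: compute an object's index within its slab. @obj_size is an
     * assumed parameter; the caller must guarantee @obj points into a slab. */
    static unsigned int obj_index_in_slab(const void *obj, unsigned int obj_size)
    {
            struct slab *slab = virt_to_slab(obj);
            unsigned long off = (unsigned long)obj - (unsigned long)slab_address(slab);

            return off / obj_size;
    }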
248 * State of the slab allocator.
252 * have the problem that the structures used for managing slab caches are
253 * allocated from slab caches themselves.
256 DOWN, /* No slab functionality yet */
265 /* The slab cache mutex protects the management structures during changes */
268 /* The list of all slab caches on the system */
271 /* The slab cache that manages slab cache information */
284 /* Find the kmalloc slab corresponding to a certain size */
294 /* Functions provided by the slab allocators */
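slab_mutex serializes changes to the global slab_caches list, and readers that need a consistent view take it as well. A sketch of the usual iteration idiom (the same pattern mm/slab_common.c uses; the function name and pr_debug body are illustrative):

    /* Sketch: walk every registered cache under slab_mutex. */
    static void walk_slab_caches(void)
    {
            struct kmem_cache *s;

            mutex_lock(&slab_mutex);
            list_for_each_entry(s, &slab_caches, list)
                    pr_debug("cache %s: object size %u\n", s->name, s->object_size);
            mutex_unlock(&slab_mutex);
    }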
434 * slab_objcgs - get the object cgroups vector associated with a slab
435 * @slab: a pointer to the slab struct
437 * Returns a pointer to the object cgroups vector associated with the slab,
440 static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
442 unsigned long memcg_data = READ_ONCE(slab->memcg_data);
445 slab_page(slab));
446 VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));
451 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
456 static inline void memcg_free_slab_cgroups(struct slab *slab)
458 kfree(slab_objcgs(slab));
459 slab->memcg_data = 0;
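slab_objcgs() returns a per-slab array with one obj_cgroup pointer per object slot, indexed by object index. A hedged sketch of looking up the cgroup an object was charged to; it assumes CONFIG_MEMCG_KMEM and mirrors the lookup the allocation/free hooks below perform (the function name objcg_of is made up):

    /* Sketch: find the obj_cgroup a slab object was charged to, if any.
     * Assumes @p points into a slab owned by @s. */
    static struct obj_cgroup *objcg_of(struct kmem_cache *s, void *p)
    {
            struct slab *slab = virt_to_slab(p);
            struct obj_cgroup **objcgs = slab_objcgs(slab);

            if (!objcgs)
                    return NULL;    /* no accounted objects on this slab */

            return objcgs[obj_to_index(s, slab, p)];
    }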
518 struct slab *slab;
527 slab = virt_to_slab(p[i]);
529 if (!slab_objcgs(slab) &&
530 memcg_alloc_slab_cgroups(slab, s, flags,
536 off = obj_to_index(s, slab, p[i]);
538 slab_objcgs(slab)[off] = objcg;
539 mod_objcg_state(objcg, slab_pgdat(slab),
548 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
557 objcgs = slab_objcgs(slab);
565 off = obj_to_index(s, slab, p[i]);
572 mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
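The free hook is the inverse of the allocation hook: for each freed object it clears the objcg slot, uncharges the object's footprint and drops the reference. A simplified single-object sketch; obj_full_size(), mod_objcg_state() and cache_vmstat_idx() are the helpers the real hook uses, while the wrapper name is illustrative:

    /* Simplified sketch of the per-object uncharge done by the free hook. */
    static void uncharge_slab_object(struct kmem_cache *s, struct slab *slab, void *p)
    {
            struct obj_cgroup **objcgs = slab_objcgs(slab);
            struct obj_cgroup *objcg;
            unsigned int off;

            if (!objcgs)
                    return;

            off = obj_to_index(s, slab, p);
            objcg = objcgs[off];
            if (!objcg)
                    return;

            objcgs[off] = NULL;
            obj_cgroup_uncharge(objcg, obj_full_size(s));
            mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
                            -obj_full_size(s));
            obj_cgroup_put(objcg);
    }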
579 static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
589 static inline int memcg_alloc_slab_cgroups(struct slab *slab,
596 static inline void memcg_free_slab_cgroups(struct slab *slab)
615 static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
623 struct slab *slab;
625 slab = virt_to_slab(obj);
626 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
629 return slab->slab_cache;
632 static __always_inline void account_slab(struct slab *slab, int order,
636 memcg_alloc_slab_cgroups(slab, s, gfp, true);
638 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
642 static __always_inline void unaccount_slab(struct slab *slab, int order,
646 memcg_free_slab_cgroups(slab);
648 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
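account_slab()/unaccount_slab() add and subtract the slab's size in bytes (PAGE_SIZE << order) from the per-node slab counters; which counter is chosen depends on whether the cache is reclaimable. A sketch mirroring the cache_vmstat_idx() selection those calls rely on (the function name here is illustrative):

    /* Sketch of the stat selection used by the accounting calls above:
     * reclaimable caches are counted separately from unreclaimable ones. */
    static inline enum node_stat_item slab_vmstat_idx(struct kmem_cache *s)
    {
            return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                    NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
    }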
662 "%s: Wrong slab cache. %s but object is from %s\n",
774 * The slab lists for all objects.
782 unsigned long total_slabs; /* length of all slab lists */
783 unsigned long free_slabs; /* length of free slab list only */
875 struct slab *kp_slab;
883 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
887 const struct slab *slab, bool to_user);
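__check_heap_object() is the allocator-specific backend of hardened usercopy: given a pointer, a length and the slab it lives in, it validates that the copy stays within the object's allowed usercopy region. A simplified sketch of the caller side, condensed from the flow in mm/usercopy.c (the wrapper name check_slab_copy is made up; the real path also handles non-slab folios):

    /* Simplified sketch of how the generic usercopy check hands off to
     * the slab allocator for pointers that resolve to a slab folio. */
    static void check_slab_copy(const void *ptr, unsigned long n, bool to_user)
    {
            struct folio *folio = virt_to_folio(ptr);

            if (folio_test_slab(folio))
                    __check_heap_object(ptr, n, folio_slab(folio), to_user);
    }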