Lines matching refs:objects in mm/slub.c
4 * objects in per cpu and per node lists.
57 * B. page->inuse -> Number of objects in use
58 * C. page->objects -> Number of objects in page
64 * processors may put objects onto the freelist but the processor that
65 * froze the slab is the only one that can retrieve the objects from the
77 * allocating a long series of objects that fill up slabs does not require
91 * cannot scan all objects.
110 * free objects in addition to the regular freelist
171 * sort the partial list by the number of objects in use.
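
The page->inuse and page->objects counters documented above (B and C) drive most of the accounting in this file. A minimal sketch of the idea, assuming the packed bitfield layout that the MAX_OBJS_PER_PAGE limit below implies; this is an illustration, not the kernel's struct page definition:

struct slab_counters {			/* illustrative only */
	unsigned int inuse:16;		/* objects handed out */
	unsigned int objects:15;	/* total objects in the slab */
	unsigned int frozen:1;		/* slab is bound to one cpu */
};

static inline unsigned int slab_free_count(const struct slab_counters *c)
{
	/* whatever is not in use is free */
	return c->objects - c->inuse;
}
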
195 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
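
MAX_OBJS_PER_PAGE is 32767 because the object count shares one 32-bit word with the allocation order. A sketch of that packing, mirroring the oo_make()/oo_order()/oo_objects() helpers this file defines (shift value as in mainline):

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

struct kmem_cache_order_objects {
	unsigned int x;		/* order above OO_SHIFT, object count below */
};

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
						      unsigned int objects)
{
	struct kmem_cache_order_objects x = { (order << OO_SHIFT) + objects };

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
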
257 * example, when __free_slab() iterates over objects in a cache, it
311 /* Loop over all objects in a slab */
456 bitmap_zero(object_map, page->objects);
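
The "Loop over all objects in a slab" comment introduces the for_each_object() iterator used throughout the debug code (see the get_map() and validate hits around it). Roughly, it strides through the slab in s->size steps; fixup_red_left() skips the leading red zone when debugging pads each object:

/* shape of the iterator, approximately as in mainline */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
	     __p < (__addr) + (__objects) * (__s)->size; \
	     __p += (__s)->size)
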
530 if (object < base || object >= base + page->objects * s->size ||
642 pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
643 page, page->objects, page->inuse, page->freelist, page->flags);
816 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
817 * 0xcc (RED_ACTIVE) for objects in use.
939 * of the free objects in this slab. May cause
960 if (page->objects > maxobj) {
961 slab_err(s, page, "objects %u > max %u",
962 page->objects, maxobj);
965 if (page->inuse > page->objects) {
967 page->inuse, page->objects);
987 while (fp && nr <= page->objects) {
998 page->inuse = page->objects;
1013 if (page->objects != max_objects) {
1014 slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
1015 page->objects, max_objects);
1016 page->objects = max_objects;
1017 slab_fix(s, "Number of objects adjusted.");
1019 if (page->inuse != page->objects - nr) {
1021 page->inuse, page->objects - nr);
1022 page->inuse = page->objects - nr;
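
The checks above walk the freelist and repair page->objects and page->inuse when the stored metadata disagrees with what the walk finds. A condensed sketch of that walk, assuming the kernel-internal get_freepointer() helper; the function name here is hypothetical:

static int count_free_objects(struct kmem_cache *s, struct page *page)
{
	void *fp = page->freelist;
	int nr = 0;

	/* cap the walk so a corrupted, cyclic freelist cannot spin forever */
	while (fp && nr <= page->objects) {
		nr++;
		fp = get_freepointer(s, fp);	/* next free object */
	}
	/* the caller then checks that page->inuse == page->objects - nr */
	return nr;
}
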
1081 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1093 atomic_long_add(objects, &n->total_objects);
1096 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1101 atomic_long_sub(objects, &n->total_objects);
1163 * to avoid issues in the future. Marking all objects
1164 * as used avoids touching the remaining objects.
1166 slab_fix(s, "Marking all objects used");
1167 page->inuse = page->objects;
1489 int objects) {}
1491 int objects) {}
1683 * If the target page allocation failed, the number of objects on the
1704 if (page->objects < 2 || !s->random_seq)
1710 page_limit = page->objects * s->size;
1719 for (idx = 1; idx < page->objects; idx++) {
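
With CONFIG_SLAB_FREELIST_RANDOM, the loop above builds the initial freelist in a precomputed random order: s->random_seq holds a permutation of object offsets (pre-scaled by s->size), and entries past the end of a short slab are simply skipped. The helper behind it looks roughly like this:

static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
				 unsigned long *pos, void *start,
				 unsigned long page_limit,
				 unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * A failed higher-order allocation yields fewer objects than
	 * random_seq was built for, so offsets beyond page_limit are
	 * skipped rather than used.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}
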
1780 page->objects = oo_objects(oo);
1799 for (idx = 0, p = start; idx < page->objects - 1; idx++) {
1808 page->inuse = page->objects;
1817 inc_slabs_node(s, page_to_nid(page), page->objects);
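
Without randomization, allocate_slab() threads a plain linear freelist through the fresh slab: every object stores a pointer to its neighbour and the last one terminates the chain. A sketch, assuming the kernel-internal set_freepointer() helper (the wrapper name is mine; mainline does this inline, interleaved with setup_object() calls):

static void init_linear_freelist(struct kmem_cache *s, struct page *page,
				 void *start)
{
	void *p = start;
	int idx;

	page->freelist = start;
	for (idx = 0; idx < page->objects - 1; idx++) {
		set_freepointer(s, p, p + s->size);	/* chain to neighbour */
		p += s->size;
	}
	set_freepointer(s, p, NULL);	/* last object ends the chain */
}
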
1841 page->objects)
1872 dec_slabs_node(s, page_to_nid(page), page->objects);
1908 * Returns a list of objects or NULL if it fails.
1912 int mode, int *objects)
1922 * The old freelist is the list of objects for the
1928 *objects = new.objects - new.inuse;
1930 new.inuse = page->objects;
1962 int objects;
1980 t = acquire_slab(s, n, page, object == NULL, &objects);
1984 available += objects;
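
get_partial_node() keeps calling acquire_slab() until it has banked enough free objects: the first slab satisfies the current allocation and later ones are parked as per cpu partial slabs. A condensed sketch (kernel-internal APIs; the list linkage field has been lru or slab_list depending on kernel version):

list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
	void *t;
	int objects;

	t = acquire_slab(s, n, page, object == NULL, &objects);
	if (!t)
		break;

	available += objects;			/* free objects gained */
	if (!object)
		object = t;			/* first slab feeds the allocation */
	else
		put_cpu_partial(s, page, 0);	/* park the rest per cpu */
	if (available > slub_cpu_partial(s) / 2)
		break;				/* stash is full enough */
}
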
2023 * returns node-local objects. If the ratio is higher, kmalloc()
2024 * may return off-node objects because partial slabs are obtained
2032 * with available objects.
2175 * Stage one: Free all available per cpu objects back
2188 * 'freelist' is already corrupted. So isolate all objects
2212 * list presence reflects the actual number of objects
2221 * because the number of objects in the slab may have
2414 pobjects += page->objects - page->inuse;
2494 * Check if the objects in a per cpu structure fit numa
2509 return page->objects - page->inuse;
2637 new.inuse = page->objects;
2652 * Processing is still very fast if new objects have been freed to the
2731 * freelist is pointing to the list of objects to be used.
2732 * page is pointing to the page from which the objects are obtained.
2962 * Slow path handling. This may still be called frequently since objects
3094 * Bulk free of a freelist with several objects (all pointing to the
3095 * same page) possible by specifying head and tail ptr, plus objects
3150 * to remove objects whose reuse must be delayed.
3182 * This function progressively scans the array with free objects (with
3183 * a limited look ahead) and extracts objects belonging to the same
3185 * page/objects. This can happen without any need for
3186 * synchronization, because the objects are owned by the running process.
3187 * The freelist is built up as a singly linked list in the objects.
3215 /* Handle kmalloc'ed objects */
3240 continue; /* Skip processed objects */
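
build_detached_freelist(), described above, turns the caller's array p[] into a ready-made freelist chained through the objects themselves, so one slab_free() call can return every object that shares a page. A minimal sketch of the idea (the look-ahead limit and the kmalloc'ed-object special case from the surrounding code are elided):

struct detached_freelist {
	struct page *page;
	void *freelist;		/* head of the chain inside the objects */
	void *tail;
	int cnt;
};

static size_t detached_freelist_sketch(struct kmem_cache *s, size_t size,
				       void **p, struct detached_freelist *df)
{
	size_t first = size - 1;

	/* start the chain with the last array entry */
	df->page = virt_to_head_page(p[first]);
	df->freelist = df->tail = p[first];
	set_freepointer(s, df->freelist, NULL);
	df->cnt = 1;
	p[first] = NULL;			/* mark as consumed */

	while (first--) {
		void *object = p[first];

		if (!object || virt_to_head_page(object) != df->page)
			continue;		/* different page: leave for later */

		/* prepend: the freelist is built up inside the objects */
		set_freepointer(s, object, df->freelist);
		df->freelist = object;
		df->cnt++;
		p[first] = NULL;
	}
	return df->cnt;
}
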
3296 * Drain objects in the per cpu slab, while disabling local
3385 * order 0 does not cause fragmentation in the page allocator. Larger objects
3391 * number of objects is in one slab. Otherwise we may generate too much
3396 * number of objects in a slab as critical. If we reach slub_max_order then
3400 * Higher order allocations also allow the placement of more objects in a
3442 * we reduce the minimum objects required in a slab.
3465 * We were unable to place multiple objects in a slab. Now
3553 inc_slabs_node(kmem_cache_node, node, page->objects);
3618 * cpu_partial determines the maximum number of objects kept in the
3628 * A) The number of objects from per cpu partial slabs dumped to the
3630 * B) The number of objects in cpu partial slabs to extract from the
3631 * per node list when we run out of per cpu objects. We only fetch
3701 * destructor, are poisoning the objects, or are
3734 * overwrites from earlier objects rather than let
3749 * offset 0. In order to align the objects we have to simply size
3777 * Determine the number of objects per slab
3857 for_each_object(p, s, addr, page->objects) {
3916 /* Attempt to free all objects */
4031 * Rejects incorrectly sized objects and objects that are to be copied
4140 * being allocated from last, increasing the chance that the last objects
4170 int free = page->objects - page->inuse;
4178 if (free == page->objects) {
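
In __kmem_cache_shrink(), a slab whose free count equals its capacity holds no live objects and can be discarded outright; anything else is re-sorted so the fullest slabs sit at the head of the partial list. A sketch of that decision (the discard and promote list heads plus the locking from the surrounding function are assumed; SHRINK_PROMOTE_MAX is the real mainline constant):

list_for_each_entry_safe(page, t, &n->partial, slab_list) {
	int free = page->objects - page->inuse;

	if (free == page->objects) {
		/* completely free: unlink now, discard after unlock */
		list_move(&page->slab_list, &discard);
		n->nr_partial--;
	} else if (free <= SHRINK_PROMOTE_MAX) {
		/* keep, bucketed by free count so fuller slabs come first */
		list_move(&page->slab_list, promote + free - 1);
	}
}
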
4517 return page->objects;
4535 for_each_object(p, s, addr, page->objects) {
4593 * Generate lists of code addresses where slabcache objects are allocated
4726 for_each_object(p, s, addr, page->objects)
4879 SL_OBJECTS, /* Determine allocated objects, not slabs */
4932 x = page->objects;
5086 unsigned int objects;
5089 err = kstrtouint(buf, 10, &objects);
5092 if (objects && !kmem_cache_has_cpu_partial(s))
5095 slub_set_cpu_partial(s, objects);
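
The kstrtouint() handler above backs the writable cpu_partial attribute in sysfs; it rejects a nonzero value when the cache cannot keep per cpu partial slabs. From userspace, for example (the cache name is only an example):

	echo 30 > /sys/kernel/slab/kmalloc-64/cpu_partial
	cat /sys/kernel/slab/kmalloc-64/cpu_partial
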
5131 SLAB_ATTR_RO(objects);
5141 int objects = 0;
5153 objects += page->pobjects;
5157 len = sprintf(buf, "%d(%d)", objects, pages);