Lines matching refs:objects (mm/slub.c)
4 * objects in per cpu and per node lists.
72 * A. slab->freelist -> List of free objects in a slab
73 * B. slab->inuse -> Number of objects in use
74 * C. slab->objects -> Number of objects in slab
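
Taken together, these three fields mean that a slab's free-object chain and its in-use count always add up to its capacity. A minimal userspace sketch of that relationship (the toy_slab layout and the free pointer stored at offset 0 are assumptions of this model, not the kernel's struct slab):

    #include <assert.h>
    #include <stddef.h>

    struct toy_slab {
        void *freelist;         /* A. singly linked list of free objects */
        unsigned int inuse;     /* B. number of objects handed out */
        unsigned int objects;   /* C. total objects carved out of the slab */
    };

    /* In this model the free pointer lives in the first word of each free object. */
    static unsigned int freelist_len(void *fp)
    {
        unsigned int n = 0;

        while (fp) {
            fp = *(void **)fp;
            n++;
        }
        return n;
    }

    static void check_counts(const struct toy_slab *s)
    {
        /* Invariant implied by A, B and C above. */
        assert(s->inuse + freelist_len(s->freelist) == s->objects);
    }

    int main(void)
    {
        long storage[3][4];     /* three toy "objects", pointer-aligned */
        struct toy_slab s = { NULL, 3, 3 };

        /* Free two objects: push each onto the freelist via its first word. */
        *(void **)storage[1] = s.freelist; s.freelist = storage[1]; s.inuse--;
        *(void **)storage[2] = s.freelist; s.freelist = storage[2]; s.inuse--;

        check_counts(&s);       /* 1 object in use + 2 free == 3 objects */
        return 0;
    }
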
82 * processors may put objects onto the freelist but the processor that
83 * froze the slab is the only one that can retrieve the objects from the
97 * allocating a long series of objects that fill up slabs does not require
142 * cannot scan all objects.
161 * free objects in addition to the regular freelist
261 * sort the partial list by the number of objects in use.
289 #define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
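
A quick check of that bound: a 15-bit counter tops out at 2^15 - 1 = 32767 objects per slab, which is exactly the constant above.
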
455 /* Loop over all objects in a slab */
494 * We take the number of objects but actually limit the number of
640 bitmap_zero(obj_map, slab->objects);
726 if (object < base || object >= base + slab->objects * s->size ||
858 pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
859 slab, slab->objects, slab->inuse, slab->freelist,
1110 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
1111 * 0xcc (RED_ACTIVE) for objects in use.
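
What such fill patterns enable, in simplified form (a sketch, not the kernel's actual red-zone checking code): scan a filled region for the first byte that no longer matches the expected pattern, which pinpoints an overwrite.

    #include <stddef.h>

    /* Return the offset of the first byte differing from 'val', or 'len'
     * if the whole range still carries the fill pattern intact. */
    static size_t first_mismatch(const unsigned char *p, unsigned char val,
                                 size_t len)
    {
        size_t i;

        for (i = 0; i < len; i++)
            if (p[i] != val)
                break;
        return i;
    }

Scanning with 0xbb over an inactive object's red zone (or 0xcc over an active one's) and getting back anything smaller than the zone length would indicate a write past the object's boundary.
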
1249 * of the free objects in this slab. May cause
1268 if (slab->objects > maxobj) {
1269 slab_err(s, slab, "objects %u > max %u",
1270 slab->objects, maxobj);
1273 if (slab->inuse > slab->objects) {
1275 slab->inuse, slab->objects);
1295 while (fp && nr <= slab->objects) {
1306 slab->inuse = slab->objects;
1321 if (slab->objects != max_objects) {
1322 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
1323 slab->objects, max_objects);
1324 slab->objects = max_objects;
1325 slab_fix(s, "Number of objects adjusted");
1327 if (slab->inuse != slab->objects - nr) {
1329 slab->inuse, slab->objects - nr);
1330 slab->inuse = slab->objects - nr;
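
These fragments come from the consistency checks that validate a slab's counters and repair them when corruption is found. A rough userspace sketch of the same idea (the toy_slab layout and the free pointer at offset 0 are assumptions of this model, not the kernel's representation): bound the freelist walk by objects so a corrupted list cannot loop forever, clamp objects to the computed maximum, then recompute inuse from what was actually found.

    #include <stdio.h>

    struct toy_slab {
        void *freelist;
        unsigned int inuse;
        unsigned int objects;
    };

    /* Count free objects, walking at most 'objects' freelist entries so a
     * corrupted (cyclic) list cannot make the walk loop forever. */
    static unsigned int count_free(const struct toy_slab *s)
    {
        void *fp = s->freelist;
        unsigned int nr = 0;

        while (fp && nr < s->objects) {
            fp = *(void **)fp;
            nr++;
        }
        return nr;
    }

    /* Mirror of the fix-ups above: trust the computed maximum, then make
     * 'inuse' consistent with the number of free objects actually found. */
    static void fixup_counters(struct toy_slab *s, unsigned int max_objects)
    {
        unsigned int nr;

        if (s->objects > max_objects) {
            fprintf(stderr, "objects %u > max %u, adjusting\n",
                    s->objects, max_objects);
            s->objects = max_objects;
        }
        nr = count_free(s);
        if (s->inuse != s->objects - nr) {
            fprintf(stderr, "inuse %u should be %u, adjusting\n",
                    s->inuse, s->objects - nr);
            s->inuse = s->objects - nr;
        }
    }
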
1381 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1393 atomic_long_add(objects, &n->total_objects);
1396 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1401 atomic_long_sub(objects, &n->total_objects);
1460 * to avoid issues in the future. Marking all objects
1461 * as used avoids touching the remaining objects.
1463 slab_fix(s, "Marking all objects used");
1464 slab->inuse = slab->objects;
1749 int objects) {}
1751 int objects) {}
1937 * If the target page allocation failed, the number of objects on the
1958 if (slab->objects < 2 || !s->random_seq)
1964 page_limit = slab->objects * s->size;
1973 for (idx = 1; idx < slab->objects; idx++) {
2031 slab->objects = oo_objects(oo);
2051 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
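
Line 2051 is the loop that threads the default freelist through a freshly allocated slab. A hedged userspace sketch of that construction (the 64-byte object size, the free pointer at offset 0, and the plain malloc backing are illustrative assumptions): every object except the last stores a pointer to the object that lies size bytes after it.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        unsigned int objects = 8, idx, nr = 0;
        size_t size = 64;                  /* object stride, an assumed value */
        char *start = malloc((size_t)objects * size);
        char *p;

        if (!start)
            return 1;

        /* Thread the freelist through the objects themselves. */
        for (idx = 0, p = start; idx < objects - 1; idx++, p += size)
            *(void **)p = p + size;        /* this object -> next object */
        *(void **)p = NULL;                /* the last object ends the list */

        /* Walk the list back to show every object is reachable as free. */
        for (p = start; p; p = *(void **)p)
            nr++;
        printf("freelist length: %u (objects = %u)\n", nr, objects);

        free(start);
        return 0;
    }
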
2103 for_each_object(p, s, slab_address(slab), slab->objects)
2115 dec_slabs_node(s, slab_nid(slab), slab->objects);
2169 if (slab->inuse == slab->objects) {
2205 if (slab->inuse == slab->objects)
2210 inc_slabs_node(s, nid, slab->objects);
2220 * Returns a list of objects or NULL if it fails.
2234 * The old freelist is the list of objects for the
2241 new.inuse = slab->objects;
2350 * returns node local objects. If the ratio is higher, then kmalloc()
2351 * may return off node objects because partial slabs are obtained
2359 * with available objects.
2511 * Stage one: Count the objects on cpu's freelist as free_delta and
2521 * 'freelist_iter' is already corrupted. So isolate all objects
2538 * reflects the actual number of objects during unfreeze.
2542 * unfrozen and number of objects in the slab may have changed.
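
A very rough single-threaded model of the two stages these comments describe (in the kernel, stage two is a cmpxchg retry loop on the slab's freelist and counters; that concurrency is deliberately left out of this sketch): stage one walks the detached per-cpu freelist to count free_delta and find its tail, stage two splices the chain onto the slab's freelist and lowers inuse by the same amount.

    #include <assert.h>
    #include <stddef.h>

    struct toy_slab {
        void *freelist;
        unsigned int inuse;
        unsigned int objects;
    };

    /* Give the objects on a detached per-cpu freelist back to their slab. */
    static void unfreeze(struct toy_slab *slab, void *cpu_freelist)
    {
        void *tail = NULL, *fp;
        unsigned int free_delta = 0;

        /* Stage one: count the cpu freelist and remember its tail. */
        for (fp = cpu_freelist; fp; fp = *(void **)fp) {
            tail = fp;
            free_delta++;
        }

        /* Stage two: splice the chain ahead of the slab's own freelist and
         * fix up inuse so it again counts only allocated objects. */
        if (tail) {
            *(void **)tail = slab->freelist;
            slab->freelist = cpu_freelist;
            assert(slab->inuse >= free_delta);
            slab->inuse -= free_delta;
        }
    }
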
2883 * Check if the objects in a per cpu structure fit numa
2898 return slab->objects - slab->inuse;
2921 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
2951 slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
3065 new.inuse = slab->objects;
3080 * Processing is still very fast if new objects have been freed to the
3168 * freelist is pointing to the list of objects to be used.
3169 * slab is pointing to the slab from which the objects are obtained.
3251 slab->inuse = slab->objects;
3254 inc_slabs_node(s, slab_nid(slab), slab->objects);
3581 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
3593 * Slow path handling. This may still be called frequently since objects
3730 * Bulk free of a freelist with several objects (all pointing to the
3731 * same slab) possible by specifying head and tail ptr, plus objects
3807 * to remove objects, whose reuse must be delayed.
3844 * This function progressively scans the array of free objects (with
3845 * a limited look ahead) and extracts objects belonging to the same
3847 * slab/objects. This can happen without any need for
3848 * synchronization, because the objects are owned by the running process.
3849 * The freelist is built up as a singly linked list in the objects.
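
A hedged userspace sketch of that look-ahead grouping (the same_slab() predicate, the fixed slab size, and the free pointer at offset 0 stand in for the kernel's page lookup and cache layout; this models the idea rather than the kernel function): start from the last array entry, chain every object from the same slab into a list built inside the objects, move chained entries out of the unprocessed prefix, and return how much of the array is left for the caller's next pass.

    #include <stddef.h>
    #include <stdint.h>

    struct detached_freelist {
        void *head;             /* first object of the list being built */
        void *tail;             /* last object, handy for a later splice */
        unsigned int cnt;
    };

    /* Assumed stand-in for "do these objects share a slab?".  The kernel
     * derives this from the object's page; a fixed region size is used
     * here purely for illustration. */
    static int same_slab(const void *a, const void *b, size_t slab_bytes)
    {
        return (uintptr_t)a / slab_bytes == (uintptr_t)b / slab_bytes;
    }

    static void swap_ptr(void **a, void **b)
    {
        void *t = *a; *a = *b; *b = t;
    }

    /* Scan p[] from the end with a limited look-ahead, chaining objects that
     * share a slab with p[size - 1] into a list built inside the objects.
     * Returns how many entries are left for the caller's next pass. */
    static size_t build_detached(void **p, size_t size, size_t slab_bytes,
                                 struct detached_freelist *df)
    {
        int lookahead = 3;          /* limited look-ahead, as in the comment */
        size_t same;
        void *object = p[--size];

        df->head = df->tail = object;
        df->cnt = 1;
        *(void **)object = NULL;    /* free pointer at offset 0 (assumed) */

        same = size;
        while (size) {
            object = p[--size];
            if (same_slab(object, df->head, slab_bytes)) {
                *(void **)object = df->head;        /* chain inside the object */
                df->head = object;
                df->cnt++;
                if (size != --same)
                    swap_ptr(&p[size], &p[same]);   /* keep the prefix compact */
                continue;
            }
            if (!--lookahead)       /* look-ahead budget exhausted */
                break;
        }
        return same;                /* p[0..same-1] still needs freeing */
    }

A caller would loop, bulk-freeing each detached list to its slab and retrying with the remaining prefix until the returned count reaches zero.
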
3867 /* Handle kmalloc'ed objects */
3942 * Drain objects in the per cpu slab, while disabling local
4092 * order 0 does not cause fragmentation in the page allocator. Larger objects
4098 * number of objects is in one slab. Otherwise we may generate too much
4103 * number of objects in a slab as critical. If we reach slub_max_order then
4107 * Higher order allocations also allow the placement of more objects in a
4150 * we reduce the minimum objects required in a slab.
4186 * We were unable to place multiple objects in a slab. Now
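
As a concrete illustration of the trade-off sketched in these comments (the numbers are chosen only for the example): with 4 KiB pages, a 700-byte object fits 5 times into an order-0 slab (4096 / 700 = 5, wasting 596 bytes, roughly 15% of the slab), while an order-2 slab holds 23 such objects (16384 / 700 = 23, wasting 284 bytes, under 2%). Higher order therefore packs more objects per slab and wastes less space, at the cost of asking the page allocator for harder-to-satisfy contiguous allocations.
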
4264 inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects);
4281 inc_slabs_node(kmem_cache_node, node, slab->objects);
4341 * cpu_partial determined the maximum number of objects kept in the
4350 * of objects, even though we now limit maximum number of pages, see
4423 * destructor, are poisoning the objects, or are
4461 * overwrites from earlier objects rather than let
4476 * offset 0. In order to align the objects we have to simply size
4501 * Determine the number of objects per slab
4580 for_each_object(p, s, addr, slab->objects) {
4638 /* Attempt to free all objects */
4672 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
4740 * Rejects incorrectly sized objects and objects that are to be copied
4796 * being allocated from last increasing the chance that the last objects
4825 int free = slab->objects - slab->inuse;
4833 if (free == slab->objects) {
4836 dec_slabs_node(s, node, slab->objects);
5141 return slab->objects;
5157 for_each_object(p, s, addr, slab->objects) {
5226 * Generate lists of code addresses where slabcache objects are allocated
5382 for_each_object(p, s, addr, slab->objects)
5396 SL_OBJECTS, /* Determine allocated objects not slabs */
5434 x = slab->objects;
5596 unsigned int objects;
5599 err = kstrtouint(buf, 10, &objects);
5602 if (objects && !kmem_cache_has_cpu_partial(s))
5605 slub_set_cpu_partial(s, objects);
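
Reading these fragments together: the value written to the sysfs file is parsed in base 10 with kstrtouint(), and, as the check on line 5602 shows, a nonzero count is rejected when the cache cannot keep per-cpu partial slabs; writing 0 is accepted and effectively turns the per-cpu partial lists off via slub_set_cpu_partial().
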
5645 int objects = 0;
5662 objects = (slabs * oo_objects(s->oo)) / 2;
5663 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
5672 objects = (slabs * oo_objects(s->oo)) / 2;
5674 cpu, objects, slabs);
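
The per-cpu object counts printed here are estimates: the division by two amounts to assuming each parked partial slab is about half full, so a cpu holding 6 partial slabs in a cache with oo_objects(s->oo) = 32 would be reported as 6 * 32 / 2 = 96 objects.
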
5735 SLAB_ATTR_RO(objects);