Lines Matching defs:shrinker
117 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
122 /* This may call shrinker, so it must use down_read_trylock() */
123 id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
135 shrinker->id = id;
142 static void unregister_memcg_shrinker(struct shrinker *shrinker)
144 int id = shrinker->id;
182 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
187 static void unregister_memcg_shrinker(struct shrinker *shrinker)
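The block at 117-144 is the memcg-enabled path: every memcg-aware shrinker gets an ID from the global shrinker_idr so the per-memcg shrinker bitmaps can index it. The comment at 122 is about lock ordering: idr_alloc() with GFP_KERNEL can itself enter reclaim and invoke shrinkers, so shrink_slab() must take shrinker_rwsem with down_read_trylock() rather than block on it. Lines 182-187 are the no-op stubs used when memcg support is configured out. A minimal sketch of the same IDR pattern, with invented names (demo_idr, demo_assign_id, demo_release_id) and a plain mutex standing in for shrinker_rwsem:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

static DEFINE_IDR(demo_idr);
static DEFINE_MUTEX(demo_lock);

/* Allocate a stable ID and remember it in the shrinker. */
static int demo_assign_id(struct shrinker *shrinker)
{
	int id;

	mutex_lock(&demo_lock);
	/* start = 0, end = 0: any non-negative ID will do */
	id = idr_alloc(&demo_idr, shrinker, 0, 0, GFP_KERNEL);
	mutex_unlock(&demo_lock);
	if (id < 0)
		return id;
	shrinker->id = id;	/* ->id only exists with memcg support built in */
	return 0;
}

static void demo_release_id(struct shrinker *shrinker)
{
	mutex_lock(&demo_lock);
	idr_remove(&demo_idr, shrinker->id);
	mutex_unlock(&demo_lock);
}

The same caveat from line 122 applies to the sketch: a GFP_KERNEL allocation made under the lock can recurse into reclaim, so reclaim-side readers must only trylock.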
260 * Add a shrinker callback to be called from the vm.
262 int prealloc_shrinker(struct shrinker *shrinker)
264 unsigned int size = sizeof(*shrinker->nr_deferred);
266 if (shrinker->flags & SHRINKER_NUMA_AWARE)
269 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
270 if (!shrinker->nr_deferred)
273 if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
274 if (prealloc_memcg_shrinker(shrinker))
281 kfree(shrinker->nr_deferred);
282 shrinker->nr_deferred = NULL;
286 void free_prealloced_shrinker(struct shrinker *shrinker)
288 if (!shrinker->nr_deferred)
291 if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
293 unregister_memcg_shrinker(shrinker);
297 kfree(shrinker->nr_deferred);
298 shrinker->nr_deferred = NULL;
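prealloc_shrinker() sizes nr_deferred from a single atomic_long_t. The statement "size *= nr_node_ids;" that sits between matches 266 and 269 does not contain the string "shrinker", so it is absent from this listing, but it is what turns the counter into a per-node array for SHRINKER_NUMA_AWARE shrinkers. The allocation logic, reproduced from the surrounding upstream source with comments:

	unsigned int size = sizeof(*shrinker->nr_deferred);	/* one atomic_long_t */

	if (shrinker->flags & SHRINKER_NUMA_AWARE)
		size *= nr_node_ids;	/* one deferred counter per NUMA node */

	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
	if (!shrinker->nr_deferred)
		return -ENOMEM;

If the memcg ID allocation at 274 then fails, the error path at 281-282 frees nr_deferred again, so a failed prealloc leaves the shrinker untouched.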
301 void register_shrinker_prepared(struct shrinker *shrinker)
304 list_add_tail(&shrinker->list, &shrinker_list);
305 shrinker->flags |= SHRINKER_REGISTERED;
309 int register_shrinker(struct shrinker *shrinker)
311 int err = prealloc_shrinker(shrinker);
315 register_shrinker_prepared(shrinker);
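register_shrinker() is just prealloc_shrinker() plus register_shrinker_prepared(); the split exists so callers can do the allocations early and only hook into shrinker_list (line 304) once their cache is live. A self-contained sketch of a client of this API, written against the int register_shrinker(struct shrinker *) signature shown at 309 (pre-5.18 kernels; 5.18 adds a format-string argument). The demo_* names and the fake object counter are invented for illustration:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>

static atomic_long_t demo_cached = ATOMIC_LONG_INIT(128);

/* Tell reclaim how many objects we could free right now. */
static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return atomic_long_read(&demo_cached);
}

/* Drop up to sc->nr_to_scan objects; report how many went away. */
static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	long freed = min_t(long, sc->nr_to_scan,
			   atomic_long_read(&demo_cached));

	if (freed <= 0)
		return SHRINK_STOP;	/* nothing reclaimable this pass */
	atomic_long_sub(freed, &demo_cached);
	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Teardown order matters: unregister_shrinker() (lines 323-336 below) takes shrinker_rwsem for write, so it cannot return while a concurrent shrink_slab() is still calling into the shrinker, and only after it returns is it safe to free the cache the callbacks drain.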
323 void unregister_shrinker(struct shrinker *shrinker)
325 if (!(shrinker->flags & SHRINKER_REGISTERED))
329 list_del(&shrinker->list);
330 shrinker->flags &= ~SHRINKER_REGISTERED;
331 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
332 unregister_memcg_shrinker(shrinker);
335 kfree(shrinker->nr_deferred);
336 shrinker->nr_deferred = NULL;
343 struct shrinker *shrinker, int priority)
352 long batch_size = shrinker->batch ? shrinker->batch
356 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
359 freeable = shrinker->count_objects(shrinker, shrinkctl);
364 * copy the current shrinker scan count into a local variable
365 * and zero it so that other concurrent shrinker invocations
368 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
371 if (shrinker->seeks) {
374 do_div(delta, shrinker->seeks);
387 shrinker->scan_objects, total_scan);
402 * Hence only allow the shrinker to scan the entire cache when
416 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
421 * pass to avoid too frequent shrinker calls, but if the slab has less
441 ret = shrinker->scan_objects(shrinker, shrinkctl);
458 * move the unused scan count back into the shrinker in a
464 &shrinker->nr_deferred[nid]);
466 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
468 trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);
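do_shrink_slab() turns the count_objects() result at 359 into a scan target: delta = freeable >> priority, times 4, divided by seeks (lines 371-374), plus whatever earlier passes deferred, claimed atomically by the xchg at 368; any unscanned remainder is handed back through the atomic add into nr_deferred at 464. The continuation of the truncated line 352 is ": SHRINK_BATCH", a default batch of 128 objects per scan_objects() call. The proportional math at default settings, as a standalone sketch with illustrative sample numbers:

#include <stdio.h>

int main(void)
{
	/* DEF_PRIORITY is 12 and DEFAULT_SEEKS is 2 in upstream kernels. */
	long freeable = 100000, seeks = 2, priority = 12;

	/* Mirrors lines 371-374: delta = freeable >> priority;
	 * delta *= 4; do_div(delta, seeks). */
	long delta = ((freeable >> priority) * 4) / seeks;

	/* Prints 48: about 1/2048 of the cache at the lowest pressure. */
	printf("scan %ld of %ld freeable objects\n", delta, freeable);
	return 0;
}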
497 struct shrinker *shrinker;
499 shrinker = idr_find(&shrinker_idr, i);
500 if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) {
501 if (!shrinker)
508 !(shrinker->flags & SHRINKER_NONSLAB))
511 ret = do_shrink_slab(&sc, shrinker, priority);
515 * After the shrinker reported that it had no objects to
517 * the memcg shrinker map, a new object might have been
519 * case, we invoke the shrinker one more time and reset
530 ret = do_shrink_slab(&sc, shrinker, priority);
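shrink_slab_memcg() (the block from 497) visits only shrinker IDs whose bit is set in the memcg's shrinker map, looking each one up in shrinker_idr at 499. The two do_shrink_slab() calls at 511 and 530 bracket the race described by the comment at 515-519. Reconstructed from that comment plus upstream 5.4-5.10 source (later kernels rename the setter to set_shrinker_bit(), and this vendor tree may differ), the pattern is:

	ret = do_shrink_slab(&sc, shrinker, priority);
	if (ret == SHRINK_EMPTY) {
		clear_bit(i, map->map);
		/* pairs with the barrier in memcg_set_shrinker_bit() */
		smp_mb__after_atomic();
		/* re-scan once: an object may have arrived before the clear */
		ret = do_shrink_slab(&sc, shrinker, priority);
		if (ret == SHRINK_EMPTY)
			ret = 0;	/* truly empty, leave the bit clear */
		else
			memcg_set_shrinker_bit(memcg, nid, i);
	}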
580 struct shrinker *shrinker;
595 list_for_each_entry(shrinker, &shrinker_list, list) {
605 ret = do_shrink_slab(&sc, shrinker, priority);
610 reclaimacct_substage_end(RA_SHRINKSLAB, ret, shrinker);
613 * Bail out if someone want to register a new shrinker to
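The loop in shrink_slab() (from 580) walks shrinker_list under down_read_trylock(&shrinker_rwsem), per the comment at 122; the reclaimacct_substage_end() hook at 610 is this vendor tree's reclaim accounting. The truncated comment at 613 continues in upstream as "prevent the registration from being stalled for long periods by parallel ongoing shrinking", and the check it introduces there is, reconstructed from upstream (the exact code in this tree is unverified):

	if (rwsem_is_contended(&shrinker_rwsem)) {
		freed = freed ? : 1;	/* non-zero: report progress anyway */
		break;
	}

A pending register_shrinker() waits in down_write(), so readers bail out as soon as the rwsem is contended instead of making the writer wait for a full shrinker_list walk.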