Lines Matching defs:shrinker (shrinker registration, teardown, and the shrink_slab() reclaim paths; mm/vmscan.c)

266 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
274 /* This may call shrinker, so it must use down_read_trylock() */
275 id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
285 shrinker->id = id;
292 static void unregister_memcg_shrinker(struct shrinker *shrinker)
294 int id = shrinker->id;
303 static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
309 return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
312 static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
318 return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
384 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
389 static void unregister_memcg_shrinker(struct shrinker *shrinker)
393 static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
399 static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
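The four helpers above (266-318) exist only under CONFIG_MEMCG; the stubs at 384-399 compile the per-memcg bookkeeping away otherwise. What they implement is one atomic deferred-work counter per (memcg, node, shrinker-id) triple, with the id handed out by shrinker_idr at 275. A minimal userspace sketch of just the counter operations; the struct and function names here are illustrative, and the kernel grows the id-indexed array dynamically rather than using a fixed cap:

#include <stdatomic.h>

#define NR_IDS 64   /* illustrative fixed cap; the kernel resizes this map */

struct sketch_shrinker_info {
    atomic_long nr_deferred[NR_IDS];    /* one counter per shrinker id */
};

/* Mirrors xchg_nr_deferred_memcg() at 303-309: take the whole deferred
 * count for this shrinker and zero it in one atomic step. */
static long sketch_xchg(struct sketch_shrinker_info *info, int id)
{
    return atomic_exchange(&info->nr_deferred[id], 0);
}

/* Mirrors add_nr_deferred_memcg() at 312-318: put unfinished work back
 * and report the new total. */
static long sketch_add(struct sketch_shrinker_info *info, int id, long nr)
{
    return atomic_fetch_add(&info->nr_deferred[id], nr) + nr;
}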
447 * single memcg. For example, a memcg-aware shrinker can free one object
473 static long xchg_nr_deferred(struct shrinker *shrinker,
478 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
482 (shrinker->flags & SHRINKER_MEMCG_AWARE))
483 return xchg_nr_deferred_memcg(nid, shrinker,
486 return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
490 static long add_nr_deferred(long nr, struct shrinker *shrinker,
495 if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
499 (shrinker->flags & SHRINKER_MEMCG_AWARE))
500 return add_nr_deferred_memcg(nr, nid, shrinker,
503 return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
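xchg_nr_deferred() and add_nr_deferred() (473-503) are the dispatchers that pick which counter a given reclaim pass uses: nid collapses to 0 when the shrinker is not NUMA-aware, and memcg-charged passes on a memcg-aware shrinker go to the per-memcg array instead of shrinker->nr_deferred. A compilable sketch of just that routing decision; the flag values are illustrative, not the kernel's:

/* Routing sketch for xchg_nr_deferred()/add_nr_deferred(); the logic
 * mirrors lines 478-486 and 495-503. */
#define SK_NUMA_AWARE  (1u << 0)
#define SK_MEMCG_AWARE (1u << 1)

/* Decide where a deferred count lives, collapsing nid to 0 when the
 * shrinker keeps one global counter instead of per-node ones. */
static const char *deferred_home(unsigned int flags, int charged_to_memcg, int *nid)
{
    if (!(flags & SK_NUMA_AWARE))
        *nid = 0;                                   /* 478 / 495 */
    if (charged_to_memcg && (flags & SK_MEMCG_AWARE))
        return "info->nr_deferred[shrinker->id]";   /* 482-483 / 499-500 */
    return "shrinker->nr_deferred[*nid]";           /* 486 / 503 */
}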
603 * Add a shrinker callback to be called from the vm.
605 static int __prealloc_shrinker(struct shrinker *shrinker)
610 if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
611 err = prealloc_memcg_shrinker(shrinker);
615 shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
618 size = sizeof(*shrinker->nr_deferred);
619 if (shrinker->flags & SHRINKER_NUMA_AWARE)
622 shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
623 if (!shrinker->nr_deferred)
630 int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...)
636 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
638 if (!shrinker->name)
641 err = __prealloc_shrinker(shrinker);
643 kfree_const(shrinker->name);
644 shrinker->name = NULL;
650 int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...)
652 return __prealloc_shrinker(shrinker);
656 void free_prealloced_shrinker(struct shrinker *shrinker)
659 kfree_const(shrinker->name);
660 shrinker->name = NULL;
662 if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
664 unregister_memcg_shrinker(shrinker);
669 kfree(shrinker->nr_deferred);
670 shrinker->nr_deferred = NULL;
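Lines 605-670 form the two-phase setup: __prealloc_shrinker() allocates the memcg id and the nr_deferred array, prealloc_shrinker() additionally duplicates the name when CONFIG_SHRINKER_DEBUG is on (630-644) and is a thin wrapper otherwise (650-652), and free_prealloced_shrinker() is the error-path undo. A hedged kernel-style sketch of how a caller splits allocation from publication; the my_cache_* names are hypothetical, but fs/super.c uses this same pattern so the shrinker becomes visible only after the object it scans is fully set up:

#include <linux/shrinker.h>

/* my_cache_* are hypothetical stand-ins for a subsystem's own cache. */
static unsigned long my_cache_count(struct shrinker *s, struct shrink_control *sc);
static unsigned long my_cache_scan(struct shrinker *s, struct shrink_control *sc);
static int my_cache_fill(void);

static struct shrinker my_shrinker = {
    .count_objects = my_cache_count,
    .scan_objects  = my_cache_scan,
    .seeks         = DEFAULT_SEEKS,
};

static int my_cache_init(void)
{
    int err = prealloc_shrinker(&my_shrinker, "my-cache");   /* 630/650 */

    if (err)
        return err;
    if (my_cache_fill()) {                        /* populate before going live */
        free_prealloced_shrinker(&my_shrinker);   /* 656: error-path undo */
        return -ENOMEM;
    }
    register_shrinker_prepared(&my_shrinker);     /* 673: now visible */
    return 0;
}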
673 void register_shrinker_prepared(struct shrinker *shrinker)
676 list_add_tail(&shrinker->list, &shrinker_list);
677 shrinker->flags |= SHRINKER_REGISTERED;
678 shrinker_debugfs_add(shrinker);
682 static int __register_shrinker(struct shrinker *shrinker)
684 int err = __prealloc_shrinker(shrinker);
688 register_shrinker_prepared(shrinker);
693 int register_shrinker(struct shrinker *shrinker, const char *fmt, ...)
699 shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
701 if (!shrinker->name)
704 err = __register_shrinker(shrinker);
706 kfree_const(shrinker->name);
707 shrinker->name = NULL;
712 int register_shrinker(struct shrinker *shrinker, const char *fmt, ...)
714 return __register_shrinker(shrinker);
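register_shrinker_prepared() (673-678) publishes a preallocated shrinker: it goes on shrinker_list, gains SHRINKER_REGISTERED, and shows up in debugfs. For the common case, register_shrinker() (693-714, again split on CONFIG_SHRINKER_DEBUG) does allocation and publication in one call. A minimal module-side sketch of that path; demo_nr_cached()/demo_evict() are hypothetical stand-ins for a driver's own cache accounting:

#include <linux/init.h>
#include <linux/shrinker.h>

static unsigned long demo_nr_cached(void);         /* hypothetical */
static unsigned long demo_evict(unsigned long n);  /* hypothetical */

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
    return demo_nr_cached();   /* 0 tells do_shrink_slab() to skip us */
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
    unsigned long freed = demo_evict(sc->nr_to_scan);

    /* Return the number actually freed; SHRINK_STOP tells reclaim to
     * stop calling us this pass (e.g. on lock contention). */
    return freed ? freed : SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
    .count_objects = demo_count,
    .scan_objects  = demo_scan,
    .seeks         = DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
    /* One-shot path: allocate and publish in a single call (693/712). */
    return register_shrinker(&demo_shrinker, "demo-cache");
}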
722 void unregister_shrinker(struct shrinker *shrinker)
727 if (!(shrinker->flags & SHRINKER_REGISTERED))
731 list_del(&shrinker->list);
732 shrinker->flags &= ~SHRINKER_REGISTERED;
733 if (shrinker->flags & SHRINKER_MEMCG_AWARE)
734 unregister_memcg_shrinker(shrinker);
735 debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
740 kfree(shrinker->nr_deferred);
741 shrinker->nr_deferred = NULL;
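unregister_shrinker() (722-741) is the teardown mirror: it bails out early unless SHRINKER_REGISTERED is set, unpublishes under the write lock, then frees nr_deferred. Because the reclaim callbacks run under the read side of the same lock, once it returns no count_objects()/scan_objects() call is still in flight. Continuing the sketch above:

static void __exit demo_exit(void)
{
    /* Safe even if register_shrinker() failed earlier: line 727 returns
     * early when SHRINKER_REGISTERED was never set. After this call,
     * demo_count()/demo_scan() can no longer be entered. */
    unregister_shrinker(&demo_shrinker);
}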
750 * shrinker invocations have seen an update, before freeing memory, similar to
763 struct shrinker *shrinker, int priority)
771 long batch_size = shrinker->batch ? shrinker->batch
775 freeable = shrinker->count_objects(shrinker, shrinkctl);
780 * copy the current shrinker scan count into a local variable
781 * and zero it so that other concurrent shrinker invocations
784 nr = xchg_nr_deferred(shrinker, shrinkctl);
786 if (shrinker->seeks) {
789 do_div(delta, shrinker->seeks);
803 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
808 * pass to avoid too frequent shrinker calls, but if the slab has less
828 ret = shrinker->scan_objects(shrinker, shrinkctl);
850 * move the unused scan count back into the shrinker in a
853 new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);
855 trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
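do_shrink_slab() (763-855) turns the count_objects() result into a scan target: it takes the deferred count with xchg_nr_deferred() at 784, adds a slice of freeable scaled down by reclaim priority and divided by shrinker->seeks (786-789), scans in batch_size chunks (771, 828), and parks leftover work back via add_nr_deferred() at 853. A small userspace sketch of the arithmetic, using the kernel's constants as I understand them (DEFAULT_SEEKS is 2, DEF_PRIORITY is 12, and the target is capped at twice freeable):

#include <stdio.h>

/* Sketch of the scan-target arithmetic in do_shrink_slab(); the
 * deferral xchg/add around it is elided. */
static long scan_target(long freeable, long deferred, int priority, int seeks)
{
    long delta;

    if (seeks)
        delta = (freeable >> priority) * 4 / seeks;   /* 786-789 */
    else
        delta = freeable / 2;   /* zero-seek objects: trim aggressively */

    long total = (deferred >> priority) + delta;
    if (total > 2 * freeable)   /* never scan more than twice freeable */
        total = 2 * freeable;
    return total;
}

int main(void)
{
    /* 10000 freeable objects, DEFAULT_SEEKS: */
    printf("%ld\n", scan_target(10000, 0, 0, 2));    /* full pressure: 20000 */
    printf("%ld\n", scan_target(10000, 0, 12, 2));   /* DEF_PRIORITY:  4     */
    return 0;
}

The 2x-freeable cap keeps a shrinker that briefly reports a huge freeable count, or one with a large accumulated deferred count, from monopolizing a reclaim pass.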
883 struct shrinker *shrinker;
885 shrinker = idr_find(&shrinker_idr, i);
886 if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) {
887 if (!shrinker)
894 !(shrinker->flags & SHRINKER_NONSLAB))
897 ret = do_shrink_slab(&sc, shrinker, priority);
901 * After the shrinker reported that it had no objects to
903 * the memcg shrinker map, a new object might have been
905 * case, we invoke the shrinker one more time and reset
916 ret = do_shrink_slab(&sc, shrinker, priority);
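Lines 883-916 are from shrink_slab_memcg(), which walks the per-memcg bitmap of shrinkers that may have objects; the comment at 901-905 describes the race the second do_shrink_slab() call closes. Condensed from the surrounding code (identifiers shortened, e.g. info->map to map), the SHRINK_EMPTY handling looks roughly like this:

/* Condensed from 897-916; not a verbatim quote. */
ret = do_shrink_slab(&sc, shrinker, priority);
if (ret == SHRINK_EMPTY) {
    clear_bit(i, map);      /* this shrinker looks empty for this memcg */
    /* A new object may have been queued between count_objects() seeing
     * an empty cache and the clear_bit() above; the barrier pairs with
     * the one in set_shrinker_bit(). */
    smp_mb__after_atomic();
    ret = do_shrink_slab(&sc, shrinker, priority);
    if (ret == SHRINK_EMPTY)
        ret = 0;                           /* really empty */
    else
        set_shrinker_bit(memcg, nid, i);   /* raced: re-arm the bit */
}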
966 struct shrinker *shrinker;
981 list_for_each_entry(shrinker, &shrinker_list, list) {
988 ret = do_shrink_slab(&sc, shrinker, priority);
993 * Bail out if someone wants to register a new shrinker to
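shrink_slab() (966 onward) walks the global shrinker_list under down_read_trylock(&shrinker_rwsem), which is what the comment at 274 alludes to; the comment at 993, cut off by the matcher, belongs to a contention check that abandons the walk early so a pending register/unregister writer is not stalled behind ongoing shrinking. Condensed sketch of that loop, with the shrink_control setup elided:

/* Condensed from shrink_slab(); not a verbatim quote. */
if (!down_read_trylock(&shrinker_rwsem))
    goto out;

list_for_each_entry(shrinker, &shrinker_list, list) {
    ret = do_shrink_slab(&sc, shrinker, priority);
    if (ret == SHRINK_EMPTY)
        ret = 0;
    freed += ret;
    /* Bail out if someone wants to register a new shrinker, so the
     * writer on shrinker_rwsem is not stalled behind this walk. */
    if (rwsem_is_contended(&shrinker_rwsem)) {
        freed = freed ? : 1;   /* nonzero so callers still see progress */
        break;
    }
}
up_read(&shrinker_rwsem);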