/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

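/*
 * Sizing knobs: the static boot-time pool size, the fill level below
 * which the pool is refilled, the size of each per-CPU free pool and
 * the batch size used when moving objects between the per-CPU pools
 * and the global pool.
 */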
#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The workqueue limits the freeing of debug objects to a maximum
 * frequency of 10Hz, with about 1024 objects per freeing operation,
 * i.e. it frees at most ~10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

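/*
 * Refill the global pool up to debug_objects_pool_min_level. Objects
 * are reused from the global free (to-be-freed) list first; only then
 * are new ones allocated from the cache. The GFP flags avoid direct
 * reclaim, so this is safe to call from atomic context.
 */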
static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

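/*
 * Allocate a tracking object for @addr. Fast path: take one from the
 * per-CPU free pool. Fallback: take one from the global pool and,
 * while holding pool_lock, pull one batch ahead into the per-CPU pool.
 */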
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full; if not, refill the
	 * pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

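/*
 * Put an object back. Fast path: stash it in the per-CPU free pool.
 * If that pool is full, move the object plus a batch of per-CPU
 * objects either to the global free (to-be-freed) list or back into
 * the global pool, depending on the global pool's fill level.
 */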
static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
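/* Release the per-CPU free pool of a CPU which went offline. */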
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

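/*
 * Report a state violation. Reports are ratelimited to at most five
 * instances to avoid spamming the log, and are suppressed for the
 * selftest descriptor; debug_objects_warnings is incremented
 * unconditionally.
 */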
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

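/*
 * Warn when the stack annotation of an object does not match reality:
 * either it sits on the current task's stack but was not initialized
 * via debug_object_init_on_stack(), or vice versa.
 */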
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

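/*
 * Look up the object in the bucket. If it is not tracked yet, allocate
 * a tracking object - either unconditionally or, with @alloc_ifstatic,
 * only when the descriptor identifies @addr as a static object.
 * Returns NULL on allocation failure (OOM) and ERR_PTR(-ENOENT) when
 * allocation was not allowed.
 */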
static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();
}

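/*
 * Core of debug_object_init(): transition the object to
 * ODEBUG_STATE_INIT and invoke the fixup callback when an active
 * object is re-initialized.
 */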
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

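/*
 * Typical usage by a tracked subsystem, sketched for illustration only
 * (struct foo, foo_startup() and the fixup callbacks are hypothetical,
 * not part of this file):
 *
 *	static const struct debug_obj_descr foo_debug_descr = {
 *		.name		= "foo",
 *		.fixup_init	= foo_fixup_init,
 *		.fixup_free	= foo_fixup_free,
 *	};
 *
 *	static void foo_startup(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 * Deactivation, destruction and freeing follow the same pattern via
 * debug_object_deactivate(), debug_object_destroy() and
 * debug_object_free(). The selftest descriptor below is a complete
 * in-tree example.
 */
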
#ifdef CONFIG_DEBUG_OBJECTS_FREE
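/*
 * Walk all hash buckets covering the freed memory area. Active objects
 * trigger a "free" report plus the fixup_free callback; all other
 * tracked objects in the area are silently removed.
 */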
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

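/*
 * debugfs interface, usually /sys/kernel/debug/debug_objects/stats.
 *
 * The global counters do not account for the per-CPU free pools, so
 * the per-CPU totals are folded in here (see the comment above
 * obj_pool_min_free).
 */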
static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

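/*
 * Selftest helper: verify that the object at @addr is in the expected
 * state and that the fixup and warning counters match. Any mismatch
 * disables the object debugging code.
 */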
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is called early, while only one CPU is up
	 * and interrupts are disabled, so it is safe to replace the active
	 * object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool with the SLAB_DEBUG_OBJECTS flag set. This flag prevents
 * the debug code from being called on kmem_cache_free() for the debug
 * tracker objects themselves, which would recurse.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools.
	 *
	 * Initialization is not strictly necessary, but is done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
					object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}