Lines Matching refs:object

37  *   Note that the kmemleak_object.use_count is incremented when an object is
45 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
47 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
107 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
132 * object->lock. Insertions or deletions from object_list, gray_list or
139 unsigned int flags; /* object status flags */
144 /* object usage count; object freed when use_count == 0 */
152 /* the total number of pointers found pointing to this object */
156 /* memory ranges to be scanned inside an object (empty for all) */
167 /* flag set after the first reporting of an unreferenced object */
169 /* flag set to not scan the object */
171 /* flag set to fully scan the object when scan_area allocation failed */
192 /* search tree for object boundaries */
278 * with the object->lock held.
281 struct kmemleak_object *object)
283 const u8 *ptr = (const u8 *)object->pointer;
287 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
298 * - white - orphan object, not enough references to it (count < min_count)
303 * Newly created objects don't have any color assigned (object->count == -1)
306 static bool color_white(const struct kmemleak_object *object)
308 return object->count != KMEMLEAK_BLACK &&
309 object->count < object->min_count;
312 static bool color_gray(const struct kmemleak_object *object)
314 return object->min_count != KMEMLEAK_BLACK &&
315 object->count >= object->min_count;
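
The colour rules above are compact but easy to get backwards. Below is a minimal stand-alone sketch (a hypothetical user-space model, not the kernel's definitions) of how count and min_count interact: an object whitened at the start of a scan (count = 0, min_count = 1) stays white until the scanner finds at least one pointer into it.

#include <stdbool.h>
#include <stdio.h>

#define KMEMLEAK_BLACK  -1      /* mirrors the kernel constant */

struct obj { int count, min_count; };

static bool is_white(const struct obj *o)
{
        return o->count != KMEMLEAK_BLACK && o->count < o->min_count;
}

static bool is_gray(const struct obj *o)
{
        return o->min_count != KMEMLEAK_BLACK && o->count >= o->min_count;
}

int main(void)
{
        struct obj o = { .count = 0, .min_count = 1 };  /* freshly whitened */

        printf("white=%d gray=%d\n", is_white(&o), is_gray(&o)); /* white=1 gray=0 */
        o.count++;                      /* the scanner found one reference */
        printf("white=%d gray=%d\n", is_white(&o), is_gray(&o)); /* white=0 gray=1 */
        return 0;
}
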
323 static bool unreferenced_object(struct kmemleak_object *object)
325 return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
326 time_before_eq(object->jiffies + jiffies_min_age,
332 * print_unreferenced function must be called with the object->lock held.
335 struct kmemleak_object *object)
338 unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
340 warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
341 object->pointer, object->size);
343 object->comm, object->pid, object->jiffies,
345 hex_dump_object(seq, object);
348 for (i = 0; i < object->trace_len; i++) {
349 void *ptr = (void *)object->trace[i];
357 * the object->lock held.
359 static void dump_object_info(struct kmemleak_object *object)
362 object->pointer, object->size);
364 object->comm, object->pid, object->jiffies);
365 pr_notice(" min_count = %d\n", object->min_count);
366 pr_notice(" count = %d\n", object->count);
367 pr_notice(" flags = 0x%x\n", object->flags);
368 pr_notice(" checksum = %u\n", object->checksum);
370 stack_trace_print(object->trace, object->trace_len, 4);
374 * Look up a memory block's metadata (kmemleak_object) in the object search
384 struct kmemleak_object *object =
386 if (ptr < object->pointer)
387 rb = object->rb_node.rb_left;
388 else if (object->pointer + object->size <= ptr)
389 rb = object->rb_node.rb_right;
390 else if (object->pointer == ptr || alias)
391 return object;
393 kmemleak_warn("Found object by alias at 0x%08lx\n",
395 dump_object_info(object);
403 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
404 * that once an object's use_count has reached 0, the RCU freeing has already been
405 * registered and the object should no longer be used. This function must be
408 static int get_object(struct kmemleak_object *object)
410 return atomic_inc_not_zero(&object->use_count);
419 struct kmemleak_object *object;
423 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
424 if (object)
425 return object;
430 object = list_first_entry_or_null(&mem_pool_free_list,
431 typeof(*object), object_list);
432 if (object)
433 list_del(&object->object_list);
435 object = &mem_pool[--mem_pool_free_count];
440 return object;
444 * Return the object to either the slab allocator or the memory pool.
446 static void mem_pool_free(struct kmemleak_object *object)
450 if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
451 kmem_cache_free(object_cache, object);
455 /* add the object to the memory pool free list */
457 list_add(&object->object_list, &mem_pool_free_list);
468 struct kmemleak_object *object =
473 * code accessing this object, hence no need for locking.
475 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
479 mem_pool_free(object);
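
Taken together, mem_pool_alloc() and mem_pool_free() above encode a bootstrap detail: allocations requested before object_cache is usable are carved out of the static mem_pool array, entries handed back are parked on mem_pool_free_list for reuse, and only addresses that fall outside the static array (the bounds check above) are returned to the slab allocator.
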
483 * Decrement the object use_count. Once the count is 0, free the object using
489 static void put_object(struct kmemleak_object *object)
491 if (!atomic_dec_and_test(&object->use_count))
495 WARN_ON(object->flags & OBJECT_ALLOCATED);
500 * came from the memory pool. Free the object directly.
503 call_rcu(&object->rcu, free_object_rcu);
505 free_object_rcu(&object->rcu);
509 * Look up an object in the object search tree and increase its use_count.
514 struct kmemleak_object *object;
518 object = lookup_object(ptr, alias);
521 /* check whether the object is still available */
522 if (object && !get_object(object))
523 object = NULL;
526 return object;
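
Most operations later in the file are built from the helpers above and follow one pattern: look the object up (taking a reference), lock it, do the work, unlock, drop the reference. A minimal sketch, assuming only the static helpers shown in these matches; example_op() is a made-up name:

static void example_op(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);   /* use_count incremented on success */
        if (!object)
                return;                         /* unknown or already freed block */

        raw_spin_lock_irqsave(&object->lock, flags);
        /* ... read or update the metadata here ... */
        raw_spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);     /* may schedule RCU freeing once use_count drops to 0 */
}
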
530 * Remove an object from the object_tree_root and object_list. Must be called
533 static void __remove_object(struct kmemleak_object *object)
535 rb_erase(&object->rb_node, &object_tree_root);
536 list_del_rcu(&object->object_list);
540 * Look up an object in the object search tree and remove it from both
541 * object_tree_root and object_list. The returned object's use_count should be
547 struct kmemleak_object *object;
550 object = lookup_object(ptr, alias);
551 if (object)
552 __remove_object(object);
555 return object;
574 struct kmemleak_object *object, *parent;
578 object = mem_pool_alloc(gfp);
579 if (!object) {
585 INIT_LIST_HEAD(&object->object_list);
586 INIT_LIST_HEAD(&object->gray_list);
587 INIT_HLIST_HEAD(&object->area_list);
588 raw_spin_lock_init(&object->lock);
589 atomic_set(&object->use_count, 1);
590 object->flags = OBJECT_ALLOCATED;
591 object->pointer = ptr;
592 object->size = size;
593 object->excess_ref = 0;
594 object->min_count = min_count;
595 object->count = 0; /* white color initially */
596 object->jiffies = jiffies;
597 object->checksum = 0;
601 object->pid = 0;
602 strncpy(object->comm, "hardirq", sizeof(object->comm));
604 object->pid = 0;
605 strncpy(object->comm, "softirq", sizeof(object->comm));
607 object->pid = current->pid;
614 strncpy(object->comm, current->comm, sizeof(object->comm));
618 object->trace_len = __save_stack_trace(object->trace);
635 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
642 kmem_cache_free(object_cache, object);
643 object = NULL;
647 rb_link_node(&object->rb_node, rb_parent, link);
648 rb_insert_color(&object->rb_node, &object_tree_root);
650 list_add_tail_rcu(&object->object_list, &object_list);
653 return object;
657 * Mark the object as not allocated and schedule RCU freeing via put_object().
659 static void __delete_object(struct kmemleak_object *object)
663 WARN_ON(!(object->flags & OBJECT_ALLOCATED));
664 WARN_ON(atomic_read(&object->use_count) < 1);
670 raw_spin_lock_irqsave(&object->lock, flags);
671 object->flags &= ~OBJECT_ALLOCATED;
672 raw_spin_unlock_irqrestore(&object->lock, flags);
673 put_object(object);
682 struct kmemleak_object *object;
684 object = find_and_remove_object(ptr, 0);
685 if (!object) {
687 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
692 __delete_object(object);
702 struct kmemleak_object *object;
705 object = find_and_remove_object(ptr, 1);
706 if (!object) {
708 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
719 start = object->pointer;
720 end = object->pointer + object->size;
722 create_object(start, ptr - start, object->min_count,
725 create_object(ptr + size, end - ptr - size, object->min_count,
728 __delete_object(object);
731 static void __paint_it(struct kmemleak_object *object, int color)
733 object->min_count = color;
735 object->flags |= OBJECT_NO_SCAN;
738 static void paint_it(struct kmemleak_object *object, int color)
742 raw_spin_lock_irqsave(&object->lock, flags);
743 __paint_it(object, color);
744 raw_spin_unlock_irqrestore(&object->lock, flags);
749 struct kmemleak_object *object;
751 object = find_and_get_object(ptr, 0);
752 if (!object) {
753 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
759 paint_it(object, color);
760 put_object(object);
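
For orientation, the paint helpers above are reached from the public annotation hooks; as far as I recall from the mainline source (the intermediate wrappers are not among the matched lines), the mapping is roughly:

/*
 *      kmemleak_not_leak(ptr)  -> make_gray_object(ptr)  -> paint_ptr(ptr, KMEMLEAK_GREY)
 *      kmemleak_ignore(ptr)    -> make_black_object(ptr) -> paint_ptr(ptr, KMEMLEAK_BLACK)
 */
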
764 * Mark an object permanently as gray-colored so that it can no longer be
773 * Mark the object as black-colored so that it is ignored from scans and
782 * Add a scanning area to the object. If at least one such area is added,
788 struct kmemleak_object *object;
793 object = find_and_get_object(ptr, 1);
794 if (!object) {
795 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
801 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
806 raw_spin_lock_irqsave(&object->lock, flags);
808 pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
809 /* mark the object for full scan to avoid false positives */
810 object->flags |= OBJECT_FULL_SCAN;
814 size = untagged_objp + object->size - untagged_ptr;
815 } else if (untagged_ptr + size > untagged_objp + object->size) {
816 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
817 dump_object_info(object);
826 hlist_add_head(&area->node, &object->area_list);
828 raw_spin_unlock_irqrestore(&object->lock, flags);
829 put_object(object);
833 * Any surplus references (object already gray) to 'ptr' are passed to
835 * vm_struct may be used as an alternative reference to the vmalloc'ed object
841 struct kmemleak_object *object;
843 object = find_and_get_object(ptr, 0);
844 if (!object) {
845 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
850 raw_spin_lock_irqsave(&object->lock, flags);
851 object->excess_ref = excess_ref;
852 raw_spin_unlock_irqrestore(&object->lock, flags);
853 put_object(object);
857 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
858 * pointer. Such an object will not be scanned by kmemleak, but references to it
864 struct kmemleak_object *object;
866 object = find_and_get_object(ptr, 0);
867 if (!object) {
868 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
872 raw_spin_lock_irqsave(&object->lock, flags);
873 object->flags |= OBJECT_NO_SCAN;
874 raw_spin_unlock_irqrestore(&object->lock, flags);
875 put_object(object);
879 * kmemleak_alloc - register a newly allocated object
880 * @ptr: pointer to beginning of the object
881 * @size: size of the object
882 * @min_count: minimum number of references to this object. If during memory
884 * the object is reported as a memory leak. If @min_count is 0,
885 * the object is never reported as a leak. If @min_count is -1,
886 * the object is ignored (not scanned and not reported as a leak)
889 * This function is called from the kernel allocators when a new object
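
A hedged sketch of how an allocator outside the core ones would typically pair this hook with kmemleak_free(); kmemleak_alloc() and kmemleak_free() are the real hooks from <linux/kmemleak.h>, while my_pool_alloc()/my_pool_free() and the my_raw_*() backing allocator are made-up names:

#include <linux/kmemleak.h>

void *my_pool_alloc(size_t size, gfp_t gfp)
{
        void *ptr = my_raw_alloc(size, gfp);    /* assumed backing allocator */

        if (ptr)
                /* min_count = 1: report the block if no reference is ever found */
                kmemleak_alloc(ptr, size, 1, gfp);
        return ptr;
}

void my_pool_free(void *ptr)
{
        kmemleak_free(ptr);     /* unregister before the memory is released */
        my_raw_free(ptr);
}
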
903 * kmemleak_alloc_percpu - register a newly allocated __percpu object
904 * @ptr: __percpu pointer to beginning of the object
905 * @size: size of the object
908 * This function is called from the kernel percpu allocator when a new object
930 * kmemleak_vmalloc - register a newly vmalloc'ed object
932 * @size: size of the object
936 * object (memory block) is allocated.
955 * kmemleak_free - unregister a previously registered object
956 * @ptr: pointer to beginning of the object
958 * This function is called from the kernel allocators when an object (memory
971 * kmemleak_free_part - partially unregister a previously registered object
972 * @ptr: pointer to the beginning or inside the object. This also
989 * kmemleak_free_percpu - unregister a previously registered __percpu object
990 * @ptr: __percpu pointer to beginning of the object
992 * This function is called from the kernel percpu allocator when an object
1009 * kmemleak_update_trace - update object allocation stack trace
1010 * @ptr: pointer to beginning of the object
1012 * Override the object allocation stack trace for cases where the actual
1017 struct kmemleak_object *object;
1025 object = find_and_get_object((unsigned long)ptr, 1);
1026 if (!object) {
1028 kmemleak_warn("Updating stack trace for unknown object at %p\n",
1034 raw_spin_lock_irqsave(&object->lock, flags);
1035 object->trace_len = __save_stack_trace(object->trace);
1036 raw_spin_unlock_irqrestore(&object->lock, flags);
1038 put_object(object);
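
A hedged sketch of when kmemleak_update_trace() helps: a generic helper performs the allocation, but the stack trace worth reporting is that of its caller. Only kmemleak_update_trace() is the real API here; the cache helper names are made up:

void *my_cache_get(struct my_cache *cache)
{
        void *obj = my_cache_refill(cache);     /* may kmalloc() internally */

        if (obj)
                /* re-record the trace so leak reports point at this caller */
                kmemleak_update_trace(obj);
        return obj;
}
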
1043 * kmemleak_not_leak - mark an allocated object as false positive
1044 * @ptr: pointer to beginning of the object
1046 * Calling this function on an object will cause the memory block to no longer
1059 * kmemleak_ignore - ignore an allocated object
1060 * @ptr: pointer to beginning of the object
1062 * Calling this function on an object will cause the memory block to be
1077 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1078 * @ptr: pointer to beginning or inside the object. This also
1083 * This function is used when it is known that only certain parts of an object
1097 * kmemleak_no_scan - do not scan an allocated object
1098 * @ptr: pointer to beginning of the object
1101 * in situations where it is known that the given object does not contain any
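
Hedged usage examples for the four annotation hooks documented above; the hook names and signatures are the real ones, everything else (the device structure, buffer sizes) is made up for illustration, and error handling is omitted:

#include <linux/kmemleak.h>
#include <linux/slab.h>

static int my_driver_init(struct my_dev *dev)
{
        dev->fw_buf = kmalloc(FW_BUF_SIZE, GFP_KERNEL);
        /* referenced only by the hardware, never from kernel memory:
         * keep tracking and scanning it, but never report it as a leak */
        kmemleak_not_leak(dev->fw_buf);

        dev->bounce = kmalloc(BOUNCE_SIZE, GFP_KERNEL);
        /* neither report it nor scan it for pointers to other objects */
        kmemleak_ignore(dev->bounce);

        dev->desc = kmalloc(sizeof(*dev->desc), GFP_KERNEL);
        /* only the header part of the descriptor can hold kernel pointers */
        kmemleak_scan_area(dev->desc, sizeof(struct my_desc_header), GFP_KERNEL);

        dev->ring = kmalloc(RING_SIZE, GFP_KERNEL);
        /* pure data: skip scanning it so stray values cannot mask real leaks */
        kmemleak_no_scan(dev->ring);

        return 0;
}
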
1117 * @phys: physical address of the object
1118 * @size: size of the object
1119 * @min_count: minimum number of references to this object.
1134 * @phys: physical address of the beginning or inside an object. This
1148 * @phys: physical address of the object
1160 * @phys: physical address of the object
1170 * Update an object's checksum and return true if it was modified.
1172 static bool update_checksum(struct kmemleak_object *object)
1174 u32 old_csum = object->checksum;
1178 object->checksum = crc32(0, (void *)object->pointer, object->size);
1182 return object->checksum != old_csum;
1186 * Update an object's references. object->lock must be held by the caller.
1188 static void update_refs(struct kmemleak_object *object)
1190 if (!color_white(object)) {
1196 * Increase the object's reference count (number of pointers to the
1198 * object's color will become gray and it will be added to the
1201 object->count++;
1202 if (color_gray(object)) {
1204 WARN_ON(!get_object(object));
1205 list_add_tail(&object->gray_list, &gray_list);
1245 struct kmemleak_object *object;
1262 * object->use_count cannot be dropped to 0 while the object
1266 object = lookup_object(pointer, 1);
1267 if (!object)
1269 if (object == scanned)
1274 * Avoid the lockdep recursive warning on object->lock being
1278 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1279 /* only pass surplus references (object already gray) */
1280 if (color_gray(object)) {
1281 excess_ref = object->excess_ref;
1282 /* no need for update_refs() if object already gray */
1285 update_refs(object);
1287 raw_spin_unlock(&object->lock);
1290 object = lookup_object(excess_ref, 0);
1291 if (!object)
1293 if (object == scanned)
1296 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1297 update_refs(object);
1298 raw_spin_unlock(&object->lock);
1323 * that object->use_count >= 1.
1325 static void scan_object(struct kmemleak_object *object)
1331 * Once the object->lock is acquired, the corresponding memory block
1334 raw_spin_lock_irqsave(&object->lock, flags);
1335 if (object->flags & OBJECT_NO_SCAN)
1337 if (!(object->flags & OBJECT_ALLOCATED))
1338 /* already freed object */
1340 if (hlist_empty(&object->area_list) ||
1341 object->flags & OBJECT_FULL_SCAN) {
1342 void *start = (void *)object->pointer;
1343 void *end = (void *)(object->pointer + object->size);
1348 scan_block(start, next, object);
1354 raw_spin_unlock_irqrestore(&object->lock, flags);
1356 raw_spin_lock_irqsave(&object->lock, flags);
1357 } while (object->flags & OBJECT_ALLOCATED);
1359 hlist_for_each_entry(area, &object->area_list, node)
1362 object);
1364 raw_spin_unlock_irqrestore(&object->lock, flags);
1373 struct kmemleak_object *object, *tmp;
1380 object = list_entry(gray_list.next, typeof(*object), gray_list);
1381 while (&object->gray_list != &gray_list) {
1386 scan_object(object);
1388 tmp = list_entry(object->gray_list.next, typeof(*object),
1391 /* remove the object from the list and release it */
1392 list_del(&object->gray_list);
1393 put_object(object);
1395 object = tmp;
1408 struct kmemleak_object *object;
1417 list_for_each_entry_rcu(object, &object_list, object_list) {
1418 raw_spin_lock_irqsave(&object->lock, flags);
1422 * 1 reference to any object at this point.
1424 if (atomic_read(&object->use_count) > 1) {
1425 pr_debug("object->use_count = %d\n",
1426 atomic_read(&object->use_count));
1427 dump_object_info(object);
1430 /* reset the reference count (whiten the object) */
1431 object->count = 0;
1432 if (color_gray(object) && get_object(object))
1433 list_add_tail(&object->gray_list, &gray_list);
1435 raw_spin_unlock_irqrestore(&object->lock, flags);
1502 list_for_each_entry_rcu(object, &object_list, object_list) {
1503 raw_spin_lock_irqsave(&object->lock, flags);
1504 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1505 && update_checksum(object) && get_object(object)) {
1507 object->count = object->min_count;
1508 list_add_tail(&object->gray_list, &gray_list);
1510 raw_spin_unlock_irqrestore(&object->lock, flags);
1529 list_for_each_entry_rcu(object, &object_list, object_list) {
1530 raw_spin_lock_irqsave(&object->lock, flags);
1531 if (unreferenced_object(object) &&
1532 !(object->flags & OBJECT_REPORTED)) {
1533 object->flags |= OBJECT_REPORTED;
1536 print_unreferenced(NULL, object);
1540 raw_spin_unlock_irqrestore(&object->lock, flags);
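
Read together, the loops above describe one scan cycle. The outline below is a sketch of that flow, assuming nothing essential happens between the quoted fragments; it is not the exact kernel function:

static void scan_cycle_sketch(void)
{
        /* 1. Whiten every tracked object (count = 0) and seed gray_list with
         *    the objects that are gray by construction (e.g. min_count == 0
         *    or painted gray), taking a reference on each. */
        /* 2. Scan memory with scan_block(): every value that points into a
         *    tracked object bumps that object's count via update_refs(), and
         *    objects that turn gray are appended to gray_list. */
        /* 3. Drain gray_list with scan_object(); scanning a gray object may
         *    append further, newly grayed objects to the same list. */
        /* 4. Objects still white whose checksum changed since the last pass
         *    get one more gray round, since a reference may have been written
         *    after their block had already been scanned. */
        /* 5. Report objects that remain white, are older than jiffies_min_age
         *    and have not been reported yet (OBJECT_REPORTED). */
}
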
1618 * Iterate over the object_list and return the first valid object at or after
1624 struct kmemleak_object *object;
1633 list_for_each_entry_rcu(object, &object_list, object_list) {
1636 if (get_object(object))
1639 object = NULL;
1641 return object;
1645 * Return the next object in the object_list. The function decrements the
1646 * use_count of the previous object and increases that of the next one.
1668 * Decrement the use_count of the last object required, if any.
1685 * Print the information for an unreferenced object to the seq file.
1689 struct kmemleak_object *object = v;
1692 raw_spin_lock_irqsave(&object->lock, flags);
1693 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1694 print_unreferenced(seq, object);
1695 raw_spin_unlock_irqrestore(&object->lock, flags);
1714 struct kmemleak_object *object;
1719 object = find_and_get_object(addr, 0);
1720 if (!object) {
1721 pr_info("Unknown object at 0x%08lx\n", addr);
1725 raw_spin_lock_irqsave(&object->lock, flags);
1726 dump_object_info(object);
1727 raw_spin_unlock_irqrestore(&object->lock, flags);
1729 put_object(object);
1741 struct kmemleak_object *object;
1745 list_for_each_entry_rcu(object, &object_list, object_list) {
1746 raw_spin_lock_irqsave(&object->lock, flags);
1747 if ((object->flags & OBJECT_REPORTED) &&
1748 unreferenced_object(object))
1749 __paint_it(object, KMEMLEAK_GREY);
1750 raw_spin_unlock_irqrestore(&object->lock, flags);
1773 * dump=... - dump information about the object found at the given address
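
The 'dump=...' command above is one of several accepted by the debugfs file. In practice the interface is driven with writes and reads such as 'echo scan > /sys/kernel/debug/kmemleak' to trigger a scan, 'cat /sys/kernel/debug/kmemleak' to list the objects currently reported as unreferenced, 'echo dump=<address> > /sys/kernel/debug/kmemleak' to print the dump_object_info() details for one object, and 'echo clear > /sys/kernel/debug/kmemleak' to paint the currently reported objects grey (the __paint_it(object, KMEMLEAK_GREY) loop quoted above).
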
1853 struct kmemleak_object *object, *tmp;
1859 list_for_each_entry_safe(object, tmp, &object_list, object_list) {
1860 __remove_object(object);
1861 __delete_object(object);
1877 * longer track object freeing. Ordering of the scan thread stopping and