/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <stdbool.h>
#include <sys/mman.h>
#include <debug.h>
#include <fcntl.h>
#include <dlfcn.h>
#include <unistd.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"
#include "oldmalloc/malloc_impl.h"

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

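/*
 * Global bookkeeping for the malloc checker:
 *  - g_enable_check gates all checking and is set by mem_check_init();
 *  - recycle_list quarantines freed chunks before they go back to the allocator;
 *  - block_list links every heap block so the whole heap can be walked;
 *  - mem_stat keeps the per-thread allocation records and the output file state.
 */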
bool g_enable_check = false;
int g_recycle_num;
size_t g_recycle_size;
int g_mem_lock[2];
static struct chunk recycle_list;
static struct heap_block block_list;

static struct {
    struct stat_bin bins[PTHREAD_NUM_MAX];
    struct stat_bin free_list;
    size_t p_total_size;
    size_t peak_size;
    char *f_path;
    char f_path_buf[PATH_MAX];
    int fd;
    bool verbose;
} mem_stat;

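/*
 * Futex-style lock in the style of musl's old malloc: lk[0] is the lock word,
 * lk[1] flags the presence of waiters. Single-threaded processes skip locking.
 */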
static inline void lock(volatile int *lk)
{
    if (libc.threads_minus_1)
        while (a_swap(lk, 1)) __wait(lk, lk + 1, 1, 1);
}

static inline void unlock(volatile int *lk)
{
    if (lk[0]) {
        a_store(lk, 0);
        if (lk[1]) __wake(lk, 1, 1);
    }
}

static inline void lock_stat_bin(int tid)
{
    lock(mem_stat.bins[tid].lock);
    if (!mem_stat.bins[tid].head.next)
        mem_stat.bins[tid].head.next = mem_stat.bins[tid].head.prev = &mem_stat.bins[tid].head;
}

static inline void unlock_stat_bin(int tid)
{
    unlock(mem_stat.bins[tid].lock);
}

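/*
 * Tracking nodes are carved out of anonymous pages (see expand_mem()) and
 * recycled through mem_stat.free_list; insert_free_list() appends a node to
 * the tail of that list.
 */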
static void insert_free_list(struct node *node)
{
    struct list *list = NULL;

    list = mem_stat.free_list.head.prev;
    node->list.prev = list;
    node->list.next = list->next;
    list->next = &node->list;
    node->list.next->prev = &node->list;
}

static int try_delete_node(int tid, void *ptr)
{
    struct list *list = NULL;
    struct node *node = NULL;

    lock_stat_bin(tid);
    for (list = mem_stat.bins[tid].head.next; list != &mem_stat.bins[tid].head; list = list->next) {
        node = (struct node *)((uintptr_t)list - (uint32_t)&((struct node *)0)->list);
        if (node->ptr != ptr) {
            continue;
        }
        list->prev->next = list->next;
        list->next->prev = list->prev;
        mem_stat.bins[tid].t_total_size -= node->size;
        insert_free_list(node);
        mem_stat.p_total_size -= node->size;
        unlock_stat_bin(tid);
        return 0;
    }
    unlock_stat_bin(tid);
    return -1;
}

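/*
 * Drop the allocation record for ptr: try the calling thread's bin first,
 * then every other bin. Returns 0 on success, -1 if ptr was never tracked.
 */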
int delete_node(void *ptr)
{
    int tid = ((struct pthread *)pthread_self())->tid;
    int status, i;

    lock(g_mem_lock);
    status = try_delete_node(tid, ptr);
    if (status == 0) {
        unlock(g_mem_lock);
        return 0;
    }

    for (i = 0; i < PTHREAD_NUM_MAX; ++i) {
        if (i == tid) {
            continue;
        }
        status = try_delete_node(i, ptr);
        if (status == 0) {
            unlock(g_mem_lock);
            return 0;
        }
    }
    unlock(g_mem_lock);
    return -1;
}

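/*
 * Map one anonymous page, chain all but the first node onto the free list
 * and hand the first node back to the caller.
 */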
static struct node *expand_mem(void)
{
    struct node *ptr = NULL;
    struct node *node = NULL;
    size_t node_len = sizeof(struct node);
    int n_node = PAGE_SIZE / node_len;
    int i;

    ptr = __mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED) {
        printf("%s %d, map failed, err: %s\n", __func__, __LINE__, strerror(errno));
        return NULL;
    }

    for (i = 1; i < n_node; ++i) {
        node = (struct node *)((uintptr_t)ptr + i * node_len);
        insert_free_list(node);
    }

    return ptr;
}

static struct node *alloc_node(void)
{
    struct list *list = NULL;
    struct node *node = NULL;

    if (!mem_stat.free_list.head.next) {
        mem_stat.free_list.head.next = mem_stat.free_list.head.prev = &mem_stat.free_list.head;
    }

    for (list = mem_stat.free_list.head.next; list != &mem_stat.free_list.head; list = list->next) {
        node = (struct node *)((uintptr_t)list - (uint32_t)&((struct node *)0)->list);
        list->prev->next = list->next;
        list->next->prev = list->prev;
        return node;
    }

    return expand_mem();
}

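/*
 * Fill a tracking node with the caller's tid/pid, the user pointer, the
 * requested size and a backtrace taken past the allocator wrapper frames
 * (BACKTRACE_OFFSET), updating the process-wide total and peak.
 */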
static struct node *create_node(int tid, void *ptr, size_t size)
{
    pid_t pid = getpid();
    struct node *node = NULL;
    void *lr[BACKTRACE_DEPTH_MAX + BACKTRACE_OFFSET + 1] = { 0 };

    node = alloc_node();
    if (node == NULL) {
        return NULL;
    }
    mem_stat.p_total_size += size;
    mem_stat.peak_size = (mem_stat.peak_size < mem_stat.p_total_size) ? mem_stat.p_total_size : mem_stat.peak_size;
    node->tid = tid;
    node->pid = pid;
    node->ptr = ptr;
    node->size = size;
    backtrace(lr, BACKTRACE_DEPTH_MAX + BACKTRACE_OFFSET + 1);
    memcpy(node->lr, lr + BACKTRACE_OFFSET + 1, BACKTRACE_DEPTH_MAX * sizeof(void *));
    return node;
}

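/*
 * Record a fresh allocation in the caller's per-thread bin; both the global
 * lock and the bin lock are held while the node is linked in.
 */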
void insert_node(void *ptr, size_t size)
{
    int tid = ((struct pthread *)pthread_self())->tid;
    struct list *list = NULL;
    struct node *node = NULL;

    lock(g_mem_lock);
    node = create_node(tid, ptr, size);
    if (node == NULL) {
        unlock(g_mem_lock);
        return;
    }

    lock_stat_bin(tid);
    mem_stat.bins[tid].t_total_size += size;
    list = mem_stat.bins[tid].head.prev;
    node->list.prev = list;
    node->list.next = list->next;
    list->next = &node->list;
    node->list.next->prev = &node->list;
    unlock_stat_bin(tid);
    unlock(g_mem_lock);
}

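/*
 * Build the per-process record file name by prefixing the basename of
 * f_path with "pid(<pid>)_", so that each process writes its own file.
 */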
static void file_path_init(void)
{
    char *pos = NULL;
    int len;

    if (!mem_stat.f_path) {
        return;
    }

    pos = strrchr(mem_stat.f_path, '/');
    if (pos) {
        len = pos - mem_stat.f_path + 1;
        strncpy(mem_stat.f_path_buf, mem_stat.f_path, PATH_MAX - 1);
        snprintf(mem_stat.f_path_buf + len, PATH_MAX - len, "pid(%d)_%s", getpid(), pos + 1);
    } else {
        snprintf(mem_stat.f_path_buf, PATH_MAX, "pid(%d)_%s", getpid(), mem_stat.f_path);
    }
}

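/*
 * Make sure there is somewhere to write the report: either verbose output
 * (serial port/stdout) or the record file, which is created on first use.
 */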
static bool get_file(void)
{
    if (!g_enable_check) {
        printf("Memory checking is not enabled. Call mem_check_init(char *) in your program, "
               "or pass --mwatch or --mrecord <full path> on the command line "
               "when executing your program.\n");
        return false;
    }

    if (mem_stat.verbose) {
        return true;
    }

    file_path_init();
    if (!access(mem_stat.f_path_buf, 0)) {
        return true;
    }
    mem_stat.fd = open(mem_stat.f_path_buf, O_RDWR | O_CREAT, 0644); /* O_CREAT requires an explicit mode */
    if (mem_stat.fd < 0) {
        printf("err: %s create failed, memory info will be output to the serial port!\n", mem_stat.f_path_buf);
        mem_stat.verbose = true;
    }
    return true;
}

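/*
 * Resolve each return address with dladdr() and emit one
 * "#NN: <symbol+offset>[module offset] -> module" line per frame, either to
 * stdout (verbose) or to the record file. In checkpoint mode a short
 * "[Check point]" header is printed and the depth is capped at
 * CHECK_POINT_TRACE_MAX frames.
 */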
static int get_backtrace_info(void **buffer, int nptr, int fd, bool verbose, bool checkpoint)
{
    int i, ret;
    char str_buf[ITEM_BUFFER_SIZE];
    Dl_info info = { 0 };
    bool checkpoint_head = false;
    int checkpoint_trace_num = 0;
    bool symbol_found;

    for (i = 0; i < nptr; ++i) {
        symbol_found = true;
        dladdr((void *)buffer[i], &info);
        if ((info.dli_fname == NULL) || (info.dli_fname[0] == '\0')) {
            symbol_found = false;
        }

        if (checkpoint && !checkpoint_head) {
            checkpoint_head = true;
            if (verbose) {
                printf(" [Check point]:\n");
            } else {
                snprintf(str_buf, ITEM_BUFFER_SIZE, " [Check point]:\n");
                ret = write(fd, str_buf, strlen(str_buf));
                if (ret != strlen(str_buf)) {
                    goto err;
                }
            }
        }
        if (verbose) {
            symbol_found ?
                printf("\t#%02d: <%s+%#x>[%#x] -> %s\n", i, info.dli_sname, (uintptr_t)buffer[i] -
                    (uintptr_t)info.dli_saddr, (uintptr_t)buffer[i] - (uintptr_t)info.dli_fbase, info.dli_fname) :
                printf("\t#%02d: %#x\n", i, buffer[i]);
        } else {
            symbol_found ?
                snprintf(str_buf, ITEM_BUFFER_SIZE, "\t#%02d: <%s+%#x>[%#x] -> %s\n", i, info.dli_sname,
                    (uintptr_t)buffer[i] - (uintptr_t)info.dli_saddr, (uintptr_t)buffer[i] - (uintptr_t)info.dli_fbase,
                    info.dli_fname) :
                snprintf(str_buf, ITEM_BUFFER_SIZE, "\t#%02d: %#x\n", i, buffer[i]);
            ret = write(fd, str_buf, strlen(str_buf));
            if (ret != strlen(str_buf)) {
                goto err;
            }
        }
        if (checkpoint) {
            checkpoint_trace_num++;
            if (checkpoint_trace_num == CHECK_POINT_TRACE_MAX) {
                break;
            }
        }
    }
    return 0;
err:
    printf("Write failed, err: %s\n", strerror(errno));
    return ret;
}

static int print_integrity_info(struct node *node)
{
    int ret;
    char buffer[ITEM_BUFFER_SIZE];
    char *str = "The possible attacker was allocated from:";

    if (mem_stat.verbose) {
        printf("\n==PID:%d== Memory integrity information:\n", getpid());
        printf(" [TID:%d PID:%d allocated addr: %#x, size: %#x] %s\n", node->tid, node->pid, node->ptr, node->size,
            str);
    } else {
        snprintf(buffer, ITEM_BUFFER_SIZE, "\n==PID:%d== Memory integrity information:\n", getpid());
        ret = write(mem_stat.fd, buffer, strlen(buffer));
        if (ret != strlen(buffer)) {
            goto err;
        }
        snprintf(buffer, ITEM_BUFFER_SIZE, " [TID:%d PID:%d allocated addr: %#x, size: %#x] %s\n", node->tid, node->pid,
            node->ptr, node->size, str);
        ret = write(mem_stat.fd, buffer, strlen(buffer));
        if (ret != strlen(buffer)) {
            goto err;
        }
    }
    return 0;
err:
    printf("Write failed, err: %s\n", strerror(errno));
    return ret;
}

static int check_mem_integrity(int tid, void *ptr)
{
    struct list *list = NULL;
    struct node *node = NULL;
    int nptr = 0;

    lock_stat_bin(tid);
    for (list = mem_stat.bins[tid].head.next; list != &mem_stat.bins[tid].head; list = list->next) {
        node = (struct node *)((uintptr_t)list - (uint32_t)&((struct node *)0)->list);
        if (node->ptr != ptr) {
            continue;
        }
        if (print_integrity_info(node) != 0) {
            unlock_stat_bin(tid);
            printf("Memory integrity check failed!\n");
            return -1;
        }
        while (node->lr[nptr] != NULL) {
            ++nptr;
            if (nptr == BACKTRACE_DEPTH_MAX) {
                break;
            }
        }
        if ((nptr == 0) || (get_backtrace_info(node->lr, nptr, mem_stat.fd, mem_stat.verbose, false) != 0)) {
            unlock_stat_bin(tid);
            printf("get backtrace failed!\n");
            return -1;
        }
        if (!mem_stat.verbose) {
            printf("Memory integrity information saved in %s\n", mem_stat.f_path_buf);
        }
        unlock_stat_bin(tid);
        return 0;
    }
    unlock_stat_bin(tid);
    return 1;
}

static void get_integrity_info(void *ptr)
{
    int i, status;
    int tid = ((struct pthread *)pthread_self())->tid;

    status = check_mem_integrity(tid, ptr);
    if (status != 1) {
        return;
    }

    for (i = 0; i < PTHREAD_NUM_MAX; ++i) {
        if (i == tid) {
            continue;
        }
        status = check_mem_integrity(i, ptr);
        if (status != 1) {
            return;
        }
    }
}

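/*
 * Every chunk header carries checksum = CHUNK_SIZE ^ CHUNK_PSIZE ^ NODE_MAGIC.
 * If the stored checksum no longer matches, the header has been overwritten,
 * typically by an overflow of the preceding allocation.
 */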
bool is_invalid(struct chunk *self)
{
    uint32_t checksum = CHUNK_SIZE(self) ^ CHUNK_PSIZE(self) ^ NODE_MAGIC;
    return checksum != self->checksum;
}

void calculate_checksum(struct chunk *cur, struct chunk *next)
{
    if (cur) {
        cur->checksum = CHUNK_SIZE(cur) ^ CHUNK_PSIZE(cur) ^ NODE_MAGIC;
    }

    if (next) {
        next->checksum = CHUNK_SIZE(next) ^ CHUNK_PSIZE(next) ^ NODE_MAGIC;
    }
}

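/*
 * Walk every recorded heap block chunk by chunk. On the first corrupted
 * header, report the allocation that precedes it (the likely overflow
 * source) and abort the process via a_crash().
 */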
void check_heap_integrity(void)
{
    struct chunk *cur = NULL;
    struct chunk *next = NULL;
    struct heap_block *block = NULL;

    if (!block_list.next) {
        return;
    }

    lock(g_mem_lock);
    if (!get_file()) {
        unlock(g_mem_lock);
        return;
    }
    block = block_list.next;
    while (block != &block_list) {
        cur = BLOCK_TO_CHUNK(block);
        do {
            next = NEXT_CHUNK(cur);
            if (is_invalid(next)) {
                get_integrity_info(CHUNK_TO_MEM(cur));
                unlock(g_mem_lock);
                a_crash();
            }
            cur = next;
        } while (CHUNK_SIZE(next));
        block = block->next;
    }
    unlock(g_mem_lock);
    printf("\nHeap integrity check ok!\n");
}

void check_chunk_integrity(struct chunk *cur)
{
    struct chunk *next = NULL;

    if (is_invalid(cur)) {
        check_heap_integrity();
    }

    lock(g_mem_lock);
    next = NEXT_CHUNK(cur);
    if ((CHUNK_SIZE(next)) && is_invalid(next)) {
        get_integrity_info(CHUNK_TO_MEM(cur));
        unlock(g_mem_lock);
        a_crash();
    }
    unlock(g_mem_lock);
}

void insert_block_list(struct chunk *self)
{
    struct heap_block *block = CHUNK_TO_BLOCK(self);
    struct heap_block *cur = NULL;

    if (!block_list.next) {
        block_list.next = block_list.prev = &block_list;
    }

    cur = block_list.prev;
    block->next = cur->next;
    block->prev = cur;
    cur->next = block;
    block_list.prev = block;
}

void get_free_trace(void *ptr)
{
    void *lr[BACKTRACE_DEPTH_MAX + BACKTRACE_OFFSET] = { 0 };
    int tid = ((struct pthread *)pthread_self())->tid;
    char buffer[ITEM_BUFFER_SIZE];
    int nptr, ret;

    lock(g_mem_lock);
    if (!get_file()) {
        unlock(g_mem_lock);
        return;
    }
    if (mem_stat.verbose) {
        printf("\n==PID:%d== double free\n", getpid());
        printf(" [TID:%d freed addr: %#x]:\n", tid, ptr);
    } else {
        snprintf(buffer, ITEM_BUFFER_SIZE, "\n==PID:%d== double free\n", getpid());
        ret = write(mem_stat.fd, buffer, strlen(buffer));
        if (ret != strlen(buffer)) {
            goto err;
        }
        snprintf(buffer, ITEM_BUFFER_SIZE, " [TID:%d freed addr: %#x]:\n", tid, ptr);
        ret = write(mem_stat.fd, buffer, strlen(buffer));
        if (ret != strlen(buffer)) {
            goto err;
        }
    }

    nptr = backtrace(lr, BACKTRACE_DEPTH_MAX + BACKTRACE_OFFSET);
    if (get_backtrace_info(lr + BACKTRACE_OFFSET, nptr - BACKTRACE_OFFSET, mem_stat.fd, mem_stat.verbose, false) != 0) {
        printf("Trace failed\n");
    }

    unlock(g_mem_lock);
    return;
err:
    printf("Write failed, err: %s\n", strerror(errno));
    unlock(g_mem_lock);
    return;
}

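/*
 * Signal-triggered statistics dump: prints the call site as a "[Check point]"
 * trace, then the heap usage of every thread plus the process-wide total and
 * peak, either to stdout or to the record file.
 */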
void watch_mem(void)
{
    int tid, ret;
    char buffer[ITEM_BUFFER_SIZE];
    void *lr[BACKTRACE_DEPTH_MAX + BACKTRACE_OFFSET] = { 0 };
    pid_t pid = getpid();
    int nptr;

    lock(g_mem_lock);
    if (!get_file()) {
        unlock(g_mem_lock);
        return;
    }
    if (mem_stat.verbose) {
        printf("\n==PID:%d== Heap memory statistics(bytes):\n", pid);
    } else {
        snprintf(buffer, ITEM_BUFFER_SIZE, "\n==PID:%d== Heap memory statistics(bytes):\n", pid);
        ret = write(mem_stat.fd, buffer, strlen(buffer));
        if (ret != strlen(buffer)) {
            goto err2;
        }
    }
    nptr = backtrace(lr, BACKTRACE_DEPTH_MAX + BACKTRACE_OFFSET);
    if (get_backtrace_info(lr + BACKTRACE_OFFSET, nptr - BACKTRACE_OFFSET, mem_stat.fd, mem_stat.verbose, true) != 0) {
        printf("Check failed\n");
        unlock(g_mem_lock);
        return;
    }
    for (tid = 0; tid < PTHREAD_NUM_MAX; ++tid) {
        lock_stat_bin(tid);
        if (mem_stat.bins[tid].t_total_size == 0) {
            unlock_stat_bin(tid);
            continue;
        }
        if (mem_stat.verbose) {
            printf("\n [TID: %d, Used: %#x]", tid, mem_stat.bins[tid].t_total_size);
        } else {
            snprintf(buffer, ITEM_BUFFER_SIZE, "\n [TID: %d, Used: %#x]", tid, mem_stat.bins[tid].t_total_size);
            ret = write(mem_stat.fd, buffer, strlen(buffer));
            if (ret != strlen(buffer)) {
                goto err1;
            }
        }
        unlock_stat_bin(tid);
    }
    if (mem_stat.verbose) {
        printf("\n\n==PID:%d== Total heap: %#x byte(s), Peak: %#x byte(s)\n", pid,
            mem_stat.p_total_size, mem_stat.peak_size);
    } else {
        snprintf(buffer, ITEM_BUFFER_SIZE, "\n\n==PID:%d== Total heap: %#x byte(s), Peak: %#x byte(s)\n", pid,
            mem_stat.p_total_size, mem_stat.peak_size);
        ret = write(mem_stat.fd, buffer, strlen(buffer));
        if (ret != strlen(buffer)) {
            goto err2;
        }
    }
    if (!mem_stat.verbose) {
        printf("Memory statistics information saved in %s\n", mem_stat.f_path_buf);
    }
    unlock(g_mem_lock);
    return;
err1:
    unlock_stat_bin(tid);
err2:
    printf("Write failed, err: %s\n", strerror(errno));
    unlock(g_mem_lock);
}

static int get_node_info(struct node *node, int fd, bool verbose, bool mem_leak_exist)
{
    char buffer[ITEM_BUFFER_SIZE];
    void *lr[BACKTRACE_DEPTH_MAX + BACKTRACE_OFFSET] = { 0 };
    int nptr, ret = -1; /* default so the error path never returns an uninitialized value */

    if (!mem_leak_exist) {
        if (verbose) {
            printf("\n==PID:%d== Detected memory leak(s):\n", getpid());
        } else {
            snprintf(buffer, ITEM_BUFFER_SIZE, "\n==PID:%d== Detected memory leak(s):\n", getpid());
            ret = write(fd, buffer, strlen(buffer));
            if (ret != strlen(buffer)) {
                goto err;
            }
        }
        nptr = backtrace(lr, BACKTRACE_DEPTH_MAX + BACKTRACE_OFFSET);
        if (get_backtrace_info(lr + BACKTRACE_OFFSET, nptr - BACKTRACE_OFFSET, mem_stat.fd, mem_stat.verbose, true) != 0) {
            printf("Check failed\n");
            goto err;
        }
    }

    if (verbose) {
        printf("\n [TID:%d Leak:%#x byte(s)] Allocated from:\n", node->tid, node->size);
    } else {
        snprintf(buffer, ITEM_BUFFER_SIZE, "\n [TID:%d Leak:%#x byte(s)] Allocated from:\n", node->tid, node->size);
        ret = write(fd, buffer, strlen(buffer));
        if (ret != strlen(buffer)) {
            goto err;
        }
    }
    return 0;
err:
    printf("Write failed, err: %s\n", strerror(errno));
    return ret;
}

static void print_summary_info(size_t leak_size, size_t allocs, int fd, bool verbose, bool mem_leak_exist)
{
    char buffer[ITEM_BUFFER_SIZE];
    int ret;

    if (!mem_leak_exist) {
        if (verbose) {
            printf("\nNo memory leak!\n");
            return;
        } else {
            snprintf(buffer, ITEM_BUFFER_SIZE, "\nNo memory leak!\n");
            ret = write(fd, buffer, strlen(buffer));
            if (ret != strlen(buffer)) {
                printf("Write failed, err: %s\n", strerror(errno));
            }
            return;
        }
    }

    if (verbose) {
        printf("\n==PID:%d== SUMMARY: %#x byte(s) leaked in %d allocation(s).\n", getpid(), leak_size, allocs);
    } else {
        snprintf(buffer, ITEM_BUFFER_SIZE, "\n==PID:%d== SUMMARY: %#x byte(s) leaked in %d allocation(s).\n", getpid(),
            leak_size, allocs);
        ret = write(fd, buffer, strlen(buffer));
        if (ret != strlen(buffer)) {
            printf("Write failed, err: %s\n", strerror(errno));
        }
    }
}

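/*
 * Leak check: every record still owned by the current pid is reported with
 * its allocation backtrace, followed by a summary of the leaked bytes and
 * allocation count.
 */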
void check_leak(void)
{
    struct list *list = NULL;
    struct node *node = NULL;
    int tid, nptr;
    size_t leak_size = 0;
    size_t allocs = 0;
    bool mem_leak_exist = false;
    pid_t pid = getpid();

    lock(g_mem_lock);
    if (!get_file()) {
        unlock(g_mem_lock);
        return;
    }
    for (tid = 0; tid < PTHREAD_NUM_MAX; ++tid) {
        lock_stat_bin(tid);
        for (list = mem_stat.bins[tid].head.next; list != &mem_stat.bins[tid].head; list = list->next) {
            node = (struct node *)((uintptr_t)list - (uint32_t)&((struct node *)0)->list);
            if (node->pid != pid) {
                continue;
            }
            if (get_node_info(node, mem_stat.fd, mem_stat.verbose, mem_leak_exist) != 0) {
                unlock_stat_bin(tid);
                unlock(g_mem_lock);
                printf("Check failed\n");
                return;
            }
            ++allocs;
            leak_size += node->size;
            mem_leak_exist = true;
            nptr = 0;
            while (node->lr[nptr] != NULL) {
                ++nptr;
                if (nptr == BACKTRACE_DEPTH_MAX) {
                    break;
                }
            }
            if (nptr == 0) {
                continue;
            }
            if (get_backtrace_info(node->lr, nptr, mem_stat.fd, mem_stat.verbose, false) != 0) {
                unlock_stat_bin(tid);
                unlock(g_mem_lock);
                printf("Check failed\n");
                return;
            }
        }
        unlock_stat_bin(tid);
    }
    print_summary_info(leak_size, allocs, mem_stat.fd, mem_stat.verbose, mem_leak_exist);
    if (!mem_stat.verbose) {
        printf("Leak check information saved in %s\n", mem_stat.f_path_buf);
    }
    unlock(g_mem_lock);
}

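/*
 * Enable memory checking. Signals 35, 36 and 37 (in the real-time signal
 * range) trigger the statistics dump, the leak check and the heap integrity
 * check respectively. With a record file path the reports go to that file;
 * otherwise, or on any failure, they fall back to verbose serial output.
 */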
void mem_check_init(char *f_path)
{
    signal(35, watch_mem);
    signal(36, check_leak);
    signal(37, check_heap_integrity);
    g_enable_check = true;
    mem_stat.fd = -1;
    const char *string = "memory info will be printed to the serial port!";

    if (!f_path) {
        goto out;
    }

    if (strlen(f_path) > (PATH_MAX - PREFIX_PLACE_HOLDER - 1)) {
        printf("file name: %s is too long, %s\n", f_path, string);
        goto out;
    }
    mem_stat.f_path = f_path;
    file_path_init();
    mem_stat.fd = open(mem_stat.f_path_buf, O_RDWR | O_CREAT | O_EXCL, 0644); /* O_CREAT requires an explicit mode */
    if (mem_stat.fd < 0) {
        switch (errno) {
            case EEXIST:
                printf("file: %s already exists, %s\n", mem_stat.f_path_buf, string);
                goto out;
            default:
                printf("path: %s create failed, %s\n", mem_stat.f_path_buf, string);
                goto out;
        }
    }
    mem_stat.verbose = false;
    return;

out:
    mem_stat.verbose = true;
}

void mem_check_deinit(void)
{
    if (mem_stat.fd > 0) {
        close(mem_stat.fd);
    }
}

void parse_argv(int argc, char *argv[])
{
    if (argc <= 1) {
        return;
    }

    if (!strcmp(argv[argc - 1], "--mwatch")) {
        mem_check_init(NULL);
    } else if ((argc > 2) && (!strcmp(argv[argc - 2], "--mrecord"))) {
        mem_check_init(argv[argc - 1]);
    } else if (!strcmp(argv[argc - 1], "--mrecord")) {
        printf("usage: --mrecord filepath\n");
    }
}
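
/*
 * Illustrative usage sketch (not part of this file; the demo program and the
 * record path below are hypothetical). The allocator in this libc is assumed
 * to call insert_node()/delete_node()/insert_free_tail() from malloc/free.
 *
 *     int main(int argc, char *argv[])
 *     {
 *         parse_argv(argc, argv);   // run as "./app --mwatch" or "./app --mrecord /tmp/app.mem"
 *         void *p = malloc(64);     // recorded in the caller's stat bin
 *         free(p);                  // chunk is quarantined on recycle_list
 *         for (;;) pause();         // keep the process alive for inspection
 *     }
 *
 * Reports are then requested with the signals registered in mem_check_init():
 *     kill -35 <pid>   heap usage statistics (watch_mem)
 *     kill -36 <pid>   leak report           (check_leak)
 *     kill -37 <pid>   heap integrity check  (check_heap_integrity)
 */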

/*
 * free() quarantine: freed chunks are appended to recycle_list and their user
 * area is filled with FREE_MAGIC so that use-after-free shows up as a
 * recognizable pattern; clean_recycle_list() later returns them to the
 * allocator through __bin_chunk().
 */
void insert_free_tail(struct chunk *self)
{
    volatile struct chunk *cur = NULL;
    lock(g_mem_lock);
    if (!recycle_list.next) {
        recycle_list.next = recycle_list.prev = &recycle_list;
    }
    cur = recycle_list.prev;
    self->next = cur->next;
    self->prev = cur;
    cur->next = self;
    recycle_list.prev = self;
    memset(CHUNK_TO_MEM(self), FREE_MAGIC, CHUNK_SIZE(self) - OVERHEAD);
    ++g_recycle_num;
    g_recycle_size += CHUNK_SIZE(self);
    unlock(g_mem_lock);
}

struct chunk *get_free_head(void)
{
    struct chunk *cur = NULL;
    lock(g_mem_lock);
    cur = recycle_list.next;
    if ((cur == NULL) || (cur == &recycle_list)) {
        unlock(g_mem_lock);
        return NULL;
    }
    recycle_list.next = cur->next;
    cur->next->prev = cur->prev;
    --g_recycle_num;
    g_recycle_size -= CHUNK_SIZE(cur);
    unlock(g_mem_lock);
    return cur;
}

void clean_recycle_list(bool clean_all)
{
    struct chunk *self = NULL;
    self = get_free_head();
    while (self) {
        __bin_chunk(self);
        if ((!clean_all) && (g_recycle_size < RECYCLE_SIZE_MAX)) {
            break;
        }
        self = get_free_head();
    }
}