#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#ifdef __LITEOS_DEBUG__
#include <unistd.h>
#include <debug.h>
#endif
#include <sys/mman.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"
#include "malloc_impl.h"
#include "fork_impl.h"

#define malloc __libc_malloc_impl
#define realloc __libc_realloc
#define free __libc_free

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

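/* Global allocator state: binmap is a bitmask of which of the 64 bins
 * currently hold free chunks, bins[] are the per-size-class free lists
 * (circular doubly-linked lists, each with a two-word lock), and
 * split_merge_lock serializes chunk splitting and coalescing. */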
static struct {
	volatile uint64_t binmap;
	struct bin bins[64];
	volatile int split_merge_lock[2];
} mal;

/* Synchronization tools */

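/* lock() and unlock() implement a minimal futex-based lock: lk[0] is the
 * lock word and lk[1] counts waiters. When libc.need_locks is zero the
 * process is single-threaded and locking is skipped entirely; a negative
 * value means the process has just returned to a single-threaded state,
 * so the flag is cleared after one final locked operation. */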
static inline void lock(volatile int *lk)
{
	int need_locks = libc.need_locks;
	if (need_locks) {
		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
		if (need_locks < 0) libc.need_locks = 0;
	}
}

static inline void unlock(volatile int *lk)
{
	if (lk[0]) {
		a_store(lk, 0);
		if (lk[1]) __wake(lk, 1, 1);
	}
}

static inline void lock_bin(int i)
{
	lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static inline void unlock_bin(int i)
{
	unlock(mal.bins[i].lock);
}

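/* first_set() returns the index of the lowest set bit in x. The live path
 * uses the a_ctz_64 helper; the #else branch is a portable De Bruijn
 * sequence fallback kept for reference. */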
static int first_set(uint64_t x)
{
#if 1
	return a_ctz_64(x);
#else
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	};
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	if (sizeof(long) < 8) {
		uint32_t y = x;
		if (!y) {
			y = x>>32;
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		}
		return debruijn32[(y&-y)*0x076be629 >> 27];
	}
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
#endif
}

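/* Size classes: chunk sizes are expressed in units of SIZE_ALIGN. The
 * first 32 bins are exact (one unit apart); larger sizes are mapped
 * through bin_tab with progressively coarser spacing, and everything
 * above 0x1c00 units lands in bin 63. bin_index() rounds down (used when
 * binning a free chunk), while bin_index_up() rounds up (used to find a
 * bin guaranteed to satisfy an allocation request). */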
static const unsigned char bin_tab[60] = {
	32,33,34,35,36,36,37,37,38,38,39,39,
	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
};

static int bin_index(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x < 512) return bin_tab[x/8-4];
	if (x > 0x1c00) return 63;
	return bin_tab[x/128-4] + 16;
}

static int bin_index_up(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	x--;
	if (x < 512) return bin_tab[x/8-4] + 1;
	return bin_tab[x/128-4] + 17;
}

#if 0
void __dump_heap(int x)
{
	struct chunk *c;
	int i;
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
			c->csize & 15,
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
	}
}
#endif

/* This function returns true if the interval [old,new]
 * intersects the 'len'-sized interval below &libc.auxv
 * (interpreted as the main-thread stack) or below &b
 * (the current stack). It is used to defend against
 * buggy brk implementations that can cross the stack. */

static int traverses_stack_p(uintptr_t old, uintptr_t new)
{
	const uintptr_t len = 8<<20;
	uintptr_t a, b;

	b = (uintptr_t)libc.auxv;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	b = (uintptr_t)&b;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	return 0;
}

/* Expand the heap in-place if brk can be used, or otherwise via mmap,
 * using an exponential lower bound on growth by mmap to make
 * fragmentation asymptotically irrelevant. The size argument is both
 * an input and an output, since the caller needs to know the size
 * allocated, which will be larger than requested due to page alignment
 * and mmap minimum size rules. The caller is responsible for locking
 * to prevent concurrent calls. */

#ifdef __LITEOS_DEBUG__
void *__expand_heap(size_t *pn)
#else
static void *__expand_heap(size_t *pn)
#endif
{
	static uintptr_t brk;
	static unsigned mmap_step;
	size_t n = *pn;

	if (n > SIZE_MAX/2 - PAGE_SIZE) {
		errno = ENOMEM;
		return 0;
	}
	n += -n & PAGE_SIZE-1;

	if (!brk) {
		brk = __syscall(SYS_brk, 0);
		brk += -brk & PAGE_SIZE-1;
	}

	if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
	    && __syscall(SYS_brk, brk+n)==brk+n) {
		*pn = n;
		brk += n;
		return (void *)(brk-n);
	}

	size_t min = (size_t)PAGE_SIZE << mmap_step/2;
	if (n < min) n = min;
	void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
		MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) return 0;
	*pn = n;
	mmap_step++;
	return area;
}

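/* expand_heap() wraps __expand_heap(), formatting the returned memory as
 * a single in-use chunk bracketed by sentinel headers so that it can be
 * trimmed and freed like any other chunk. Under __LITEOS_DEBUG__ it also
 * takes g_mem_lock and calls the debug bookkeeping helpers
 * (insert_block_list, calculate_checksum). Callers must hold
 * split_merge_lock. */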
static struct chunk *expand_heap(size_t n)
{
	static void *end;
	void *p;
	struct chunk *w;

	/* The argument n already accounts for the caller's chunk
	 * overhead needs, but if the heap can't be extended in-place,
	 * we need room for an extra zero-sized sentinel chunk. */
	n += SIZE_ALIGN;

	p = __expand_heap(&n);
	if (!p) return 0;

#ifdef __LITEOS_DEBUG__
	lock(g_mem_lock);
#endif
	/* If not just expanding existing space, we need to make a
	 * new sentinel chunk below the allocated space. */
	if (p != end) {
		/* Valid/safe because of the prologue increment. */
		n -= SIZE_ALIGN;
		p = (char *)p + SIZE_ALIGN;
		w = MEM_TO_CHUNK(p);
		w->psize = 0 | C_INUSE;
#ifdef __LITEOS_DEBUG__
		insert_block_list(w);
#endif
	}

	/* Record new heap end and fill in footer. */
	end = (char *)p + n;
	w = MEM_TO_CHUNK(end);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;

	/* Fill in header, which may be new or may be replacing a
	 * zero-size sentinel header at the old end-of-heap. */
	w = MEM_TO_CHUNK(p);
	w->csize = n | C_INUSE;
#ifdef __LITEOS_DEBUG__
	calculate_checksum(w, MEM_TO_CHUNK(end));

	unlock(g_mem_lock);
#endif

	return w;
}

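/* adjust_size() converts a user request into an internal chunk size:
 * OVERHEAD is added and the result rounded up to a multiple of
 * SIZE_ALIGN. Oversized requests fail with ENOMEM so that pointer
 * differences within a chunk always fit in ptrdiff_t; a request of zero
 * becomes the minimum chunk size. */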
static int adjust_size(size_t *n)
{
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
		if (*n) {
			errno = ENOMEM;
			return -1;
		} else {
			*n = SIZE_ALIGN;
			return 0;
		}
	}
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
	return 0;
}

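/* unbin() removes a chunk from its bin's circular list and marks it (and
 * its successor's footer) in-use; bin_chunk() appends a free chunk to the
 * tail of bin i. Both keep binmap in sync when a bin becomes empty or
 * nonempty, and both must be called with the bin lock held. */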
static void unbin(struct chunk *c, int i)
{
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->csize |= C_INUSE;
	NEXT_CHUNK(c)->psize |= C_INUSE;
}

static void bin_chunk(struct chunk *self, int i)
{
	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;
	if (self->prev == BIN_TO_CHUNK(i))
		a_or_64(&mal.binmap, 1ULL<<i);
}

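/* trim() splits an oversized chunk down to n bytes, provided the surplus
 * exceeds DONTCARE, and returns the tail remainder to the appropriate
 * bin. */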
static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;

	if (n >= n1 - DONTCARE) return;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);
#ifdef __LITEOS_DEBUG__
	lock(g_mem_lock);
#endif
	split->psize = n | C_INUSE;
	split->csize = n1-n;
	next->psize = n1-n;
#ifdef __LITEOS_DEBUG__
	calculate_checksum(split, next);
#endif
	self->csize = n | C_INUSE;
#ifdef __LITEOS_DEBUG__
	calculate_checksum(self, NULL);
	unlock(g_mem_lock);
#endif

	int i = bin_index(n1-n);
	lock_bin(i);

	bin_chunk(split, i);

	unlock_bin(i);
}

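/* malloc(): requests above MMAP_THRESHOLD are serviced directly by mmap
 * (with guard pages and debug bookkeeping when g_enable_check is set).
 * Smaller requests first try an exact-fit bin, then scan binmap for the
 * smallest nonempty bin that is large enough, and finally fall back to
 * expand_heap(); any surplus is trimmed off and rebinned. */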
void *malloc(size_t n)
{
	struct chunk *c;
	int i, j;
	uint64_t mask;

	if (adjust_size(&n) < 0) return 0;

	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
#ifdef __LITEOS_DEBUG__
		if (g_enable_check) {
			/* Allocate two extra guard pages, located at the head and tail of the user memory respectively */
			len += PAGE_SIZE << 1;
		}
#endif
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;
#ifdef __LITEOS_DEBUG__
		if (g_enable_check) {
			if (mprotect(base, PAGE_SIZE, PROT_NONE) ||
			    mprotect(base + len - PAGE_SIZE, PAGE_SIZE, PROT_NONE)) {
				printf("%s %d, mprotect failed, err: %s!\n", __func__, __LINE__, strerror(errno));
			}
			base += PAGE_SIZE;
		}
#endif
		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
#ifdef __LITEOS_DEBUG__
		if (g_enable_check) {
			c->csize -= PAGE_SIZE << 1;
			insert_node(CHUNK_TO_MEM(c), CHUNK_SIZE(c));
		}
#endif
		return CHUNK_TO_MEM(c);
	}

	i = bin_index_up(n);
	if (i<63 && (mal.binmap & (1ULL<<i))) {
		lock_bin(i);
		c = mal.bins[i].head;
		if (c != BIN_TO_CHUNK(i) && CHUNK_SIZE(c)-n <= DONTCARE) {
			unbin(c, i);
			unlock_bin(i);
			return CHUNK_TO_MEM(c);
		}
		unlock_bin(i);
	}
	lock(mal.split_merge_lock);
	for (mask = mal.binmap & -(1ULL<<i); mask; mask -= (mask&-mask)) {
		j = first_set(mask);
		lock_bin(j);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j)) {
			unbin(c, j);
			unlock_bin(j);
			break;
		}
		unlock_bin(j);
	}
	if (!mask) {
		c = expand_heap(n);
		if (!c) {
			unlock(mal.split_merge_lock);
			return 0;
		}
	}
	trim(c, n);
#ifdef __LITEOS_DEBUG__
	if (g_enable_check) {
		insert_node(CHUNK_TO_MEM(c), CHUNK_SIZE(c));
	}
#endif
	unlock(mal.split_merge_lock);
	return CHUNK_TO_MEM(c);
}

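/* Used by calloc: memory obtained via mmap is already zero-filled, so it
 * only needs to be cleared manually when the chunk came from the heap. */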
int __malloc_allzerop(void *p)
{
	return IS_MMAPPED(MEM_TO_CHUNK(p));
}

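/* realloc() first tries to resize in place: mmap-serviced chunks are
 * grown or shrunk with mremap (skipped when the debug checks are on),
 * heap chunks are split when shrinking or merged with a free successor
 * when growing. Only if none of that works is a new chunk allocated and
 * the data copied over. */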
void *realloc(void *p, size_t n)
{
	struct chunk *self, *next;
	size_t n0, n1;
	void *new;

	if (!p) return malloc(n);

	if (adjust_size(&n) < 0) return 0;

	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);

	if (n<=n0 && n0-n<=DONTCARE) return p;

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
#ifdef __LITEOS_DEBUG__
		if (extra & 1) {
			if (g_enable_check) {
				get_free_trace(CHUNK_TO_MEM(self));
				a_crash();
			} else {
				a_crash();
			}
		}
#else
		if (extra & 1) a_crash();
#endif
		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
			n0 = n;
			goto copy_free_ret;
		}
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
#ifdef __LITEOS_DEBUG__
		if (g_enable_check) {
			goto copy_realloc;
		}
#endif
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
			goto copy_realloc;
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
	}

	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	if (n < n0) {
		int i = bin_index_up(n);
		int j = bin_index(n0);
		if (i<j && (mal.binmap & (1ULL << i)))
			goto copy_realloc;
		struct chunk *split = (void *)((char *)self + n);
#ifdef __LITEOS_DEBUG__
		lock(g_mem_lock);
#endif
		self->csize = split->psize = n | C_INUSE;
		split->csize = next->psize = n0-n | C_INUSE;
#ifdef __LITEOS_DEBUG__
		calculate_checksum(self, next);
		unlock(g_mem_lock);
#endif
		__bin_chunk(split);
		return CHUNK_TO_MEM(self);
	}

	lock(mal.split_merge_lock);

	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
	if (n0+nsize >= n) {
		int i = bin_index(nsize);
		lock_bin(i);
		if (!(next->csize & C_INUSE)) {
			unbin(next, i);
			unlock_bin(i);
			next = NEXT_CHUNK(next);
			self->csize = next->psize = n0+nsize | C_INUSE;
			trim(self, n);
#ifdef __LITEOS_DEBUG__
			if (g_enable_check) {
				int status = delete_node(p);
				if (status != 0) {
					get_free_trace(CHUNK_TO_MEM(self));
					a_crash();
				}
				insert_node(CHUNK_TO_MEM(self), CHUNK_SIZE(self));
			}
#endif
			unlock(mal.split_merge_lock);
			return CHUNK_TO_MEM(self);
		}
		unlock_bin(i);
	}
	unlock(mal.split_merge_lock);

copy_realloc:
	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	if (!new) return 0;
copy_free_ret:
	memcpy(new, p, (n<n0 ? n : n0) - OVERHEAD);
	free(CHUNK_TO_MEM(self));
	return new;
}

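/* __bin_chunk() is the core of free for heap chunks: under
 * split_merge_lock it coalesces the chunk with free neighbors on either
 * side, places the result in the bin for its new size, and, for large
 * free regions, returns the interior pages to the OS (madvise, or a
 * fresh anonymous MAP_FIXED mapping on LiteOS-A). */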
void __bin_chunk(struct chunk *self)
{
	struct chunk *next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	lock(mal.split_merge_lock);

	size_t osize = CHUNK_SIZE(self), size = osize;

	/* Since we hold split_merge_lock, only transition from free to
	 * in-use can race; in-use to free is impossible */
#ifdef __LITEOS_DEBUG__
	lock(g_mem_lock);
#endif
	size_t psize = self->psize & C_INUSE ? 0 : CHUNK_PSIZE(self);
	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
#ifdef __LITEOS_DEBUG__
	calculate_checksum(self, next);
	unlock(g_mem_lock);
#endif

	if (psize) {
		int i = bin_index(psize);
		lock_bin(i);
		if (!(self->psize & C_INUSE)) {
			struct chunk *prev = PREV_CHUNK(self);
			unbin(prev, i);
			self = prev;
			size += psize;
		}
		unlock_bin(i);
	}
	if (nsize) {
		int i = bin_index(nsize);
		lock_bin(i);
		if (!(next->csize & C_INUSE)) {
			unbin(next, i);
			next = NEXT_CHUNK(next);
			size += nsize;
		}
		unlock_bin(i);
	}

	int i = bin_index(size);
	lock_bin(i);
#ifdef __LITEOS_DEBUG__
	lock(g_mem_lock);
#endif
	self->csize = size;
	next->psize = size;
#ifdef __LITEOS_DEBUG__
	calculate_checksum(self, next);
	unlock(g_mem_lock);
#endif
	bin_chunk(self, i);
	unlock(mal.split_merge_lock);

	/* Replace middle of large chunks with fresh zero pages */
	if (size > RECLAIM && (size^(size-osize)) > size-osize) {
		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
		int e = errno;
#ifndef __LITEOS_A__
		__madvise((void *)a, b-a, MADV_DONTNEED);
#else
		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
		errno = e;
	}

	unlock_bin(i);
}

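/* unmap_chunk() releases an mmap-serviced chunk. psize holds the gap
 * between the mapping base and the chunk header; an odd value is treated
 * as evidence of a double free. When the debug guard pages are in use,
 * the unmapped range is widened to cover them. */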
static void unmap_chunk(struct chunk *self)
{
	size_t extra = self->psize;
	char *base = (char *)self - extra;
	size_t len = CHUNK_SIZE(self) + extra;
	/* Crash on double free */
#ifdef __LITEOS_DEBUG__
	if (extra & 1) {
		if (g_enable_check) {
			get_free_trace(CHUNK_TO_MEM(self));
			a_crash();
		} else {
			a_crash();
		}
	}
	if (g_enable_check) {
		base -= PAGE_SIZE;
		len += PAGE_SIZE << 1;
	}
#else
	if (extra & 1) a_crash();
#endif
	int e = errno;
	__munmap(base, len);
	errno = e;
}

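/* free() dispatches on chunk type: mmap-serviced chunks are unmapped,
 * heap chunks are coalesced and rebinned via __bin_chunk(). With
 * g_enable_check enabled, freed heap chunks are first parked on a recycle
 * list (presumably to help catch use-after-free); only once the list
 * grows past its count or size limits is the oldest entry, or the whole
 * list, actually returned to the bins. */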
void free(void *p)
{
	if (!p) return;

	struct chunk *self = MEM_TO_CHUNK(p);
#ifdef __LITEOS_DEBUG__
	if (g_enable_check) {
		if (!IS_MMAPPED(self)) {
			check_chunk_integrity(self);
		}
		int status = delete_node(p);
		if (status != 0) {
			get_free_trace(p);
			a_crash();
		}
	}
#endif

	if (IS_MMAPPED(self))
		unmap_chunk(self);
	else {
#ifdef __LITEOS_DEBUG__
		if (g_enable_check) {
			insert_free_tail(self);
			if (g_recycle_size >= RECYCLE_SIZE_MAX) {
				clean_recycle_list(false);
				return;
			}
			if (g_recycle_num < RECYCLE_MAX) {
				return;
			}
			self = get_free_head();
		}
#endif
		__bin_chunk(self);
	}
}

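/* __malloc_donate() hands an arbitrary memory range [start,end) over to
 * the allocator (musl uses this for spare space in loaded segments). The
 * range is aligned inward, wrapped in chunk headers with in-use
 * sentinels on both sides, and then freed into the bins. */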
void __malloc_donate(char *start, char *end)
{
#ifdef __LITEOS_DEBUG__
	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - BLOCK_HEAD);
#else
	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
#endif
	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;

	/* Getting past this condition ensures that the padding for alignment
	 * and header overhead will not overflow and will leave a nonzero
	 * multiple of SIZE_ALIGN bytes between start and end. */
#ifdef __LITEOS_DEBUG__
	if (end - start <= BLOCK_HEAD + align_start_up + align_end_down)
		return;
	start += align_start_up + BLOCK_HEAD;
#else
	if (end - start <= OVERHEAD + align_start_up + align_end_down)
		return;
	start += align_start_up + OVERHEAD;
#endif
	end -= align_end_down;

#ifdef __LITEOS_DEBUG__
	lock(g_mem_lock);
#endif
	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
	c->psize = n->csize = C_INUSE;
	c->csize = n->psize = C_INUSE | (end-start);
#ifdef __LITEOS_DEBUG__
	calculate_checksum(c, n);
	insert_block_list(c);
	unlock(g_mem_lock);
#endif
	__bin_chunk(c);
}

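/* __malloc_atfork() is called around fork(): who<0 takes every malloc
 * lock before the fork, who==0 releases them again in the parent, and
 * who>0 forcibly resets the locks in the child, where the owning threads
 * no longer exist. */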
void __malloc_atfork(int who)
{
	if (who<0) {
		lock(mal.split_merge_lock);
		for (int i=0; i<64; i++)
			lock(mal.bins[i].lock);
	} else if (!who) {
		for (int i=0; i<64; i++)
			unlock(mal.bins[i].lock);
		unlock(mal.split_merge_lock);
	} else {
		for (int i=0; i<64; i++)
			mal.bins[i].lock[0] = mal.bins[i].lock[1] = 0;
		mal.split_merge_lock[1] = 0;
		mal.split_merge_lock[0] = 0;
	}
}