#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "lock.h"
#include "syscall.h"
#include "fork_impl.h"

#ifdef USE_JEMALLOC
extern void *je_malloc(size_t size);
#endif

#define ALIGN 16

/* This function returns true if the interval [old,new]
 * intersects the 'len'-sized interval below &libc.auxv
 * (interpreted as the main-thread stack) or below &b
 * (the current stack). It is used to defend against
 * buggy brk implementations that can cross the stack. */

static int traverses_stack_p(uintptr_t old, uintptr_t new)
{
	const uintptr_t len = 8<<20;
	uintptr_t a, b;

	b = (uintptr_t)libc.auxv;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	b = (uintptr_t)&b;
	a = b > len ? b-len : 0;
	if (new>a && old<b) return 1;

	return 0;
}

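/* Serializes the bump allocator; __bump_lockptr exposes the lock so
 * the fork machinery (see fork_impl.h) can hold it across fork(). */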
static volatile int lock[1];
volatile int *const __bump_lockptr = lock;

static void *__simple_malloc(size_t n)
{
	static uintptr_t brk, cur, end;
	static unsigned mmap_step;
	size_t align=1;
	void *p;

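	/* Requests over SIZE_MAX/2 are refused with ENOMEM so the
	 * page-rounding arithmetic below cannot overflow. */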
	if (n > SIZE_MAX/2) {
		errno = ENOMEM;
		return 0;
	}

	if (!n) n++;
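	/* Grow align to the smallest power of two >= n, capped at ALIGN. */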
	while (align<n && align<ALIGN)
		align += align;

	LOCK(lock);

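	/* -cur & (align-1) is the padding needed to round cur up to the
	 * next multiple of align. */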
	cur += -cur & align-1;

	if (n > end-cur) {
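		/* Shortfall, rounded up to a whole number of pages. */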
		size_t req = n - (end-cur) + PAGE_SIZE-1 & -PAGE_SIZE;

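		/* First use: start the bump region at the current program
		 * break, rounded up to a page boundary. */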
		if (!cur) {
			brk = __syscall(SYS_brk, 0);
			brk += -brk & PAGE_SIZE-1;
			cur = end = brk;
		}

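		/* Prefer growing the region in place with brk, but only while
		 * it still ends at the break, the new break cannot overflow,
		 * and the growth would not cross a stack. */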
		if (brk == end && req < SIZE_MAX-brk
		    && !traverses_stack_p(brk, brk+req)
		    && __syscall(SYS_brk, brk+req)==brk+req) {
			brk = end += req;
		} else {
			int new_area = 0;
			req = n + PAGE_SIZE-1 & -PAGE_SIZE;
			/* Only make a new area rather than individual mmap
			 * if wasted space would be over 1/8 of the map. */
			if (req-n > req/8) {
				/* Geometric area size growth up to 64 pages,
				 * bounding waste by 1/8 of the area. */
				size_t min = PAGE_SIZE<<(mmap_step/2);
				if (min-n > end-cur) {
					if (req < min) {
						req = min;
						if (mmap_step < 12)
							mmap_step++;
					}
					new_area = 1;
				}
			}
			void *mem = __mmap(0, req, PROT_READ|PROT_WRITE,
				MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
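			/* A failed map yields 0; a one-off mapping (new_area==0)
			 * is returned to the caller directly instead of becoming
			 * the new bump region. */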
			if (mem == MAP_FAILED || !new_area) {
				UNLOCK(lock);
				return mem==MAP_FAILED ? 0 : mem;
			}
			cur = (uintptr_t)mem;
			end = cur + req;
		}
	}

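	/* Carve the allocation off the front of the region. */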
	p = (void *)cur;
	cur += n;
	UNLOCK(lock);
	return p;
}

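/* A full malloc implementation, if linked, provides a strong
 * __libc_malloc_impl that overrides this bump allocator. */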
weak_alias(__simple_malloc, __libc_malloc_impl);

void *__libc_malloc(size_t n)
{
	return __libc_malloc_impl(n);
}

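/* Public malloc. With USE_JEMALLOC defined it routes requests to
 * jemalloc; otherwise it falls through to __libc_malloc_impl. The
 * weak alias below lets the symbol still be interposed. */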
static void *default_malloc(size_t n)
{
#ifdef USE_JEMALLOC
	return je_malloc(n);
#endif
	return __libc_malloc_impl(n);
}

weak_alias(default_malloc, malloc);