/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/mem_map_allocator.h"
#include "ecmascript/platform/os.h"

namespace panda::ecmascript {
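// Returns the process-wide singleton allocator. The instance is created lazily on first use
// and intentionally never destroyed, so it stays valid for the lifetime of the process.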
MemMapAllocator *MemMapAllocator::GetInstance()
{
    static MemMapAllocator *vmAllocator_ = new MemMapAllocator();
    return vmAllocator_;
}

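// Reserves the address range used for regular-sized object regions. On 64-bit targets
// (excluding Windows/macOS/iOS) the reservation is retried at randomized high addresses and,
// once obtained, split into cached region-sized chunks; on other targets this is a no-op.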
void MemMapAllocator::InitializeRegularRegionMap([[maybe_unused]] size_t alignment)
{
#if defined(PANDA_TARGET_64) && !WIN_OR_MAC_OR_IOS_PLATFORM
    size_t initialRegularObjectCapacity = std::min(capacity_ / 2, INITIAL_REGULAR_OBJECT_CAPACITY);
    size_t i = 0;
    while (i < MEM_MAP_RETRY_NUM) {
        void *addr = reinterpret_cast<void *>(ToUintPtr(RandomGenerateBigAddr(REGULAR_OBJECT_MEM_MAP_BEGIN_ADDR)) +
                                              i * STEP_INCREASE_MEM_MAP_ADDR);
        MemMap memMap = PageMap(initialRegularObjectCapacity, PAGE_PROT_NONE, alignment, addr);
        if (ToUintPtr(memMap.GetMem()) >= ToUintPtr(addr)) {
            PageTag(memMap.GetMem(), memMap.GetSize(), PageTagType::MEMPOOL_CACHE);
            PageRelease(memMap.GetMem(), memMap.GetSize());
            memMapPool_.InsertMemMap(memMap);
            memMapPool_.SplitMemMapToCache(memMap);
            break;
        } else {
            PageUnmap(memMap);
            LOG_ECMA(ERROR) << "Regular object mem map big addr fail: " << errno;
        }
        i++;
    }
#endif
}

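// Reserves the address range backing huge-object allocations and hands it to the free list.
// On 64-bit non Windows/macOS/iOS targets the reservation is retried at randomized high
// addresses; the final attempt is accepted unconditionally so the free list is always initialized.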
void MemMapAllocator::InitializeHugeRegionMap(size_t alignment)
{
    size_t initialHugeObjectCapacity = std::min(capacity_ / 2, INITIAL_HUGE_OBJECT_CAPACITY);
#if defined(PANDA_TARGET_64) && !WIN_OR_MAC_OR_IOS_PLATFORM
    size_t i = 0;
    while (i <= MEM_MAP_RETRY_NUM) {
        void *addr = reinterpret_cast<void *>(ToUintPtr(RandomGenerateBigAddr(HUGE_OBJECT_MEM_MAP_BEGIN_ADDR)) +
                                              i * STEP_INCREASE_MEM_MAP_ADDR);
        MemMap memMap = PageMap(initialHugeObjectCapacity, PAGE_PROT_NONE, alignment, addr);
        if (ToUintPtr(memMap.GetMem()) >= ToUintPtr(addr) || i == MEM_MAP_RETRY_NUM) {
            PageTag(memMap.GetMem(), memMap.GetSize(), PageTagType::MEMPOOL_CACHE);
            PageRelease(memMap.GetMem(), memMap.GetSize());
            memMapFreeList_.Initialize(memMap, capacity_);
            break;
        } else {
            PageUnmap(memMap);
            LOG_ECMA(ERROR) << "Huge object mem map big addr fail: " << errno;
        }
        i++;
    }
#else
    MemMap hugeMemMap = PageMap(initialHugeObjectCapacity, PAGE_PROT_NONE, alignment);
    PageTag(hugeMemMap.GetMem(), hugeMemMap.GetSize(), PageTagType::MEMPOOL_CACHE);
    PageRelease(hugeMemMap.GetMem(), hugeMemMap.GetSize());
    memMapFreeList_.Initialize(hugeMemMap, capacity_);
#endif
}

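// Applies the final page protection to a freshly handed-out mapping. Non machine-code pages
// get READWRITE. Machine-code pages get EXEC_READWRITE, except on an OHOS ARM64 kernel:
// with JitFort enabled the generated code lives in JitFort space so READWRITE suffices here,
// otherwise the pages are remapped as executable fort space.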
static bool PageProtectMem(bool machineCodeSpace, void *mem, size_t size, [[maybe_unused]] bool isEnableJitFort)
{
    int prot = machineCodeSpace ? PAGE_PROT_EXEC_READWRITE : PAGE_PROT_READWRITE;

    if (!machineCodeSpace) {
        return PageProtect(mem, size, prot);
    }

    // MachineCode and HugeMachineCode space pages:
#if defined(PANDA_TARGET_ARM64) && defined(PANDA_TARGET_OHOS)
    if (isEnableJitFort) {
        // If JitFort is enabled, Jit code will be in JitFort space, so only READWRITE is needed here
        return PageProtect(mem, size, PAGE_PROT_READWRITE);
    } else {
        // Otherwise Jit code will be in MachineCode space, which needs EXEC_READWRITE and MAP_EXECUTABLE (0x1000)
        void *addr = PageMapExecFortSpace(mem, size, PAGE_PROT_EXEC_READWRITE);
        if (addr != mem) {
            return false;
        }
        return true;
    }
#else
    // Not running on a phone kernel; Jit code will be in MachineCode space
    return PageProtect(mem, size, PAGE_PROT_EXEC_READWRITE);
#endif
}

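// Hands out a mapping for the requested size. Regular-sized requests are served from the
// committed cache, then from the reserved cache, and finally from a fresh PageMap that is
// split into region-sized chunks; huge requests come from the free list. Successful paths
// re-protect and re-tag the pages; an empty MemMap is returned when the capacity limit
// would be exceeded or protection fails.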
MemMap MemMapAllocator::Allocate(const uint32_t threadId, size_t size, size_t alignment,
                                 const std::string &spaceName, bool regular, bool isMachineCode, bool isEnableJitFort)
{
    MemMap mem;
    PageTagType type = isMachineCode ? PageTagType::MACHINE_CODE : PageTagType::HEAP;

    if (regular) {
        mem = memMapPool_.GetRegularMemFromCommitted(size);
        if (mem.GetMem() != nullptr) {
            bool res = PageProtectMem(isMachineCode, mem.GetMem(), mem.GetSize(), isEnableJitFort);
            if (!res) {
                return MemMap();
            }
            PageTag(mem.GetMem(), size, type, spaceName, threadId);
            return mem;
        }
        if (UNLIKELY(memMapTotalSize_ + size > capacity_)) {
            LOG_GC(ERROR) << "memory map overflow";
            return MemMap();
        }
        mem = memMapPool_.GetMemFromCache(size);
        if (mem.GetMem() != nullptr) {
            memMapTotalSize_ += size;
            bool res = PageProtectMem(isMachineCode, mem.GetMem(), mem.GetSize(), isEnableJitFort);
            if (!res) {
                return MemMap();
            }
            PageTag(mem.GetMem(), size, type, spaceName, threadId);
            return mem;
        }
        mem = PageMap(REGULAR_REGION_MMAP_SIZE, PAGE_PROT_NONE, alignment);
        memMapPool_.InsertMemMap(mem);
        mem = memMapPool_.SplitMemFromCache(mem);
    } else {
        if (UNLIKELY(memMapTotalSize_ + size > capacity_)) {
            LOG_GC(ERROR) << "memory map overflow";
            return MemMap();
        }
        mem = memMapFreeList_.GetMemFromList(size);
    }
    if (mem.GetMem() != nullptr) {
        bool res = PageProtectMem(isMachineCode, mem.GetMem(), mem.GetSize(), isEnableJitFort);
        if (!res) {
            return MemMap();
        }
        PageTag(mem.GetMem(), mem.GetSize(), type, spaceName, threadId);
        memMapTotalSize_ += mem.GetSize();
    }
    return mem;
}

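// Returns a region to the allocator. Regular regions are parked in the committed cache while
// it has room (re-tagged as plain heap memory); otherwise the region is freed, and if the
// cache has grown past its target a few extra cached regions are released as well.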
void MemMapAllocator::CacheOrFree(void *mem, size_t size, bool isRegular, size_t cachedSize)
{
    if (isRegular && !memMapPool_.IsRegularCommittedFull(cachedSize)) {
        // Cache regions to accelerate allocation.
        // Clear ThreadId tag and tag the mem with ARKTS HEAP.
        PageClearTag(mem, size);
        PageTag(mem, size, PageTagType::HEAP);
        memMapPool_.AddMemToCommittedCache(mem, size);
        return;
    }
    Free(mem, size, isRegular);
    if (isRegular && memMapPool_.ShouldFreeMore(cachedSize) > 0) {
        int freeNum = memMapPool_.ShouldFreeMore(cachedSize);
        for (int i = 0; i < freeNum; i++) {
            void *freeMem = memMapPool_.GetRegularMemFromCommitted(size).GetMem();
            if (freeMem != nullptr) {
                Free(freeMem, size, isRegular);
            } else {
                return;
            }
        }
    }
}

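// Actually releases a region: the bytes are subtracted from the running total, the pages are
// re-tagged as pool cache, protected with PROT_NONE and, if that succeeds, released back to
// the OS and recycled into the regular cache or the huge-object free list.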
void MemMapAllocator::Free(void *mem, size_t size, bool isRegular)
{
    memMapTotalSize_ -= size;
    PageTag(mem, size, PageTagType::MEMPOOL_CACHE);
    if (!PageProtect(mem, size, PAGE_PROT_NONE)) {
        return;
    }
    PageRelease(mem, size);
    if (isRegular) {
        memMapPool_.AddMemToCache(mem, size);
    } else {
        memMapFreeList_.AddMemToList(MemMap(mem, size));
    }
}

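// Derives the memory pool capacity from the amount of physical memory, capped at
// MAX_MEM_POOL_CAPACITY.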
void MemMapAllocator::AdapterSuitablePoolCapacity()
{
    size_t physicalSize = PhysicalSize();
    capacity_ = std::min<size_t>(physicalSize * DEFAULT_CAPACITY_RATE, MAX_MEM_POOL_CAPACITY);
    LOG_GC(INFO) << "Ark Auto adapter memory pool capacity:" << capacity_;
}
} // namespace panda::ecmascript