/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H
#define ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H

#include <atomic>
#include <chrono>
#include <deque>
#include <map>
#include <random>
#include <set>
#include <string>
#include <vector>

#include "ecmascript/platform/map.h"
#include "ecmascript/mem/mem.h"
#include "ecmascript/mem/mem_common.h"
#include "ecmascript/log_wrapper.h"

#include "ecmascript/platform/mutex.h"

namespace panda::ecmascript {
// Pool of regular regions, each with a length of DEFAULT_REGION_SIZE (256 KB)
class MemMapPool {
public:
    MemMapPool() = default;
    ~MemMapPool() = default;

    void Finalize()
    {
        LockHolder lock(lock_);
        for (auto &it : memMapVector_) {
            PageUnmap(it);
        }
        for (auto &it : regularMapCommitted_) {
            PageUnmap(it);
        }
        regularMapCommitted_.clear();
        memMapVector_.clear();
        memMapCache_.clear();
    }

    NO_COPY_SEMANTIC(MemMapPool);
    NO_MOVE_SEMANTIC(MemMapPool);

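    // Pop a cached 256 KB chunk in FIFO order; returns an empty MemMap when the cache is empty.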
    MemMap GetMemFromCache([[maybe_unused]] size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        LockHolder lock(lock_);
        if (!memMapCache_.empty()) {
            MemMap mem = memMapCache_.front();
            memMapCache_.pop_front();
            return mem;
        }
        return MemMap();
    }

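    // Pop the most recently returned committed chunk (LIFO); returns an empty MemMap when none is cached.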
    MemMap GetRegularMemFromCommitted([[maybe_unused]] size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        LockHolder lock(lock_);
        if (!regularMapCommitted_.empty()) {
            MemMap mem = regularMapCommitted_.back();
            regularMapCommitted_.pop_back();
            return mem;
        }
        return MemMap();
    }

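    // The committed cache is considered full once it holds cachedSize / REGULAR_MMAP_SIZE entries.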
    bool IsRegularCommittedFull(size_t cachedSize)
    {
        LockHolder lock(lock_);
        size_t size = regularMapCommitted_.size();
        return size >= (cachedSize / REGULAR_MMAP_SIZE);
    }

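    // Returns how many committed entries exceed the cachedSize budget; a positive result means more should be freed.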
    int ShouldFreeMore(size_t cachedSize)
    {
        LockHolder lock(lock_);
        int result = static_cast<int>(regularMapCommitted_.size());
        return result - static_cast<int>(cachedSize / REGULAR_MMAP_SIZE);
    }

    void AddMemToCommittedCache(void *mem, size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        LockHolder lock(lock_);
        regularMapCommitted_.emplace_back(mem, size);
    }

    void AddMemToCache(void *mem, size_t size)
    {
        ASSERT(size == REGULAR_MMAP_SIZE);
        LockHolder lock(lock_);
        memMapCache_.emplace_back(mem, size);
    }

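    // Keep the first 256 KB chunk for the caller and push the remaining 256 KB chunks into the cache.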
    MemMap SplitMemFromCache(MemMap memMap)
    {
        LockHolder lock(lock_);
        auto remainderMem = reinterpret_cast<uintptr_t>(memMap.GetMem()) + REGULAR_MMAP_SIZE;
        size_t remainderSize = AlignDown(memMap.GetSize() - REGULAR_MMAP_SIZE, REGULAR_MMAP_SIZE);
        size_t count = remainderSize / REGULAR_MMAP_SIZE;
        while (count-- > 0) {
            memMapCache_.emplace_back(reinterpret_cast<void *>(remainderMem), REGULAR_MMAP_SIZE);
            remainderMem = remainderMem + REGULAR_MMAP_SIZE;
        }
        return MemMap(memMap.GetMem(), REGULAR_MMAP_SIZE);
    }

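    // Split an entire mapping into 256 KB chunks and push them all into the cache.
    // Note: unlike SplitMemFromCache, lock_ is not taken here; callers are expected to guarantee exclusive access.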
    void SplitMemMapToCache(MemMap memMap)
    {
        auto memAddr = reinterpret_cast<uintptr_t>(memMap.GetMem());
        size_t memTotalSize = AlignDown(memMap.GetSize(), REGULAR_MMAP_SIZE);
        size_t count = memTotalSize / REGULAR_MMAP_SIZE;
        while (count-- > 0) {
            memMapCache_.emplace_back(reinterpret_cast<void *>(memAddr), REGULAR_MMAP_SIZE);
            memAddr += REGULAR_MMAP_SIZE;
        }
    }

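    // Record the raw mapping so Finalize() can PageUnmap it later.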
    void InsertMemMap(MemMap memMap)
    {
        LockHolder lock(lock_);
        memMapVector_.emplace_back(memMap);
    }

private:
    static constexpr size_t REGULAR_MMAP_SIZE = 256_KB;
    Mutex lock_;
    std::deque<MemMap> memMapCache_;
    std::vector<MemMap> regularMapCommitted_;
    std::vector<MemMap> memMapVector_;
};
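
// A minimal usage sketch for MemMapPool (illustration only, not part of the allocator API).
// It shows the intended reuse path: try a cached chunk first, and on a miss reserve a fresh
// 4 MB run, record it for later unmapping, split off one 256 KB chunk and cache the rest.
// The helper name is hypothetical; the production flow is driven by MemMapAllocator::Allocate.
inline MemMap AcquireRegularChunkSketch(MemMapPool &pool)
{
    // Fast path: reuse a cached 256 KB chunk; an empty MemMap signals a miss.
    MemMap mem = pool.GetMemFromCache(256_KB);
    if (mem.GetMem() == nullptr) {
        // Slow path: reserve a fresh run of regions (still PROT_NONE; changing the page
        // protection before use is omitted in this sketch).
        MemMap fresh = PageMap(4_MB, PAGE_PROT_NONE, DEFAULT_REGION_SIZE);
        pool.InsertMemMap(fresh);
        mem = pool.SplitMemFromCache(fresh);
    }
    return mem;
}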

// Non-regular regions whose length is a multiple of DEFAULT_REGION_SIZE (256 KB)
class MemMapFreeList {
public:
    MemMapFreeList() = default;
    ~MemMapFreeList() = default;

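    // Seed the free list with one initial mapping and record the capacity cap used by GetMemFromList.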
    void Initialize(MemMap memMap, size_t capacity)
    {
        memMaps_.emplace_back(memMap);
        freeList_.emplace(memMap.GetSize(), memMap);
        capacity_ = capacity;
    }

    void Finalize()
    {
        for (auto &memMap : memMaps_) {
            PageUnmap(memMap);
        }
        memMaps_.clear();
        freeList_.clear();
    }

    NO_COPY_SEMANTIC(MemMapFreeList);
    NO_MOVE_SEMANTIC(MemMapFreeList);

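    // Coalesce physically adjacent free spans into a single larger entry so that bigger
    // requests can be served without growing the pool.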
    void MergeList()
    {
        auto it = freeList_.begin();
        while (it != freeList_.end()) {
            bool isEqual = false;
            void *startMem = (*it).second.GetMem();
            size_t newSize = (*it).second.GetSize();
            auto startIt = it++;
            if (it == freeList_.end()) {
                break;
            }
            auto next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(startMem) + newSize);
            while (it != freeList_.end() && next == (*it).second.GetMem()) {
                newSize += (*it).second.GetSize();
                it = freeList_.erase(it);
                next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(startMem) + newSize);
                isEqual = true;
            }
            if (isEqual) {
                freeList_.erase(startIt);
                freeList_.emplace(newSize, MemMap(startMem, newSize));
            }
        }
    }

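    // Best-fit lookup via lower_bound. If nothing fits, merge adjacent spans and retry;
    // if it still fails, grow the pool with a fresh PageMap. Any remainder of at least
    // DEFAULT_REGION_SIZE goes back into the free list.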
    MemMap GetMemFromList(size_t size)
    {
        if (freeListPoolSize_ + size > capacity_) {
            LOG_GC(ERROR) << "Freelist pool oom: overflow(" << freeListPoolSize_ << ")";
            return MemMap();
        }
        LockHolder lock(lock_);
        auto iterate = freeList_.lower_bound(size);
        if (iterate == freeList_.end()) {
            MergeList();
            iterate = freeList_.lower_bound(size);
            // Unable to get memory from freeList, use PageMap
            if (iterate == freeList_.end()) {
                size_t incrementCapacity = std::max(size, INCREMENT_HUGE_OBJECT_CAPACITY);
                MemMap smemMap = PageMap(incrementCapacity, PAGE_PROT_NONE, DEFAULT_REGION_SIZE);
                LOG_GC(INFO) << "Huge object mem pool increase PageMap size: " << smemMap.GetSize();
                memMaps_.emplace_back(smemMap);
                freeList_.emplace(smemMap.GetSize(), smemMap);
                iterate = freeList_.lower_bound(size);
                ASSERT(iterate != freeList_.end());
            }
        }
        MemMap memMap = iterate->second;
        size_t remainderSize = memMap.GetSize() - size;
        freeList_.erase(iterate);
        if (remainderSize >= DEFAULT_REGION_SIZE) {
            auto next = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memMap.GetMem()) + size);
            freeList_.emplace(remainderSize, MemMap(next, remainderSize));
        }
        freeListPoolSize_ += size;
        return MemMap(memMap.GetMem(), size);
    }

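    // Return a span to the free list. freeListPoolSize_ tracks memory handed out by
    // GetMemFromList, so it shrinks here.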
    void AddMemToList(MemMap memMap)
    {
        LockHolder lock(lock_);
        freeListPoolSize_ -= memMap.GetSize();
        freeList_.emplace(memMap.GetSize(), memMap);
    }

private:
    Mutex lock_;
    std::vector<MemMap> memMaps_;
    std::multimap<size_t, MemMap> freeList_;
    std::atomic_size_t freeListPoolSize_ {0};
    size_t capacity_ {0};
};
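
// A hedged lifecycle sketch for MemMapFreeList (illustration only). The sizes below are
// placeholders; the production values are presumably chosen in MemMapAllocator::InitializeHugeRegionMap.
inline void MemMapFreeListLifecycleSketch()
{
    MemMapFreeList freeList;
    // Reserve an initial PROT_NONE range and hand it to the free list together with a capacity cap.
    MemMap initial = PageMap(INCREMENT_HUGE_OBJECT_CAPACITY, PAGE_PROT_NONE, DEFAULT_REGION_SIZE);
    freeList.Initialize(initial, 256_MB);
    // Best-fit allocation of a multi-region span; an empty MemMap signals the capacity cap was hit.
    MemMap huge = freeList.GetMemFromList(1_MB);
    if (huge.GetMem() != nullptr) {
        // Hand the span back; a later MergeList() pass can coalesce it with adjacent free spans.
        freeList.AddMemToList(huge);
    }
    freeList.Finalize();
}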

class MemMapAllocator {
public:
    MemMapAllocator() = default;
    ~MemMapAllocator() = default;

    NO_COPY_SEMANTIC(MemMapAllocator);
    NO_MOVE_SEMANTIC(MemMapAllocator);

    void Initialize(size_t alignment)
    {
        AdapterSuitablePoolCapacity();
        memMapTotalSize_ = 0;
        InitializeHugeRegionMap(alignment);
        InitializeRegularRegionMap(alignment);
    }

    void Finalize()
    {
        memMapTotalSize_ = 0;
        capacity_ = 0;
        memMapFreeList_.Finalize();
        memMapPool_.Finalize();
    }

    size_t GetCapacity()
    {
        return capacity_;
    }

    static MemMapAllocator *GetInstance();

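    // Allocates a mapping for a space: regular requests are served from the 256 KB region pool,
    // non-regular (huge object) requests from the free list.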
    MemMap Allocate(const uint32_t threadId, size_t size, size_t alignment,
                    const std::string &spaceName, bool regular, bool isMachineCode,
                    bool isEnableJitFort);

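    // Returns a mapping to the allocator: regular chunks may be kept in the pool caches,
    // bounded by cachedSize; otherwise the memory is freed.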
    void CacheOrFree(void *mem, size_t size, bool isRegular, size_t cachedSize);

private:
    void InitializeRegularRegionMap(size_t alignment);
    void InitializeHugeRegionMap(size_t alignment);
    // Randomly generate a big mem map address to avoid the JS heap being written by others
    void *RandomGenerateBigAddr(uint64_t addr)
    {
        // Use the current time as the seed
        unsigned seed = static_cast<unsigned>(std::chrono::system_clock::now().time_since_epoch().count());
        std::mt19937_64 generator(seed);

        // Generate a random number between 0 and RANDOM_NUM_MAX
        std::uniform_int_distribution<uint64_t> distribution(0, RANDOM_NUM_MAX);
        uint64_t randomNum = distribution(generator);

        // The big addr varies randomly within 0x2000000000 ~ 0x2FF0000000
        return reinterpret_cast<void *>(addr + (randomNum << RANDOM_SHIFT_BIT));
    }

    static constexpr size_t REGULAR_REGION_MMAP_SIZE = 4_MB;
    static constexpr uint64_t HUGE_OBJECT_MEM_MAP_BEGIN_ADDR = 0x1000000000;
    static constexpr uint64_t REGULAR_OBJECT_MEM_MAP_BEGIN_ADDR = 0x2000000000;
    static constexpr uint64_t STEP_INCREASE_MEM_MAP_ADDR = 0x1000000000;
    static constexpr size_t RANDOM_NUM_MAX = 0xFF;
    static constexpr size_t RANDOM_SHIFT_BIT = 28;
    static constexpr size_t MEM_MAP_RETRY_NUM = 10;

    void AdapterSuitablePoolCapacity();
    void Free(void *mem, size_t size, bool isRegular);
    MemMapPool memMapPool_;
    MemMapFreeList memMapFreeList_;
    std::atomic_size_t memMapTotalSize_ {0};
    size_t capacity_ {0};
};
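
// Illustration only: a hedged sketch of how a space could obtain and return a regular region
// through the singleton allocator. The thread id, space name and cached-size budget below are
// placeholders, not values used by the engine.
inline void MemMapAllocatorUsageSketch()
{
    MemMapAllocator *allocator = MemMapAllocator::GetInstance();
    // Regular 256 KB region, not machine code, JitFort disabled.
    MemMap region = allocator->Allocate(0, DEFAULT_REGION_SIZE, DEFAULT_REGION_SIZE,
                                        "example-space", true, false, false);
    if (region.GetMem() != nullptr) {
        // Hand the region back; the allocator decides whether to cache or free it,
        // bounded by the cached-size budget (16_MB is a placeholder).
        allocator->CacheOrFree(region.GetMem(), region.GetSize(), true, 16_MB);
    }
}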
}  // namespace panda::ecmascript
#endif  // ECMASCRIPT_MEM_MEM_MAP_ALLOCATOR_H