1/*
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include "ecmascript/js_tagged_value-inl.h"
17#include "ecmascript/mem/mem_controller.h"
18#include "ecmascript/mem/region-inl.h"
19#include "ecmascript/mem/space.h"
20#include "ecmascript/platform/os.h"
21
22namespace panda::ecmascript {
// Base constructor shared by all ECMAScript memory spaces.
//
// @param heap                 owning heap; must be non-null.
// @param heapRegionAllocator  allocator used to create and free regions; must be non-null.
// @param spaceType            which logical space this instance represents.
// @param initialCapacity      initial capacity limit, in bytes.
// @param maximumCapacity      maximum capacity limit, in bytes.
Space::Space(BaseHeap* heap, HeapRegionAllocator *heapRegionAllocator,
             MemSpaceType spaceType, size_t initialCapacity,
             size_t maximumCapacity)
    : heap_(heap),
      heapRegionAllocator_(heapRegionAllocator),
      spaceType_(spaceType),
      initialCapacity_(initialCapacity),
      maximumCapacity_(maximumCapacity),
      committedSize_(0)  // nothing committed until regions are added
{
    ASSERT(heap != nullptr);
    ASSERT(heapRegionAllocator != nullptr);
}
36
// Registers an allocation inspector (used by heap sampling) on this space's
// allocation counter. The inspector must be non-null.
void Space::AddAllocationInspector(AllocationInspector* inspector)
{
    ASSERT(inspector != nullptr);
    allocationCounter_.AddAllocationInspector(inspector);
}
42
// Removes all allocation inspectors from this space's allocation counter.
void Space::ClearAllocationInspector()
{
    allocationCounter_.ClearAllocationInspector();
}
47
// Exchanges the allocation-sampling counter with another space, so sampling
// state can follow content when two spaces trade roles.
void Space::SwapAllocationCounter(Space *space)
{
    ASSERT(space != nullptr);
    std::swap(allocationCounter_, space->allocationCounter_);
}
53
// Tears the space down by releasing every region it still owns.
void Space::Destroy()
{
    ReclaimRegions();
}
58
59void Space::ReclaimRegions(size_t cachedSize)
60{
61    ASSERT(cachedSize >= 0);
62    EnumerateRegions([this, &cachedSize](Region *current) { ClearAndFreeRegion(current, cachedSize); });
63    regionList_.Clear();
64    committedSize_ = 0;
65}
66
67void Space::ClearAndFreeRegion(Region *region, size_t cachedSize)
68{
69    ASSERT(region != nullptr);
70    LOG_ECMA_MEM(DEBUG) << "Clear region from:" << region << " to " << ToSpaceTypeName(spaceType_);
71    region->DeleteCrossRegionRSet();
72    region->DeleteNewToEdenRSet();
73    region->DeleteOldToNewRSet();
74    region->DeleteLocalToShareRSet();
75    region->DeleteSweepingOldToNewRSet();
76    region->DeleteSweepingLocalToShareRSet();
77    DecreaseCommitted(region->GetCapacity());
78    DecreaseObjectSize(region->GetSize());
79    if (spaceType_ == MemSpaceType::OLD_SPACE || spaceType_ == MemSpaceType::NON_MOVABLE ||
80        spaceType_ == MemSpaceType::MACHINE_CODE_SPACE || spaceType_ == MemSpaceType::LOCAL_SPACE ||
81        spaceType_ == MemSpaceType::APPSPAWN_SPACE || spaceType_ == MemSpaceType::SHARED_NON_MOVABLE ||
82        spaceType_ == MemSpaceType::SHARED_OLD_SPACE || spaceType_ == MemSpaceType::SHARED_LOCAL_SPACE) {
83        region->DestroyFreeObjectSets();
84    }
85    // regions of EdenSpace are allocated in EdenSpace constructor and fixed, not allocate by heapRegionAllocator_
86    if (spaceType_ != MemSpaceType::EDEN_SPACE) {
87        heapRegionAllocator_->FreeRegion(region, cachedSize);
88    }
89}
90
// Constructs a huge-object space with the fixed HUGE_OBJECT_SPACE type.
HugeObjectSpace::HugeObjectSpace(Heap *heap, HeapRegionAllocator *heapRegionAllocator,
                                 size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heapRegionAllocator, MemSpaceType::HUGE_OBJECT_SPACE, initialCapacity, maximumCapacity)
{
}
96
// Constructs a huge-object space with a caller-supplied space type; used by
// subclasses (e.g. HugeMachineCodeSpace) to reuse the huge-object machinery.
HugeObjectSpace::HugeObjectSpace(Heap *heap, HeapRegionAllocator *heapRegionAllocator,
                                 size_t initialCapacity, size_t maximumCapacity, MemSpaceType spaceType)
    : Space(heap, heapRegionAllocator, spaceType, initialCapacity, maximumCapacity)
{
}
102
// Constructs a huge-object space specialized for large machine-code objects.
HugeMachineCodeSpace::HugeMachineCodeSpace(Heap *heap, HeapRegionAllocator *heapRegionAllocator,
                                           size_t initialCapacity, size_t maximumCapacity)
    : HugeObjectSpace(heap, heapRegionAllocator, initialCapacity,
        maximumCapacity, MemSpaceType::HUGE_MACHINE_CODE_SPACE)
{
}
109
110uintptr_t HugeMachineCodeSpace::GetMachineCodeObject(uintptr_t pc) const
111{
112    uintptr_t machineCode = 0;
113    EnumerateRegions([&](Region *region) {
114        if (machineCode != 0) {
115            return;
116        }
117        if (!region->InRange(pc)) {
118            return;
119        }
120        uintptr_t curPtr = region->GetBegin();
121        auto obj = MachineCode::Cast(reinterpret_cast<TaggedObject*>(curPtr));
122        if (obj->IsInText(pc)) {
123            machineCode = curPtr;
124        }
125    });
126    return machineCode;
127}
128
129Region *HugeMachineCodeSpace::AllocateFort(size_t objectSize, JSThread *thread, void *pDesc)
130{
131    // A Huge machine code object is consisted of contiguous 256Kb aligned blocks.
132    // For JitFort, a huge machine code object starts with a page aligned mutable area
133    // (which holds Region and MachineCode object header, FuncEntryDesc and StackMap), followed
134    // by a page aligned immutable (JitFort space) area for JIT generated native instructions code.
135    //
136    // allocation sizes for Huge Machine Code:
137    //     a: mutable area size (aligned up to PageSize()) =
138    //         sizeof(Region) + HUGE_OBJECT_BITSET_SIZE + MachineCode::SIZE + payLoadSize - instructionsSize
139    //         (note: payLoadSize = funcDesc size + stackMap size + instructionsSize)
140    //     b: immutable area (starts on native page boundary) size = instructionsSize
141    //     c: size to mmap for huge machine code object = Alignup(a + b, 256 Kbyte)
142    //
143    // mmap to enable JIT_FORT rights control:
144    //     1. first mmap (without JIT_FORT option flag) region of size c above
145    //     2. then mmap immutable area with MAP_FIXED and JIT_FORT option flag (to be used by codesigner verify/copy)
146    ASSERT(thread != nullptr);
147    ASSERT(pDesc != nullptr);
148    MachineCodeDesc *desc = reinterpret_cast<MachineCodeDesc *>(pDesc);
149    size_t mutableSize = AlignUp(
150        objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE - desc->instructionsSize, PageSize());
151    size_t allocSize = AlignUp(mutableSize + desc->instructionsSize, PANDA_POOL_ALIGNMENT_IN_BYTES);
152    if (heap_->OldSpaceExceedCapacity(allocSize)) {
153        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
154        return 0;
155    }
156    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, allocSize, thread, heap_);
157    desc->instructionsAddr = region->GetAllocateBase() + mutableSize;
158
159    // Enabe JitFort rights control
160    [[maybe_unused]] void *addr = PageMapExecFortSpace((void *)desc->instructionsAddr, allocSize - mutableSize,
161        PageProtectProt(reinterpret_cast<Heap *>(heap_)->GetEcmaVM()->GetJSOptions().GetDisableCodeSign() ||
162            !JitFort::IsResourceAvailable()));
163
164    ASSERT(addr == (void *)desc->instructionsAddr);
165    return region;
166}
167
168
169uintptr_t HugeMachineCodeSpace::Allocate(size_t objectSize, JSThread *thread, void *pDesc,
170    AllocateEventType allocType)
171{
172    ASSERT(thread != nullptr);
173    ASSERT(pDesc != nullptr);
174    // JitFort path
175#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
176    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
177        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
178        UNREACHABLE();
179    }
180#endif
181    if (allocType == AllocateEventType::NORMAL) {
182        thread->CheckSafepointIfSuspended();
183    }
184    Region *region;
185    if (reinterpret_cast<Heap *>(heap_)->GetEcmaVM()->GetJSOptions().GetEnableAsyncCopyToFort() &&
186        reinterpret_cast<MachineCodeDesc *>(pDesc)->isAsyncCompileMode) {
187        region = reinterpret_cast<Region *>(reinterpret_cast<MachineCodeDesc *>(pDesc)->hugeObjRegion);
188    } else {
189        region = AllocateFort(objectSize, thread, pDesc);
190    }
191    if (UNLIKELY(region == nullptr)) {
192        LOG_GC(ERROR) << "HugeMachineCodeSpace::Allocate: region is nullptr";
193        return 0;
194    }
195    AddRegion(region);
196    // It need to mark unpoison when huge object being allocated.
197    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
198#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
199    InvokeAllocationInspector(region->GetBegin(), objectSize);
200#endif
201    return region->GetBegin();
202}
203
// Non-JitFort allocation path: defers to the generic huge-object allocator.
uintptr_t HugeMachineCodeSpace::Allocate(size_t objectSize, JSThread *thread)
{
    // non JitFort path
    return HugeObjectSpace::Allocate(objectSize, thread);
}
209
210uintptr_t HugeObjectSpace::Allocate(size_t objectSize, JSThread *thread, AllocateEventType allocType)
211{
212#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
213    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) {
214        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
215        UNREACHABLE();
216    }
217#endif
218    if (allocType == AllocateEventType::NORMAL) {
219        thread->CheckSafepointIfSuspended();
220    }
221    // In HugeObject allocation, we have a revervation of 8 bytes for markBitSet in objectSize.
222    // In case Region is not aligned by 16 bytes, HUGE_OBJECT_BITSET_SIZE is 8 bytes more.
223    size_t alignedSize = AlignUp(objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES);
224    if (heap_->OldSpaceExceedCapacity(alignedSize)) {
225        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
226        return 0;
227    }
228    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, alignedSize, thread, heap_);
229    AddRegion(region);
230    // It need to mark unpoison when huge object being allocated.
231    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
232#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
233    InvokeAllocationInspector(region->GetBegin(), objectSize);
234#endif
235    return region->GetBegin();
236}
237
238void HugeObjectSpace::Sweep()
239{
240    Region *currentRegion = GetRegionList().GetFirst();
241    while (currentRegion != nullptr) {
242        Region *next = currentRegion->GetNext();
243        bool isMarked = false;
244        currentRegion->IterateAllMarkedBits([&isMarked]([[maybe_unused]] void *mem) { isMarked = true; });
245        if (!isMarked) {
246            GetRegionList().RemoveNode(currentRegion);
247            hugeNeedFreeList_.AddNode(currentRegion);
248        }
249        currentRegion = next;
250    }
251}
252
// Reports the committed size as this space's heap-object size: each huge
// object occupies its own region, so committed bytes track object bytes.
size_t HugeObjectSpace::GetHeapObjectSize() const
{
    return committedSize_;
}
257
258void HugeObjectSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const
259{
260    EnumerateRegions([&](Region *region) {
261        uintptr_t curPtr = region->GetBegin();
262        objectVisitor(reinterpret_cast<TaggedObject *>(curPtr));
263    });
264}
265
266void HugeObjectSpace::ReclaimHugeRegion()
267{
268    if (hugeNeedFreeList_.IsEmpty()) {
269        return;
270    }
271    do {
272        Region *last = hugeNeedFreeList_.PopBack();
273        ClearAndFreeRegion(last);
274    } while (!hugeNeedFreeList_.IsEmpty());
275}
276
277void HugeObjectSpace::InvokeAllocationInspector(Address object, size_t objectSize)
278{
279    if (LIKELY(!allocationCounter_.IsActive())) {
280        return;
281    }
282    if (objectSize >= allocationCounter_.NextBytes()) {
283        allocationCounter_.InvokeAllocationInspector(object, objectSize, objectSize);
284    }
285    allocationCounter_.AdvanceAllocationInspector(objectSize);
286}
287}  // namespace panda::ecmascript
288