1/*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16#include "ecmascript/mem/linear_space.h"
17
18#include "ecmascript/js_hclass-inl.h"
19#include "ecmascript/mem/allocator-inl.h"
20#include "ecmascript/mem/mem_controller.h"
21
22namespace panda::ecmascript {
// Constructs a bump-pointer (linear) allocation space on the given heap.
// Caches the heap and its JSThread for later use; waterLine_ starts at 0,
// meaning no age mark exists until the first GC establishes one.
LinearSpace::LinearSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      localHeap_(heap),
      thread_(heap->GetJSThread()),
      waterLine_(0)
{
}
30
// Allocates `size` bytes with the bump-pointer allocator, expanding the space
// when the current region is exhausted. `isPromoted` is true when the caller
// is promoting objects during GC; in that case no GC work is triggered and no
// heap sampling is performed. Returns the object address, or 0 on failure.
uintptr_t LinearSpace::Allocate(size_t size, bool isPromoted)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Fast path: bump-pointer allocation within the current region.
    auto object = allocator_.Allocate(size);
    if (object != 0) {
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
        // can not heap sampling in gc.
        if (!isPromoted) {
            InvokeAllocationInspector(object, size, size);
        }
#endif
        return object;
    }
    // Slow path: try to append a new region to the space.
    if (Expand(isPromoted)) {
        if (!isPromoted && !localHeap_->NeedStopCollection()) {
            localHeap_->TryTriggerIncrementalMarking();
            localHeap_->TryTriggerIdleCollection();
            localHeap_->TryTriggerConcurrentMarking();
        }
        object = allocator_.Allocate(size);
    } else if (localHeap_->IsMarking() || !localHeap_->IsEmptyIdleTask()) {
        // Temporary adjust semi space capacity
        if (localHeap_->IsConcurrentFullMark()) {
            overShootSize_ = localHeap_->CalculateLinearSpaceOverShoot();
        } else {
            // Grow the overshoot budget in bounded steps so in-flight marking
            // can finish before the space truly runs out of memory.
            size_t stepOverShootSize = localHeap_->GetEcmaParamConfiguration().GetSemiSpaceStepOvershootSize();
            size_t maxOverShootSize = std::max(initialCapacity_ / 2, stepOverShootSize); // 2: half
            if (overShootSizeForConcurrentMark_ < maxOverShootSize) {
                overShootSize_ += stepOverShootSize;
                overShootSizeForConcurrentMark_ += stepOverShootSize;
            }
        }

        // Retry expansion now that the overshoot budget has been enlarged.
        if (Expand(isPromoted)) {
            object = allocator_.Allocate(size);
        }
    }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    if (object != 0 && !isPromoted) {
        InvokeAllocationInspector(object, size, size);
    }
#endif
    return object;
}
80
81bool LinearSpace::Expand(bool isPromoted)
82{
83    if (committedSize_ >= initialCapacity_ + overShootSize_ + outOfMemoryOvershootSize_ &&
84        !localHeap_->NeedStopCollection()) {
85        return false;
86    }
87
88    uintptr_t top = allocator_.GetTop();
89    auto currentRegion = GetCurrentRegion();
90    if (currentRegion != nullptr) {
91        if (!isPromoted) {
92            if (currentRegion->HasAgeMark()) {
93                allocateAfterLastGC_ +=
94                    currentRegion->GetAllocatedBytes(top) - currentRegion->GetAllocatedBytes(waterLine_);
95            } else {
96                allocateAfterLastGC_ += currentRegion->GetAllocatedBytes(top);
97            }
98        } else {
99            // For GC
100            survivalObjectSize_ += currentRegion->GetAllocatedBytes(top);
101        }
102        currentRegion->SetHighWaterMark(top);
103    }
104    JSThread *thread = localHeap_->GetJSThread();
105    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_,
106                                                                 thread_->IsConcurrentMarkingOrFinished());
107    allocator_.Reset(region->GetBegin(), region->GetEnd());
108    AddRegion(region);
109    return true;
110}
111
112void LinearSpace::Stop()
113{
114    if (GetCurrentRegion() != nullptr) {
115        GetCurrentRegion()->SetHighWaterMark(allocator_.GetTop());
116    }
117}
118
119void LinearSpace::ResetAllocator()
120{
121    auto currentRegion = GetCurrentRegion();
122    if (currentRegion != nullptr) {
123        allocator_.Reset(currentRegion->GetBegin(), currentRegion->GetEnd(), currentRegion->GetHighWaterMark());
124    }
125}
126
// Walks every object in this space and invokes `visitor` on each live object.
// Free-list filler objects are skipped, but their size still advances the
// cursor. For the region currently being allocated into, the walk stops at
// the allocator's top rather than the region's recorded allocated size.
void LinearSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    auto current = GetCurrentRegion();
    EnumerateRegions([&](Region *region) {
        auto curPtr = region->GetBegin();
        uintptr_t endPtr = 0;
        if (region == current) {
            // The current region's live end is the allocation top, not its capacity.
            auto top = allocator_.GetTop();
            endPtr = curPtr + region->GetAllocatedBytes(top);
        } else {
            endPtr = curPtr + region->GetAllocatedBytes();
        }

        size_t objSize;
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            // If curPtr is freeObject, It must to mark unpoison first.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                // Step size comes from the object's class, read after visiting.
                // NOTE(review): assumes the visitor does not relocate obj — confirm.
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                // Free filler: read its size under temporary unpoisoning, then skip it.
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}
160
161void LinearSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
162{
163    ASSERT(size <= alignedSize);
164    if (LIKELY(!allocationCounter_.IsActive())) {
165        return;
166    }
167    if (alignedSize >= allocationCounter_.NextBytes()) {
168        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
169    }
170    allocationCounter_.AdvanceAllocationInspector(alignedSize);
171}
172
// Reserves the eden space's entire backing memory as one page mapping up
// front, tags it for diagnostics, and carves it into DEFAULT_REGION_SIZE
// chunks stored in freeRegions_ for later region construction.
EdenSpace::EdenSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : LinearSpace(heap, MemSpaceType::EDEN_SPACE, initialCapacity, maximumCapacity)
{
    size_t memSize = AlignUp(maximumCapacity_, DEFAULT_REGION_SIZE);
    memMap_ = PageMap(memSize, PAGE_PROT_READWRITE, DEFAULT_REGION_SIZE);
    PageTag(memMap_.GetMem(), memMap_.GetSize(), PageTagType::HEAP, ToSpaceTypeName(MemSpaceType::EDEN_SPACE),
            localHeap_->GetJSThread()->GetThreadId());
    // Slice the mapping into fixed-size free chunks.
    auto mem = ToUintPtr(memMap_.GetMem());
    auto count = memMap_.GetSize() / DEFAULT_REGION_SIZE;
    while (count-- > 0) {
        freeRegions_.emplace_back(ToVoidPtr(mem), DEFAULT_REGION_SIZE);
        mem = mem + DEFAULT_REGION_SIZE;
    }
}
187
// Releases the single page mapping that backs the whole eden space.
EdenSpace::~EdenSpace()
{
    PageUnmap(memMap_);
}
192
193void EdenSpace::Initialize()
194{
195    auto region = AllocRegion();
196    if (UNLIKELY(region == nullptr)) {
197        LOG_GC(ERROR) << "EdenSpace::Initialize: region is nullptr";
198        return;
199    }
200    AddRegion(region);
201    allocator_.Reset(region->GetBegin(), region->GetEnd());
202    localHeap_->InstallEdenAllocator();
203}
204
205void EdenSpace::Restart()
206{
207    overShootSize_ = 0;
208    survivalObjectSize_ = 0;
209    allocateAfterLastGC_ = 0;
210    isFull_ = false;
211    Initialize();
212}
213
// Thread-safe wrapper around Allocate(): serializes concurrent allocators
// with the space's lock.
uintptr_t EdenSpace::AllocateSync(size_t size)
{
    LockHolder lock(lock_);
    return Allocate(size);
}
219
// Allocates `size` bytes from eden. Once eden cannot expand any further it is
// latched as full and its allocator is released from the heap; after that
// every call returns 0 until Restart(). Returns the address or 0 on failure.
uintptr_t EdenSpace::Allocate(size_t size)
{
    if (isFull_) {
        return 0;
    }
    // Fast path: bump-pointer allocation within the current region.
    auto object = allocator_.Allocate(size);
    if (object != 0) {
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
        // can not heap sampling in gc.
        InvokeAllocationInspector(object, size, size);
#endif
        return object;
    }
    // Slow path: pull another region from the pre-mapped pool.
    if (Expand()) {
        if (!localHeap_->NeedStopCollection()) {
            localHeap_->TryTriggerIncrementalMarking();
            localHeap_->TryTriggerIdleCollection();
            localHeap_->TryTriggerConcurrentMarking();
        }
        object = allocator_.Allocate(size);
    } else {
        // Out of pre-mapped regions: stop routing new allocations to eden.
        isFull_ = true;
        localHeap_->ReleaseEdenAllocator();
    }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    if (object != 0) {
        InvokeAllocationInspector(object, size, size);
    }
#endif
    return object;
}
251
// Takes one pre-mapped chunk from freeRegions_ and constructs a Region header
// in it via placement new. Returns nullptr when eden's pool is exhausted.
Region *EdenSpace::AllocRegion()
{
    if (freeRegions_.empty()) {
        return nullptr;
    }
    auto memmap = freeRegions_.back();
    freeRegions_.pop_back();
    heapRegionAllocator_->IncreaseAnnoMemoryUsage(memmap.GetSize());
    auto mem = reinterpret_cast<uintptr_t>(memmap.GetMem());
    // Check that the address is 256K byte aligned
    LOG_ECMA_IF(AlignUp(mem, PANDA_POOL_ALIGNMENT_IN_BYTES) != mem, FATAL) << "region not align by 256KB";

    // Usable object storage starts after the Region header, aligned up.
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    uintptr_t begin = AlignUp(mem + sizeof(Region), static_cast<size_t>(MemAlignment::MEM_ALIGN_REGION));
    uintptr_t end = mem + memmap.GetSize();
    auto region = new (ToVoidPtr(mem)) Region(localHeap_->GetNativeAreaAllocator(), mem, begin, end,
                                              GetRegionFlag(), RegionTypeFlag::DEFAULT);
    region->Initialize();
    return region;
}
272
// Appends one more pre-mapped region to eden. Before switching, the bytes
// allocated in the current region are folded into the since-GC counter and
// the region is sealed at its final top. Returns false when no region is left.
bool EdenSpace::Expand()
{
    Region *region = AllocRegion();
    if (region == nullptr) {
        return false;
    }

    uintptr_t top = allocator_.GetTop();
    auto currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        if (currentRegion->HasAgeMark()) {
            // Only bytes above the age mark were allocated after the last GC.
            allocateAfterLastGC_ +=
                currentRegion->GetAllocatedBytes(top) - currentRegion->GetAllocatedBytes(waterLine_);
        } else {
            allocateAfterLastGC_ += currentRegion->GetAllocatedBytes(top);
        }
        currentRegion->SetHighWaterMark(top);
    }
    allocator_.Reset(region->GetBegin(), region->GetEnd());
    AddRegion(region);
    return true;
}
295
// Returns every region in this space to the freeRegions_ pool after a GC and
// zeroes the space's accounting. `cachedSize` is unused: eden keeps its
// backing page mapping alive for the lifetime of the space.
void EdenSpace::ReclaimRegions([[maybe_unused]] size_t cachedSize)
{
    const auto spaceName = ToSpaceTypeName(MemSpaceType::EDEN_SPACE);
    EnumerateRegions([this, &spaceName](Region *current) {
        LOG_GC(DEBUG) << "Clear region from: " << current << " to " << spaceName;
        current->DeleteLocalToShareRSet();
        DecreaseCommitted(current->GetCapacity());
        DecreaseObjectSize(current->GetSize());
        current->Invalidate();
        current->ClearMembers();
        // Hand the raw chunk back to the pool for the next Restart().
        void *mem = ToVoidPtr(current->GetAllocateBase());
        size_t memSize = current->GetCapacity();
        freeRegions_.emplace_back(mem, memSize);
        heapRegionAllocator_->DecreaseAnnoMemoryUsage(memSize);
    });
    regionList_.Clear();
    committedSize_ = 0;
}
314
// Estimated live bytes: survivors of the last GC plus bytes allocated since.
size_t EdenSpace::GetHeapObjectSize() const
{
    return survivalObjectSize_ + allocateAfterLastGC_;
}
319
// Bytes of objects that survived the last collection.
size_t EdenSpace::GetSurvivalObjectSize() const
{
    return survivalObjectSize_;
}
324
// Replaces (does not add to) the space's overshoot budget.
void EdenSpace::SetOverShootSize(size_t size)
{
    overShootSize_ = size;
}
329
330size_t EdenSpace::GetAllocatedSizeSinceGC(uintptr_t top) const
331{
332    size_t currentRegionSize = 0;
333    auto currentRegion = GetCurrentRegion();
334    if (currentRegion != nullptr) {
335        currentRegionSize = currentRegion->GetAllocatedBytes(top);
336        if (currentRegion->HasAgeMark()) {
337            currentRegionSize -= currentRegion->GetAllocatedBytes(waterLine_);
338        }
339    }
340    return allocateAfterLastGC_ + currentRegionSize;
341}
342
// Constructs a SEMI_SPACE linear space. minimumCapacity_ is pinned to the
// initial capacity and bounds later shrinking in AdjustCapacity().
SemiSpace::SemiSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : LinearSpace(heap, MemSpaceType::SEMI_SPACE, initialCapacity, maximumCapacity),
      minimumCapacity_(initialCapacity) {}
346
347void SemiSpace::Initialize()
348{
349    JSThread *thread = localHeap_->GetJSThread();
350    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
351    AddRegion(region);
352    allocator_.Reset(region->GetBegin(), region->GetEnd());
353}
354
355void SemiSpace::Restart(size_t overShootSize)
356{
357    overShootSize_ = overShootSize;
358    overShootSizeForConcurrentMark_ = 0;
359    survivalObjectSize_ = 0;
360    allocateAfterLastGC_ = 0;
361    Initialize();
362}
363
364size_t SemiSpace::CalculateNewOverShootSize()
365{
366    return committedSize_ <= maximumCapacity_ ?
367           0 : AlignUp((committedSize_ - maximumCapacity_) / 2, DEFAULT_REGION_SIZE); // 2 is the half.
368}
369
370bool SemiSpace::CommittedSizeIsLarge()
371{
372    return committedSize_ >= maximumCapacity_ * 2; // 2 is the half.
373}
374
// Thread-safe allocation used during GC: takes the space lock and allocates
// with isPromoted == true (no marking triggers, no heap sampling).
uintptr_t SemiSpace::AllocateSync(size_t size)
{
    LockHolder lock(lock_);
    return Allocate(size, true);
}
380
// Moves `region` from `fromSpace` into this semi-space without copying its
// objects (new-to-new set). Fails when adding the region would push this
// space past its maximum capacity plus overshoot. All bytes in the swapped
// region count as survivors.
bool SemiSpace::SwapRegion(Region *region, SemiSpace *fromSpace)
{
    LockHolder lock(lock_);
    if (committedSize_ + region->GetCapacity() > maximumCapacity_ + overShootSize_) {
        return false;
    }
    fromSpace->RemoveRegion(region);

    region->SetGCFlag(RegionGCFlags::IN_NEW_TO_NEW_SET);

    // NOTE(review): uses heap_ here while the rest of this file uses localHeap_
    // — presumably the same heap reached through the base class; confirm.
    if (UNLIKELY(heap_->ShouldVerifyHeap())) {
        region->ResetInactiveSemiSpace();
    }

    regionList_.AddNodeToFront(region);
    IncreaseCommitted(region->GetCapacity());
    IncreaseObjectSize(region->GetSize());
    survivalObjectSize_ += region->GetAllocatedBytes();
    return true;
}
401
// Records the current allocation top as the age mark ("water line") after a
// GC: bytes below it are counted as survivors, and the since-GC allocation
// counter restarts from zero.
void SemiSpace::SetWaterLine()
{
    waterLine_ = allocator_.GetTop();
    allocateAfterLastGC_ = 0;
    Region *last = GetCurrentRegion();
    if (last != nullptr) {
        last->SetGCFlag(RegionGCFlags::HAS_AGE_MARK);

        // Every region other than the current one sits entirely below the mark.
        EnumerateRegions([&last](Region *current) {
            if (current != last) {
                current->SetGCFlag(RegionGCFlags::BELOW_AGE_MARK);
            }
        });
        survivalObjectSize_ += last->GetAllocatedBytes(waterLine_);
    } else {
        LOG_GC(INFO) << "SetWaterLine: No region survival in current gc, current region available size: "
                     << allocator_.Available();
    }
}
421
// Estimated live bytes: survivors of the last GC plus bytes allocated since.
size_t SemiSpace::GetHeapObjectSize() const
{
    return survivalObjectSize_ + allocateAfterLastGC_;
}
426
// Bytes of objects that survived the last collection.
size_t SemiSpace::GetSurvivalObjectSize() const
{
    return survivalObjectSize_;
}
431
// Replaces (does not add to) the space's overshoot budget.
void SemiSpace::SetOverShootSize(size_t size)
{
    overShootSize_ = size;
}
436
// Adds to (does not replace) the space's overshoot budget.
void SemiSpace::AddOverShootSize(size_t size)
{
    overShootSize_ += size;
}
441
442bool SemiSpace::AdjustCapacity(size_t allocatedSizeSinceGC, JSThread *thread)
443{
444    if (allocatedSizeSinceGC <= initialCapacity_ * GROW_OBJECT_SURVIVAL_RATE / GROWING_FACTOR) {
445        return false;
446    }
447    double curObjectSurvivalRate = static_cast<double>(survivalObjectSize_) / allocatedSizeSinceGC;
448    double initialObjectRate = static_cast<double>(survivalObjectSize_) / initialCapacity_;
449    if (curObjectSurvivalRate > GROW_OBJECT_SURVIVAL_RATE || initialObjectRate > GROW_OBJECT_SURVIVAL_RATE) {
450        if (GetCommittedSize() > maximumCapacity_
451            && GetHeapObjectSize() > GetCommittedSize() *  GROW_OBJECT_SURVIVAL_RATE) {
452            // Overshoot size is too large. Avoid heapObjectSize is too close to committed size.
453            AddOverShootSize(GetCommittedSize() * SHRINK_OBJECT_SURVIVAL_RATE);
454        }
455        if (initialCapacity_ >= maximumCapacity_) {
456            return false;
457        }
458        size_t newCapacity = initialCapacity_ * GROWING_FACTOR;
459        SetInitialCapacity(std::min(newCapacity, maximumCapacity_));
460        if (newCapacity == maximumCapacity_) {
461            localHeap_->GetJSObjectResizingStrategy()->UpdateGrowStep(
462                thread,
463                JSObjectResizingStrategy::PROPERTIES_GROW_SIZE * 2);   // 2: double
464        }
465        return true;
466    } else if (curObjectSurvivalRate < SHRINK_OBJECT_SURVIVAL_RATE) {
467        if (initialCapacity_ <= minimumCapacity_) {
468            return false;
469        }
470        double speed = localHeap_->GetMemController()->GetNewSpaceAllocationThroughputPerMS();
471        if (speed > LOW_ALLOCATION_SPEED_PER_MS) {
472            return false;
473        }
474        size_t newCapacity = initialCapacity_ / GROWING_FACTOR;
475        SetInitialCapacity(std::max(newCapacity, minimumCapacity_));
476        localHeap_->GetJSObjectResizingStrategy()->UpdateGrowStep(thread);
477        return true;
478    }
479    return false;
480}
481
482size_t SemiSpace::GetAllocatedSizeSinceGC(uintptr_t top) const
483{
484    size_t currentRegionSize = 0;
485    auto currentRegion = GetCurrentRegion();
486    if (currentRegion != nullptr) {
487        currentRegionSize = currentRegion->GetAllocatedBytes(top);
488        if (currentRegion->HasAgeMark()) {
489            currentRegionSize -= currentRegion->GetAllocatedBytes(waterLine_);
490        }
491    }
492    return allocateAfterLastGC_ + currentRegionSize;
493}
494
// Linear space tagged SNAPSHOT_SPACE; all behavior is inherited from LinearSpace.
SnapshotSpace::SnapshotSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : LinearSpace(heap, MemSpaceType::SNAPSHOT_SPACE, initialCapacity, maximumCapacity) {}
497
// Linear space whose concrete MemSpaceType is supplied by the caller;
// behavior is inherited from LinearSpace.
ReadOnlySpace::ReadOnlySpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity, MemSpaceType type)
    : LinearSpace(heap, type, initialCapacity, maximumCapacity) {}
500}  // namespace panda::ecmascript
501