/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/sparse_space.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/heap-inl.h"

namespace panda::ecmascript {
SparseSpace::SparseSpace(Heap *heap, MemSpaceType type, size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      localHeap_(heap),
      liveObjectSize_(0)
{
    allocator_ = new FreeListAllocator<FreeObject>(heap);
}

void SparseSpace::Initialize()
{
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    region->SetLocalHeap(reinterpret_cast<uintptr_t>(localHeap_));
    AddRegion(region);

    allocator_->Initialize(region);
}

void SparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}

void SparseSpace::ResetTopPointer(uintptr_t top)
{
    allocator_->ResetTopPointer(top);
}

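// Allocation fast path plus a chain of fallbacks: first try the free list; if a concurrent
// sweep is in progress, try regions the sweeper has already finished; then attempt to trigger
// an old GC before expanding; then expand the space; finally, force an old GC and retry once
// with allowGC == false, so the retry cannot trigger another collection.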
uintptr_t SparseSpace::Allocate(size_t size, bool allowGC)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    // Check whether it is necessary to trigger an old GC before expanding, to avoid OOM risk.
    if (allowGC && localHeap_->CheckAndTriggerOldGC()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size);
    }

    if (allowGC) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = Allocate(size, false);
        // The size has already been incremented by the recursive call.
    }
    return object;
}

bool SparseSpace::Expand()
{
    if (CommittedSizeExceed()) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }
    JSThread *thread = localHeap_->GetJSThread();
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, localHeap_);
    region->SetLocalHeap(reinterpret_cast<uintptr_t>(localHeap_));
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}

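// Called while a concurrent sweep is in progress. First try to reuse regions the sweeper has
// already finished; if that fails, block until the sweeping task for this space completes and
// retry the allocation.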
uintptr_t SparseSpace::AllocateAfterSweepingCompleted(size_t size)
{
    ASSERT(sweepState_ == SweepState::SWEEPING);
    if (TryFillSweptRegion()) {
        auto object = allocator_->Allocate(size);
        if (object != 0) {
            return object;
        }
    }
    // Wait for the parallel sweep to finish, then allocate from the fully rebuilt free list.
    localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    return allocator_->Allocate(size);
}

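// Snapshot per-region live sizes and queue every region outside the collect set for sweeping.
// The old-to-new and local-to-share remembered sets are swapped to sweeping copies,
// presumably so mutators can keep recording entries while the sweeper runs, and the free
// list is rebuilt from scratch.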
void SparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            if (UNLIKELY(localHeap_->ShouldVerifyHeap() &&
                current->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT))) { // LOCV_EXCL_BR_LINE
                LOG_ECMA(FATAL) << "Region should not be swept before PrepareSweeping: " << current;
            }
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            current->SwapOldToNewRSetForCS();
            current->SwapLocalToShareRSetForCS();
            AddSweepingRegion(current);
        }
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}

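// Drain the sweeping queue. Regions swept off the main thread are parked in sweptList_ so
// the main thread can merge their remembered sets later; regions swept on the main thread
// merge their remembered sets immediately.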
void SparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Regions swept by the main thread merge their RSets directly; others are queued.
        if (!isMain) {
            AddSweptRegionSafe(current);
        } else {
            current->MergeOldToNewRSetForCS();
            current->MergeLocalToShareRSetForCS();
        }
        current = GetSweepingRegionSafe();
    }
}

void SparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        if (!current->InCollectSet()) {
            IncreaseLiveObjectSize(current->AliveObject());
            current->ResetWasted();
            FreeRegion(current);
        }
    });
}

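// Move the free object sets of every already-swept region back into the allocator so their
// space becomes allocatable again. Returns false if no swept region was available.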
bool SparseSpace::TryFillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
        region->ResetSwept();
        region->MergeOldToNewRSetForCS();
        region->MergeLocalToShareRSetForCS();
    }
    return true;
}

bool SparseSpace::FinishFillSweptRegion()
{
    bool ret = TryFillSweptRegion();
    sweepState_ = SweepState::SWEPT;
    return ret;
}

void SparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}

void SparseSpace::SortSweepingRegion()
{
    // Sort the sweeping list by alive object size in ascending order.
    std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });
}

Region *SparseSpace::GetSweepingRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();
        sweepingList_.pop_back();
    }
    return region;
}

void SparseSpace::AddSweptRegionSafe(Region *region)
{
    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
    region->SetSwept();
}

Region *SparseSpace::GetSweptRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();
        sweptList_.pop_back();
    }
    return region;
}

void SparseSpace::FreeRegionFromSpace(Region *region)
{
    region->ResetSwept();
    region->MergeOldToNewRSetForCS();
    region->MergeLocalToShareRSetForCS();
    RemoveRegion(region);
    DecreaseLiveObjectSize(region->AliveObject());
}

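// During concurrent sweeping, search the swept list for a region whose free object set can
// satisfy an allocation of `size`, detach it from this space, and hand it to the caller.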
Region *SparseSpace::TryToGetSuitableSweptRegion(size_t size)
{
    if (sweepState_ != SweepState::SWEEPING) {
        return nullptr;
    }
    if (sweptList_.empty()) {
        return nullptr;
    }
    LockHolder holder(lock_);
    for (auto iter = sweptList_.begin(); iter != sweptList_.end(); iter++) {
        if (allocator_->MatchFreeObjectSet(*iter, size)) {
            Region *region = *iter;
            FreeRegionFromSpace(region);
            sweptList_.erase(iter);
            return region;
        }
    }
    return nullptr;
}

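// Walk the mark bitmap of a region and return every gap between consecutive live objects
// (and the tail after the last one) to the free list.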
void SparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &current, &freeStart, isMain](void *mem) {
        ASSERT(current->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(current, freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(current, freeStart, freeEnd, isMain);
    }
}

void SparseSpace::FreeLiveRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    localHeap_->GetSweeper()->ClearRSetInRange(current, freeStart, freeEnd);
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}

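// Linear scan of every region outside the collect set. Free objects are skipped by their
// recorded size; ASan poisoning is temporarily lifted so free object headers can be read.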
void SparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned before it can be read.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

void SparseSpace::IterateOldToNewOverObjects(
    const std::function<void(TaggedObject *object, JSTaggedValue value)> &visitor) const
{
    auto cb = [visitor](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        visitor(reinterpret_cast<TaggedObject *>(mem), JSTaggedValue(slot.GetTaggedType()));
        return true;
    };
    EnumerateRegions([cb] (Region *region) {
        region->IterateAllSweepingRSetBits(cb);
        region->IterateAllOldToNewBits(cb);
    });
}

size_t SparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}

void SparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}

size_t SparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}

void SparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}

void SparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
    ASSERT(size <= alignedSize);
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (alignedSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
    }
    allocationCounter_.AdvanceAllocationInspector(alignedSize);
}

OldSpace::OldSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, OLD_SPACE, initialCapacity, maximumCapacity) {}

Region *OldSpace::TrySweepToGetSuitableRegion(size_t size)
{
    // Try sweeping one more region to make space for the allocation.
    // Since sweepingList_ is sorted, a single attempt is enough.
    Region *availableRegion = GetSweepingRegionSafe();
    if (availableRegion != nullptr) {
        FreeRegion(availableRegion, false);
        // If the region now has enough free space for the requested size, detach it from the
        // current space and return it for the local space to use; otherwise, add it to sweptList_.
        if (allocator_->MatchFreeObjectSet(availableRegion, size)) {
            FreeRegionFromSpace(availableRegion);
            return availableRegion;
        } else {
            AddSweptRegionSafe(availableRegion);
        }
    }
    return nullptr;
}

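// Detach a region that can satisfy an allocation of `size` from the old space so a local
// space can use it exclusively: first look in the free lists, then (while sweeping) in the
// swept list, and finally sweep one more region on demand.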
Region *OldSpace::TryToGetExclusiveRegion(size_t size)
{
    LockHolder lock(lock_);
    uintptr_t result = allocator_->LookupSuitableFreeObject(size);
    if (result != 0) {
        // Remove the region from the global old space.
        Region *region = Region::ObjectAddressToRange(result);
        RemoveRegion(region);
        allocator_->DetachFreeObjectSet(region);
        DecreaseLiveObjectSize(region->AliveObject());
        return region;
    }
    if (sweepState_ == SweepState::SWEEPING) {
        Region *availableRegion = TryToGetSuitableSweptRegion(size);
        if (availableRegion != nullptr) {
            return availableRegion;
        }
        return TrySweepToGetSuitableRegion(size);
    }
    return nullptr;
}

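// Move every region of a local space into the old space after evacuation: detach it from the
// local space, re-register its free object set here, and account its live size. If the merge
// pushes the committed size past the overshoot limit, record the overshoot and arrange for an
// OOM error to be thrown instead of crashing the VM.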
void OldSpace::Merge(LocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    size_t hugeSpaceCommitSize = localHeap_->GetHugeObjectSpace()->GetCommittedSize();
    if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        if (localHeap_->CanThrowOOMError()) {
            localHeap_->ShouldThrowOOMError(true);
        }
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        size_t committedOverSizeLimit = committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity();
        IncreaseCommittedOverSizeLimit(committedOverSizeLimit);
        // If an OOM error will be thrown, temporarily increase the space size to avoid a VM crash.
        IncreaseOutOfMemoryOvershootSize(committedOverSizeLimit);
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}

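// Build the collect set (CSet) for partial GC: pick regions with little live data (in the
// background, an extra compress threshold admits more candidates), sort them by live size,
// cap the total expected evacuation size, and detach the chosen regions from the space.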
void OldSpace::SelectCSet()
{
    if (localHeap_->IsMarking()) {
        localHeap_->GetEcmaGCStats()->RecordStatisticBeforeGC(TriggerGCType::OLD_GC, GCReason::OTHER);
    }
    CheckRegionSize();
    // 1. Select regions whose alive object size is low enough to be worth evacuating.
    int64_t evacuateSizeLimit = 0;
    if (!localHeap_->IsInBackground()) {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_FOREGROUND;
        EnumerateRegions([this](Region *region) {
            if (!region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    } else {
        evacuateSizeLimit = PARTIAL_GC_MAX_EVACUATION_SIZE_BACKGROUND;
        EnumerateRegions([this](Region *region) {
            if (region->BelowCompressThreasholdAlive() || !region->MostObjectAlive()) {
                collectRegionSet_.emplace_back(region);
            }
        });
    }
    if (collectRegionSet_.size() < PARTIAL_GC_MIN_COLLECT_REGION_SIZE) {
        LOG_ECMA_MEM(DEBUG) << "Select CSet failure: number is too few";
        collectRegionSet_.clear();
        return;
    }
    // 2. Sort the candidates by alive object size in ascending order.
    std::sort(collectRegionSet_.begin(), collectRegionSet_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });

    // 3. Limit the CSet size by the expected evacuation budget.
    unsigned long selectedRegionNumber = 0;
    int64_t expectFreeSize =
        static_cast<int64_t>(localHeap_->GetCommittedSize() - localHeap_->GetHeapAliveSizeAfterGC());
    int64_t evacuateSize = std::min(evacuateSizeLimit, expectFreeSize);
    EnumerateCollectRegionSet([&](Region *current) {
        if (evacuateSize > 0) {
            selectedRegionNumber++;
            evacuateSize -= current->AliveObject();
        } else {
            return;
        }
    });
    LOG_ECMA_MEM(DEBUG) << "Max evacuation size is 6_MB. The CSet region number: "
        << selectedRegionNumber;
    selectedRegionNumber = std::max(selectedRegionNumber, GetSelectedRegionNumber());
    if (collectRegionSet_.size() > selectedRegionNumber) {
        collectRegionSet_.resize(selectedRegionNumber);
    }

    localHeap_->GetEcmaGCStats()->SetRecordData(
        RecordData::COLLECT_REGION_SET_SIZE, collectRegionSet_.size() * Region::AVERAGE_REGION_EVACUATE_SIZE);
    EnumerateCollectRegionSet([&](Region *current) {
        RemoveRegion(current);
        DecreaseLiveObjectSize(current->AliveObject());
        allocator_->DetachFreeObjectSet(current);
        current->SetGCFlag(RegionGCFlags::IN_COLLECT_SET);
    });
    sweepState_ = SweepState::NO_SWEEP;
    LOG_ECMA_MEM(DEBUG) << "Select CSet success: number is " << collectRegionSet_.size();
}

void OldSpace::CheckRegionSize()
{
#ifndef NDEBUG
    if (sweepState_ == SweepState::SWEEPING) {
        localHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    }
    size_t available = allocator_->GetAvailableSize();
    size_t wasted = allocator_->GetWastedSize();
    if (GetHeapObjectSize() + wasted + available != objectSize_) {
        LOG_GC(DEBUG) << "Actual live object size:" << GetHeapObjectSize()
                            << ", free object size:" << available
                            << ", wasted size:" << wasted
                            << ", but expected total size:" << objectSize_;
    }
#endif
}

void OldSpace::RevertCSet()
{
    EnumerateCollectRegionSet([&](Region *region) {
        region->ClearGCFlag(RegionGCFlags::IN_COLLECT_SET);
        AddRegion(region);
        allocator_->CollectFreeObjectSet(region);
        IncreaseLiveObjectSize(region->AliveObject());
    });
    collectRegionSet_.clear();
}

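// Release every region in the collect set after evacuation: drop all remembered sets and
// free object sets, then return the memory to the region allocator.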
void OldSpace::ReclaimCSet()
{
    size_t cachedSize = localHeap_->GetRegionCachedSize();
    EnumerateCollectRegionSet([this, &cachedSize](Region *region) {
        region->DeleteCrossRegionRSet();
        region->DeleteOldToNewRSet();
        region->DeleteLocalToShareRSet();
        region->DeleteSweepingOldToNewRSet();
        region->DeleteSweepingLocalToShareRSet();
        region->DestroyFreeObjectSets();
        heapRegionAllocator_->FreeRegion(region, cachedSize);
    });
    collectRegionSet_.clear();
}

LocalSpace::LocalSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, LOCAL_SPACE, initialCapacity, maximumCapacity) {}

bool LocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) { // LOCV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "AddRegionToList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

void LocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}

void LocalSpace::Stop()
{
    Region *currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        // Do not use allocator_->GetTop(), because it may point to a free object in another region.
        currentRegion->SetHighWaterMark(currentRegion->GetBegin() + currentRegion->AliveObject());
    }
}

uintptr_t NonMovableSpace::CheckAndAllocate(size_t size)
{
    if (maximumCapacity_ == committedSize_ && GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE &&
        !localHeap_->GetOldGCRequested() && !localHeap_->NeedStopCollection()) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
    }
    return Allocate(size);
}

NonMovableSpace::NonMovableSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::NON_MOVABLE, initialCapacity, maximumCapacity)
{
}

AppSpawnSpace::AppSpawnSpace(Heap *heap, size_t initialCapacity)
    : SparseSpace(heap, MemSpaceType::APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}

void AppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}

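// Allocate from the local (evacuation) space. On failure, optionally expand once; if even
// expansion fails, throw an OOM error. Successful allocations are counted toward the alive
// bytes of their region so the later merge into old space stays accurate.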
uintptr_t LocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0 && isExpand) {
        if (Expand()) {
            object = allocator_->Allocate(size);
        } else {
            localHeap_->ThrowOutOfMemoryErrorForDefault(localHeap_->GetJSThread(), size,
                " LocalSpace::Allocate", false);
        }
    }
    if (object != 0) {
        Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
    }
    return object;
}

MachineCodeSpace::MachineCodeSpace(Heap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SparseSpace(heap, MemSpaceType::MACHINE_CODE_SPACE, initialCapacity, maximumCapacity)
{
}

MachineCodeSpace::~MachineCodeSpace()
{
    if (localHeap_->GetEcmaVM()->GetJSOptions().GetEnableJitFort()) {
        if (jitFort_) {
            delete jitFort_;
            jitFort_ = nullptr;
        }
    }
}

void MachineCodeSpace::PrepareSweeping()
{
    SparseSpace::PrepareSweeping();
    if (jitFort_) {
        jitFort_->PrepareSweeping();
    }
}

void MachineCodeSpace::Sweep()
{
    SparseSpace::Sweep();
    if (jitFort_) {
        jitFort_->Sweep();
    }
}

void MachineCodeSpace::AsyncSweep(bool isMain)
{
    LockHolder holder(asyncSweepMutex_);
    SparseSpace::AsyncSweep(isMain);
    if (jitFort_) {
        jitFort_->AsyncSweep();
    }
}

uintptr_t MachineCodeSpace::JitFortAllocate(MachineCodeDesc *desc)
{
    if (!jitFort_) {
        jitFort_ = new JitFort();
    }
    localHeap_->GetSweeper()->EnsureTaskFinishedNoCheck(spaceType_);
    return jitFort_->Allocate(desc);
}

uintptr_t MachineCodeSpace::Allocate(size_t size, bool allowGC)
{
    return SparseSpace::Allocate(size, allowGC);
}

uintptr_t MachineCodeSpace::Allocate(size_t size, MachineCodeDesc *desc, bool allowGC)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!localHeap_->GetJSThread()->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Include the JitFort allocation size in the space's live object size and the region's
    // alive object size via CHECK_OBJECT_AND_INC_OBJ_SIZE. This could be a problem for
    // InvokeAllocationInspector when the instructions are separated from the MachineCode
    // object into JitFort space.

    auto object = allocator_->Allocate(size);
    CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);

    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    // Check whether it is necessary to trigger an old GC before expanding, to avoid OOM risk.
    if (allowGC && localHeap_->CheckAndTriggerOldGC()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    if (Expand()) {
        object = allocator_->Allocate(size);
        CHECK_OBJECT_AND_INC_OBJ_SIZE(size + desc->instructionsSize);
    }

    if (allowGC) {
        localHeap_->CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = Allocate(size, desc, false);
        // The size has already been incremented by the recursive call.
    }
    return object;
}

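// Examine one object slot during the linear scan: free objects only contribute their size,
// while live MachineCode objects are checked for whether `pc` falls into their text section.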
size_t MachineCodeSpace::CheckMachineCodeObject(uintptr_t curPtr, uintptr_t &machineCode, uintptr_t pc)
{
    auto freeObject = FreeObject::Cast(curPtr);
    size_t objSize = 0;
    if (!freeObject->IsFreeObject()) {
        auto obj = MachineCode::Cast(reinterpret_cast<TaggedObject*>(curPtr));
        if (obj->IsInText(pc)) {
            machineCode = curPtr;
        }
        objSize = obj->GetClass()->SizeFromJSHClass(obj);
    } else {
        objSize = freeObject->Available();
    }
    return objSize;
}

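// Find the MachineCode object containing `pc` by linearly scanning the regions (and the
// JitFort range) that could contain it. Runs under the async-sweep lock so the scan does
// not race with the concurrent sweeper.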
730
731uintptr_t MachineCodeSpace::GetMachineCodeObject(uintptr_t pc)
732{
733    uintptr_t machineCode = 0;
734    LockHolder holder(asyncSweepMutex_);
735    allocator_->FillBumpPointer();
736
737    EnumerateRegions([&](Region *region) {
738        if (machineCode != 0) {
739            return;
740        }
741        if (region->InCollectSet() || (!region->InRange(pc) && !InJitFortRange(pc))) {
742            return;
743        }
744        uintptr_t curPtr = region->GetBegin();
745        uintptr_t endPtr = region->GetEnd();
746        while (curPtr < endPtr) {
747            size_t objSize = CheckMachineCodeObject(curPtr, machineCode, pc);
748            if (machineCode != 0) {
749                return;
750            }
751            curPtr += objSize;
752            CHECK_OBJECT_SIZE(objSize);
753        }
754        CHECK_REGION_END(curPtr, endPtr);
755    });
756    return machineCode;
757}
758}  // namespace panda::ecmascript
759