/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/shared_heap/shared_space.h"

#include "ecmascript/js_hclass-inl.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/checkpoint/thread_state_transition.h"

namespace panda::ecmascript {
SharedSparseSpace::SharedSparseSpace(SharedHeap *heap,
                                     MemSpaceType type,
                                     size_t initialCapacity,
                                     size_t maximumCapacity)
    : Space(heap, heap->GetHeapRegionAllocator(), type, initialCapacity, maximumCapacity),
      sweepState_(SweepState::NO_SWEEP),
      sHeap_(heap),
      liveObjectSize_(0)
{
    triggerLocalFullMarkLimit_ = maximumCapacity * LIVE_OBJECT_SIZE_RATIO;
    allocator_ = new FreeListAllocator<FreeObject>(heap);
}

void SharedSparseSpace::Reset()
{
    allocator_->RebuildFreeList();
    ReclaimRegions();
    liveObjectSize_ = 0;
}

void SharedSparseSpace::ResetTopPointer(uintptr_t top)
{
    allocator_->ResetTopPointer(top);
}

// Only used during shared heap initialization, before the first vmThread is created.
uintptr_t SharedSparseSpace::AllocateWithoutGC(JSThread *thread, size_t size)
{
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    object = AllocateWithExpand(thread, size);
    return object;
}

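// Allocation entry for the shared sparse space. Falls back step by step: try the free list,
// retry after concurrent sweeping if it is in progress, trigger a shared GC, expand the space,
// and finally collect garbage for ALLOCATION_FAILED before retrying once without GC.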
uintptr_t SharedSparseSpace::Allocate(JSThread *thread, size_t size, bool allowGC)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Shared old space cannot use this allocation function: a shared full GC may happen while triggering
    // GC or updating the thread state, and shared old space pointers might be changed by the shared full GC.
    // The JIT thread has no local heap.
    allowGC = allowGC && (!thread->IsJitThread());
    if (allowGC) {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        localHeap->TryTriggerFullMarkBySharedSize(size);
    }
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    // Check whether it is necessary to trigger a shared GC before expanding, to avoid OOM risk.
    if (allowGC && sHeap_->CheckAndTriggerSharedGC(thread)) {
        object = TryAllocate(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    object = AllocateWithExpand(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (allowGC) {
        sHeap_->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
        object = Allocate(thread, size, false);
    }
    return object;
}

uintptr_t SharedSparseSpace::TryAllocateAndExpand(JSThread *thread, size_t size, bool expand)
{
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
        CHECK_SOBJECT_NOT_NULL();
    }
    if (expand) {
        object = AllocateWithExpand(thread, size);
    }
    return object;
}

uintptr_t SharedSparseSpace::AllocateNoGCAndExpand(JSThread *thread, size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    uintptr_t object = TryAllocate(thread, size);
    CHECK_SOBJECT_NOT_NULL();
    if (sweepState_ == SweepState::SWEEPING) {
        object = AllocateAfterSweepingCompleted(thread, size);
    }
    return object;
}

uintptr_t SharedSparseSpace::TryAllocate([[maybe_unused]] JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    uintptr_t object = allocator_->Allocate(size);
    IncAllocSObjectSize(object, size);
    return object;
}

uintptr_t SharedSparseSpace::AllocateWithExpand(JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    // To avoid two different threads expanding at the same time, try to allocate first.
    CheckAndTriggerLocalFullMark();
    auto object = allocator_->Allocate(size);
    if (object == 0 && Expand(thread)) {
        object = allocator_->Allocate(size);
    }
    IncAllocSObjectSize(object, size);
    return object;
}

bool SharedSparseSpace::Expand(JSThread *thread)
{
    if (CommittedSizeExceed()) {
        LOG_ECMA_MEM(INFO) << "Expand::Committed size " << committedSize_ << " of Sparse Space is too big. ";
        return false;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedSparseSpace::Expand:region is nullptr";
    }
    AddRegion(region);
    allocator_->AddFree(region);
    return true;
}

Region *SharedSparseSpace::AllocateDeserializeRegion(JSThread *thread)
{
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, sHeap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedSparseSpace::AllocateDeserializeRegion:region is nullptr";
    }
    return region;
}

void SharedSparseSpace::MergeDeserializeAllocateRegions(const std::vector<Region *> &allocateRegions)
{
    LockHolder lock(allocateLock_);
    for (auto region : allocateRegions) {
        AddRegion(region);
        allocator_->AddFree(region);
        allocator_->ResetTopPointer(region->GetHighWaterMark());
        region->SetHighWaterMark(region->GetEnd());
    }
}

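// Allocate while concurrent sweeping may still be running: first try regions that have already
// been swept, and if that fails, wait for this space's sweeper task to finish before retrying.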
uintptr_t SharedSparseSpace::AllocateAfterSweepingCompleted([[maybe_unused]] JSThread *thread, size_t size)
{
    LockHolder lock(allocateLock_);
    uintptr_t object = 0U;
    if (sweepState_ != SweepState::SWEEPING) {
        object = allocator_->Allocate(size);
        IncAllocSObjectSize(object, size);
        return object;
    }
    if (TryFillSweptRegion()) {
        object = allocator_->Allocate(size);
        IncAllocSObjectSize(object, size);
        if (object != 0) {
            return object;
        }
    }
    // Parallel sweep and fill
    sHeap_->GetSweeper()->EnsureTaskFinished(spaceType_);
    object = allocator_->Allocate(size);
    IncAllocSObjectSize(object, size);
    return object;
}

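// Collect the regions to be swept, reset the live-object accounting, and rebuild the free list
// so that concurrent sweeping can start from a clean state.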
void SharedSparseSpace::PrepareSweeping()
{
    liveObjectSize_ = 0;
    EnumerateRegions([this](Region *current) {
        IncreaseLiveObjectSize(current->AliveObject());
        current->ResetWasted();
        AddSweepingRegion(current);
    });
    SortSweepingRegion();
    sweepState_ = SweepState::SWEEPING;
    allocator_->RebuildFreeList();
}

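// Drain the sweeping list region by region. Regions swept on non-main threads are queued in the
// swept list so allocating threads can later pick up their free sets via TryFillSweptRegion.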
void SharedSparseSpace::AsyncSweep(bool isMain)
{
    Region *current = GetSweepingRegionSafe();
    while (current != nullptr) {
        FreeRegion(current, isMain);
        // Regions swept on the main thread are not queued; other threads' regions go to the swept list.
        if (!isMain) {
            AddSweptRegionSafe(current);
        }
        current = GetSweepingRegionSafe();
    }
}

void SharedSparseSpace::Sweep()
{
    liveObjectSize_ = 0;
    allocator_->RebuildFreeList();
    EnumerateRegions([this](Region *current) {
        IncreaseLiveObjectSize(current->AliveObject());
        current->ResetWasted();
        FreeRegion(current);
    });
}

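// Move the free sets of already-swept regions back into the allocator; returns false if the
// swept list is empty.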
bool SharedSparseSpace::TryFillSweptRegion()
{
    if (sweptList_.empty()) {
        return false;
    }
    Region *region = nullptr;
    while ((region = GetSweptRegionSafe()) != nullptr) {
        allocator_->CollectFreeObjectSet(region);
        region->ResetSwept();
    }
    return true;
}

bool SharedSparseSpace::FinishFillSweptRegion()
{
    bool ret = TryFillSweptRegion();
    sweepState_ = SweepState::SWEPT;
    return ret;
}

void SharedSparseSpace::AddSweepingRegion(Region *region)
{
    sweepingList_.emplace_back(region);
}

void SharedSparseSpace::SortSweepingRegion()
{
    // Sweep regions with a low alive object size first
    std::sort(sweepingList_.begin(), sweepingList_.end(), [](Region *first, Region *second) {
        return first->AliveObject() < second->AliveObject();
    });
}

Region *SharedSparseSpace::GetSweepingRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweepingList_.empty()) {
        region = sweepingList_.back();
        sweepingList_.pop_back();
    }
    return region;
}

void SharedSparseSpace::AddSweptRegionSafe(Region *region)
{
    LockHolder holder(lock_);
    sweptList_.emplace_back(region);
}

Region *SharedSparseSpace::GetSweptRegionSafe()
{
    LockHolder holder(lock_);
    Region *region = nullptr;
    if (!sweptList_.empty()) {
        region = sweptList_.back();
        sweptList_.pop_back();
    }
    return region;
}

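// Walk the marked objects of a region and return the gaps between them to the free list;
// the tail of the region after the last live object is freed as well.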
void SharedSparseSpace::FreeRegion(Region *current, bool isMain)
{
    uintptr_t freeStart = current->GetBegin();
    current->IterateAllMarkedBits([this, &freeStart, isMain](void *mem) {
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            FreeLiveRange(freeStart, freeEnd, isMain);
        }
        freeStart = freeEnd + size;
    });
    uintptr_t freeEnd = current->GetEnd();
    if (freeStart != freeEnd) {
        FreeLiveRange(freeStart, freeEnd, isMain);
    }
}

void SharedSparseSpace::DetachFreeObjectSet(Region *region)
{
    allocator_->DetachFreeObjectSet(region);
}

void SharedSparseSpace::FreeLiveRange(uintptr_t freeStart, uintptr_t freeEnd, bool isMain)
{
    // No need to clear the remembered set here, because shared regions have no remembered set for now.
    allocator_->Free(freeStart, freeEnd - freeStart, isMain);
}

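// Visit every object in the space in address order. Free objects are skipped by reading their
// size, which requires temporarily unpoisoning them when ASan is enabled.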
void SharedSparseSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    allocator_->FillBumpPointer();
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned first.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

size_t SharedSparseSpace::GetHeapObjectSize() const
{
    return liveObjectSize_;
}

void SharedSparseSpace::IncreaseAllocatedSize(size_t size)
{
    allocator_->IncreaseAllocatedSize(size);
}

size_t SharedSparseSpace::GetTotalAllocatedSize() const
{
    return allocator_->GetAllocatedSize();
}

void SharedSparseSpace::InvokeAllocationInspector(Address object, size_t size, size_t alignedSize)
{
    ASSERT(size <= alignedSize);
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (alignedSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, size, alignedSize);
    }
    allocationCounter_.AdvanceAllocationInspector(alignedSize);
}

void SharedSparseSpace::CheckAndTriggerLocalFullMark()
{
    if (liveObjectSize_ >= triggerLocalFullMarkLimit_) {
        sHeap_->TryTriggerLocalConcurrentMarking();
    }
}

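// Bookkeeping for a successful allocation: update the live object size, the region's alive bytes
// while the shared heap is still ready for concurrent marking, and the heap sampling inspector.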
void SharedSparseSpace::IncAllocSObjectSize(uintptr_t object, size_t size)
{
    if (object != 0) {
        IncreaseLiveObjectSize(size);
        if (sHeap_->IsReadyToConcurrentMark()) {
            Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
        }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
        InvokeAllocationInspector(object, size, size);
#endif
    }
}

SharedAppSpawnSpace::SharedAppSpawnSpace(SharedHeap *heap, size_t initialCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_APPSPAWN_SPACE, initialCapacity, initialCapacity)
{
}

void SharedAppSpawnSpace::IterateOverMarkedObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    EnumerateRegions([&](Region *current) {
        current->IterateAllMarkedBits([&](void *mem) {
            ASSERT(current->InRange(ToUintPtr(mem)));
            visitor(reinterpret_cast<TaggedObject *>(mem));
        });
    });
}

SharedNonMovableSpace::SharedNonMovableSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_NON_MOVABLE, initialCapacity, maximumCapacity)
{
}

SharedOldSpace::SharedOldSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_OLD_SPACE, initialCapacity, maximumCapacity)
{
}

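// Merge a SharedLocalSpace into the shared old space: take over its regions and free object sets,
// then check the committed size (including the huge object space) against the overshoot limit and
// record an OOM overshoot if it is exceeded.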
void SharedOldSpace::Merge(SharedLocalSpace *localSpace)
{
    localSpace->FreeBumpPoint();
    LockHolder lock(lock_);
    size_t oldCommittedSize = committedSize_;
    localSpace->EnumerateRegions([&](Region *region) {
        localSpace->DetachFreeObjectSet(region);
        localSpace->RemoveRegion(region);
        localSpace->DecreaseLiveObjectSize(region->AliveObject());
        AddRegion(region);
        IncreaseLiveObjectSize(region->AliveObject());
        allocator_->CollectFreeObjectSet(region);
    });
    size_t hugeSpaceCommitSize = sHeap_->GetHugeObjectSpace()->GetCommittedSize();
    if (committedSize_ + hugeSpaceCommitSize > GetOverShootMaximumCapacity()) {
        LOG_ECMA_MEM(ERROR) << "Merge::Committed size " << committedSize_ << " of old space is too big. ";
        if (sHeap_->CanThrowOOMError()) {
            sHeap_->ShouldThrowOOMError(true);
        }
        IncreaseMergeSize(committedSize_ - oldCommittedSize);
        // If an OOM error is thrown, temporarily increase the space size to avoid a VM crash.
        IncreaseOutOfMemoryOvershootSize(committedSize_ + hugeSpaceCommitSize - GetOverShootMaximumCapacity());
    }

    localSpace->GetRegionList().Clear();
    allocator_->IncreaseAllocatedSize(localSpace->GetTotalAllocatedSize());
}

SharedLocalSpace::SharedLocalSpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : SharedSparseSpace(heap, MemSpaceType::SHARED_LOCAL_SPACE, initialCapacity, maximumCapacity) {}

bool SharedLocalSpace::AddRegionToList(Region *region)
{
    if (committedSize_ >= maximumCapacity_) { // LOCV_EXCL_BR_LINE
        LOG_ECMA_MEM(FATAL) << "AddRegionToList::Committed size " << committedSize_ << " of local space is too big.";
        return false;
    }
    AddRegion(region);
    allocator_->CollectFreeObjectSet(region);
    IncreaseLiveObjectSize(region->AliveObject());
    return true;
}

void SharedLocalSpace::FreeBumpPoint()
{
    allocator_->FreeBumpPoint();
}

void SharedLocalSpace::Stop()
{
    Region *currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        currentRegion->SetHighWaterMark(currentRegion->GetBegin() + currentRegion->AliveObject());
    }
}

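// Allocate raw memory in the shared local space; optionally expands when the free list cannot
// satisfy the request, and records the allocation in the region's alive bytes.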
uintptr_t SharedLocalSpace::Allocate(size_t size, bool isExpand)
{
    auto object = allocator_->Allocate(size);
    if (object == 0) {
        // Shared full GC compresses all regions and cannot recognize each thread's regions,
        // so expansion goes through the main thread.
        if (isExpand && Expand(Runtime::GetInstance()->GetMainThread())) {
            object = allocator_->Allocate(size);
        }
    }
    if (object != 0) {
        Region::ObjectAddressToRange(object)->IncreaseAliveObject(size);
    }
    return object;
}

SharedReadOnlySpace::SharedReadOnlySpace(SharedHeap *heap, size_t initialCapacity, size_t maximumCapacity)
    : Space(
          heap, heap->GetHeapRegionAllocator(), MemSpaceType::SHARED_READ_ONLY_SPACE, initialCapacity, maximumCapacity)
{
}

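// Expand the read-only space by one region after recording the allocation top of the current
// region as its high water mark.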
bool SharedReadOnlySpace::Expand(JSThread *thread)
{
    if (committedSize_ >= initialCapacity_ + outOfMemoryOvershootSize_ &&
        !heap_->NeedStopCollection()) {
        return false;
    }
    uintptr_t top = allocator_.GetTop();
    auto currentRegion = GetCurrentRegion();
    if (currentRegion != nullptr) {
        currentRegion->SetHighWaterMark(top);
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, DEFAULT_REGION_SIZE, thread, heap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedReadOnlySpace::Expand:region is nullptr";
    }
    allocator_.Reset(region->GetBegin(), region->GetEnd());
    AddRegion(region);
    return true;
}

uintptr_t SharedReadOnlySpace::Allocate(JSThread *thread, size_t size)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    thread->CheckSafepointIfSuspended();
    LockHolder holder(allocateLock_);
    auto object = allocator_.Allocate(size);
    if (object != 0) {
        return object;
    }
    if (Expand(thread)) {
        object = allocator_.Allocate(size);
    }
    return object;
}

void SharedReadOnlySpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &visitor) const
{
    size_t size = allocator_.Available();
    if (size != 0) {
        FreeObject::FillFreeObject(heap_, allocator_.GetTop(), size);
    }
    EnumerateRegions([&](Region *region) {
        if (region->InCollectSet()) {
            return;
        }
        uintptr_t curPtr = region->GetBegin();
        uintptr_t endPtr = region->GetEnd();
        while (curPtr < endPtr) {
            auto freeObject = FreeObject::Cast(curPtr);
            size_t objSize;
            // If curPtr points to a free object, it must be unpoisoned first.
            ASAN_UNPOISON_MEMORY_REGION(freeObject, TaggedObject::TaggedObjectSize());
            if (!freeObject->IsFreeObject()) {
                auto obj = reinterpret_cast<TaggedObject *>(curPtr);
                visitor(obj);
                objSize = obj->GetClass()->SizeFromJSHClass(obj);
            } else {
                freeObject->AsanUnPoisonFreeObject();
                objSize = freeObject->Available();
                freeObject->AsanPoisonFreeObject();
            }
            curPtr += objSize;
            CHECK_OBJECT_SIZE(objSize);
        }
        CHECK_REGION_END(curPtr, endPtr);
    });
}

SharedHugeObjectSpace::SharedHugeObjectSpace(BaseHeap *heap, HeapRegionAllocator *heapRegionAllocator,
                                             size_t initialCapacity, size_t maximumCapacity)
    : Space(heap, heapRegionAllocator, MemSpaceType::SHARED_HUGE_OBJECT_SPACE, initialCapacity, maximumCapacity)
{
    triggerLocalFullMarkLimit_ = maximumCapacity * HUGE_OBJECT_SIZE_RATIO;
}

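// Each huge object gets its own aligned region sized to hold the object, the Region header and
// the mark bitset. Returns 0 when the committed size limit would be exceeded.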
uintptr_t SharedHugeObjectSpace::Allocate(JSThread *thread, size_t objectSize, AllocateEventType allocType)
{
#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
    if (UNLIKELY(!thread->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "Allocate must be in jsthread running state";
        UNREACHABLE();
    }
#endif
    // Huge object allocation reserves 8 bytes in objectSize for the markBitSet.
    // In case the Region is not aligned to 16 bytes, HUGE_OBJECT_BITSET_SIZE adds 8 more bytes.
    size_t alignedSize = AlignUp(objectSize + sizeof(Region) + HUGE_OBJECT_BITSET_SIZE, PANDA_POOL_ALIGNMENT_IN_BYTES);
    if (allocType == AllocateEventType::NORMAL) {
        thread->CheckSafepointIfSuspended();
        CheckAndTriggerLocalFullMark(thread, alignedSize);
    }
    LockHolder lock(allocateLock_);
    if (CommittedSizeExceed(alignedSize)) {
        LOG_ECMA_MEM(INFO) << "Committed size " << committedSize_ << " of huge object space is too big.";
        return 0;
    }
    Region *region = heapRegionAllocator_->AllocateAlignedRegion(this, alignedSize, thread, heap_);
    if (region == nullptr) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "SharedHugeObjectSpace::Allocate:region is nullptr";
    }
    AddRegion(region);
    // The memory needs to be unpoisoned when a huge object is allocated.
    ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(region->GetBegin()), objectSize);
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    InvokeAllocationInspector(region->GetBegin(), objectSize);
#endif
    return region->GetBegin();
}

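// A huge region is live if any mark bit is set; otherwise it is moved to hugeNeedFreeList_
// and reclaimed later by ReclaimHugeRegion.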
void SharedHugeObjectSpace::Sweep()
{
    Region *currentRegion = GetRegionList().GetFirst();
    while (currentRegion != nullptr) {
        Region *next = currentRegion->GetNext();
        bool isMarked = false;
        currentRegion->IterateAllMarkedBits([&isMarked]([[maybe_unused]] void *mem) { isMarked = true; });
        if (!isMarked) {
            GetRegionList().RemoveNode(currentRegion);
            hugeNeedFreeList_.AddNode(currentRegion);
        }
        currentRegion = next;
    }
}

size_t SharedHugeObjectSpace::GetHeapObjectSize() const
{
    return committedSize_;
}

void SharedHugeObjectSpace::IterateOverObjects(const std::function<void(TaggedObject *object)> &objectVisitor) const
{
    EnumerateRegions([&](Region *region) {
        uintptr_t curPtr = region->GetBegin();
        objectVisitor(reinterpret_cast<TaggedObject *>(curPtr));
    });
}

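// Release the regions of dead huge objects collected by Sweep.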
void SharedHugeObjectSpace::ReclaimHugeRegion()
{
    if (hugeNeedFreeList_.IsEmpty()) {
        return;
    }
    do {
        Region *last = hugeNeedFreeList_.PopBack();
        ClearAndFreeRegion(last);
    } while (!hugeNeedFreeList_.IsEmpty());
}

void SharedHugeObjectSpace::InvokeAllocationInspector(Address object, size_t objectSize)
{
    if (LIKELY(!allocationCounter_.IsActive())) {
        return;
    }
    if (objectSize >= allocationCounter_.NextBytes()) {
        allocationCounter_.InvokeAllocationInspector(object, objectSize, objectSize);
    }
    allocationCounter_.AdvanceAllocationInspector(objectSize);
}

void SharedHugeObjectSpace::CheckAndTriggerLocalFullMark(JSThread *thread, size_t size)
{
    if (committedSize_ >= triggerLocalFullMarkLimit_) {
        reinterpret_cast<SharedHeap*>(heap_)->TryTriggerLocalConcurrentMarking();
    } else {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        if (!thread->IsJitThread()) {
            localHeap->TryTriggerFullMarkBySharedSize(size);
        }
    }
}
}  // namespace panda::ecmascript