1/*
2 * Copyright (c) 2022-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15#include <chrono>
16#include <thread>
17
18#include "ecmascript/base/block_hook_scope.h"
19#include "ecmascript/checkpoint/thread_state_transition.h"
20#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
21#include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
22#endif
23
24#include "ecmascript/mem/incremental_marker.h"
25#include "ecmascript/mem/partial_gc.h"
26#include "ecmascript/mem/parallel_evacuator.h"
27#include "ecmascript/mem/parallel_marker-inl.h"
28#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
29#include "ecmascript/mem/shared_heap/shared_gc_marker-inl.h"
30#include "ecmascript/mem/shared_heap/shared_gc.h"
31#include "ecmascript/mem/shared_heap/shared_full_gc.h"
32#include "ecmascript/mem/shared_heap/shared_concurrent_marker.h"
33#include "ecmascript/mem/verification.h"
34#include "ecmascript/runtime_call_id.h"
35#include "ecmascript/jit/jit.h"
36#include "ecmascript/ohos/ohos_params.h"
37#if !WIN_OR_MAC_OR_IOS_PLATFORM
38#include "ecmascript/dfx/hprof/heap_profiler_interface.h"
39#include "ecmascript/dfx/hprof/heap_profiler.h"
40#endif
41#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
42#include "ecmascript/dfx/cpu_profiler/cpu_profiler.h"
43#endif
44#include "ecmascript/dfx/tracing/tracing.h"
45#if defined(ENABLE_DUMP_IN_FAULTLOG)
46#include "syspara/parameter.h"
47#endif
48
49#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
50#include "parameters.h"
51#include "hisysevent.h"
52static constexpr uint32_t DEC_TO_INT = 100;
53static size_t g_threshold = OHOS::system::GetUintParameter<size_t>("persist.dfx.leak.threshold", 85);
54static uint64_t g_lastHeapDumpTime = 0;
55static bool g_debugLeak = OHOS::system::GetBoolParameter("debug.dfx.tags.enableleak", false);
56static constexpr uint64_t HEAP_DUMP_REPORT_INTERVAL = 24 * 3600 * 1000;
57static bool g_betaVersion = OHOS::system::GetParameter("const.logsystem.versiontype", "unknown") == "beta";
58static bool g_developMode = (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "enable") ||
59                            (OHOS::system::GetParameter("persist.hiview.leak_detector", "unknown") == "true");
60#endif
61
62namespace panda::ecmascript {
63SharedHeap *SharedHeap::instance_ = nullptr;
64
65void SharedHeap::CreateNewInstance()
66{
67    ASSERT(instance_ == nullptr);
68    size_t heapShared = 0;
69#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
70    heapShared = OHOS::system::GetUintParameter<size_t>("persist.ark.heap.sharedsize", 0) * 1_MB;
71#endif
72    EcmaParamConfiguration config(EcmaParamConfiguration::HeapType::SHARED_HEAP,
73        MemMapAllocator::GetInstance()->GetCapacity(), heapShared);
74    instance_ = new SharedHeap(config);
75}
76
77SharedHeap *SharedHeap::GetInstance()
78{
79    ASSERT(instance_ != nullptr);
80    return instance_;
81}
82
83void SharedHeap::DestroyInstance()
84{
85    ASSERT(instance_ != nullptr);
86    instance_->Destroy();
87    delete instance_;
88    instance_ = nullptr;
89}
90
91void SharedHeap::ForceCollectGarbageWithoutDaemonThread(TriggerGCType gcType, GCReason gcReason, JSThread *thread)
92{
93    ASSERT(!dThread_->IsRunning());
94    SuspendAllScope scope(thread);
95    SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
96    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
97    CheckInHeapProfiler();
98    GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
99    if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
100        // pre gc heap verify
101        LOG_ECMA(DEBUG) << "pre gc shared heap verify";
102        sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
103        SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
104    }
105    switch (gcType) { // LCOV_EXCL_BR_LINE
106        case TriggerGCType::SHARED_GC: {
107            sharedGC_->RunPhases();
108            break;
109        }
110        case TriggerGCType::SHARED_FULL_GC: {
111            sharedFullGC_->RunPhases();
112            break;
113        }
114        default: // LOCV_EXCL_BR_LINE
115            LOG_ECMA(FATAL) << "this branch is unreachable";
116            UNREACHABLE();
117            break;
118    }
119    if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
120        // pre gc heap verify
121        LOG_ECMA(DEBUG) << "after gc shared heap verify";
122        SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
123    }
124    CollectGarbageFinish(false, gcType);
125    InvokeSharedNativePointerCallbacks();
126}
127
128bool SharedHeap::CheckAndTriggerSharedGC(JSThread *thread)
129{
130    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
131        return false;
132    }
133    if ((OldSpaceExceedLimit() || GetHeapObjectSize() > globalSpaceAllocLimit_) &&
134        !NeedStopCollection()) {
135        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
136        return true;
137    }
138    return false;
139}
140
141bool SharedHeap::CheckHugeAndTriggerSharedGC(JSThread *thread, size_t size)
142{
143    if (thread->IsSharedConcurrentMarkingOrFinished() && !ObjectExceedMaxHeapSize()) {
144        return false;
145    }
146    if ((sHugeObjectSpace_->CommittedSizeExceed(size) || GetHeapObjectSize() > globalSpaceAllocLimit_) &&
147        !NeedStopCollection()) {
148        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
149        return true;
150    }
151    return false;
152}
153
154// Shared gc trigger
155void SharedHeap::AdjustGlobalSpaceAllocLimit()
156{
157    globalSpaceAllocLimit_ = std::max(GetHeapObjectSize() * growingFactor_,
158                                      config_.GetDefaultGlobalAllocLimit() * 2); // 2: double
159    globalSpaceAllocLimit_ = std::min(std::min(globalSpaceAllocLimit_, GetCommittedSize() + growingStep_),
160                                      config_.GetMaxHeapSize());
161    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
162                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);
163    constexpr double OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT = 1.1;
164    size_t markLimitByIncrement = static_cast<size_t>(GetHeapObjectSize() * OBJECT_INCREMENT_FACTOR_FOR_MARK_LIMIT);
165    globalSpaceConcurrentMarkLimit_ = std::max(globalSpaceConcurrentMarkLimit_, markLimitByIncrement);
166    LOG_ECMA_IF(optionalLogEnabled_, INFO) << "Shared gc adjust global space alloc limit to: "
167        << globalSpaceAllocLimit_;
168}
169
170bool SharedHeap::ObjectExceedMaxHeapSize() const
171{
172    return OldSpaceExceedLimit() || sHugeObjectSpace_->CommittedSizeExceed();
173}
174
175void SharedHeap::StartConcurrentMarking(TriggerGCType gcType, GCReason gcReason)
176{
177    ASSERT(JSThread::GetCurrent() == dThread_);
178    sConcurrentMarker_->Mark(gcType, gcReason);
179}
180
181bool SharedHeap::CheckCanTriggerConcurrentMarking(JSThread *thread)
182{
183    return thread->IsReadyToSharedConcurrentMark() &&
184           sConcurrentMarker_ != nullptr && sConcurrentMarker_->IsEnabled();
185}
186
187void SharedHeap::Initialize(NativeAreaAllocator *nativeAreaAllocator, HeapRegionAllocator *heapRegionAllocator,
188    const JSRuntimeOptions &option, DaemonThread *dThread)
189{
190    sGCStats_ = new SharedGCStats(this, option.EnableGCTracer());
191    nativeAreaAllocator_ = nativeAreaAllocator;
192    heapRegionAllocator_ = heapRegionAllocator;
193    shouldVerifyHeap_ = option.EnableHeapVerify();
194    parallelGC_ = option.EnableParallelGC();
195    optionalLogEnabled_ = option.EnableOptionalLog();
196    size_t maxHeapSize = config_.GetMaxHeapSize();
197    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
198    sNonMovableSpace_ = new SharedNonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
199
200    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
201    size_t oldSpaceCapacity = (maxHeapSize - nonmovableSpaceCapacity - readOnlySpaceCapacity) / 2; // 2: half
202    globalSpaceAllocLimit_ = config_.GetDefaultGlobalAllocLimit();
203    globalSpaceConcurrentMarkLimit_ = static_cast<size_t>(globalSpaceAllocLimit_ *
204                                                          TRIGGER_SHARED_CONCURRENT_MARKING_OBJECT_LIMIT_RATE);
205
206    sOldSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
207    sCompressSpace_ = new SharedOldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
208    sReadOnlySpace_ = new SharedReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
209    sHugeObjectSpace_ = new SharedHugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
210    sharedMemController_ = new SharedMemController(this);
211    sAppSpawnSpace_ = new SharedAppSpawnSpace(this, oldSpaceCapacity);
212    growingFactor_ = config_.GetSharedHeapLimitGrowingFactor();
213    growingStep_ = config_.GetSharedHeapLimitGrowingStep();
214    incNativeSizeTriggerSharedCM_= config_.GetStepNativeSizeInc();
215    incNativeSizeTriggerSharedGC_ = config_.GetMaxNativeSizeInc();
216
217    dThread_ = dThread;
218}
219
220void SharedHeap::Destroy()
221{
222    if (sWorkManager_ != nullptr) {
223        delete sWorkManager_;
224        sWorkManager_ = nullptr;
225    }
226    if (sOldSpace_ != nullptr) {
227        sOldSpace_->Reset();
228        delete sOldSpace_;
229        sOldSpace_ = nullptr;
230    }
231    if (sCompressSpace_ != nullptr) {
232        sCompressSpace_->Reset();
233        delete sCompressSpace_;
234        sCompressSpace_ = nullptr;
235    }
236    if (sNonMovableSpace_ != nullptr) {
237        sNonMovableSpace_->Reset();
238        delete sNonMovableSpace_;
239        sNonMovableSpace_ = nullptr;
240    }
241    if (sHugeObjectSpace_ != nullptr) {
242        sHugeObjectSpace_->Destroy();
243        delete sHugeObjectSpace_;
244        sHugeObjectSpace_ = nullptr;
245    }
246    if (sReadOnlySpace_ != nullptr) {
247        sReadOnlySpace_->ClearReadOnly();
248        sReadOnlySpace_->Destroy();
249        delete sReadOnlySpace_;
250        sReadOnlySpace_ = nullptr;
251    }
252    if (sAppSpawnSpace_ != nullptr) {
253        sAppSpawnSpace_->Reset();
254        delete sAppSpawnSpace_;
255        sAppSpawnSpace_ = nullptr;
256    }
257    if (sharedGC_ != nullptr) {
258        delete sharedGC_;
259        sharedGC_ = nullptr;
260    }
261    if (sharedFullGC_ != nullptr) {
262        delete sharedFullGC_;
263        sharedFullGC_ = nullptr;
264    }
265
266    nativeAreaAllocator_ = nullptr;
267    heapRegionAllocator_ = nullptr;
268
269    if (sSweeper_ != nullptr) {
270        delete sSweeper_;
271        sSweeper_ = nullptr;
272    }
273    if (sConcurrentMarker_ != nullptr) {
274        delete sConcurrentMarker_;
275        sConcurrentMarker_ = nullptr;
276    }
277    if (sharedGCMarker_ != nullptr) {
278        delete sharedGCMarker_;
279        sharedGCMarker_ = nullptr;
280    }
281    if (sharedGCMovableMarker_ != nullptr) {
282        delete sharedGCMovableMarker_;
283        sharedGCMovableMarker_ = nullptr;
284    }
285    if (sharedMemController_ != nullptr) {
286        delete sharedMemController_;
287        sharedMemController_ = nullptr;
288    }
289
290    dThread_ = nullptr;
291}
292
293void SharedHeap::PostInitialization(const GlobalEnvConstants *globalEnvConstants, const JSRuntimeOptions &option)
294{
295    globalEnvConstants_ = globalEnvConstants;
296    uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
297    maxMarkTaskCount_ = totalThreadNum - 1;
298    sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
299    sharedGCMarker_ = new SharedGCMarker(sWorkManager_);
300    sharedGCMovableMarker_ = new SharedGCMovableMarker(sWorkManager_, this);
301    sConcurrentMarker_ = new SharedConcurrentMarker(option.EnableSharedConcurrentMark() ?
302        EnableConcurrentMarkType::ENABLE : EnableConcurrentMarkType::CONFIG_DISABLE);
303    sSweeper_ = new SharedConcurrentSweeper(this, option.EnableConcurrentSweep() ?
304        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
305    sharedGC_ = new SharedGC(this);
306    sharedFullGC_ = new SharedFullGC(this);
307}
308
309void SharedHeap::PostGCMarkingTask(SharedParallelMarkPhase sharedTaskPhase)
310{
311    IncreaseTaskCount();
312    Taskpool::GetCurrentTaskpool()->PostTask(std::make_unique<ParallelMarkTask>(dThread_->GetThreadId(),
313                                                                                this, sharedTaskPhase));
314}
315
316bool SharedHeap::ParallelMarkTask::Run(uint32_t threadIndex)
317{
318    // Synchronizes-with. Ensure that WorkManager::Initialize must be seen by MarkerThreads.
319    while (!sHeap_->GetWorkManager()->HasInitialized());
320    switch (taskPhase_) {
321        case SharedParallelMarkPhase::SHARED_MARK_TASK:
322            sHeap_->GetSharedGCMarker()->ProcessMarkStack(threadIndex);
323            break;
324        case SharedParallelMarkPhase::SHARED_COMPRESS_TASK:
325            sHeap_->GetSharedGCMovableMarker()->ProcessMarkStack(threadIndex);
326            break;
327        default: // LOCV_EXCL_BR_LINE
328            break;
329    }
330    sHeap_->ReduceTaskCount();
331    return true;
332}
333
334bool SharedHeap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
335{
336    sHeap_->ReclaimRegions(gcType_);
337    return true;
338}
339
340void SharedHeap::NotifyGCCompleted()
341{
342    ASSERT(JSThread::GetCurrent() == dThread_);
343    LockHolder lock(waitGCFinishedMutex_);
344    gcFinished_ = true;
345    waitGCFinishedCV_.SignalAll();
346}
347
348void SharedHeap::WaitGCFinished(JSThread *thread)
349{
350    ASSERT(thread->GetThreadId() != dThread_->GetThreadId());
351    ASSERT(thread->IsInRunningState());
352    ThreadSuspensionScope scope(thread);
353    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SuspendTime::WaitGCFinished");
354    LockHolder lock(waitGCFinishedMutex_);
355    while (!gcFinished_) {
356        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
357    }
358}
359
360void SharedHeap::WaitGCFinishedAfterAllJSThreadEliminated()
361{
362    ASSERT(Runtime::GetInstance()->vmCount_ == 0);
363    LockHolder lock(waitGCFinishedMutex_);
364    while (!gcFinished_) {
365        waitGCFinishedCV_.Wait(&waitGCFinishedMutex_);
366    }
367}
368
369void SharedHeap::DaemonCollectGarbage([[maybe_unused]]TriggerGCType gcType, [[maybe_unused]]GCReason gcReason)
370{
371    RecursionScope recurScope(this, HeapType::SHARED_HEAP);
372    ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
373    ASSERT(JSThread::GetCurrent() == dThread_);
374    {
375        ThreadManagedScope runningScope(dThread_);
376        SuspendAllScope scope(dThread_);
377        SharedGCScope sharedGCScope;  // SharedGCScope should be after SuspendAllScope.
378        CheckInHeapProfiler();
379        gcType_ = gcType;
380        GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, gcReason);
381        if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
382            // pre gc heap verify
383            LOG_ECMA(DEBUG) << "pre gc shared heap verify";
384            sharedGCMarker_->MergeBackAndResetRSetWorkListHandler();
385            SharedHeapVerification(this, VerifyKind::VERIFY_PRE_SHARED_GC).VerifyAll();
386        }
387        switch (gcType) {
388            case TriggerGCType::SHARED_GC: {
389                sharedGC_->RunPhases();
390                break;
391            }
392            case TriggerGCType::SHARED_FULL_GC: {
393                sharedFullGC_->RunPhases();
394                break;
395            }
396            default: // LOCV_EXCL_BR_LINE
397                LOG_ECMA(FATAL) << "this branch is unreachable";
398                UNREACHABLE();
399                break;
400        }
401
402        if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
403            // after gc heap verify
404            LOG_ECMA(DEBUG) << "after gc shared heap verify";
405            SharedHeapVerification(this, VerifyKind::VERIFY_POST_SHARED_GC).VerifyAll();
406        }
407        CollectGarbageFinish(true, gcType);
408    }
409    InvokeSharedNativePointerCallbacks();
410    // Don't process weak node nativeFinalizeCallback here. These callbacks would be called after localGC.
411}
412
413void SharedHeap::WaitAllTasksFinished(JSThread *thread)
414{
415    WaitGCFinished(thread);
416    sSweeper_->WaitAllTaskFinished();
417    WaitClearTaskFinished();
418}
419
420void SharedHeap::WaitAllTasksFinishedAfterAllJSThreadEliminated()
421{
422    WaitGCFinishedAfterAllJSThreadEliminated();
423    sSweeper_->WaitAllTaskFinished();
424    WaitClearTaskFinished();
425}
426
427bool SharedHeap::CheckOngoingConcurrentMarking()
428{
429    if (sConcurrentMarker_->IsEnabled() && !dThread_->IsReadyToConcurrentMark() &&
430        sConcurrentMarker_->IsTriggeredConcurrentMark()) {
431        // This is only called in SharedGC to decide whether to remark, so do not need to wait marking finish here
432        return true;
433    }
434    return false;
435}
436
437void SharedHeap::CheckInHeapProfiler()
438{
439#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
440    Runtime::GetInstance()->GCIterateThreadList([this](JSThread *thread) {
441        if (thread->GetEcmaVM()->GetHeapProfile() != nullptr) {
442            inHeapProfiler_ = true;
443            return;
444        }
445    });
446#else
447    inHeapProfiler_ = false;
448#endif
449}
450
451void SharedHeap::Prepare(bool inTriggerGCThread)
452{
453    WaitRunningTaskFinished();
454    if (inTriggerGCThread) {
455        sSweeper_->EnsureAllTaskFinished();
456    } else {
457        sSweeper_->WaitAllTaskFinished();
458    }
459    WaitClearTaskFinished();
460}
461
462SharedHeap::SharedGCScope::SharedGCScope()
463{
464    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
465        std::shared_ptr<pgo::PGOProfiler> pgoProfiler =  thread->GetEcmaVM()->GetPGOProfiler();
466        if (pgoProfiler != nullptr) {
467            pgoProfiler->SuspendByGC();
468        }
469#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
470        thread->SetGcState(true);
471#endif
472    });
473}
474
475SharedHeap::SharedGCScope::~SharedGCScope()
476{
477    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
478        ASSERT(!thread->IsInRunningState());
479        const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
480        std::shared_ptr<pgo::PGOProfiler> pgoProfiler =  thread->GetEcmaVM()->GetPGOProfiler();
481        if (pgoProfiler != nullptr) {
482            pgoProfiler->ResumeByGC();
483        }
484#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
485        thread->SetGcState(false);
486#endif
487    });
488}
489
490void SharedHeap::PrepareRecordRegionsForReclaim()
491{
492    sOldSpace_->SetRecordRegion();
493    sNonMovableSpace_->SetRecordRegion();
494    sHugeObjectSpace_->SetRecordRegion();
495}
496
497void SharedHeap::Reclaim(TriggerGCType gcType)
498{
499    PrepareRecordRegionsForReclaim();
500    sHugeObjectSpace_->ReclaimHugeRegion();
501
502    if (parallelGC_) {
503        clearTaskFinished_ = false;
504        Taskpool::GetCurrentTaskpool()->PostTask(
505            std::make_unique<AsyncClearTask>(dThread_->GetThreadId(), this, gcType));
506    } else {
507        ReclaimRegions(gcType);
508    }
509}
510
511void SharedHeap::ReclaimRegions(TriggerGCType gcType)
512{
513    if (gcType == TriggerGCType::SHARED_FULL_GC) {
514        sCompressSpace_->Reset();
515    }
516    sSweeper_->WaitAllTaskFinished();
517    EnumerateOldSpaceRegionsWithRecord([] (Region *region) {
518        region->ClearMarkGCBitset();
519        region->ResetAliveObject();
520    });
521    if (!clearTaskFinished_) {
522        LockHolder holder(waitClearTaskFinishedMutex_);
523        clearTaskFinished_ = true;
524        waitClearTaskFinishedCV_.SignalAll();
525    }
526}
527
528void SharedHeap::DisableParallelGC(JSThread *thread)
529{
530    WaitAllTasksFinished(thread);
531    dThread_->WaitFinished();
532    parallelGC_ = false;
533    maxMarkTaskCount_ = 0;
534    sSweeper_->ConfigConcurrentSweep(false);
535    sConcurrentMarker_->ConfigConcurrentMark(false);
536}
537
538void SharedHeap::EnableParallelGC(JSRuntimeOptions &option)
539{
540    uint32_t totalThreadNum = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
541    maxMarkTaskCount_ = totalThreadNum - 1;
542    parallelGC_ = option.EnableParallelGC();
543    if (auto workThreadNum = sWorkManager_->GetTotalThreadNum();
544        workThreadNum != totalThreadNum + 1) {
545        LOG_ECMA_MEM(ERROR) << "TheadNum mismatch, totalThreadNum(sWorkerManager): " << workThreadNum << ", "
546                            << "totalThreadNum(taskpool): " << (totalThreadNum + 1);
547        delete sWorkManager_;
548        sWorkManager_ = new SharedGCWorkManager(this, totalThreadNum + 1);
549        UpdateWorkManager(sWorkManager_);
550    }
551    sConcurrentMarker_->ConfigConcurrentMark(option.EnableSharedConcurrentMark());
552    sSweeper_->ConfigConcurrentSweep(option.EnableConcurrentSweep());
553}
554
555void SharedHeap::UpdateWorkManager(SharedGCWorkManager *sWorkManager)
556{
557    sConcurrentMarker_->ResetWorkManager(sWorkManager);
558    sharedGCMarker_->ResetWorkManager(sWorkManager);
559    sharedGCMovableMarker_->ResetWorkManager(sWorkManager);
560    sharedGC_->ResetWorkManager(sWorkManager);
561    sharedFullGC_->ResetWorkManager(sWorkManager);
562}
563
564void SharedHeap::TryTriggerLocalConcurrentMarking()
565{
566    if (localFullMarkTriggered_) {
567        return;
568    }
569    if (reinterpret_cast<std::atomic<bool>*>(&localFullMarkTriggered_)->exchange(true, std::memory_order_relaxed)
570            != false) {
571        return;
572    }
573    ASSERT(localFullMarkTriggered_ == true);
574    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
575        thread->SetFullMarkRequest();
576    });
577}
578
579size_t SharedHeap::VerifyHeapObjects(VerifyKind verifyKind) const
580{
581    size_t failCount = 0;
582    {
583        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
584        sOldSpace_->IterateOverObjects(verifier);
585    }
586    {
587        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
588        sNonMovableSpace_->IterateOverObjects(verifier);
589    }
590    {
591        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
592        sHugeObjectSpace_->IterateOverObjects(verifier);
593    }
594    {
595        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
596        sAppSpawnSpace_->IterateOverMarkedObjects(verifier);
597    }
598    return failCount;
599}
600
601bool SharedHeap::IsReadyToConcurrentMark() const
602{
603    return dThread_->IsReadyToConcurrentMark();
604}
605
606bool SharedHeap::NeedStopCollection()
607{
608    if (!InSensitiveStatus()) {
609        return false;
610    }
611
612    if (!ObjectExceedMaxHeapSize()) {
613        return true;
614    }
615    return false;
616}
617
618void SharedHeap::CompactHeapBeforeFork(JSThread *thread)
619{
620    ThreadManagedScope managedScope(thread);
621    WaitGCFinished(thread);
622    sharedFullGC_->SetForAppSpawn(true);
623    CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::OTHER>(thread);
624    sharedFullGC_->SetForAppSpawn(false);
625}
626
627void SharedHeap::MoveOldSpaceToAppspawn()
628{
629    auto committedSize = sOldSpace_->GetCommittedSize();
630    sAppSpawnSpace_->SetInitialCapacity(committedSize);
631    sAppSpawnSpace_->SetMaximumCapacity(committedSize);
632    sOldSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity() - committedSize);
633    sOldSpace_->SetMaximumCapacity(sOldSpace_->GetMaximumCapacity() - committedSize);
634#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
635    sAppSpawnSpace_->SwapAllocationCounter(sOldSpace_);
636#endif
637    auto threadId = Runtime::GetInstance()->GetMainThread()->GetThreadId();
638    sOldSpace_->EnumerateRegions([&](Region *region) {
639        region->SetRegionSpaceFlag(RegionSpaceFlag::IN_SHARED_APPSPAWN_SPACE);
640        PageTag(region, region->GetCapacity(), PageTagType::HEAP, region->GetSpaceTypeName(), threadId);
641        sAppSpawnSpace_->AddRegion(region);
642        sAppSpawnSpace_->IncreaseLiveObjectSize(region->AliveObject());
643    });
644    sOldSpace_->GetRegionList().Clear();
645    sOldSpace_->Reset();
646}
647
648void SharedHeap::ReclaimForAppSpawn()
649{
650    sSweeper_->WaitAllTaskFinished();
651    sHugeObjectSpace_->ReclaimHugeRegion();
652    sCompressSpace_->Reset();
653    MoveOldSpaceToAppspawn();
654    auto cb = [] (Region *region) {
655        region->ClearMarkGCBitset();
656        region->ResetAliveObject();
657    };
658    sNonMovableSpace_->EnumerateRegions(cb);
659    sHugeObjectSpace_->EnumerateRegions(cb);
660}
661
662void SharedHeap::DumpHeapSnapshotBeforeOOM([[maybe_unused]]bool isFullGC, [[maybe_unused]]JSThread *thread)
663{
664#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
665#if defined(ENABLE_DUMP_IN_FAULTLOG)
666    EcmaVM *vm = thread->GetEcmaVM();
667    if (vm->GetHeapProfile() != nullptr) {
668        LOG_FULL(INFO) << "GetHeapProfile nullptr";
669        return;
670    }
671    // Filter appfreeze when dump.
672    LOG_ECMA(INFO) << " DumpHeapSnapshotBeforeOOM, isFullGC" << isFullGC;
673    base::BlockHookScope blockScope;
674    HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(vm);
675    if (appfreezeCallback_ != nullptr && appfreezeCallback_(getprocpid())) {
676        LOG_ECMA(INFO) << " DumpHeapSnapshotBeforeOOM Success. ";
677    }
678    DumpSnapShotOption dumpOption;
679    dumpOption.dumpFormat = DumpFormat::BINARY;
680    dumpOption.isVmMode = true;
681    dumpOption.isPrivate = false;
682    dumpOption.captureNumericValue = false;
683    dumpOption.isFullGC = isFullGC;
684    dumpOption.isSimplify = true;
685    dumpOption.isSync = true;
686    dumpOption.isBeforeFill = false;
687    dumpOption.isDumpOOM = true;
688    heapProfile->DumpHeapSnapshot(dumpOption);
689    HeapProfilerInterface::Destroy(vm);
690#endif // ENABLE_DUMP_IN_FAULTLOG
691#endif // ECMASCRIPT_SUPPORT_SNAPSHOT
692}
693
694Heap::Heap(EcmaVM *ecmaVm)
695    : BaseHeap(ecmaVm->GetEcmaParamConfiguration()),
696      ecmaVm_(ecmaVm), thread_(ecmaVm->GetJSThread()), sHeap_(SharedHeap::GetInstance()) {}
697
698void Heap::Initialize()
699{
700    memController_ = new MemController(this);
701    nativeAreaAllocator_ = ecmaVm_->GetNativeAreaAllocator();
702    heapRegionAllocator_ = ecmaVm_->GetHeapRegionAllocator();
703    size_t maxHeapSize = config_.GetMaxHeapSize();
704    size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
705    size_t maxSemiSpaceCapacity = config_.GetMaxSemiSpaceSize();
706    size_t edenSpaceCapacity = 2_MB;
707    edenSpace_ = new EdenSpace(this, edenSpaceCapacity, edenSpaceCapacity);
708    edenSpace_->Restart();
709    activeSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
710    activeSemiSpace_->Restart();
711    activeSemiSpace_->SetWaterLine();
712
713    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
714    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
715    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
716    sOldTlab_ = new ThreadLocalAllocationBuffer(this);
717    thread_->ReSetSOldSpaceAllocationAddress(sOldTlab_->GetTopAddress(), sOldTlab_->GetEndAddress());
718    sNonMovableTlab_ = new ThreadLocalAllocationBuffer(this);
719    thread_->ReSetSNonMovableSpaceAllocationAddress(sNonMovableTlab_->GetTopAddress(),
720                                                    sNonMovableTlab_->GetEndAddress());
721    inactiveSemiSpace_ = new SemiSpace(this, minSemiSpaceCapacity, maxSemiSpaceCapacity);
722
723    // whether should verify heap duration gc
724    shouldVerifyHeap_ = ecmaVm_->GetJSOptions().EnableHeapVerify();
725    // not set up from space
726
727    size_t readOnlySpaceCapacity = config_.GetDefaultReadOnlySpaceSize();
728    readOnlySpace_ = new ReadOnlySpace(this, readOnlySpaceCapacity, readOnlySpaceCapacity);
729    appSpawnSpace_ = new AppSpawnSpace(this, maxHeapSize);
730    size_t nonmovableSpaceCapacity = config_.GetDefaultNonMovableSpaceSize();
731    if (ecmaVm_->GetJSOptions().WasSetMaxNonmovableSpaceCapacity()) {
732        nonmovableSpaceCapacity = ecmaVm_->GetJSOptions().MaxNonmovableSpaceCapacity();
733    }
734    nonMovableSpace_ = new NonMovableSpace(this, nonmovableSpaceCapacity, nonmovableSpaceCapacity);
735    nonMovableSpace_->Initialize();
736    size_t snapshotSpaceCapacity = config_.GetDefaultSnapshotSpaceSize();
737    snapshotSpace_ = new SnapshotSpace(this, snapshotSpaceCapacity, snapshotSpaceCapacity);
738    size_t machineCodeSpaceCapacity = config_.GetDefaultMachineCodeSpaceSize();
739    machineCodeSpace_ = new MachineCodeSpace(this, machineCodeSpaceCapacity, machineCodeSpaceCapacity);
740
741    size_t capacities = minSemiSpaceCapacity * 2 + nonmovableSpaceCapacity + snapshotSpaceCapacity +
742        machineCodeSpaceCapacity + readOnlySpaceCapacity;
743    if (maxHeapSize < capacities || maxHeapSize - capacities < MIN_OLD_SPACE_LIMIT) { // LOCV_EXCL_BR_LINE
744        LOG_ECMA_MEM(FATAL) << "HeapSize is too small to initialize oldspace, heapSize = " << maxHeapSize;
745    }
746    size_t oldSpaceCapacity = maxHeapSize - capacities;
747    globalSpaceAllocLimit_ = maxHeapSize - minSemiSpaceCapacity;
748    globalSpaceNativeLimit_ = INIT_GLOBAL_SPACE_NATIVE_SIZE_LIMIT;
749    oldSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
750    compressSpace_ = new OldSpace(this, oldSpaceCapacity, oldSpaceCapacity);
751    oldSpace_->Initialize();
752
753    hugeObjectSpace_ = new HugeObjectSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
754    hugeMachineCodeSpace_ = new HugeMachineCodeSpace(this, heapRegionAllocator_, oldSpaceCapacity, oldSpaceCapacity);
755    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
756    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
757        maxEvacuateTaskCount_ - 1);
758
759    LOG_GC(DEBUG) << "heap initialize: heap size = " << (maxHeapSize / 1_MB) << "MB"
760                 << ", semispace capacity = " << (minSemiSpaceCapacity / 1_MB) << "MB"
761                 << ", nonmovablespace capacity = " << (nonmovableSpaceCapacity / 1_MB) << "MB"
762                 << ", snapshotspace capacity = " << (snapshotSpaceCapacity / 1_MB) << "MB"
763                 << ", machinecodespace capacity = " << (machineCodeSpaceCapacity / 1_MB) << "MB"
764                 << ", oldspace capacity = " << (oldSpaceCapacity / 1_MB) << "MB"
765                 << ", globallimit = " << (globalSpaceAllocLimit_ / 1_MB) << "MB"
766                 << ", gcThreadNum = " << maxMarkTaskCount_;
767    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
768    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
769    markType_ = MarkType::MARK_YOUNG;
770#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
771    concurrentMarkerEnabled = false;
772#endif
773    workManager_ = new WorkManager(this, Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1);
774    fullGC_ = new FullGC(this);
775
776    partialGC_ = new PartialGC(this);
777    sweeper_ = new ConcurrentSweeper(this, ecmaVm_->GetJSOptions().EnableConcurrentSweep() ?
778        EnableConcurrentSweepType::ENABLE : EnableConcurrentSweepType::CONFIG_DISABLE);
779    concurrentMarker_ = new ConcurrentMarker(this, concurrentMarkerEnabled ? EnableConcurrentMarkType::ENABLE :
780        EnableConcurrentMarkType::CONFIG_DISABLE);
781    nonMovableMarker_ = new NonMovableMarker(this);
782    semiGCMarker_ = new SemiGCMarker(this);
783    compressGCMarker_ = new CompressGCMarker(this);
784    evacuator_ = new ParallelEvacuator(this);
785    incrementalMarker_ = new IncrementalMarker(this);
786    gcListeners_.reserve(16U);
787    nativeSizeTriggerGCThreshold_ = config_.GetMaxNativeSizeInc();
788    incNativeSizeTriggerGC_ = config_.GetStepNativeSizeInc();
789    nativeSizeOvershoot_ = config_.GetNativeSizeOvershoot();
790    asyncClearNativePointerThreshold_ = config_.GetAsyncClearNativePointerThreshold();
791    idleGCTrigger_ = new IdleGCTrigger(this, sHeap_, thread_, GetEcmaVM()->GetJSOptions().EnableOptionalLog());
792}
793
794void Heap::ResetTlab()
795{
796    sOldTlab_->Reset();
797    sNonMovableTlab_->Reset();
798}
799
800void Heap::FillBumpPointerForTlab()
801{
802    sOldTlab_->FillBumpPointer();
803    sNonMovableTlab_->FillBumpPointer();
804}
805
806void Heap::ProcessSharedGCMarkingLocalBuffer()
807{
808    if (sharedGCData_.sharedConcurrentMarkingLocalBuffer_ != nullptr) {
809        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
810        sHeap_->GetWorkManager()->PushLocalBufferToGlobal(sharedGCData_.sharedConcurrentMarkingLocalBuffer_);
811        ASSERT(sharedGCData_.sharedConcurrentMarkingLocalBuffer_ == nullptr);
812    }
813}
814
815void Heap::ProcessSharedGCRSetWorkList()
816{
817    if (sharedGCData_.rSetWorkListHandler_ != nullptr) {
818        ASSERT(thread_->IsSharedConcurrentMarkingOrFinished());
819        ASSERT(this == sharedGCData_.rSetWorkListHandler_->GetHeap());
820        sHeap_->GetSharedGCMarker()->ProcessThenMergeBackRSetFromBoundJSThread(sharedGCData_.rSetWorkListHandler_);
821        ASSERT(sharedGCData_.rSetWorkListHandler_ == nullptr);
822    }
823}
824
825const GlobalEnvConstants *Heap::GetGlobalConst() const
826{
827    return thread_->GlobalConstants();
828}
829
830void Heap::Destroy()
831{
832    ProcessSharedGCRSetWorkList();
833    ProcessSharedGCMarkingLocalBuffer();
834    if (sOldTlab_ != nullptr) {
835        sOldTlab_->Reset();
836        delete sOldTlab_;
837        sOldTlab_ = nullptr;
838    }
839    if (sNonMovableTlab_!= nullptr) {
840        sNonMovableTlab_->Reset();
841        delete sNonMovableTlab_;
842        sNonMovableTlab_= nullptr;
843    }
844    if (workManager_ != nullptr) {
845        delete workManager_;
846        workManager_ = nullptr;
847    }
848    if (edenSpace_ != nullptr) {
849        edenSpace_->Destroy();
850        delete edenSpace_;
851        edenSpace_ = nullptr;
852    }
853    if (activeSemiSpace_ != nullptr) {
854        activeSemiSpace_->Destroy();
855        delete activeSemiSpace_;
856        activeSemiSpace_ = nullptr;
857    }
858    if (inactiveSemiSpace_ != nullptr) {
859        inactiveSemiSpace_->Destroy();
860        delete inactiveSemiSpace_;
861        inactiveSemiSpace_ = nullptr;
862    }
863    if (oldSpace_ != nullptr) {
864        oldSpace_->Reset();
865        delete oldSpace_;
866        oldSpace_ = nullptr;
867    }
868    if (compressSpace_ != nullptr) {
869        compressSpace_->Destroy();
870        delete compressSpace_;
871        compressSpace_ = nullptr;
872    }
873    if (nonMovableSpace_ != nullptr) {
874        nonMovableSpace_->Reset();
875        delete nonMovableSpace_;
876        nonMovableSpace_ = nullptr;
877    }
878    if (snapshotSpace_ != nullptr) {
879        snapshotSpace_->Destroy();
880        delete snapshotSpace_;
881        snapshotSpace_ = nullptr;
882    }
883    if (machineCodeSpace_ != nullptr) {
884        machineCodeSpace_->Reset();
885        delete machineCodeSpace_;
886        machineCodeSpace_ = nullptr;
887    }
888    if (hugeObjectSpace_ != nullptr) {
889        hugeObjectSpace_->Destroy();
890        delete hugeObjectSpace_;
891        hugeObjectSpace_ = nullptr;
892    }
893    if (hugeMachineCodeSpace_ != nullptr) {
894        hugeMachineCodeSpace_->Destroy();
895        delete hugeMachineCodeSpace_;
896        hugeMachineCodeSpace_ = nullptr;
897    }
898    if (readOnlySpace_ != nullptr && mode_ != HeapMode::SHARE) {
899        readOnlySpace_->ClearReadOnly();
900        readOnlySpace_->Destroy();
901        delete readOnlySpace_;
902        readOnlySpace_ = nullptr;
903    }
904    if (appSpawnSpace_ != nullptr) {
905        appSpawnSpace_->Reset();
906        delete appSpawnSpace_;
907        appSpawnSpace_ = nullptr;
908    }
909    if (partialGC_ != nullptr) {
910        delete partialGC_;
911        partialGC_ = nullptr;
912    }
913    if (fullGC_ != nullptr) {
914        delete fullGC_;
915        fullGC_ = nullptr;
916    }
917
918    nativeAreaAllocator_ = nullptr;
919    heapRegionAllocator_ = nullptr;
920
921    if (memController_ != nullptr) {
922        delete memController_;
923        memController_ = nullptr;
924    }
925    if (sweeper_ != nullptr) {
926        delete sweeper_;
927        sweeper_ = nullptr;
928    }
929    if (concurrentMarker_ != nullptr) {
930        delete concurrentMarker_;
931        concurrentMarker_ = nullptr;
932    }
933    if (incrementalMarker_ != nullptr) {
934        delete incrementalMarker_;
935        incrementalMarker_ = nullptr;
936    }
937    if (nonMovableMarker_ != nullptr) {
938        delete nonMovableMarker_;
939        nonMovableMarker_ = nullptr;
940    }
941    if (semiGCMarker_ != nullptr) {
942        delete semiGCMarker_;
943        semiGCMarker_ = nullptr;
944    }
945    if (compressGCMarker_ != nullptr) {
946        delete compressGCMarker_;
947        compressGCMarker_ = nullptr;
948    }
949    if (evacuator_ != nullptr) {
950        delete evacuator_;
951        evacuator_ = nullptr;
952    }
953}
954
955void Heap::Prepare()
956{
957    MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, HeapPrepare);
958    WaitRunningTaskFinished();
959    sweeper_->EnsureAllTaskFinished();
960    WaitClearTaskFinished();
961}
962
963void Heap::GetHeapPrepare()
964{
965    // Ensure local and shared heap prepared.
966    Prepare();
967    SharedHeap *sHeap = SharedHeap::GetInstance();
968    sHeap->Prepare(false);
969}
970
971void Heap::Resume(TriggerGCType gcType)
972{
973    if (edenSpace_->ShouldTryEnable()) {
974        TryEnableEdenGC();
975    }
976    if (enableEdenGC_) {
977        edenSpace_->ReclaimRegions(edenSpace_->GetInitialCapacity());
978        edenSpace_->Restart();
979        if (IsEdenMark()) {
980            activeSemiSpace_->EnumerateRegions([](Region *region) { region->ResetRegionTypeFlag(); });
981            activeSemiSpace_->SetWaterLine();
982            return;
983        }
984    }
985
986    activeSemiSpace_->SetWaterLine();
987
988    if (mode_ != HeapMode::SPAWN &&
989        activeSemiSpace_->AdjustCapacity(inactiveSemiSpace_->GetAllocatedSizeSinceGC(), thread_)) {
990        // if activeSpace capacity changes, oldSpace maximumCapacity should change, too.
991        size_t multiple = 2;
992        size_t oldSpaceMaxLimit = 0;
993        if (activeSemiSpace_->GetInitialCapacity() >= inactiveSemiSpace_->GetInitialCapacity()) {
994            size_t delta = activeSemiSpace_->GetInitialCapacity() - inactiveSemiSpace_->GetInitialCapacity();
995            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() - delta * multiple;
996        } else {
997            size_t delta = inactiveSemiSpace_->GetInitialCapacity() - activeSemiSpace_->GetInitialCapacity();
998            oldSpaceMaxLimit = oldSpace_->GetMaximumCapacity() + delta * multiple;
999        }
1000        inactiveSemiSpace_->SetInitialCapacity(activeSemiSpace_->GetInitialCapacity());
1001    }
1002
1003    PrepareRecordRegionsForReclaim();
1004    hugeObjectSpace_->ReclaimHugeRegion();
1005    hugeMachineCodeSpace_->ReclaimHugeRegion();
1006    if (parallelGC_) {
1007        if (gcType == TriggerGCType::OLD_GC) {
1008            isCSetClearing_.store(true, std::memory_order_release);
1009        }
1010        clearTaskFinished_ = false;
1011        Taskpool::GetCurrentTaskpool()->PostTask(
1012            std::make_unique<AsyncClearTask>(GetJSThread()->GetThreadId(), this, gcType));
1013    } else {
1014        ReclaimRegions(gcType);
1015    }
1016}
1017
1018void Heap::ResumeForAppSpawn()
1019{
1020    sweeper_->WaitAllTaskFinished();
1021    hugeObjectSpace_->ReclaimHugeRegion();
1022    hugeMachineCodeSpace_->ReclaimHugeRegion();
1023    edenSpace_->ReclaimRegions();
1024    inactiveSemiSpace_->ReclaimRegions();
1025    oldSpace_->Reset();
1026    auto cb = [] (Region *region) {
1027        region->ClearMarkGCBitset();
1028    };
1029    nonMovableSpace_->EnumerateRegions(cb);
1030    machineCodeSpace_->EnumerateRegions(cb);
1031    hugeObjectSpace_->EnumerateRegions(cb);
1032    hugeMachineCodeSpace_->EnumerateRegions(cb);
1033}
1034
1035void Heap::CompactHeapBeforeFork()
1036{
1037    CollectGarbage(TriggerGCType::APPSPAWN_FULL_GC);
1038}
1039
1040void Heap::DisableParallelGC()
1041{
1042    WaitAllTasksFinished();
1043    parallelGC_ = false;
1044    maxEvacuateTaskCount_ = 0;
1045    maxMarkTaskCount_ = 0;
1046    sweeper_->ConfigConcurrentSweep(false);
1047    concurrentMarker_->ConfigConcurrentMark(false);
1048    Taskpool::GetCurrentTaskpool()->Destroy(GetJSThread()->GetThreadId());
1049}
1050
1051void Heap::EnableParallelGC()
1052{
1053    parallelGC_ = ecmaVm_->GetJSOptions().EnableParallelGC();
1054    maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
1055    if (auto totalThreadNum = workManager_->GetTotalThreadNum();
1056        totalThreadNum != maxEvacuateTaskCount_ + 1) {
1057        LOG_ECMA_MEM(WARN) << "TheadNum mismatch, totalThreadNum(workerManager): " << totalThreadNum << ", "
1058                           << "totalThreadNum(taskpool): " << (maxEvacuateTaskCount_ + 1);
1059        delete workManager_;
1060        workManager_ = new WorkManager(this, maxEvacuateTaskCount_ + 1);
1061        UpdateWorkManager(workManager_);
1062    }
1063    ASSERT(maxEvacuateTaskCount_ > 0);
1064    maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
1065                                         maxEvacuateTaskCount_ - 1);
1066    bool concurrentMarkerEnabled = ecmaVm_->GetJSOptions().EnableConcurrentMark();
1067#if ECMASCRIPT_DISABLE_CONCURRENT_MARKING
1068    concurrentMarkerEnabled = false;
1069#endif
1070    sweeper_->ConfigConcurrentSweep(ecmaVm_->GetJSOptions().EnableConcurrentSweep());
1071    concurrentMarker_->ConfigConcurrentMark(concurrentMarkerEnabled);
1072}
1073
1074TriggerGCType Heap::SelectGCType() const
1075{
1076    // If concurrent mark is enabled, the TryTriggerConcurrentMarking decide which GC to choose.
1077    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark()) {
1078        return YOUNG_GC;
1079    }
1080    if (!OldSpaceExceedLimit() && !OldSpaceExceedCapacity(activeSemiSpace_->GetCommittedSize()) &&
1081        GetHeapObjectSize() <= globalSpaceAllocLimit_  + oldSpace_->GetOvershootSize() &&
1082        !GlobalNativeSizeLargerThanLimit()) {
1083        return YOUNG_GC;
1084    }
1085    return OLD_GC;
1086}
1087
1088void Heap::CollectGarbage(TriggerGCType gcType, GCReason reason)
1089{
1090    Jit::JitGCLockHolder lock(GetEcmaVM()->GetJSThread());
1091    {
1092#if ECMASCRIPT_ENABLE_THREAD_STATE_CHECK
1093        if (UNLIKELY(!thread_->IsInRunningStateOrProfiling())) { // LOCV_EXCL_BR_LINE
1094            LOG_ECMA(FATAL) << "Local GC must be in jsthread running state";
1095            UNREACHABLE();
1096        }
1097#endif
1098        if (thread_->IsCrossThreadExecutionEnable() || GetOnSerializeEvent()) {
1099            ProcessGCListeners();
1100            return;
1101        }
1102        RecursionScope recurScope(this, HeapType::LOCAL_HEAP);
1103#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER)
1104        [[maybe_unused]] GcStateScope scope(thread_);
1105#endif
1106        CHECK_NO_GC;
1107        if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
1108            // pre gc heap verify
1109            LOG_ECMA(DEBUG) << "pre gc heap verify";
1110            ProcessSharedGCRSetWorkList();
1111            Verification(this, VerifyKind::VERIFY_PRE_GC).VerifyAll();
1112        }
1113
1114#if ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC
1115        gcType = TriggerGCType::FULL_GC;
1116#endif
1117        if (fullGCRequested_ && thread_->IsReadyToConcurrentMark() && gcType != TriggerGCType::FULL_GC) {
1118            gcType = TriggerGCType::FULL_GC;
1119        }
1120        if (oldGCRequested_ && gcType != TriggerGCType::FULL_GC) {
1121            gcType = TriggerGCType::OLD_GC;
1122        }
1123        if (shouldThrowOOMError_) {
1124            // Force Full GC after failed Old GC to avoid OOM
1125            LOG_ECMA(INFO) << "Old space is almost OOM, attempt trigger full gc to avoid OOM.";
1126            gcType = TriggerGCType::FULL_GC;
1127        }
1128        oldGCRequested_ = false;
1129        oldSpace_->AdjustOvershootSize();
1130
1131        size_t originalNewSpaceSize = IsEdenMark() ? edenSpace_->GetHeapObjectSize() :
1132                (activeSemiSpace_->GetHeapObjectSize() + edenSpace_->GetHeapObjectSize());
1133        if (!GetJSThread()->IsReadyToConcurrentMark() && markType_ == MarkType::MARK_FULL) {
1134            GetEcmaGCStats()->SetGCReason(reason);
1135        } else {
1136            GetEcmaGCStats()->RecordStatisticBeforeGC(gcType, reason);
1137        }
1138        memController_->StartCalculationBeforeGC();
1139        StatisticHeapObject(gcType);
1140        gcType_ = gcType;
1141        {
1142            pgo::PGODumpPauseScope pscope(GetEcmaVM()->GetPGOProfiler());
1143            switch (gcType) {
1144                case TriggerGCType::EDEN_GC:
1145                    if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1146                        SetMarkType(MarkType::MARK_EDEN);
1147                    }
1148                    if (markType_ == MarkType::MARK_YOUNG) {
1149                        gcType_ = TriggerGCType::YOUNG_GC;
1150                    }
1151                    if (markType_ == MarkType::MARK_FULL) {
1152                        // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
1153                        gcType_ = TriggerGCType::OLD_GC;
1154                    }
1155                    partialGC_->RunPhases();
1156                    break;
1157                case TriggerGCType::YOUNG_GC:
1158                    // Use partial GC for young generation.
1159                    if (!concurrentMarker_->IsEnabled() && !incrementalMarker_->IsTriggeredIncrementalMark()) {
1160                        SetMarkType(MarkType::MARK_YOUNG);
1161                    }
1162                    if (markType_ == MarkType::MARK_FULL) {
1163                        // gcType_ must be sure. Functions ProcessNativeReferences need to use it.
1164                        gcType_ = TriggerGCType::OLD_GC;
1165                    }
1166                    partialGC_->RunPhases();
1167                    break;
1168                case TriggerGCType::OLD_GC: {
1169                    bool fullConcurrentMarkRequested = false;
1170                    // Check whether it's needed to trigger full concurrent mark instead of trigger old gc
1171                    if (concurrentMarker_->IsEnabled() &&
1172                        (thread_->IsReadyToConcurrentMark() || markType_ == MarkType::MARK_YOUNG) &&
1173                        reason == GCReason::ALLOCATION_LIMIT) {
1174                        fullConcurrentMarkRequested = true;
1175                    }
1176                    if (concurrentMarker_->IsEnabled() && markType_ == MarkType::MARK_YOUNG) {
1177                        // Wait for existing concurrent marking tasks to be finished (if any),
1178                        // and reset concurrent marker's status for full mark.
1179                        bool concurrentMark = CheckOngoingConcurrentMarking();
1180                        if (concurrentMark) {
1181                            concurrentMarker_->Reset();
1182                        }
1183                    }
1184                    SetMarkType(MarkType::MARK_FULL);
1185                    if (fullConcurrentMarkRequested && idleTask_ == IdleTaskType::NO_TASK) {
1186                        LOG_ECMA(INFO)
1187                            << "Trigger old gc here may cost long time, trigger full concurrent mark instead";
1188                        oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1189                        TriggerConcurrentMarking();
1190                        oldGCRequested_ = true;
1191                        ProcessGCListeners();
1192                        return;
1193                    }
1194                    partialGC_->RunPhases();
1195                    break;
1196                }
1197                case TriggerGCType::FULL_GC:
1198                    fullGC_->SetForAppSpawn(false);
1199                    fullGC_->RunPhases();
1200                    if (fullGCRequested_) {
1201                        fullGCRequested_ = false;
1202                    }
1203                    break;
1204                case TriggerGCType::APPSPAWN_FULL_GC:
1205                    fullGC_->SetForAppSpawn(true);
1206                    fullGC_->RunPhasesForAppSpawn();
1207                    break;
1208                default: // LOCV_EXCL_BR_LINE
1209                    LOG_ECMA(FATAL) << "this branch is unreachable";
1210                    UNREACHABLE();
1211                    break;
1212            }
1213            ASSERT(thread_->IsPropertyCacheCleared());
1214        }
1215        UpdateHeapStatsAfterGC(gcType_);
1216        ClearIdleTask();
1217        // Adjust the old space capacity and global limit for the first partial GC with full mark.
1218        // Trigger full mark next time if the current survival rate is much less than half the average survival rates.
1219        AdjustBySurvivalRate(originalNewSpaceSize);
1220        memController_->StopCalculationAfterGC(gcType);
1221        if (gcType == TriggerGCType::FULL_GC || IsConcurrentFullMark()) {
1222            // Only when the gc type is not semiGC and after the old space sweeping has been finished,
1223            // the limits of old space and global space can be recomputed.
1224            RecomputeLimits();
1225            ResetNativeSizeAfterLastGC();
1226            OPTIONAL_LOG(ecmaVm_, INFO) << " GC after: is full mark" << IsConcurrentFullMark()
1227                                        << " global object size " << GetHeapObjectSize()
1228                                        << " global committed size " << GetCommittedSize()
1229                                        << " global limit " << globalSpaceAllocLimit_;
1230            markType_ = MarkType::MARK_YOUNG;
1231        }
1232        if (concurrentMarker_->IsRequestDisabled()) {
1233            concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
1234        }
1235        // GC log
1236        GetEcmaGCStats()->RecordStatisticAfterGC();
1237#ifdef ENABLE_HISYSEVENT
1238        GetEcmaGCKeyStats()->IncGCCount();
1239        if (GetEcmaGCKeyStats()->CheckIfMainThread() && GetEcmaGCKeyStats()->CheckIfKeyPauseTime()) {
1240            GetEcmaGCKeyStats()->AddGCStatsToKey();
1241        }
1242#endif
1243        GetEcmaGCStats()->PrintGCStatistic();
1244    }
1245
1246    if (gcType_ == TriggerGCType::OLD_GC) {
1247        // During full concurrent mark, non movable space can have 2M overshoot size temporarily, which means non
1248        // movable space max heap size can reach to 18M temporarily, but after partial old gc, the size must retract to
1249        // below 16M, Otherwise, old GC will be triggered frequently. Non-concurrent mark period, non movable space max
1250        // heap size is 16M, if exceeded, an OOM exception will be thrown, this check is to do this.
1251        CheckNonMovableSpaceOOM();
1252    }
1253    // OOMError object is not allowed to be allocated during gc process, so throw OOMError after gc
1254    if (shouldThrowOOMError_ && gcType_ == TriggerGCType::FULL_GC) {
1255        sweeper_->EnsureAllTaskFinished();
1256        oldSpace_->ResetCommittedOverSizeLimit();
1257        if (oldSpace_->CommittedSizeExceed()) {
1258            DumpHeapSnapshotBeforeOOM(false);
1259            StatisticHeapDetail();
1260            ThrowOutOfMemoryError(thread_, oldSpace_->GetMergeSize(), " OldSpace::Merge");
1261        }
1262        oldSpace_->ResetMergeSize();
1263        shouldThrowOOMError_ = false;
1264    }
1265    // Update record heap object size after gc if in sensitive status
1266    if (GetSensitiveStatus() == AppSensitiveStatus::ENTER_HIGH_SENSITIVE) {
1267        SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
1268    }
1269
1270    if (UNLIKELY(ShouldVerifyHeap())) { // LCOV_EXCL_BR_LINE
1271        // verify post gc heap verify
1272        LOG_ECMA(DEBUG) << "post gc heap verify";
1273        Verification(this, VerifyKind::VERIFY_POST_GC).VerifyAll();
1274    }
1275
1276    // Weak node nativeFinalizeCallback may execute JS and change the weakNodeList status,
1277    // even lead to another GC, so this have to invoke after this GC process.
1278    thread_->InvokeWeakNodeNativeFinalizeCallback();
1279    // PostTask for ProcessNativeDelete
1280    CleanCallBack();
1281
1282    JSFinalizationRegistry::CheckAndCall(thread_);
1283#if defined(ECMASCRIPT_SUPPORT_TRACING)
1284    auto tracing = GetEcmaVM()->GetTracing();
1285    if (tracing != nullptr) {
1286        tracing->TraceEventRecordMemory();
1287    }
1288#endif
1289    ProcessGCListeners();
1290
1291#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
1292    if (!hasOOMDump_ && (g_betaVersion || g_developMode)) {
1293        ThresholdReachedDump();
1294    }
1295#endif
1296
1297    if (GetEcmaVM()->IsEnableBaselineJit() || GetEcmaVM()->IsEnableFastJit()) {
1298        // check machine code space if enough
1299        int remainSize = static_cast<int>(config_.GetDefaultMachineCodeSpaceSize()) -
1300            static_cast<int>(GetMachineCodeSpace()->GetHeapObjectSize());
1301        Jit::GetInstance()->CheckMechineCodeSpaceMemory(GetEcmaVM()->GetJSThread(), remainSize);
1302    }
1303}
1304
1305void BaseHeap::ThrowOutOfMemoryError(JSThread *thread, size_t size, std::string functionName,
1306    bool NonMovableObjNearOOM)
1307{ // LCOV_EXCL_START
1308    GetEcmaGCStats()->PrintGCMemoryStatistic();
1309    std::ostringstream oss;
1310    if (NonMovableObjNearOOM) {
1311        oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1312            << " function name: " << functionName.c_str();
1313    } else {
1314        oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1315            << functionName.c_str();
1316    }
1317    LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1318    THROW_OOM_ERROR(thread, oss.str().c_str());
1319} // LCOV_EXCL_STOP
1320
1321void BaseHeap::SetMachineCodeOutOfMemoryError(JSThread *thread, size_t size, std::string functionName)
1322{
1323    std::ostringstream oss;
1324    oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: "
1325        << functionName.c_str();
1326    LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1327
1328    EcmaVM *ecmaVm = thread->GetEcmaVM();
1329    ObjectFactory *factory = ecmaVm->GetFactory();
1330    JSHandle<JSObject> error = factory->GetJSError(ErrorType::OOM_ERROR, oss.str().c_str(), StackCheck::NO);
1331    thread->SetException(error.GetTaggedValue());
1332}
1333
1334void BaseHeap::SetAppFreezeFilterCallback(AppFreezeFilterCallback cb)
1335{
1336    if (cb != nullptr) {
1337        appfreezeCallback_ = cb;
1338    }
1339}
1340
1341void BaseHeap::ThrowOutOfMemoryErrorForDefault(JSThread *thread, size_t size, std::string functionName,
1342    bool NonMovableObjNearOOM)
1343{ // LCOV_EXCL_START
1344    GetEcmaGCStats()->PrintGCMemoryStatistic();
1345    std::ostringstream oss;
1346    if (NonMovableObjNearOOM) {
1347        oss << "OutOfMemory when nonmovable live obj size: " << size << " bytes"
1348            << " function name: " << functionName.c_str();
1349    } else {
1350        oss << "OutOfMemory when trying to allocate " << size << " bytes" << " function name: " << functionName.c_str();
1351    }
1352    LOG_ECMA_MEM(ERROR) << oss.str().c_str();
1353    EcmaVM *ecmaVm = thread->GetEcmaVM();
1354    JSHandle<GlobalEnv> env = ecmaVm->GetGlobalEnv();
1355    JSHandle<JSObject> error = JSHandle<JSObject>::Cast(env->GetOOMErrorObject());
1356
1357    thread->SetException(error.GetTaggedValue());
1358    ecmaVm->HandleUncatchableError();
1359} // LCOV_EXCL_STOP
1360
1361void BaseHeap::FatalOutOfMemoryError(size_t size, std::string functionName)
1362{ // LCOV_EXCL_START
1363    GetEcmaGCStats()->PrintGCMemoryStatistic();
1364    LOG_ECMA_MEM(FATAL) << "OOM fatal when trying to allocate " << size << " bytes"
1365                        << " function name: " << functionName.c_str();
1366} // LCOV_EXCL_STOP
1367
1368void Heap::CheckNonMovableSpaceOOM()
1369{
1370    if (nonMovableSpace_->GetHeapObjectSize() > MAX_NONMOVABLE_LIVE_OBJ_SIZE) {
1371        sweeper_->EnsureAllTaskFinished();
1372        DumpHeapSnapshotBeforeOOM(false);
1373        StatisticHeapDetail();
1374        ThrowOutOfMemoryError(thread_, nonMovableSpace_->GetHeapObjectSize(), "Heap::CheckNonMovableSpaceOOM", true);
1375    }
1376}
1377
1378void Heap::AdjustBySurvivalRate(size_t originalNewSpaceSize)
1379{
1380    promotedSize_ = GetEvacuator()->GetPromotedSize();
1381    edenToYoungSize_ = GetEvacuator()->GetEdenToYoungSize();
1382    if (originalNewSpaceSize <= 0) {
1383        return;
1384    }
1385    semiSpaceCopiedSize_ = IsEdenMark() ? edenToYoungSize_ : activeSemiSpace_->GetHeapObjectSize();
1386    double copiedRate = semiSpaceCopiedSize_ * 1.0 / originalNewSpaceSize;
1387    double promotedRate = promotedSize_ * 1.0 / originalNewSpaceSize;
1388    double survivalRate = std::min(copiedRate + promotedRate, 1.0);
1389    OPTIONAL_LOG(ecmaVm_, INFO) << " copiedRate: " << copiedRate << " promotedRate: " << promotedRate
1390                                << " survivalRate: " << survivalRate;
1391    if (IsEdenMark()) {
1392        memController_->AddEdenSurvivalRate(survivalRate);
1393        return;
1394    }
1395    if (!oldSpaceLimitAdjusted_) {
1396        memController_->AddSurvivalRate(survivalRate);
1397        AdjustOldSpaceLimit();
1398    } else {
1399        double averageSurvivalRate = memController_->GetAverageSurvivalRate();
1400        // 2 means half
1401        if ((averageSurvivalRate / 2) > survivalRate && averageSurvivalRate > GROW_OBJECT_SURVIVAL_RATE) {
1402            SetFullMarkRequestedState(true);
1403            OPTIONAL_LOG(ecmaVm_, INFO) << " Current survival rate: " << survivalRate
1404                << " is less than half the average survival rates: " << averageSurvivalRate
1405                << ". Trigger full mark next time.";
1406            // Survival rate of full mark is precise. Reset recorded survival rates.
1407            memController_->ResetRecordedSurvivalRates();
1408        }
1409        memController_->AddSurvivalRate(survivalRate);
1410    }
1411}
1412
1413size_t Heap::VerifyHeapObjects(VerifyKind verifyKind) const
1414{
1415    size_t failCount = 0;
1416    {
1417        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1418        activeSemiSpace_->IterateOverObjects(verifier);
1419    }
1420
1421    {
1422        if (verifyKind == VerifyKind::VERIFY_EVACUATE_YOUNG ||
1423            verifyKind == VerifyKind::VERIFY_EVACUATE_OLD ||
1424            verifyKind == VerifyKind::VERIFY_EVACUATE_FULL) {
1425                inactiveSemiSpace_->EnumerateRegions([this](Region *region) {
1426                    region->IterateAllMarkedBits([this](void *addr) {
1427                        VerifyObjectVisitor::VerifyInactiveSemiSpaceMarkedObject(this, addr);
1428                    });
1429                });
1430            }
1431    }
1432
1433    {
1434        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1435        oldSpace_->IterateOverObjects(verifier);
1436    }
1437
1438    {
1439        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1440        appSpawnSpace_->IterateOverMarkedObjects(verifier);
1441    }
1442
1443    {
1444        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1445        nonMovableSpace_->IterateOverObjects(verifier);
1446    }
1447
1448    {
1449        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1450        hugeObjectSpace_->IterateOverObjects(verifier);
1451    }
1452    {
1453        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1454        hugeMachineCodeSpace_->IterateOverObjects(verifier);
1455    }
1456    {
1457        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1458        machineCodeSpace_->IterateOverObjects(verifier);
1459    }
1460    {
1461        VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1462        snapshotSpace_->IterateOverObjects(verifier);
1463    }
1464    return failCount;
1465}
1466
1467size_t Heap::VerifyOldToNewRSet(VerifyKind verifyKind) const
1468{
1469    size_t failCount = 0;
1470    VerifyObjectVisitor verifier(this, &failCount, verifyKind);
1471    oldSpace_->IterateOldToNewOverObjects(verifier);
1472    appSpawnSpace_->IterateOldToNewOverObjects(verifier);
1473    nonMovableSpace_->IterateOldToNewOverObjects(verifier);
1474    machineCodeSpace_->IterateOldToNewOverObjects(verifier);
1475    return failCount;
1476}
1477
1478void Heap::AdjustOldSpaceLimit()
1479{
1480    if (oldSpaceLimitAdjusted_) {
1481        return;
1482    }
1483    size_t minGrowingStep = ecmaVm_->GetEcmaParamConfiguration().GetMinGrowingStep();
1484    size_t oldSpaceAllocLimit = GetOldSpace()->GetInitialCapacity();
1485    size_t newOldSpaceAllocLimit = std::max(oldSpace_->GetHeapObjectSize() + minGrowingStep,
1486        static_cast<size_t>(oldSpaceAllocLimit * memController_->GetAverageSurvivalRate()));
1487    if (newOldSpaceAllocLimit <= oldSpaceAllocLimit) {
1488        GetOldSpace()->SetInitialCapacity(newOldSpaceAllocLimit);
1489    } else {
1490        oldSpaceLimitAdjusted_ = true;
1491    }
1492
1493    size_t newGlobalSpaceAllocLimit = std::max(GetHeapObjectSize() + minGrowingStep,
1494        static_cast<size_t>(globalSpaceAllocLimit_ * memController_->GetAverageSurvivalRate()));
1495    if (newGlobalSpaceAllocLimit < globalSpaceAllocLimit_) {
1496        globalSpaceAllocLimit_ = newGlobalSpaceAllocLimit;
1497    }
1498    OPTIONAL_LOG(ecmaVm_, INFO) << "AdjustOldSpaceLimit oldSpaceAllocLimit_: " << oldSpaceAllocLimit
1499        << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_;
1500}
1501
1502void BaseHeap::OnAllocateEvent([[maybe_unused]] EcmaVM *ecmaVm, [[maybe_unused]] TaggedObject* address,
1503                               [[maybe_unused]] size_t size)
1504{
1505#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1506    HeapProfilerInterface *profiler = ecmaVm->GetHeapProfile();
1507    if (profiler != nullptr) {
1508        base::BlockHookScope blockScope;
1509        profiler->AllocationEvent(address, size);
1510    }
1511#endif
1512}
1513
1514void Heap::DumpHeapSnapshotBeforeOOM([[maybe_unused]] bool isFullGC)
1515{
1516#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT)
1517#if defined(ENABLE_DUMP_IN_FAULTLOG)
1518    if (ecmaVm_->GetHeapProfile() != nullptr) {
1519        return;
1520    }
1521    // Filter appfreeze when dump.
1522    LOG_ECMA(INFO) << " DumpHeapSnapshotBeforeOOM, isFullGC" << isFullGC;
1523    base::BlockHookScope blockScope;
1524    HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
1525    if (appfreezeCallback_ != nullptr && appfreezeCallback_(getprocpid())) {
1526        LOG_ECMA(INFO) << " DumpHeapSnapshotBeforeOOM Success. ";
1527    }
1528#ifdef ENABLE_HISYSEVENT
1529    GetEcmaGCKeyStats()->SendSysEventBeforeDump("OOMDump", GetHeapLimitSize(), GetLiveObjectSize());
1530    hasOOMDump_ = true;
1531#endif
1532    // Vm should always allocate young space successfully. Really OOM will occur in the non-young spaces.
1533    DumpSnapShotOption dumpOption;
1534    dumpOption.dumpFormat = DumpFormat::BINARY;
1535    dumpOption.isVmMode = true;
1536    dumpOption.isPrivate = false;
1537    dumpOption.captureNumericValue = false;
1538    dumpOption.isFullGC = isFullGC;
1539    dumpOption.isSimplify = true;
1540    dumpOption.isSync = true;
1541    dumpOption.isBeforeFill = false;
1542    dumpOption.isDumpOOM = true;
1543    heapProfile->DumpHeapSnapshot(dumpOption);
1544    HeapProfilerInterface::Destroy(ecmaVm_);
1545#endif // ENABLE_DUMP_IN_FAULTLOG
1546#endif // ECMASCRIPT_SUPPORT_SNAPSHOT
1547}
1548
1549void Heap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
1550                       [[maybe_unused]] size_t size)
1551{
1552#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1553    HeapProfilerInterface *profiler = GetEcmaVM()->GetHeapProfile();
1554    if (profiler != nullptr) {
1555        base::BlockHookScope blockScope;
1556        profiler->MoveEvent(address, forwardAddress, size);
1557    }
1558#endif
1559}
1560
1561void SharedHeap::OnMoveEvent([[maybe_unused]] uintptr_t address, [[maybe_unused]] TaggedObject* forwardAddress,
1562                             [[maybe_unused]] size_t size)
1563{
1564#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
1565    Runtime::GetInstance()->GCIterateThreadListWithoutLock([&](JSThread *thread) {
1566        HeapProfilerInterface *profiler = thread->GetEcmaVM()->GetHeapProfile();
1567        if (profiler != nullptr) {
1568            base::BlockHookScope blockScope;
1569            profiler->MoveEvent(address, forwardAddress, size);
1570        }
1571    });
1572#endif
1573}
1574
1575void Heap::AdjustSpaceSizeForAppSpawn()
1576{
1577    SetHeapMode(HeapMode::SPAWN);
1578    size_t minSemiSpaceCapacity = config_.GetMinSemiSpaceSize();
1579    activeSemiSpace_->SetInitialCapacity(minSemiSpaceCapacity);
1580    auto committedSize = appSpawnSpace_->GetCommittedSize();
1581    appSpawnSpace_->SetInitialCapacity(committedSize);
1582    appSpawnSpace_->SetMaximumCapacity(committedSize);
1583    oldSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity() - committedSize);
1584    oldSpace_->SetMaximumCapacity(oldSpace_->GetMaximumCapacity() - committedSize);
1585}
1586
1587bool Heap::ShouldMoveToRoSpace(JSHClass *hclass, TaggedObject *object)
1588{
1589    return hclass->IsString() && !Region::ObjectAddressToRange(object)->InHugeObjectSpace();
1590}
1591
1592void Heap::AddAllocationInspectorToAllSpaces(AllocationInspector *inspector)
1593{
1594    ASSERT(inspector != nullptr);
1595    // activeSemiSpace_/inactiveSemiSpace_:
1596    // only add an inspector to activeSemiSpace_, and while sweeping for gc, inspector need be swept.
1597    activeSemiSpace_->AddAllocationInspector(inspector);
1598    // oldSpace_/compressSpace_:
1599    // only add an inspector to oldSpace_, and while sweeping for gc, inspector need be swept.
1600    oldSpace_->AddAllocationInspector(inspector);
1601    // readOnlySpace_ need not allocationInspector.
1602    // appSpawnSpace_ need not allocationInspector.
1603    nonMovableSpace_->AddAllocationInspector(inspector);
1604    machineCodeSpace_->AddAllocationInspector(inspector);
1605    hugeObjectSpace_->AddAllocationInspector(inspector);
1606    hugeMachineCodeSpace_->AddAllocationInspector(inspector);
1607}
1608
1609void Heap::ClearAllocationInspectorFromAllSpaces()
1610{
1611    edenSpace_->ClearAllocationInspector();
1612    activeSemiSpace_->ClearAllocationInspector();
1613    oldSpace_->ClearAllocationInspector();
1614    nonMovableSpace_->ClearAllocationInspector();
1615    machineCodeSpace_->ClearAllocationInspector();
1616    hugeObjectSpace_->ClearAllocationInspector();
1617    hugeMachineCodeSpace_->ClearAllocationInspector();
1618}
1619
1620void Heap::RecomputeLimits()
1621{
1622    double gcSpeed = memController_->CalculateMarkCompactSpeedPerMS();
1623    double mutatorSpeed = memController_->GetCurrentOldSpaceAllocationThroughputPerMS();
1624    size_t oldSpaceSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1625        hugeMachineCodeSpace_->GetHeapObjectSize();
1626    size_t newSpaceCapacity = activeSemiSpace_->GetInitialCapacity();
1627
1628    double growingFactor = memController_->CalculateGrowingFactor(gcSpeed, mutatorSpeed);
1629    size_t maxOldSpaceCapacity = oldSpace_->GetMaximumCapacity() - newSpaceCapacity;
1630    size_t newOldSpaceLimit = memController_->CalculateAllocLimit(oldSpaceSize, MIN_OLD_SPACE_LIMIT,
1631        maxOldSpaceCapacity, newSpaceCapacity, growingFactor);
1632    size_t maxGlobalSize = config_.GetMaxHeapSize() - newSpaceCapacity;
1633    size_t newGlobalSpaceLimit = memController_->CalculateAllocLimit(GetHeapObjectSize(), MIN_HEAP_SIZE,
1634                                                                     maxGlobalSize, newSpaceCapacity, growingFactor);
1635    globalSpaceAllocLimit_ = newGlobalSpaceLimit;
1636    oldSpace_->SetInitialCapacity(newOldSpaceLimit);
1637    globalSpaceNativeLimit_ = memController_->CalculateAllocLimit(GetGlobalNativeSize(), MIN_HEAP_SIZE,
1638                                                                  MAX_GLOBAL_NATIVE_LIMIT, newSpaceCapacity,
1639                                                                  growingFactor);
1640    OPTIONAL_LOG(ecmaVm_, INFO) << "RecomputeLimits oldSpaceAllocLimit_: " << newOldSpaceLimit
1641        << " globalSpaceAllocLimit_: " << globalSpaceAllocLimit_
1642        << " globalSpaceNativeLimit_:" << globalSpaceNativeLimit_;
1643    if ((oldSpace_->GetHeapObjectSize() * 1.0 / SHRINK_OBJECT_SURVIVAL_RATE) < oldSpace_->GetCommittedSize() &&
1644        (oldSpace_->GetCommittedSize() / 2) > newOldSpaceLimit) { // 2: means half
1645        OPTIONAL_LOG(ecmaVm_, INFO) << " Old space heap object size is too much lower than committed size"
1646                                    << " heapObjectSize: "<< oldSpace_->GetHeapObjectSize()
1647                                    << " Committed Size: " << oldSpace_->GetCommittedSize();
1648        SetFullMarkRequestedState(true);
1649    }
1650}
1651
1652bool Heap::CheckAndTriggerOldGC(size_t size)
1653{
1654    bool isFullMarking = IsConcurrentFullMark() && GetJSThread()->IsMarking();
1655    bool isNativeSizeLargeTrigger = isFullMarking ? false : GlobalNativeSizeLargerThanLimit();
1656    if (isFullMarking && oldSpace_->GetOvershootSize() == 0) {
1657        oldSpace_->SetOvershootSize(config_.GetOldSpaceStepOvershootSize());
1658    }
1659    if ((isNativeSizeLargeTrigger || OldSpaceExceedLimit() || OldSpaceExceedCapacity(size) ||
1660        GetHeapObjectSize() > globalSpaceAllocLimit_ + oldSpace_->GetOvershootSize()) &&
1661        !NeedStopCollection()) {
1662        if (isFullMarking && oldSpace_->GetOvershootSize() < config_.GetOldSpaceMaxOvershootSize()) {
1663            oldSpace_->IncreaseOvershootSize(config_.GetOldSpaceStepOvershootSize());
1664            return false;
1665        }
1666        CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_LIMIT);
1667        if (!oldGCRequested_) {
1668            return true;
1669        }
1670    }
1671    return false;
1672}
1673
1674bool Heap::CheckAndTriggerHintGC(MemoryReduceDegree degree, GCReason reason)
1675{
1676    if (InSensitiveStatus()) {
1677        return false;
1678    }
1679    LOG_GC(INFO) << "HintGC degree:"<< static_cast<int>(degree) << " reason:" << GCStats::GCReasonToString(reason);
1680    switch (degree) {
1681        case MemoryReduceDegree::LOW: {
1682            if (idleGCTrigger_->HintGCInLowDegree<Heap>(this)) {
1683                if (CheckCanTriggerConcurrentMarking()) {
1684                    markType_ = MarkType::MARK_FULL;
1685                    TriggerConcurrentMarking();
1686                    LOG_GC(INFO) << " MemoryReduceDegree::LOW TriggerConcurrentMark.";
1687                    return true;
1688                }
1689            }
1690            if (idleGCTrigger_->HintGCInLowDegree<SharedHeap>(sHeap_)) {
1691                if (sHeap_->CheckCanTriggerConcurrentMarking(thread_)) {
1692                    LOG_GC(INFO) << " MemoryReduceDegree::LOW TriggerSharedConcurrentMark.";
1693                    sHeap_->TriggerConcurrentMarking<TriggerGCType::SHARED_GC, GCReason::HINT_GC>(thread_);
1694                    return true;
1695                }
1696            }
1697            break;
1698        }
1699        case MemoryReduceDegree::MIDDLE: {
1700            if (idleGCTrigger_->HintGCInMiddleDegree<Heap>(this)) {
1701                CollectGarbage(TriggerGCType::FULL_GC, reason);
1702                return true;
1703            }
1704            if (idleGCTrigger_->HintGCInMiddleDegree<SharedHeap>(sHeap_)) {
1705                sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::HINT_GC>(thread_);
1706                return true;
1707            }
1708            break;
1709        }
1710        case MemoryReduceDegree::HIGH: {
1711            bool result = false;
1712            if (idleGCTrigger_->HintGCInHighDegree<Heap>(this)) {
1713                CollectGarbage(TriggerGCType::FULL_GC, reason);
1714                result = true;
1715            }
1716            if (idleGCTrigger_->HintGCInHighDegree<SharedHeap>(sHeap_)) {
1717                sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::HINT_GC>(thread_);
1718                result = true;
1719            }
1720            return result;
1721        }
1722        default:
1723            LOG_GC(INFO) << "HintGC invalid degree value: " << static_cast<int>(degree);
1724            break;
1725    }
1726    return false;
1727}
1728
1729bool Heap::CheckOngoingConcurrentMarking()
1730{
1731    if (concurrentMarker_->IsEnabled() && !thread_->IsReadyToConcurrentMark() &&
1732        concurrentMarker_->IsTriggeredConcurrentMark()) {
1733        TRACE_GC(GCStats::Scope::ScopeId::WaitConcurrentMarkFinished, GetEcmaVM()->GetEcmaGCStats());
1734        if (thread_->IsMarking()) {
1735            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "Heap::CheckOngoingConcurrentMarking");
1736            MEM_ALLOCATE_AND_GC_TRACE(ecmaVm_, WaitConcurrentMarkingFinished);
1737            GetNonMovableMarker()->ProcessMarkStack(MAIN_THREAD_INDEX);
1738            WaitConcurrentMarkingFinished();
1739        }
1740        WaitRunningTaskFinished();
1741        memController_->RecordAfterConcurrentMark(markType_, concurrentMarker_);
1742        return true;
1743    }
1744    return false;
1745}
1746
1747void Heap::ClearIdleTask()
1748{
1749    SetIdleTask(IdleTaskType::NO_TASK);
1750    idleTaskFinishTime_ = incrementalMarker_->GetCurrentTimeInMs();
1751}
1752
1753void Heap::TryTriggerIdleCollection()
1754{
1755    if (idleTask_ != IdleTaskType::NO_TASK || !GetJSThread()->IsReadyToConcurrentMark() || !enableIdleGC_) {
1756        return;
1757    }
1758    if (thread_->IsMarkFinished() && concurrentMarker_->IsTriggeredConcurrentMark()) {
1759        SetIdleTask(IdleTaskType::FINISH_MARKING);
1760        EnableNotifyIdle();
1761        CalculateIdleDuration();
1762        return;
1763    }
1764
1765    double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1766    double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1767    double newSpaceAllocToLimitDuration = (static_cast<double>(activeSemiSpace_->GetInitialCapacity()) -
1768                                           static_cast<double>(activeSemiSpace_->GetCommittedSize())) /
1769                                           newSpaceAllocSpeed;
1770    double newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1771    double newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
1772    // 2 means double
1773    if (newSpaceRemainSize < 2 * DEFAULT_REGION_SIZE) {
1774        SetIdleTask(IdleTaskType::YOUNG_GC);
1775        SetMarkType(MarkType::MARK_YOUNG);
1776        EnableNotifyIdle();
1777        CalculateIdleDuration();
1778        return;
1779    }
1780}
1781
1782void Heap::CalculateIdleDuration()
1783{
1784    size_t updateReferenceSpeed = 0;
1785    // clear native object duration
1786    size_t clearNativeObjSpeed = 0;
1787    if (markType_ == MarkType::MARK_EDEN) {
1788        updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_UPDATE_REFERENCE_SPEED);
1789        clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_CLEAR_NATIVE_OBJ_SPEED);
1790    } else if (markType_ == MarkType::MARK_YOUNG) {
1791        updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_UPDATE_REFERENCE_SPEED);
1792        clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_CLEAR_NATIVE_OBJ_SPEED);
1793    } else if (markType_ == MarkType::MARK_FULL) {
1794        updateReferenceSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::UPDATE_REFERENCE_SPEED);
1795        clearNativeObjSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_CLEAR_NATIVE_OBJ_SPEED);
1796    }
1797
1798    // update reference duration
1799    idlePredictDuration_ = 0.0f;
1800    if (updateReferenceSpeed != 0) {
1801        idlePredictDuration_ += (float)GetHeapObjectSize() / updateReferenceSpeed;
1802    }
1803
1804    if (clearNativeObjSpeed != 0) {
1805        idlePredictDuration_ += (float)GetNativePointerListSize() / clearNativeObjSpeed;
1806    }
1807
1808    // sweep and evacuate duration
1809    size_t edenEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::EDEN_EVACUATE_SPACE_SPEED);
1810    size_t youngEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::YOUNG_EVACUATE_SPACE_SPEED);
1811    double survivalRate = GetEcmaGCStats()->GetAvgSurvivalRate();
1812    if (markType_ == MarkType::MARK_EDEN && edenEvacuateSpeed != 0) {
1813        idlePredictDuration_ += survivalRate * edenSpace_->GetHeapObjectSize() / edenEvacuateSpeed;
1814    } else if (markType_ == MarkType::MARK_YOUNG && youngEvacuateSpeed != 0) {
1815        idlePredictDuration_ += (activeSemiSpace_->GetHeapObjectSize() + edenSpace_->GetHeapObjectSize()) *
1816            survivalRate / youngEvacuateSpeed;
1817    } else if (markType_ == MarkType::MARK_FULL) {
1818        size_t sweepSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::SWEEP_SPEED);
1819        size_t oldEvacuateSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::OLD_EVACUATE_SPACE_SPEED);
1820        if (sweepSpeed != 0) {
1821            idlePredictDuration_ += (float)GetHeapObjectSize() / sweepSpeed;
1822        }
1823        if (oldEvacuateSpeed != 0) {
1824            size_t collectRegionSetSize = GetEcmaGCStats()->GetRecordData(
1825                RecordData::COLLECT_REGION_SET_SIZE);
1826            idlePredictDuration_ += (survivalRate * activeSemiSpace_->GetHeapObjectSize() + collectRegionSetSize) /
1827                                    oldEvacuateSpeed;
1828        }
1829    }
1830
1831    // Idle YoungGC mark duration
1832    size_t markSpeed = GetEcmaGCStats()->GetGCSpeed(SpeedData::MARK_SPEED);
1833    if (idleTask_ == IdleTaskType::YOUNG_GC && markSpeed != 0) {
1834        idlePredictDuration_ += (float)activeSemiSpace_->GetHeapObjectSize() / markSpeed;
1835    }
1836    OPTIONAL_LOG(ecmaVm_, INFO) << "Predict idle gc pause: " << idlePredictDuration_ << "ms";
1837}
1838
1839void Heap::TryTriggerIncrementalMarking()
1840{
1841    if (!GetJSThread()->IsReadyToConcurrentMark() || idleTask_ != IdleTaskType::NO_TASK || !enableIdleGC_) {
1842        return;
1843    }
1844    size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1845    size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1846        hugeMachineCodeSpace_->GetHeapObjectSize();
1847    double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1848    double oldSpaceIncrementalMarkSpeed = incrementalMarker_->GetAverageIncrementalMarkingSpeed();
1849    double oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1850    double oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceIncrementalMarkSpeed;
1851
1852    double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1853    // mark finished before allocate limit
1854    if ((oldSpaceRemainSize < DEFAULT_REGION_SIZE) || GetHeapObjectSize() >= globalSpaceAllocLimit_) {
1855        // The object allocated in incremental marking should lower than limit,
1856        // otherwise select trigger concurrent mark.
1857        size_t allocateSize = oldSpaceAllocSpeed * oldSpaceMarkDuration;
1858        if (allocateSize < ALLOCATE_SIZE_LIMIT) {
1859            EnableNotifyIdle();
1860            SetIdleTask(IdleTaskType::INCREMENTAL_MARK);
1861        }
1862    }
1863}
1864
1865bool Heap::CheckCanTriggerConcurrentMarking()
1866{
1867    return concurrentMarker_->IsEnabled() && thread_->IsReadyToConcurrentMark() &&
1868        !incrementalMarker_->IsTriggeredIncrementalMark() &&
1869        (idleTask_ == IdleTaskType::NO_TASK || idleTask_ == IdleTaskType::YOUNG_GC);
1870}
1871
1872void Heap::TryTriggerConcurrentMarking()
1873{
1874    // When concurrent marking is enabled, concurrent marking will be attempted to trigger.
1875    // When the size of old space or global space reaches the limit, isFullMarkNeeded will be set to true.
1876    // If the predicted duration of current full mark may not result in the new and old spaces reaching their limit,
1877    // full mark will be triggered.
1878    // In the same way, if the size of the new space reaches the capacity, and the predicted duration of current
1879    // young mark may not result in the new space reaching its limit, young mark can be triggered.
1880    // If it spends much time in full mark, the compress full GC will be requested when the spaces reach the limit.
1881    // If the global space is larger than half max heap size, we will turn to use full mark and trigger partial GC.
1882    if (!CheckCanTriggerConcurrentMarking()) {
1883        return;
1884    }
1885    if (fullMarkRequested_) {
1886        markType_ = MarkType::MARK_FULL;
1887        OPTIONAL_LOG(ecmaVm_, INFO) << " fullMarkRequested, trigger full mark.";
1888        TriggerConcurrentMarking();
1889        return;
1890    }
1891    double oldSpaceMarkDuration = 0, newSpaceMarkDuration = 0, newSpaceRemainSize = 0, newSpaceAllocToLimitDuration = 0,
1892           oldSpaceAllocToLimitDuration = 0;
1893    double oldSpaceAllocSpeed = memController_->GetOldSpaceAllocationThroughputPerMS();
1894    double oldSpaceConcurrentMarkSpeed = memController_->GetFullSpaceConcurrentMarkSpeedPerMS();
1895    size_t oldSpaceHeapObjectSize = oldSpace_->GetHeapObjectSize() + hugeObjectSpace_->GetHeapObjectSize() +
1896        hugeMachineCodeSpace_->GetHeapObjectSize();
1897    size_t globalHeapObjectSize = GetHeapObjectSize();
1898    size_t oldSpaceAllocLimit = oldSpace_->GetInitialCapacity();
1899    if (oldSpaceConcurrentMarkSpeed == 0 || oldSpaceAllocSpeed == 0) {
1900        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1901            GlobalNativeSizeLargerThanLimit()) {
1902            markType_ = MarkType::MARK_FULL;
1903            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first full mark";
1904            TriggerConcurrentMarking();
1905            return;
1906        }
1907    } else {
1908        if (oldSpaceHeapObjectSize >= oldSpaceAllocLimit || globalHeapObjectSize >= globalSpaceAllocLimit_ ||
1909            GlobalNativeSizeLargerThanLimit()) {
1910            markType_ = MarkType::MARK_FULL;
1911            TriggerConcurrentMarking();
1912            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1913            return;
1914        }
1915        oldSpaceAllocToLimitDuration = (oldSpaceAllocLimit - oldSpaceHeapObjectSize) / oldSpaceAllocSpeed;
1916        oldSpaceMarkDuration = GetHeapObjectSize() / oldSpaceConcurrentMarkSpeed;
1917        // oldSpaceRemainSize means the predicted size which can be allocated after the full concurrent mark.
1918        double oldSpaceRemainSize = (oldSpaceAllocToLimitDuration - oldSpaceMarkDuration) * oldSpaceAllocSpeed;
1919        if (oldSpaceRemainSize > 0 && oldSpaceRemainSize < DEFAULT_REGION_SIZE) {
1920            markType_ = MarkType::MARK_FULL;
1921            TriggerConcurrentMarking();
1922            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger full mark";
1923            return;
1924        }
1925    }
1926
1927    double newSpaceAllocSpeed = memController_->GetNewSpaceAllocationThroughputPerMS();
1928    double newSpaceConcurrentMarkSpeed = memController_->GetNewSpaceConcurrentMarkSpeedPerMS();
1929    if (newSpaceConcurrentMarkSpeed == 0 || newSpaceAllocSpeed == 0) {
1930        if (activeSemiSpace_->GetCommittedSize() >= config_.GetSemiSpaceTriggerConcurrentMark()) {
1931            markType_ = MarkType::MARK_YOUNG;
1932            TriggerConcurrentMarking();
1933            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first semi mark" << fullGCRequested_;
1934        }
1935        return;
1936    }
1937    size_t semiSpaceCapacity = activeSemiSpace_->GetInitialCapacity() + activeSemiSpace_->GetOvershootSize();
1938    size_t semiSpaceCommittedSize = activeSemiSpace_->GetCommittedSize();
1939    bool triggerMark = semiSpaceCapacity <= semiSpaceCommittedSize;
1940    if (!triggerMark) {
1941        newSpaceAllocToLimitDuration = (semiSpaceCapacity - semiSpaceCommittedSize) / newSpaceAllocSpeed;
1942        newSpaceMarkDuration = activeSemiSpace_->GetHeapObjectSize() / newSpaceConcurrentMarkSpeed;
1943        // newSpaceRemainSize means the predicted size which can be allocated after the semi concurrent mark.
1944        newSpaceRemainSize = (newSpaceAllocToLimitDuration - newSpaceMarkDuration) * newSpaceAllocSpeed;
1945        triggerMark = newSpaceRemainSize < DEFAULT_REGION_SIZE;
1946    }
1947
1948    if (triggerMark) {
1949        markType_ = MarkType::MARK_YOUNG;
1950        TriggerConcurrentMarking();
1951        OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger semi mark";
1952        return;
1953    }
1954
1955    if (!enableEdenGC_ || IsInBackground()) {
1956        return;
1957    }
1958
1959    double edenSurvivalRate = memController_->GetAverageEdenSurvivalRate();
1960    double survivalRate = memController_->GetAverageSurvivalRate();
1961    constexpr double expectMaxSurvivalRate = 0.4;
1962    if ((edenSurvivalRate == 0 || edenSurvivalRate >= expectMaxSurvivalRate) && survivalRate >= expectMaxSurvivalRate) {
1963        return;
1964    }
1965
1966    double edenSpaceAllocSpeed = memController_->GetEdenSpaceAllocationThroughputPerMS();
1967    double edenSpaceConcurrentMarkSpeed = memController_->GetEdenSpaceConcurrentMarkSpeedPerMS();
1968    if (edenSpaceConcurrentMarkSpeed == 0 || edenSpaceAllocSpeed == 0) {
1969        auto &config = ecmaVm_->GetEcmaParamConfiguration();
1970        if (edenSpace_->GetCommittedSize() >= config.GetEdenSpaceTriggerConcurrentMark()) {
1971            markType_ = MarkType::MARK_EDEN;
1972            TriggerConcurrentMarking();
1973            OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger the first eden mark " << fullGCRequested_;
1974        }
1975        return;
1976    }
1977
1978    auto &config = ecmaVm_->GetEcmaParamConfiguration();
1979    size_t edenCommittedSize = edenSpace_->GetCommittedSize();
1980    triggerMark = edenCommittedSize >= config.GetEdenSpaceTriggerConcurrentMark();
1981    if (!triggerMark && edenSpaceAllocSpeed != 0 && edenSpaceConcurrentMarkSpeed != 0 &&
1982            edenSpace_->GetHeapObjectSize() > 0) {
1983        double edenSpaceLimit = edenSpace_->GetInitialCapacity();
1984        double edenSpaceAllocToLimitDuration = (edenSpaceLimit - edenCommittedSize) / edenSpaceAllocSpeed;
1985        double edenSpaceMarkDuration = edenSpace_->GetHeapObjectSize() / edenSpaceConcurrentMarkSpeed;
1986        double edenSpaceRemainSize = (edenSpaceAllocToLimitDuration - edenSpaceMarkDuration) * newSpaceAllocSpeed;
1987        triggerMark = edenSpaceRemainSize < DEFAULT_REGION_SIZE;
1988    }
1989
1990    if (triggerMark) {
1991        markType_ = MarkType::MARK_EDEN;
1992        TriggerConcurrentMarking();
1993        OPTIONAL_LOG(ecmaVm_, INFO) << "Trigger eden mark";
1994    }
1995}
1996
1997void Heap::TryTriggerFullMarkOrGCByNativeSize()
1998{
1999    // In high sensitive scene and native size larger than limit, trigger old gc directly
2000    if (InSensitiveStatus() && GlobalNativeSizeLargerToTriggerGC()) {
2001        CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
2002    } else if (GlobalNativeSizeLargerThanLimit()) {
2003        if (concurrentMarker_->IsEnabled()) {
2004            SetFullMarkRequestedState(true);
2005            TryTriggerConcurrentMarking();
2006        } else {
2007            CheckAndTriggerOldGC();
2008        }
2009    }
2010}
2011
2012bool Heap::TryTriggerFullMarkBySharedLimit()
2013{
2014    bool keepFullMarkRequest = false;
2015    if (concurrentMarker_->IsEnabled()) {
2016        if (!CheckCanTriggerConcurrentMarking()) {
2017            return keepFullMarkRequest;
2018        }
2019        markType_ = MarkType::MARK_FULL;
2020        if (ConcurrentMarker::TryIncreaseTaskCounts()) {
2021            concurrentMarker_->Mark();
2022        } else {
2023            // need retry full mark request again.
2024            keepFullMarkRequest = true;
2025        }
2026    }
2027    return keepFullMarkRequest;
2028}
2029
2030void Heap::CheckAndTriggerTaskFinishedGC()
2031{
2032    size_t objectSizeOfTaskBegin = GetRecordObjectSize();
2033    size_t objectSizeOfTaskFinished = GetHeapObjectSize();
2034    size_t nativeSizeOfTaskBegin = GetRecordNativeSize();
2035    size_t nativeSizeOfTaskFinished = GetGlobalNativeSize();
2036    // GC would be triggered when heap size increase more than Max(20M, 10%*SizeOfTaskBegin)
2037    bool objectSizeFlag = objectSizeOfTaskFinished > objectSizeOfTaskBegin &&
2038        objectSizeOfTaskFinished - objectSizeOfTaskBegin > std::max(TRIGGER_OLDGC_OBJECT_SIZE_LIMIT,
2039            TRIGGER_OLDGC_OBJECT_LIMIT_RATE * objectSizeOfTaskBegin);
2040    bool nativeSizeFlag = nativeSizeOfTaskFinished > nativeSizeOfTaskBegin &&
2041        nativeSizeOfTaskFinished - nativeSizeOfTaskBegin > std::max(TRIGGER_OLDGC_NATIVE_SIZE_LIMIT,
2042            TRIGGER_OLDGC_NATIVE_LIMIT_RATE * nativeSizeOfTaskBegin);
2043    if (objectSizeFlag || nativeSizeFlag) {
2044        CollectGarbage(TriggerGCType::OLD_GC, GCReason::TRIGGER_BY_TASKPOOL);
2045        RecordOrResetObjectSize(0);
2046        RecordOrResetNativeSize(0);
2047    }
2048}
2049
2050bool Heap::IsMarking() const
2051{
2052    return thread_->IsMarking();
2053}
2054
2055void Heap::TryTriggerFullMarkBySharedSize(size_t size)
2056{
2057    newAllocatedSharedObjectSize_ += size;
2058    if (newAllocatedSharedObjectSize_ >= NEW_ALLOCATED_SHARED_OBJECT_SIZE_LIMIT) {
2059        if (concurrentMarker_->IsEnabled()) {
2060            SetFullMarkRequestedState(true);
2061            TryTriggerConcurrentMarking();
2062            newAllocatedSharedObjectSize_ = 0;
2063        }
2064    }
2065}
2066
2067bool Heap::IsReadyToConcurrentMark() const
2068{
2069    return thread_->IsReadyToConcurrentMark();
2070}
2071
2072void Heap::IncreaseNativeBindingSize(JSNativePointer *object)
2073{
2074    size_t size = object->GetBindingSize();
2075    if (size == 0) {
2076        return;
2077    }
2078    nativeBindingSize_ += size;
2079}
2080
2081void Heap::IncreaseNativeBindingSize(size_t size)
2082{
2083    if (size == 0) {
2084        return;
2085    }
2086    nativeBindingSize_ += size;
2087}
2088
2089void Heap::DecreaseNativeBindingSize(size_t size)
2090{
2091    ASSERT(size <= nativeBindingSize_);
2092    nativeBindingSize_ -= size;
2093}
2094
2095void Heap::PrepareRecordRegionsForReclaim()
2096{
2097    activeSemiSpace_->SetRecordRegion();
2098    oldSpace_->SetRecordRegion();
2099    snapshotSpace_->SetRecordRegion();
2100    nonMovableSpace_->SetRecordRegion();
2101    hugeObjectSpace_->SetRecordRegion();
2102    machineCodeSpace_->SetRecordRegion();
2103    hugeMachineCodeSpace_->SetRecordRegion();
2104}
2105
2106void Heap::TriggerConcurrentMarking()
2107{
2108    ASSERT(idleTask_ != IdleTaskType::INCREMENTAL_MARK);
2109    if (idleTask_ == IdleTaskType::YOUNG_GC && IsConcurrentFullMark()) {
2110        ClearIdleTask();
2111        DisableNotifyIdle();
2112    }
2113    if (concurrentMarker_->IsEnabled() && !fullGCRequested_ && ConcurrentMarker::TryIncreaseTaskCounts()) {
2114        concurrentMarker_->Mark();
2115    }
2116}
2117
2118void Heap::WaitAllTasksFinished()
2119{
2120    WaitRunningTaskFinished();
2121    sweeper_->EnsureAllTaskFinished();
2122    WaitClearTaskFinished();
2123    if (concurrentMarker_->IsEnabled() && thread_->IsMarking() && concurrentMarker_->IsTriggeredConcurrentMark()) {
2124        concurrentMarker_->WaitMarkingFinished();
2125    }
2126}
2127
2128void Heap::WaitConcurrentMarkingFinished()
2129{
2130    concurrentMarker_->WaitMarkingFinished();
2131}
2132
2133void Heap::PostParallelGCTask(ParallelGCTaskPhase gcTask)
2134{
2135    IncreaseTaskCount();
2136    Taskpool::GetCurrentTaskpool()->PostTask(
2137        std::make_unique<ParallelGCTask>(GetJSThread()->GetThreadId(), this, gcTask));
2138}
2139
2140void Heap::ChangeGCParams(bool inBackground)
2141{
2142    const double doubleOne = 1.0;
2143    inBackground_ = inBackground;
2144    if (inBackground) {
2145        LOG_GC(INFO) << "app is inBackground";
2146        if (GetHeapObjectSize() - heapAliveSizeAfterGC_ > BACKGROUND_GROW_LIMIT &&
2147            GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2148            doubleOne * GetHeapObjectSize() / GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2149            CollectGarbage(TriggerGCType::FULL_GC, GCReason::SWITCH_BACKGROUND);
2150        }
2151        if (sHeap_->GetHeapObjectSize() - sHeap_->GetHeapAliveSizeAfterGC() > BACKGROUND_GROW_LIMIT &&
2152            sHeap_->GetCommittedSize() >= MIN_BACKGROUNG_GC_LIMIT &&
2153            doubleOne * sHeap_->GetHeapObjectSize() / sHeap_->GetCommittedSize() <= MIN_OBJECT_SURVIVAL_RATE) {
2154            sHeap_->CollectGarbage<TriggerGCType::SHARED_FULL_GC, GCReason::SWITCH_BACKGROUND>(thread_);
2155        }
2156        if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2157            SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2158            LOG_GC(DEBUG) << "Heap Growing Type CONSERVATIVE";
2159        }
2160        concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::DISABLE);
2161        sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::DISABLE);
2162        maxMarkTaskCount_ = 1;
2163        maxEvacuateTaskCount_ = 1;
2164        Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::BACKGROUND);
2165    } else {
2166        LOG_GC(INFO) << "app is not inBackground";
2167        if (GetMemGrowingType() != MemGrowingType::PRESSURE) {
2168            SetMemGrowingType(MemGrowingType::HIGH_THROUGHPUT);
2169            LOG_GC(DEBUG) << "Heap Growing Type HIGH_THROUGHPUT";
2170        }
2171        concurrentMarker_->EnableConcurrentMarking(EnableConcurrentMarkType::ENABLE);
2172        sweeper_->EnableConcurrentSweep(EnableConcurrentSweepType::ENABLE);
2173        maxMarkTaskCount_ = std::min<size_t>(ecmaVm_->GetJSOptions().GetGcThreadNum(),
2174            Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() - 1);
2175        maxEvacuateTaskCount_ = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum();
2176        Taskpool::GetCurrentTaskpool()->SetThreadPriority(PriorityMode::FOREGROUND);
2177    }
2178}
2179
2180GCStats *Heap::GetEcmaGCStats()
2181{
2182    return ecmaVm_->GetEcmaGCStats();
2183}
2184
2185GCKeyStats *Heap::GetEcmaGCKeyStats()
2186{
2187    return ecmaVm_->GetEcmaGCKeyStats();
2188}
2189
2190JSObjectResizingStrategy *Heap::GetJSObjectResizingStrategy()
2191{
2192    return ecmaVm_->GetJSObjectResizingStrategy();
2193}
2194
2195void Heap::TriggerIdleCollection(int idleMicroSec)
2196{
2197    if (idleTask_ == IdleTaskType::NO_TASK) {
2198        if (incrementalMarker_->GetCurrentTimeInMs() - idleTaskFinishTime_ > IDLE_MAINTAIN_TIME) {
2199            DisableNotifyIdle();
2200        }
2201        return;
2202    }
2203
2204    // Incremental mark initialize and process
2205    if (idleTask_ == IdleTaskType::INCREMENTAL_MARK &&
2206        incrementalMarker_->GetIncrementalGCStates() != IncrementalGCStates::REMARK) {
2207        incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2208        if (incrementalMarker_->GetIncrementalGCStates() == IncrementalGCStates::REMARK) {
2209            CalculateIdleDuration();
2210        }
2211        return;
2212    }
2213
2214    if (idleMicroSec < idlePredictDuration_ && idleMicroSec < IDLE_TIME_LIMIT) {
2215        return;
2216    }
2217
2218    switch (idleTask_) {
2219        case IdleTaskType::FINISH_MARKING: {
2220            if (markType_ == MarkType::MARK_FULL) {
2221                CollectGarbage(TriggerGCType::OLD_GC, GCReason::IDLE);
2222            } else {
2223                CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2224            }
2225            break;
2226        }
2227        case IdleTaskType::YOUNG_GC:
2228            CollectGarbage(TriggerGCType::YOUNG_GC, GCReason::IDLE);
2229            break;
2230        case IdleTaskType::INCREMENTAL_MARK:
2231            incrementalMarker_->TriggerIncrementalMark(idleMicroSec);
2232            break;
2233        default: // LCOV_EXCL_BR_LINE
2234            break;
2235    }
2236    ClearIdleTask();
2237}
2238
2239void Heap::NotifyMemoryPressure(bool inHighMemoryPressure)
2240{
2241    if (inHighMemoryPressure) {
2242        LOG_GC(INFO) << "app is inHighMemoryPressure";
2243        SetMemGrowingType(MemGrowingType::PRESSURE);
2244    } else {
2245        LOG_GC(INFO) << "app is not inHighMemoryPressure";
2246        SetMemGrowingType(MemGrowingType::CONSERVATIVE);
2247    }
2248}
2249
2250void Heap::NotifyFinishColdStart(bool isMainThread)
2251{
2252    if (!FinishStartupEvent()) {
2253        return;
2254    }
2255    ASSERT(!OnStartupEvent());
2256    LOG_GC(INFO) << "SmartGC: finish app cold start";
2257
2258    // set overshoot size to increase gc threashold larger 8MB than current heap size.
2259    int64_t semiRemainSize =
2260        static_cast<int64_t>(GetNewSpace()->GetInitialCapacity() - GetNewSpace()->GetCommittedSize());
2261    int64_t overshootSize =
2262        static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - semiRemainSize;
2263    // overshoot size should be larger than 0.
2264    GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
2265
2266    if (isMainThread && CheckCanTriggerConcurrentMarking()) {
2267        TryTriggerConcurrentMarking();
2268    }
2269    GetEdenSpace()->AllowTryEnable();
2270}
2271
2272void Heap::NotifyFinishColdStartSoon()
2273{
2274    if (!OnStartupEvent()) {
2275        return;
2276    }
2277
2278    // post 2s task
2279    Taskpool::GetCurrentTaskpool()->PostTask(
2280        std::make_unique<FinishColdStartTask>(GetJSThread()->GetThreadId(), this));
2281}
2282
2283void Heap::NotifyHighSensitive(bool isStart)
2284{
2285    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "SmartGC: set high sensitive status: " + std::to_string(isStart));
2286    isStart ? SetSensitiveStatus(AppSensitiveStatus::ENTER_HIGH_SENSITIVE)
2287        : SetSensitiveStatus(AppSensitiveStatus::EXIT_HIGH_SENSITIVE);
2288    LOG_GC(DEBUG) << "SmartGC: set high sensitive status: " << isStart;
2289}
2290
2291bool Heap::HandleExitHighSensitiveEvent()
2292{
2293    AppSensitiveStatus status = GetSensitiveStatus();
2294    if (status == AppSensitiveStatus::EXIT_HIGH_SENSITIVE
2295        && CASSensitiveStatus(status, AppSensitiveStatus::NORMAL_SCENE) && !OnStartupEvent()) {
2296        // Set record heap obj size 0 after exit high senstive
2297        SetRecordHeapObjectSizeBeforeSensitive(0);
2298        // set overshoot size to increase gc threashold larger 8MB than current heap size.
2299        int64_t semiRemainSize =
2300            static_cast<int64_t>(GetNewSpace()->GetInitialCapacity() - GetNewSpace()->GetCommittedSize());
2301        int64_t overshootSize =
2302            static_cast<int64_t>(config_.GetOldSpaceStepOvershootSize()) - semiRemainSize;
2303        // overshoot size should be larger than 0.
2304        GetNewSpace()->SetOverShootSize(std::max(overshootSize, (int64_t)0));
2305
2306        // fixme: IncrementalMarking and IdleCollection is currently not enabled
2307        TryTriggerIncrementalMarking();
2308        TryTriggerIdleCollection();
2309        TryTriggerConcurrentMarking();
2310        return true;
2311    }
2312    return false;
2313}
2314
2315// On high sensitive scene, heap object size can reach to MaxHeapSize - 8M temporarily, 8M is reserved for
2316// concurrent mark
2317bool Heap::ObjectExceedMaxHeapSize() const
2318{
2319    size_t configMaxHeapSize = config_.GetMaxHeapSize();
2320    size_t overshootSize = config_.GetOldSpaceStepOvershootSize();
2321    return GetHeapObjectSize() > configMaxHeapSize - overshootSize;
2322}
2323
2324bool Heap::NeedStopCollection()
2325{
2326    // gc is not allowed during value serialize
2327    if (onSerializeEvent_) {
2328        return true;
2329    }
2330
2331    if (!InSensitiveStatus()) {
2332        return false;
2333    }
2334
2335    // During app cold start, gc threshold adjust to max heap size
2336    if (OnStartupEvent() && !ObjectExceedMaxHeapSize()) {
2337        return true;
2338    }
2339
2340    if (GetRecordHeapObjectSizeBeforeSensitive() == 0) {
2341        SetRecordHeapObjectSizeBeforeSensitive(GetHeapObjectSize());
2342    }
2343
2344    if (GetHeapObjectSize() < GetRecordHeapObjectSizeBeforeSensitive() + config_.GetIncObjSizeThresholdInSensitive()
2345        && !ObjectExceedMaxHeapSize()) {
2346        return true;
2347    }
2348
2349    OPTIONAL_LOG(ecmaVm_, INFO) << "SmartGC: heap obj size: " << GetHeapObjectSize()
2350        << " exceed sensitive gc threshold, have to trigger gc";
2351    return false;
2352}
2353
2354bool Heap::ParallelGCTask::Run(uint32_t threadIndex)
2355{
2356    // Synchronizes-with. Ensure that WorkManager::Initialize must be seen by MarkerThreads.
2357    ASSERT(heap_->GetWorkManager()->HasInitialized());
2358    while (!heap_->GetWorkManager()->HasInitialized());
2359    switch (taskPhase_) {
2360        case ParallelGCTaskPhase::SEMI_HANDLE_THREAD_ROOTS_TASK:
2361            heap_->GetSemiGCMarker()->MarkRoots(threadIndex);
2362            heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
2363            break;
2364        case ParallelGCTaskPhase::SEMI_HANDLE_SNAPSHOT_TASK:
2365            heap_->GetSemiGCMarker()->ProcessSnapshotRSet(threadIndex);
2366            break;
2367        case ParallelGCTaskPhase::SEMI_HANDLE_GLOBAL_POOL_TASK:
2368            heap_->GetSemiGCMarker()->ProcessMarkStack(threadIndex);
2369            break;
2370        case ParallelGCTaskPhase::OLD_HANDLE_GLOBAL_POOL_TASK:
2371            heap_->GetNonMovableMarker()->ProcessMarkStack(threadIndex);
2372            break;
2373        case ParallelGCTaskPhase::COMPRESS_HANDLE_GLOBAL_POOL_TASK:
2374            heap_->GetCompressGCMarker()->ProcessMarkStack(threadIndex);
2375            break;
2376        case ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK:
2377            heap_->GetConcurrentMarker()->ProcessConcurrentMarkTask(threadIndex);
2378            break;
2379        case ParallelGCTaskPhase::CONCURRENT_HANDLE_OLD_TO_NEW_TASK:
2380            heap_->GetNonMovableMarker()->ProcessOldToNew(threadIndex);
2381            break;
2382        default: // LOCV_EXCL_BR_LINE
2383            LOG_GC(FATAL) << "this branch is unreachable, type: " << static_cast<int>(taskPhase_);
2384            UNREACHABLE();
2385    }
2386    heap_->ReduceTaskCount();
2387    return true;
2388}
2389
2390bool Heap::AsyncClearTask::Run([[maybe_unused]] uint32_t threadIndex)
2391{
2392    heap_->ReclaimRegions(gcType_);
2393    return true;
2394}
2395
2396bool Heap::FinishColdStartTask::Run([[maybe_unused]] uint32_t threadIndex)
2397{
2398    std::this_thread::sleep_for(std::chrono::microseconds(2000000));  // 2000000 means 2s
2399    heap_->NotifyFinishColdStart(false);
2400    return true;
2401}
2402
2403void Heap::CleanCallBack()
2404{
2405    auto &concurrentCallbacks = this->GetEcmaVM()->GetConcurrentNativePointerCallbacks();
2406    if (!concurrentCallbacks.empty()) {
2407        Taskpool::GetCurrentTaskpool()->PostTask(
2408            std::make_unique<DeleteCallbackTask>(thread_->GetThreadId(), concurrentCallbacks)
2409        );
2410    }
2411    ASSERT(concurrentCallbacks.empty());
2412
2413    AsyncNativeCallbacksPack &asyncCallbacksPack = this->GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
2414    if (asyncCallbacksPack.Empty()) {
2415        ASSERT(asyncCallbacksPack.TotallyEmpty());
2416        return;
2417    }
2418    AsyncNativeCallbacksPack *asyncCallbacks = new AsyncNativeCallbacksPack();
2419    std::swap(*asyncCallbacks, asyncCallbacksPack);
2420    NativePointerTaskCallback asyncTaskCb = thread_->GetAsyncCleanTaskCallback();
2421    if (asyncTaskCb != nullptr && thread_->IsMainThreadFast() &&
2422        pendingAsyncNativeCallbackSize_ < asyncClearNativePointerThreshold_) {
2423        IncreasePendingAsyncNativeCallbackSize(asyncCallbacks->GetTotalBindingSize());
2424        asyncCallbacks->RegisterFinishNotify([this] (size_t bindingSize) {
2425            this->DecreasePendingAsyncNativeCallbackSize(bindingSize);
2426        });
2427        asyncTaskCb(asyncCallbacks);
2428    } else {
2429        ThreadNativeScope nativeScope(thread_);
2430        asyncCallbacks->ProcessAll();
2431        delete asyncCallbacks;
2432    }
2433    ASSERT(asyncCallbacksPack.TotallyEmpty());
2434}
2435
2436bool Heap::DeleteCallbackTask::Run([[maybe_unused]] uint32_t threadIndex)
2437{
2438    for (auto iter : nativePointerCallbacks_) {
2439        if (iter.first != nullptr) {
2440            iter.first(std::get<0>(iter.second),
2441                std::get<1>(iter.second), std::get<2>(iter.second)); // 2 is the param.
2442        }
2443    }
2444    return true;
2445}
2446
2447size_t Heap::GetArrayBufferSize() const
2448{
2449    size_t result = 0;
2450    sweeper_->EnsureAllTaskFinished();
2451    this->IterateOverObjects([&result](TaggedObject *obj) {
2452        JSHClass* jsClass = obj->GetClass();
2453        result += jsClass->IsArrayBuffer() ? jsClass->GetObjectSize() : 0;
2454    });
2455    return result;
2456}
2457
2458size_t Heap::GetLiveObjectSize() const
2459{
2460    size_t objectSize = 0;
2461    sweeper_->EnsureAllTaskFinished();
2462    this->IterateOverObjects([&objectSize]([[maybe_unused]] TaggedObject *obj) {
2463        objectSize += obj->GetClass()->SizeFromJSHClass(obj);
2464    });
2465    return objectSize;
2466}
2467
2468size_t Heap::GetHeapLimitSize() const
2469{
2470    // Obtains the theoretical upper limit of space that can be allocated to JS heap.
2471    return config_.GetMaxHeapSize();
2472}
2473
2474bool BaseHeap::IsAlive(TaggedObject *object) const
2475{
2476    if (!ContainObject(object)) {
2477        LOG_GC(ERROR) << "The region is already free";
2478        return false;
2479    }
2480
2481    bool isFree = object->GetClass() != nullptr && FreeObject::Cast(ToUintPtr(object))->IsFreeObject();
2482    if (isFree) {
2483        Region *region = Region::ObjectAddressToRange(object);
2484        LOG_GC(ERROR) << "The object " << object << " in "
2485                            << region->GetSpaceTypeName()
2486                            << " already free";
2487    }
2488    return !isFree;
2489}
2490
2491bool BaseHeap::ContainObject(TaggedObject *object) const
2492{
2493    /*
2494     * fixme: There's no absolutely safe appraoch to doing this, given that the region object is currently
2495     * allocated and maintained in the JS object heap. We cannot safely tell whether a region object
2496     * calculated from an object address is still valid or alive in a cheap way.
2497     * This will introduce inaccurate result to verify if an object is contained in the heap, and it may
2498     * introduce additional incorrect memory access issues.
2499     * Unless we can tolerate the performance impact of iterating the region list of each space and change
2500     * the implementation to that approach, don't rely on current implementation to get accurate result.
2501     */
2502    Region *region = Region::ObjectAddressToRange(object);
2503    return region->InHeapSpace();
2504}
2505
2506void Heap::PrintHeapInfo(TriggerGCType gcType) const
2507{
2508    OPTIONAL_LOG(ecmaVm_, INFO) << "-----------------------Statistic Heap Object------------------------";
2509    OPTIONAL_LOG(ecmaVm_, INFO) << "GC Reason:" << ecmaVm_->GetEcmaGCStats()->GCReasonToString()
2510                                << ";OnStartup:" << OnStartupEvent()
2511                                << ";OnHighSensitive:" << static_cast<int>(GetSensitiveStatus())
2512                                << ";ConcurrentMark Status:" << static_cast<int>(thread_->GetMarkStatus());
2513    OPTIONAL_LOG(ecmaVm_, INFO) << "Heap::CollectGarbage, gcType(" << gcType << "), Concurrent Mark("
2514                                << concurrentMarker_->IsEnabled() << "), Full Mark(" << IsConcurrentFullMark()
2515                                << ") Eden Mark(" << IsEdenMark() << ")";
2516    OPTIONAL_LOG(ecmaVm_, INFO) << "Eden(" << edenSpace_->GetHeapObjectSize() << "/" << edenSpace_->GetInitialCapacity()
2517                 << "), ActiveSemi(" << activeSemiSpace_->GetHeapObjectSize() << "/"
2518                 << activeSemiSpace_->GetInitialCapacity() << "), NonMovable(" << nonMovableSpace_->GetHeapObjectSize()
2519                 << "/" << nonMovableSpace_->GetCommittedSize() << "/" << nonMovableSpace_->GetInitialCapacity()
2520                 << "), Old(" << oldSpace_->GetHeapObjectSize() << "/" << oldSpace_->GetCommittedSize() << "/"
2521                 << oldSpace_->GetInitialCapacity() << "), HugeObject(" << hugeObjectSpace_->GetHeapObjectSize() << "/"
2522                 << hugeObjectSpace_->GetCommittedSize() << "/" << hugeObjectSpace_->GetInitialCapacity()
2523                 << "), ReadOnlySpace(" << readOnlySpace_->GetCommittedSize() << "/"
2524                 << readOnlySpace_->GetInitialCapacity() << "), AppspawnSpace(" << appSpawnSpace_->GetHeapObjectSize()
2525                 << "/" << appSpawnSpace_->GetCommittedSize() << "/" << appSpawnSpace_->GetInitialCapacity()
2526                 << "), GlobalLimitSize(" << globalSpaceAllocLimit_ << ").";
2527}
2528
2529void Heap::StatisticHeapObject(TriggerGCType gcType) const
2530{
2531    PrintHeapInfo(gcType);
2532#if ECMASCRIPT_ENABLE_HEAP_DETAIL_STATISTICS
2533    StatisticHeapDetail();
2534#endif
2535}
2536
2537void Heap::StatisticHeapDetail()
2538{
2539    Prepare();
2540    static const int JS_TYPE_LAST = static_cast<int>(JSType::TYPE_LAST);
2541    int typeCount[JS_TYPE_LAST] = { 0 };
2542    static const int MIN_COUNT_THRESHOLD = 1000;
2543
2544    nonMovableSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
2545        typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
2546    });
2547    for (int i = 0; i < JS_TYPE_LAST; i++) {
2548        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
2549            LOG_ECMA(INFO) << "NonMovable space type " << JSHClass::DumpJSType(JSType(i))
2550                           << " count:" << typeCount[i];
2551        }
2552        typeCount[i] = 0;
2553    }
2554
2555    oldSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
2556        typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
2557    });
2558    for (int i = 0; i < JS_TYPE_LAST; i++) {
2559        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
2560            LOG_ECMA(INFO) << "Old space type " << JSHClass::DumpJSType(JSType(i))
2561                           << " count:" << typeCount[i];
2562        }
2563        typeCount[i] = 0;
2564    }
2565
2566    activeSemiSpace_->IterateOverObjects([&typeCount] (TaggedObject *object) {
2567        typeCount[static_cast<int>(object->GetClass()->GetObjectType())]++;
2568    });
2569    for (int i = 0; i < JS_TYPE_LAST; i++) {
2570        if (typeCount[i] > MIN_COUNT_THRESHOLD) {
2571            LOG_ECMA(INFO) << "Active semi space type " << JSHClass::DumpJSType(JSType(i))
2572                           << " count:" << typeCount[i];
2573        }
2574        typeCount[i] = 0;
2575    }
2576}
2577
2578void Heap::UpdateWorkManager(WorkManager *workManager)
2579{
2580    concurrentMarker_->workManager_ = workManager;
2581    fullGC_->workManager_ = workManager;
2582    incrementalMarker_->workManager_ = workManager;
2583    nonMovableMarker_->workManager_ = workManager;
2584    semiGCMarker_->workManager_ = workManager;
2585    compressGCMarker_->workManager_ = workManager;
2586    partialGC_->workManager_ = workManager;
2587}
2588
2589MachineCode *Heap::GetMachineCodeObject(uintptr_t pc) const
2590{
2591    MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
2592    MachineCode *machineCode = reinterpret_cast<MachineCode*>(machineCodeSpace->GetMachineCodeObject(pc));
2593    if (machineCode != nullptr) {
2594        return machineCode;
2595    }
2596    HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
2597    return reinterpret_cast<MachineCode*>(hugeMachineCodeSpace->GetMachineCodeObject(pc));
2598}
2599
2600std::tuple<uint64_t, uint8_t *, int, kungfu::CalleeRegAndOffsetVec> Heap::CalCallSiteInfo(uintptr_t retAddr) const
2601{
2602    MachineCodeSpace *machineCodeSpace = GetMachineCodeSpace();
2603    MachineCode *code = nullptr;
2604    // 1. find return
2605    // 2. gc
2606    machineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
2607        if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
2608            return;
2609        }
2610        if (MachineCode::Cast(obj)->IsInText(retAddr)) {
2611            code = MachineCode::Cast(obj);
2612            return;
2613        }
2614    });
2615    if (code == nullptr) {
2616        HugeMachineCodeSpace *hugeMachineCodeSpace = GetHugeMachineCodeSpace();
2617        hugeMachineCodeSpace->IterateOverObjects([&code, &retAddr](TaggedObject *obj) {
2618            if (code != nullptr || !JSTaggedValue(obj).IsMachineCodeObject()) {
2619                return;
2620            }
2621            if (MachineCode::Cast(obj)->IsInText(retAddr)) {
2622                code = MachineCode::Cast(obj);
2623                return;
2624            }
2625        });
2626    }
2627
2628    if (code == nullptr ||
2629        (code->GetPayLoadSizeInBytes() ==
2630         code->GetInstructionsSize() + code->GetStackMapOrOffsetTableSize())) { // baseline code
2631        return {};
2632    }
2633    return code->CalCallSiteInfo();
2634};
2635
2636GCListenerId Heap::AddGCListener(FinishGCListener listener, void *data)
2637{
2638    gcListeners_.emplace_back(std::make_pair(listener, data));
2639    return std::prev(gcListeners_.cend());
2640}
2641
2642void Heap::ProcessGCListeners()
2643{
2644    for (auto &&[listener, data] : gcListeners_) {
2645        listener(data);
2646    }
2647}
2648
2649void SharedHeap::ProcessAllGCListeners()
2650{
2651    Runtime::GetInstance()->GCIterateThreadList([](JSThread *thread) {
2652        ASSERT(!thread->IsInRunningState());
2653        const_cast<Heap *>(thread->GetEcmaVM()->GetHeap())->ProcessGCListeners();
2654    });
2655}
2656
2657#if defined(ECMASCRIPT_SUPPORT_SNAPSHOT) && defined(PANDA_TARGET_OHOS) && defined(ENABLE_HISYSEVENT)
2658uint64_t Heap::GetCurrentTickMillseconds()
2659{
2660    return std::chrono::duration_cast<std::chrono::milliseconds>(
2661    std::chrono::steady_clock::now().time_since_epoch()).count();
2662}
2663
2664void Heap::SetJsDumpThresholds(size_t thresholds) const
2665{
2666    if (thresholds < MIN_JSDUMP_THRESHOLDS || thresholds > MAX_JSDUMP_THRESHOLDS) {
2667        LOG_GC(INFO) << "SetJsDumpThresholds thresholds is invaild" << thresholds;
2668        return;
2669    }
2670    g_threshold = thresholds;
2671}
2672
2673void Heap::ThresholdReachedDump()
2674{
2675    size_t limitSize = GetHeapLimitSize();
2676    if (!limitSize) {
2677        LOG_GC(INFO) << "ThresholdReachedDump limitSize is invaild";
2678        return;
2679    }
2680    size_t nowPrecent = GetHeapObjectSize() * DEC_TO_INT / limitSize;
2681    if (g_debugLeak || (nowPrecent >= g_threshold && (g_lastHeapDumpTime == 0 ||
2682        GetCurrentTickMillseconds() - g_lastHeapDumpTime > HEAP_DUMP_REPORT_INTERVAL))) {
2683            size_t liveObjectSize = GetLiveObjectSize();
2684            size_t nowPrecentRecheck = liveObjectSize * DEC_TO_INT / limitSize;
2685            LOG_GC(INFO) << "ThresholdReachedDump nowPrecentCheck is " << nowPrecentRecheck;
2686            if (nowPrecentRecheck < g_threshold) {
2687                return;
2688            }
2689            g_lastHeapDumpTime = GetCurrentTickMillseconds();
2690            base::BlockHookScope blockScope;
2691            HeapProfilerInterface *heapProfile = HeapProfilerInterface::GetInstance(ecmaVm_);
2692            if (appfreezeCallback_ != nullptr && appfreezeCallback_(getprocpid())) {
2693                LOG_ECMA(INFO) << "ThresholdReachedDump and avoid freeze success.";
2694            } else {
2695                LOG_ECMA(WARN) << "ThresholdReachedDump but avoid freeze failed.";
2696            }
2697            GetEcmaGCKeyStats()->SendSysEventBeforeDump("thresholdReachedDump",
2698                                                        GetHeapLimitSize(), GetLiveObjectSize());
2699            DumpSnapShotOption dumpOption;
2700            dumpOption.dumpFormat = DumpFormat::BINARY;
2701            dumpOption.isVmMode = true;
2702            dumpOption.isPrivate = false;
2703            dumpOption.captureNumericValue = false;
2704            dumpOption.isFullGC = false;
2705            dumpOption.isSimplify = true;
2706            dumpOption.isSync = false;
2707            dumpOption.isBeforeFill = false;
2708            dumpOption.isDumpOOM = true; // aim's to do binary dump
2709            heapProfile->DumpHeapSnapshot(dumpOption);
2710            hasOOMDump_ = false;
2711            HeapProfilerInterface::Destroy(ecmaVm_);
2712        }
2713}
2714#endif
2715
2716void Heap::RemoveGCListener(GCListenerId listenerId)
2717{
2718    gcListeners_.erase(listenerId);
2719}
2720
2721void BaseHeap::IncreaseTaskCount()
2722{
2723    LockHolder holder(waitTaskFinishedMutex_);
2724    runningTaskCount_++;
2725}
2726
2727void BaseHeap::WaitRunningTaskFinished()
2728{
2729    LockHolder holder(waitTaskFinishedMutex_);
2730    while (runningTaskCount_ > 0) {
2731        waitTaskFinishedCV_.Wait(&waitTaskFinishedMutex_);
2732    }
2733}
2734
2735bool BaseHeap::CheckCanDistributeTask()
2736{
2737    LockHolder holder(waitTaskFinishedMutex_);
2738    return runningTaskCount_ < maxMarkTaskCount_;
2739}
2740
2741void BaseHeap::ReduceTaskCount()
2742{
2743    LockHolder holder(waitTaskFinishedMutex_);
2744    runningTaskCount_--;
2745    if (runningTaskCount_ == 0) {
2746        waitTaskFinishedCV_.SignalAll();
2747    }
2748}
2749
2750void BaseHeap::WaitClearTaskFinished()
2751{
2752    LockHolder holder(waitClearTaskFinishedMutex_);
2753    while (!clearTaskFinished_) {
2754        waitClearTaskFinishedCV_.Wait(&waitClearTaskFinishedMutex_);
2755    }
2756}
2757
2758void Heap::ReleaseEdenAllocator()
2759{
2760    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
2761    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
2762    if (!topAddress || !endAddress) {
2763        return;
2764    }
2765    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
2766}
2767
2768void Heap::InstallEdenAllocator()
2769{
2770    if (!enableEdenGC_) {
2771        return;
2772    }
2773    auto topAddress = edenSpace_->GetAllocationTopAddress();
2774    auto endAddress = edenSpace_->GetAllocationEndAddress();
2775    if (!topAddress || !endAddress) {
2776        return;
2777    }
2778    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
2779}
2780
2781void Heap::EnableEdenGC()
2782{
2783    enableEdenGC_ = true;
2784    thread_->EnableEdenGCBarriers();
2785}
2786
2787void Heap::TryEnableEdenGC()
2788{
2789    if (ohos::OhosParams::IsEdenGCEnable()) {
2790        EnableEdenGC();
2791    }
2792}
2793}  // namespace panda::ecmascript
2794