/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ECMASCRIPT_MEM_HEAP_INL_H
#define ECMASCRIPT_MEM_HEAP_INL_H

#include "ecmascript/mem/heap.h"

#include "ecmascript/js_native_pointer.h"
#include "ecmascript/daemon/daemon_task-inl.h"
#include "ecmascript/dfx/hprof/heap_tracker.h"
#include "ecmascript/ecma_vm.h"
#include "ecmascript/mem/allocator-inl.h"
#include "ecmascript/mem/concurrent_sweeper.h"
#include "ecmascript/mem/linear_space.h"
#include "ecmascript/mem/mem_controller.h"
#include "ecmascript/mem/shared_mem_controller.h"
#include "ecmascript/mem/sparse_space.h"
#include "ecmascript/mem/tagged_object.h"
#include "ecmascript/mem/thread_local_allocation_buffer.h"
#include "ecmascript/mem/barriers-inl.h"
#include "ecmascript/mem/mem_map_allocator.h"
#include "ecmascript/runtime.h"

namespace panda::ecmascript {
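// The CHECK_*_OOM_ERROR macros below share one slow path: when an allocation returns nullptr,
// they temporarily raise the space's out-of-memory overshoot limit, optionally dump a heap
// snapshot and heap statistics, report an OutOfMemory error on the JS thread, and retry the
// allocation once so the error can be thrown without immediately crashing the VM.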
#define CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, space, message)                                         \
    if (UNLIKELY((object) == nullptr)) {                                                                    \
        EcmaVM *vm = GetEcmaVM();                                                                           \
        size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
        (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
        if ((space)->IsOOMDumpSpace()) {                                                                    \
            DumpHeapSnapshotBeforeOOM();                                                                    \
        }                                                                                                   \
        StatisticHeapDetail();                                                                              \
        ThrowOutOfMemoryError(GetJSThread(), size, message);                                                \
        (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size));                               \
    }

#define CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, space, message)                                \
    if (UNLIKELY((object) == nullptr)) {                                                                    \
        size_t oomOvershootSize = GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();                \
        (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
        DumpHeapSnapshotBeforeOOM(true, thread);                                                            \
        ThrowOutOfMemoryError(thread, size, message);                                                       \
        (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(thread, size));                       \
    }

#define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, space, desc, message)                   \
    if (UNLIKELY((object) == nullptr)) {                                                                    \
        EcmaVM *vm = GetEcmaVM();                                                                           \
        size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
        (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
        SetMachineCodeOutOfMemoryError(GetJSThread(), size, message);                                       \
        (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size, desc));                         \
    }

#define CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, space, message)                              \
    if (UNLIKELY((object) == nullptr)) {                                                                    \
        EcmaVM *vm = GetEcmaVM();                                                                           \
        size_t oomOvershootSize = vm->GetEcmaParamConfiguration().GetOutOfMemoryOvershootSize();            \
        (space)->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);                                        \
        SetMachineCodeOutOfMemoryError(GetJSThread(), size, message);                                       \
        (object) = reinterpret_cast<TaggedObject *>((space)->Allocate(size));                               \
    }

template<class Callback>
void SharedHeap::EnumerateOldSpaceRegions(const Callback &cb) const
{
    sOldSpace_->EnumerateRegions(cb);
    sNonMovableSpace_->EnumerateRegions(cb);
    sHugeObjectSpace_->EnumerateRegions(cb);
    sAppSpawnSpace_->EnumerateRegions(cb);
}

template<class Callback>
void SharedHeap::EnumerateOldSpaceRegionsWithRecord(const Callback &cb) const
{
    sOldSpace_->EnumerateRegionsWithRecord(cb);
    sNonMovableSpace_->EnumerateRegionsWithRecord(cb);
    sHugeObjectSpace_->EnumerateRegionsWithRecord(cb);
}

template<class Callback>
void SharedHeap::IterateOverObjects(const Callback &cb) const
{
    sOldSpace_->IterateOverObjects(cb);
    sNonMovableSpace_->IterateOverObjects(cb);
    sHugeObjectSpace_->IterateOverObjects(cb);
    sAppSpawnSpace_->IterateOverMarkedObjects(cb);
}

template<class Callback>
void Heap::EnumerateOldSpaceRegions(const Callback &cb, Region *region) const
{
    oldSpace_->EnumerateRegions(cb, region);
    appSpawnSpace_->EnumerateRegions(cb);
    nonMovableSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateSnapshotSpaceRegions(const Callback &cb) const
{
    snapshotSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateNonNewSpaceRegions(const Callback &cb) const
{
    oldSpace_->EnumerateRegions(cb);
    if (!isCSetClearing_.load(std::memory_order_acquire)) {
        oldSpace_->EnumerateCollectRegionSet(cb);
    }
    appSpawnSpace_->EnumerateRegions(cb);
    snapshotSpace_->EnumerateRegions(cb);
    nonMovableSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateNonNewSpaceRegionsWithRecord(const Callback &cb) const
{
    oldSpace_->EnumerateRegionsWithRecord(cb);
    snapshotSpace_->EnumerateRegionsWithRecord(cb);
    nonMovableSpace_->EnumerateRegionsWithRecord(cb);
    hugeObjectSpace_->EnumerateRegionsWithRecord(cb);
    machineCodeSpace_->EnumerateRegionsWithRecord(cb);
    hugeMachineCodeSpace_->EnumerateRegionsWithRecord(cb);
}

template<class Callback>
void Heap::EnumerateEdenSpaceRegions(const Callback &cb) const
{
    edenSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateNewSpaceRegions(const Callback &cb) const
{
    activeSemiSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateNonMovableRegions(const Callback &cb) const
{
    snapshotSpace_->EnumerateRegions(cb);
    appSpawnSpace_->EnumerateRegions(cb);
    nonMovableSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::EnumerateRegions(const Callback &cb) const
{
    edenSpace_->EnumerateRegions(cb);
    activeSemiSpace_->EnumerateRegions(cb);
    oldSpace_->EnumerateRegions(cb);
    if (!isCSetClearing_.load(std::memory_order_acquire)) {
        oldSpace_->EnumerateCollectRegionSet(cb);
    }
    appSpawnSpace_->EnumerateRegions(cb);
    snapshotSpace_->EnumerateRegions(cb);
    nonMovableSpace_->EnumerateRegions(cb);
    hugeObjectSpace_->EnumerateRegions(cb);
    machineCodeSpace_->EnumerateRegions(cb);
    hugeMachineCodeSpace_->EnumerateRegions(cb);
}

template<class Callback>
void Heap::IterateOverObjects(const Callback &cb, bool isSimplify) const
{
    edenSpace_->IterateOverObjects(cb);
    activeSemiSpace_->IterateOverObjects(cb);
    oldSpace_->IterateOverObjects(cb);
    nonMovableSpace_->IterateOverObjects(cb);
    hugeObjectSpace_->IterateOverObjects(cb);
    machineCodeSpace_->IterateOverObjects(cb);
    hugeMachineCodeSpace_->IterateOverObjects(cb);
    snapshotSpace_->IterateOverObjects(cb);
    if (!isSimplify) {
        readOnlySpace_->IterateOverObjects(cb);
        appSpawnSpace_->IterateOverMarkedObjects(cb);
    }
}

TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    return AllocateYoungOrHugeObject(hclass, size);
}

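// Regular young allocation with a staged slow path: try the new space; on failure let
// HandleExitHighSensitiveEvent run (collecting garbage only if it does not handle the event),
// retry, collect once more and retry, and finally fall back to the OOM macro. Sizes above
// MAX_REGULAR_HEAP_OBJECT_SIZE go straight to the huge object space.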
TaggedObject *Heap::AllocateYoungOrHugeObject(size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = nullptr;
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        object = AllocateHugeObject(size);
    } else {
        object = AllocateInGeneralNewSpace(size);
        if (object == nullptr) {
            if (!HandleExitHighSensitiveEvent()) {
                CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
            }
            object = AllocateInGeneralNewSpace(size);
            if (object == nullptr) {
                CollectGarbage(SelectGCType(), GCReason::ALLOCATION_FAILED);
                object = AllocateInGeneralNewSpace(size);
                CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, activeSemiSpace_, "Heap::AllocateYoungOrHugeObject");
            }
        }
    }
    return object;
}

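// With eden GC enabled, regular young allocations are served from the eden space first and fall
// back to the active semispace; otherwise they go to the active semispace directly.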
TaggedObject *Heap::AllocateInGeneralNewSpace(size_t size)
{
    if (enableEdenGC_) {
        auto object = reinterpret_cast<TaggedObject *>(edenSpace_->Allocate(size));
        if (object != nullptr) {
            return object;
        }
    }
    return reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
}

TaggedObject *Heap::AllocateYoungOrHugeObject(JSHClass *hclass, size_t size)
{
    auto object = AllocateYoungOrHugeObject(size);
    ASSERT(object != nullptr);
    object->SetClass(thread_, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

void BaseHeap::SetHClassAndDoAllocateEvent(JSThread *thread, TaggedObject *object, JSHClass *hclass,
                                           [[maybe_unused]] size_t size)
{
    ASSERT(object != nullptr);
    object->SetClass(thread, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
}

uintptr_t Heap::AllocateYoungSync(size_t size)
{
    return activeSemiSpace_->AllocateSync(size);
}

bool Heap::MoveYoungRegionSync(Region *region)
{
    return activeSemiSpace_->SwapRegion(region, inactiveSemiSpace_);
}

void Heap::MergeToOldSpaceSync(LocalSpace *localSpace)
{
    oldSpace_->Merge(localSpace);
}

bool Heap::InHeapProfiler()
{
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    return GetEcmaVM()->GetHeapProfile() != nullptr;
#else
    return false;
#endif
}

void SharedHeap::MergeToOldSpaceSync(SharedLocalSpace *localSpace)
{
    sOldSpace_->Merge(localSpace);
}

TaggedObject *Heap::TryAllocateYoungGeneration(JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return nullptr;
    }
    auto object = reinterpret_cast<TaggedObject *>(activeSemiSpace_->Allocate(size));
    if (object != nullptr) {
        object->SetClass(thread_, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    return AllocateOldOrHugeObject(hclass, size);
}

TaggedObject *Heap::AllocateOldOrHugeObject(size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = nullptr;
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        object = AllocateHugeObject(size);
    } else {
        object = reinterpret_cast<TaggedObject *>(oldSpace_->Allocate(size));
        CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, oldSpace_, "Heap::AllocateOldOrHugeObject");
    }
    return object;
}

TaggedObject *Heap::AllocateOldOrHugeObject(JSHClass *hclass, size_t size)
{
    auto object = AllocateOldOrHugeObject(size);
    object->SetClass(thread_, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject*>(object), size);
#endif
    return object;
}

TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    TaggedObject *object = AllocateReadOnlyOrHugeObject(hclass, size);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateReadOnlyOrHugeObject(JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = nullptr;
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        object = AllocateHugeObject(hclass, size);
    } else {
        object = reinterpret_cast<TaggedObject *>(readOnlySpace_->Allocate(size));
        CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, readOnlySpace_, "Heap::AllocateReadOnlyOrHugeObject");
        object->SetClass(thread_, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    TaggedObject *object = AllocateNonMovableOrHugeObject(hclass, size);
    if (object == nullptr) {
        LOG_ECMA(FATAL) << "Heap::AllocateNonMovableOrHugeObject:object is nullptr";
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateNonMovableOrHugeObject(JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = nullptr;
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        object = AllocateHugeObject(hclass, size);
    } else {
        object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->CheckAndAllocate(size));
        CHECK_OBJ_AND_THROW_OOM_ERROR(object, size, nonMovableSpace_, "Heap::AllocateNonMovableOrHugeObject");
        object->SetClass(thread_, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateClassClass(JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    auto object = reinterpret_cast<TaggedObject *>(nonMovableSpace_->Allocate(size));
    if (UNLIKELY(object == nullptr)) {
        LOG_ECMA_MEM(FATAL) << "Heap::AllocateClassClass can not allocate any space";
        UNREACHABLE();
    }
    *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateClassClass(JSThread *thread, JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
    if (UNLIKELY(object == nullptr)) {
        LOG_ECMA_MEM(FATAL) << "SharedHeap::AllocateClassClass can not allocate any space";
        UNREACHABLE();
    }
    *reinterpret_cast<MarkWordType *>(ToUintPtr(object)) = reinterpret_cast<MarkWordType>(hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

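// Huge object allocation: an old GC may be triggered before expanding to reduce OOM risk. If the
// first attempt fails, collect old-generation garbage and retry; as a last resort raise the
// overshoot limit, dump a snapshot and statistics, throw an OOM error and retry once more before
// reporting a fatal OOM.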
TaggedObject *Heap::AllocateHugeObject(size_t size)
{
    // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
    CheckAndTriggerOldGC(size);

    auto *object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
    if (UNLIKELY(object == nullptr)) {
        CollectGarbage(TriggerGCType::OLD_GC, GCReason::ALLOCATION_FAILED);
        object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
        if (UNLIKELY(object == nullptr)) {
            // If a huge object allocation hits OOM, temporarily increase the space size to avoid a VM crash.
            size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
            oldSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
            DumpHeapSnapshotBeforeOOM();
            StatisticHeapDetail();
            object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
            ThrowOutOfMemoryError(thread_, size, "Heap::AllocateHugeObject");
            object = reinterpret_cast<TaggedObject *>(hugeObjectSpace_->Allocate(size, thread_));
            if (UNLIKELY(object == nullptr)) {
                FatalOutOfMemoryError(size, "Heap::AllocateHugeObject");
            }
        }
    }
    return object;
}

TaggedObject *Heap::AllocateHugeObject(JSHClass *hclass, size_t size)
{
    // Check whether it is necessary to trigger Old GC before expanding to avoid OOM risk.
    CheckAndTriggerOldGC(size);
    auto object = AllocateHugeObject(size);
    object->SetClass(thread_, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *Heap::AllocateHugeMachineCodeObject(size_t size, MachineCodeDesc *desc)
{
    TaggedObject *object;
    if (desc) {
        object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
            size, thread_, reinterpret_cast<void *>(desc)));
    } else {
        object = reinterpret_cast<TaggedObject *>(hugeMachineCodeSpace_->Allocate(
            size, thread_));
    }
    return object;
}

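// Machine code allocation has two paths. Without a MachineCodeDesc the Jit Fort is disabled and
// the object is allocated straight from the (huge) machine code space. With a desc, Fort space
// for the instructions is reserved first for regular-sized objects (skipped when async
// copy-to-fort applies) and recorded in desc->instructionsAddr before the code object itself is
// allocated.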
TaggedObject *Heap::AllocateMachineCodeObject(JSHClass *hclass, size_t size, MachineCodeDesc *desc)
{
    TaggedObject *object;
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (!desc) {
        // Jit Fort disabled
        ASSERT(!GetEcmaVM()->GetJSOptions().GetEnableJitFort());
        object = (size > MAX_REGULAR_HEAP_OBJECT_SIZE) ?
            reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size)) :
            reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size));
        CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR(object, size, machineCodeSpace_,
            "Heap::AllocateMachineCodeObject");
        object->SetClass(thread_, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
        OnAllocateEvent(GetEcmaVM(), object, size);
#endif
        return object;
    }

    // Jit Fort enabled
    ASSERT(GetEcmaVM()->GetJSOptions().GetEnableJitFort());
    if (!GetEcmaVM()->GetJSOptions().GetEnableAsyncCopyToFort() || !desc->isAsyncCompileMode) {
        desc->instructionsAddr = 0;
        if (size <= MAX_REGULAR_HEAP_OBJECT_SIZE) {
            // for non huge code cache obj, allocate fort space before allocating the code object
            uintptr_t mem = machineCodeSpace_->JitFortAllocate(desc);
            if (mem == ToUintPtr(nullptr)) {
                return nullptr;
            }
            desc->instructionsAddr = mem;
        }
    }
    object = (size > MAX_REGULAR_HEAP_OBJECT_SIZE) ?
        reinterpret_cast<TaggedObject *>(AllocateHugeMachineCodeObject(size, desc)) :
        reinterpret_cast<TaggedObject *>(machineCodeSpace_->Allocate(size, desc, true));
    CHECK_MACHINE_CODE_OBJ_AND_SET_OOM_ERROR_FORT(object, size, machineCodeSpace_, desc,
        "Heap::AllocateMachineCodeObject");
    object->SetClass(thread_, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), object, size);
#endif
    return object;
}

uintptr_t Heap::AllocateSnapshotSpace(size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    uintptr_t object = snapshotSpace_->Allocate(size);
    if (UNLIKELY(object == 0)) {
        FatalOutOfMemoryError(size, "Heap::AllocateSnapshotSpaceObject");
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(GetEcmaVM(), reinterpret_cast<TaggedObject *>(object), size);
#endif
    return object;
}

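// The two *FromTlab helpers below share one pattern: serve the request from the thread-local
// allocation buffer when possible; otherwise ask the shared heap for a new TLAB, re-point the
// thread's inline allocation window at it, and carve the object from its start. They return
// nullptr, so the caller takes the shared-heap slow path, when a new TLAB is not warranted or
// cannot be allocated.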
TaggedObject *Heap::AllocateSharedNonMovableSpaceFromTlab(JSThread *thread, size_t size)
{
    ASSERT(!thread->IsJitThread());
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = reinterpret_cast<TaggedObject*>(sNonMovableTlab_->Allocate(size));
    if (object != nullptr) {
        return object;
    }
    if (!sNonMovableTlab_->NeedNewTlab(size)) {
        // slowpath
        return nullptr;
    }
    size_t newTlabSize = sNonMovableTlab_->ComputeSize();
    object = sHeap_->AllocateSNonMovableTlab(thread, newTlabSize);
    if (object == nullptr) {
        sNonMovableTlab_->DisableNewTlab();
        return nullptr;
    }
    uintptr_t begin = reinterpret_cast<uintptr_t>(object);
    sNonMovableTlab_->Reset(begin, begin + newTlabSize, begin + size);
    auto topAddress = sNonMovableTlab_->GetTopAddress();
    auto endAddress = sNonMovableTlab_->GetEndAddress();
    thread->ReSetSNonMovableSpaceAllocationAddress(topAddress, endAddress);
    sHeap_->TryTriggerConcurrentMarking(thread);
    return object;
}

TaggedObject *Heap::AllocateSharedOldSpaceFromTlab(JSThread *thread, size_t size)
{
    ASSERT(!thread->IsJitThread());
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    TaggedObject *object = reinterpret_cast<TaggedObject*>(sOldTlab_->Allocate(size));
    if (object != nullptr) {
        return object;
    }
    if (!sOldTlab_->NeedNewTlab(size)) {
        // slowpath
        return nullptr;
    }
    size_t newTlabSize = sOldTlab_->ComputeSize();
    object = sHeap_->AllocateSOldTlab(thread, newTlabSize);
    if (object == nullptr) {
        sOldTlab_->DisableNewTlab();
        return nullptr;
    }
    uintptr_t begin = reinterpret_cast<uintptr_t>(object);
    sOldTlab_->Reset(begin, begin + newTlabSize, begin + size);
    auto topAddress = sOldTlab_->GetTopAddress();
    auto endAddress = sOldTlab_->GetEndAddress();
    thread->ReSetSOldSpaceAllocationAddress(topAddress, endAddress);
    sHeap_->TryTriggerConcurrentMarking(thread);
    return object;
}

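// Flip the semispaces after a young GC: stop the active space, restart the inactive one (with an
// overshoot allowance when not in background and not doing a full GC), swap the two pointers, and
// re-point the thread's inline allocation top/end at the new active space.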
void Heap::SwapNewSpace()
{
    activeSemiSpace_->Stop();
    size_t newOverShootSize = 0;
    if (!inBackground_ && gcType_ != TriggerGCType::FULL_GC && gcType_ != TriggerGCType::APPSPAWN_FULL_GC) {
        newOverShootSize = activeSemiSpace_->CalculateNewOverShootSize();
    }
    inactiveSemiSpace_->Restart(newOverShootSize);

    SemiSpace *newSpace = inactiveSemiSpace_;
    inactiveSemiSpace_ = activeSemiSpace_;
    activeSemiSpace_ = newSpace;
    if (UNLIKELY(ShouldVerifyHeap())) {
        inactiveSemiSpace_->EnumerateRegions([](Region *region) {
            region->SetInactiveSemiSpace();
        });
    }
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    activeSemiSpace_->SwapAllocationCounter(inactiveSemiSpace_);
#endif
    auto topAddress = activeSemiSpace_->GetAllocationTopAddress();
    auto endAddress = activeSemiSpace_->GetAllocationEndAddress();
    thread_->ReSetNewSpaceAllocationAddress(topAddress, endAddress);
}

void Heap::SwapOldSpace()
{
    compressSpace_->SetInitialCapacity(oldSpace_->GetInitialCapacity());
    auto *oldSpace = compressSpace_;
    compressSpace_ = oldSpace_;
    oldSpace_ = oldSpace;
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    oldSpace_->SwapAllocationCounter(compressSpace_);
#endif
}

void SharedHeap::SwapOldSpace()
{
    sCompressSpace_->SetInitialCapacity(sOldSpace_->GetInitialCapacity());
    auto *oldSpace = sCompressSpace_;
    sCompressSpace_ = sOldSpace_;
    sOldSpace_ = oldSpace;
#ifdef ECMASCRIPT_SUPPORT_HEAPSAMPLING
    sOldSpace_->SwapAllocationCounter(sCompressSpace_);
#endif
}

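// Post-GC cleanup: reset marking state on the surviving young regions, reclaim the collect set
// (old GC) or reset the compress space (full GC), release the inactive semispace regions, clear
// mark bitsets on non-new-space regions once sweeping has finished, and signal any waiter that
// the clear task is done.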
void Heap::ReclaimRegions(TriggerGCType gcType)
{
    activeSemiSpace_->EnumerateRegionsWithRecord([] (Region *region) {
        region->ResetRegionTypeFlag();
        region->ClearMarkGCBitset();
        region->ClearCrossRegionRSet();
        region->ResetAliveObject();
        region->DeleteNewToEdenRSet();
        region->ClearGCFlag(RegionGCFlags::IN_NEW_TO_NEW_SET);
    });
    size_t cachedSize = inactiveSemiSpace_->GetInitialCapacity();
    if (gcType == TriggerGCType::FULL_GC) {
        compressSpace_->Reset();
        cachedSize = 0;
    } else if (gcType == TriggerGCType::OLD_GC) {
        oldSpace_->ReclaimCSet();
        isCSetClearing_.store(false, std::memory_order_release);
    }

    inactiveSemiSpace_->ReclaimRegions(cachedSize);
    sweeper_->WaitAllTaskFinished();
    EnumerateNonNewSpaceRegionsWithRecord([] (Region *region) {
        region->ClearMarkGCBitset();
        region->ClearCrossRegionRSet();
    });
    if (!clearTaskFinished_) {
        LockHolder holder(waitClearTaskFinishedMutex_);
        clearTaskFinished_ = true;
        waitClearTaskFinishedCV_.SignalAll();
    }
}

// only call in js-thread
void Heap::ClearSlotsRange(Region *current, uintptr_t freeStart, uintptr_t freeEnd)
{
    if (!current->InGeneralNewSpace()) {
        // This clear may race with concurrent sweeping, so use CAS
        current->AtomicClearSweepingOldToNewRSetInRange(freeStart, freeEnd);
        current->ClearOldToNewRSetInRange(freeStart, freeEnd);
        current->AtomicClearCrossRegionRSetInRange(freeStart, freeEnd);
    }
    current->ClearLocalToShareRSetInRange(freeStart, freeEnd);
    current->AtomicClearSweepingLocalToShareRSetInRange(freeStart, freeEnd);
}

size_t Heap::GetCommittedSize() const
{
    size_t result = edenSpace_->GetCommittedSize() +
                    activeSemiSpace_->GetCommittedSize() +
                    oldSpace_->GetCommittedSize() +
                    hugeObjectSpace_->GetCommittedSize() +
                    nonMovableSpace_->GetCommittedSize() +
                    machineCodeSpace_->GetCommittedSize() +
                    hugeMachineCodeSpace_->GetCommittedSize() +
                    readOnlySpace_->GetCommittedSize() +
                    appSpawnSpace_->GetCommittedSize() +
                    snapshotSpace_->GetCommittedSize();
    return result;
}

size_t Heap::GetHeapObjectSize() const
{
    size_t result = edenSpace_->GetHeapObjectSize() +
                    activeSemiSpace_->GetHeapObjectSize() +
                    oldSpace_->GetHeapObjectSize() +
                    hugeObjectSpace_->GetHeapObjectSize() +
                    nonMovableSpace_->GetHeapObjectSize() +
                    machineCodeSpace_->GetCommittedSize() +
                    hugeMachineCodeSpace_->GetCommittedSize() +
                    readOnlySpace_->GetCommittedSize() +
                    appSpawnSpace_->GetHeapObjectSize() +
                    snapshotSpace_->GetHeapObjectSize();
    return result;
}

void Heap::NotifyRecordMemorySize()
{
    if (GetRecordObjectSize() == 0) {
        RecordOrResetObjectSize(GetHeapObjectSize());
    }
    if (GetRecordNativeSize() == 0) {
        RecordOrResetNativeSize(GetNativeBindingSize());
    }
}

size_t Heap::GetRegionCount() const
{
    size_t result = edenSpace_->GetRegionCount() +
        activeSemiSpace_->GetRegionCount() +
        oldSpace_->GetRegionCount() +
        oldSpace_->GetCollectSetRegionCount() +
        appSpawnSpace_->GetRegionCount() +
        snapshotSpace_->GetRegionCount() +
        nonMovableSpace_->GetRegionCount() +
        hugeObjectSpace_->GetRegionCount() +
        machineCodeSpace_->GetRegionCount() +
        hugeMachineCodeSpace_->GetRegionCount();
    return result;
}

uint32_t Heap::GetHeapObjectCount() const
{
    uint32_t count = 0;
    sweeper_->EnsureAllTaskFinished();
    this->IterateOverObjects([&count]([[maybe_unused]] TaggedObject *obj) {
        ++count;
    });
    return count;
}

void Heap::InitializeIdleStatusControl(std::function<void(bool)> callback)
{
    notifyIdleStatusCallback = callback;
    if (callback != nullptr) {
        OPTIONAL_LOG(ecmaVm_, INFO) << "Received idle status control call back";
        enableIdleGC_ = ecmaVm_->GetJSOptions().EnableIdleGC();
    }
}

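// Kick off shared concurrent marking once the shared heap object size reaches the global
// allocation limit, provided concurrent marking is currently allowed for this thread.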
void SharedHeap::TryTriggerConcurrentMarking(JSThread *thread)
{
    if (!CheckCanTriggerConcurrentMarking(thread)) {
        return;
    }
    if (GetHeapObjectSize() >= globalSpaceConcurrentMarkLimit_) {
        TriggerConcurrentMarking<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
    }
}

void SharedHeap::CollectGarbageFinish(bool inDaemon, TriggerGCType gcType)
{
    if (inDaemon) {
        ASSERT(JSThread::GetCurrent() == dThread_);
#ifndef NDEBUG
        ASSERT(dThread_->HasLaunchedSuspendAll());
#endif
        dThread_->FinishRunningTask();
        NotifyGCCompleted();
        // The update to forceGC_ happens during the daemon thread's SuspendAll and is protected by
        // Runtime::mutatorLock_, so no extra lock is needed here.
        smartGCStats_.forceGC_ = false;
    }
    localFullMarkTriggered_ = false;
    // Record alive object size after shared gc and other stats
    UpdateHeapStatsAfterGC(gcType);
    // Adjust shared gc trigger threshold
    AdjustGlobalSpaceAllocLimit();
    GetEcmaGCStats()->RecordStatisticAfterGC();
    GetEcmaGCStats()->PrintGCStatistic();
    ProcessAllGCListeners();
}

TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    return AllocateNonMovableOrHugeObject(thread, hclass, size);
}

TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, hclass, size);
    }
    TaggedObject *object = thread->IsJitThread() ? nullptr :
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
    if (object == nullptr) {
        object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
        CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
            "SharedHeap::AllocateNonMovableOrHugeObject");
        object->SetClass(thread, hclass);
        TryTriggerConcurrentMarking(thread);
    } else {
        object->SetClass(thread, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateNonMovableOrHugeObject(JSThread *thread, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, size);
    }
    TaggedObject *object = thread->IsJitThread() ? nullptr :
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedNonMovableSpaceFromTlab(thread, size);
    if (object == nullptr) {
        object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
        CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sNonMovableSpace_,
            "SharedHeap::AllocateNonMovableOrHugeObject");
        TryTriggerConcurrentMarking(thread);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    return AllocateOldOrHugeObject(thread, hclass, size);
}

TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, hclass, size);
    }
    TaggedObject *object = thread->IsJitThread() ? nullptr :
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
    if (object == nullptr) {
        object = AllocateInSOldSpace(thread, size);
        CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
        object->SetClass(thread, hclass);
        TryTriggerConcurrentMarking(thread);
    } else {
        object->SetClass(thread, hclass);
    }
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateOldOrHugeObject(JSThread *thread, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, size);
    }
    TaggedObject *object = thread->IsJitThread() ? nullptr :
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->AllocateSharedOldSpaceFromTlab(thread, size);
    if (object == nullptr) {
        object = AllocateInSOldSpace(thread, size);
        CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sOldSpace_, "SharedHeap::AllocateOldOrHugeObject");
        TryTriggerConcurrentMarking(thread);
    }
    return object;
}

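// Shared old-space slow path: a non-JIT thread may first nudge its local heap toward a full mark
// based on the requested shared size, then tries to allocate without expanding, then with
// expansion after a shared GC check, and finally after an explicit shared GC. JIT threads skip
// every GC step because they must not trigger collections.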
TaggedObject *SharedHeap::AllocateInSOldSpace(JSThread *thread, size_t size)
{
    // The JIT thread has no local heap and must not trigger GC here.
    bool allowGC = !thread->IsJitThread();
    if (allowGC) {
        auto localHeap = const_cast<Heap*>(thread->GetEcmaVM()->GetHeap());
        localHeap->TryTriggerFullMarkBySharedSize(size);
    }
    TaggedObject *object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, false));
    // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
    if (object == nullptr) {
        if (allowGC) {
            CheckAndTriggerSharedGC(thread);
        }
        object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
        if (object == nullptr) {
            if (allowGC) {
                CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_FAILED>(thread);
            }
            object = reinterpret_cast<TaggedObject *>(sOldSpace_->TryAllocateAndExpand(thread, size, true));
        }
    }
    return object;
}

TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{
    auto object = AllocateHugeObject(thread, size);
    object->SetClass(thread, hclass);
#if defined(ECMASCRIPT_SUPPORT_HEAPPROFILER)
    OnAllocateEvent(thread->GetEcmaVM(), object, size);
#endif
    return object;
}

TaggedObject *SharedHeap::AllocateHugeObject(JSThread *thread, size_t size)
{
    // Check whether it is necessary to trigger Shared GC before expanding to avoid OOM risk.
    CheckHugeAndTriggerSharedGC(thread, size);
    auto *object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
    if (UNLIKELY(object == nullptr)) {
        CollectGarbage<TriggerGCType::SHARED_GC, GCReason::ALLOCATION_LIMIT>(thread);
        object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
        if (UNLIKELY(object == nullptr)) {
            // If a huge object allocation hits OOM, temporarily increase the space size to avoid a VM crash.
            size_t oomOvershootSize = config_.GetOutOfMemoryOvershootSize();
            sHugeObjectSpace_->IncreaseOutOfMemoryOvershootSize(oomOvershootSize);
            DumpHeapSnapshotBeforeOOM(true, thread);
            ThrowOutOfMemoryError(thread, size, "SharedHeap::AllocateHugeObject");
            object = reinterpret_cast<TaggedObject *>(sHugeObjectSpace_->Allocate(thread, size));
            if (UNLIKELY(object == nullptr)) {
                FatalOutOfMemoryError(size, "SharedHeap::AllocateHugeObject");
            }
        }
    }
    TryTriggerConcurrentMarking(thread);
    return object;
}

TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass)
{
    size_t size = hclass->GetObjectSize();
    return AllocateReadOnlyOrHugeObject(thread, hclass, size);
}

TaggedObject *SharedHeap::AllocateReadOnlyOrHugeObject(JSThread *thread, JSHClass *hclass, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return AllocateHugeObject(thread, hclass, size);
    }
    auto object = reinterpret_cast<TaggedObject *>(sReadOnlySpace_->Allocate(thread, size));
    CHECK_SOBJ_AND_THROW_OOM_ERROR(thread, object, size, sReadOnlySpace_, "SharedHeap::AllocateReadOnlyOrHugeObject");
    ASSERT(object != nullptr);
    object->SetClass(thread, hclass);
    return object;
}

TaggedObject *SharedHeap::AllocateSOldTlab(JSThread *thread, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return nullptr;
    }
    TaggedObject *object = nullptr;
    if (sOldSpace_->GetCommittedSize() > sOldSpace_->GetInitialCapacity() / 2) { // 2: half
        object = reinterpret_cast<TaggedObject *>(sOldSpace_->AllocateNoGCAndExpand(thread, size));
    } else {
        object = AllocateInSOldSpace(thread, size);
    }
    return object;
}

TaggedObject *SharedHeap::AllocateSNonMovableTlab(JSThread *thread, size_t size)
{
    size = AlignUp(size, static_cast<size_t>(MemAlignment::MEM_ALIGN_OBJECT));
    if (size > MAX_REGULAR_HEAP_OBJECT_SIZE) {
        return nullptr;
    }
    TaggedObject *object = nullptr;
    object = reinterpret_cast<TaggedObject *>(sNonMovableSpace_->Allocate(thread, size));
    return object;
}

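// Post a concurrent-mark task to the daemon thread; gcFinished_ is cleared under
// waitGCFinishedMutex_ so a racing CollectGarbage call cannot observe a stale "finished" state.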
template<TriggerGCType gcType, GCReason gcReason>
void SharedHeap::TriggerConcurrentMarking(JSThread *thread)
{
    ASSERT(gcType == TriggerGCType::SHARED_GC);
    // The lock is taken outside CheckAndPostTask to guard against an extreme race; updating
    // gcFinished_ inside CheckAndPostTask might make this outer locking unnecessary.
    LockHolder lock(waitGCFinishedMutex_);
    if (dThread_->CheckAndPostTask(TriggerConcurrentMarkTask<gcType, gcReason>(thread))) {
        ASSERT(gcFinished_);
        gcFinished_ = false;
    }
}

template<TriggerGCType gcType, GCReason gcReason>
void SharedHeap::CollectGarbage(JSThread *thread)
{
    ASSERT(gcType == TriggerGCType::SHARED_GC || gcType == TriggerGCType::SHARED_FULL_GC);
#ifndef NDEBUG
    ASSERT(!thread->HasLaunchedSuspendAll());
#endif
    if (UNLIKELY(!dThread_->IsRunning())) {
        // This should not happen unless AppSpawn runs something after PostFork.
        LOG_GC(ERROR) << "Try to collect garbage in shared heap, but daemon thread is not running.";
        ForceCollectGarbageWithoutDaemonThread(gcType, gcReason, thread);
        return;
    }
    {
        // The lock here is taken outside the post-task call to prevent an extreme case: another js
        // thread succeeds in posting a concurrent mark task, so this thread goes straight into
        // WaitGCFinished, but gcFinished_ is somehow not cleared by that js thread before
        // WaitGCFinished completes, which may cause an unexpected OOM.
        LockHolder lock(waitGCFinishedMutex_);
        if (dThread_->CheckAndPostTask(TriggerCollectGarbageTask<gcType, gcReason>(thread))) {
            ASSERT(gcFinished_);
            gcFinished_ = false;
        }
    }
    ASSERT(!gcFinished_);
    SetForceGC(true);
    WaitGCFinished(thread);
}

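// Helpers for the native pointer lists: SwapBackAndPop removes the element at `iter` in O(1) by
// overwriting it with the last element (callers keep the iterator in place so the swapped-in
// element is visited next), and ShrinkWithFactor releases excess capacity once a list's size
// drops below half of its capacity.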
static void SwapBackAndPop(CVector<JSNativePointer*>& vec, CVector<JSNativePointer*>::iterator& iter)
{
    *iter = vec.back();
    if (iter + 1 == vec.end()) {
        vec.pop_back();
        iter = vec.end();
    } else {
        vec.pop_back();
    }
}

static void ShrinkWithFactor(CVector<JSNativePointer*>& vec)
{
    constexpr size_t SHRINK_FACTOR = 2;
    if (vec.size() < vec.capacity() / SHRINK_FACTOR) {
        vec.shrink_to_fit();
    }
}

void SharedHeap::InvokeSharedNativePointerCallbacks()
{
    Runtime *runtime = Runtime::GetInstance();
    if (!runtime->GetSharedNativePointerCallbacks().empty()) {
        runtime->InvokeSharedNativePointerCallbacks();
    }
}

void SharedHeap::PushToSharedNativePointerList(JSNativePointer* pointer)
{
    ASSERT(JSTaggedValue(pointer).IsInSharedHeap());
    std::lock_guard<std::mutex> lock(sNativePointerListMutex_);
    sharedNativePointerList_.emplace_back(pointer);
}

void SharedHeap::ProcessSharedNativeDelete(const WeakRootVisitor& visitor)
{
#ifndef NDEBUG
    ASSERT(JSThread::GetCurrent()->HasLaunchedSuspendAll());
#endif
    auto& sharedNativePointerCallbacks = Runtime::GetInstance()->GetSharedNativePointerCallbacks();
    auto sharedIter = sharedNativePointerList_.begin();
    while (sharedIter != sharedNativePointerList_.end()) {
        JSNativePointer* object = *sharedIter;
        auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
        if (fwd == nullptr) {
            sharedNativePointerCallbacks.emplace_back(
                object->GetDeleter(), std::make_pair(object->GetExternalPointer(), object->GetData()));
            SwapBackAndPop(sharedNativePointerList_, sharedIter);
        } else {
            if (fwd != reinterpret_cast<TaggedObject*>(object)) {
                *sharedIter = reinterpret_cast<JSNativePointer*>(fwd);
            }
            ++sharedIter;
        }
    }
    ShrinkWithFactor(sharedNativePointerList_);
}

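// During old/full GC, walk the native pointer lists with the weak-root visitor: entries whose
// objects are dead get their deleters queued (into the async callbacks pack or the concurrent
// callback list) and are removed; ProcessReferences additionally follows forwarded objects and
// rebuilds the native binding size for survivors.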
void Heap::ProcessNativeDelete(const WeakRootVisitor& visitor)
{
    // ProcessNativeDelete should be limited to OldGC or FullGC only
    if (!IsGeneralYoungGC()) {
        auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
        auto iter = nativePointerList_.begin();
        ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ProcessNativeDeleteNum:" + std::to_string(nativePointerList_.size()));
        while (iter != nativePointerList_.end()) {
            JSNativePointer* object = *iter;
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                size_t bindingSize = object->GetBindingSize();
                asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
                nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
                SwapBackAndPop(nativePointerList_, iter);
            } else {
                ++iter;
            }
        }
        ShrinkWithFactor(nativePointerList_);

        auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
        auto newIter = concurrentNativePointerList_.begin();
        while (newIter != concurrentNativePointerList_.end()) {
            JSNativePointer* object = *newIter;
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
                concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
                SwapBackAndPop(concurrentNativePointerList_, newIter);
            } else {
                ++newIter;
            }
        }
        ShrinkWithFactor(concurrentNativePointerList_);
    }
}

void Heap::ProcessReferences(const WeakRootVisitor& visitor)
{
    // process native ref should be limited to OldGC or FullGC only
    if (!IsGeneralYoungGC()) {
        auto& asyncNativeCallbacksPack = GetEcmaVM()->GetAsyncNativePointerCallbacksPack();
        ResetNativeBindingSize();
        // array buffer
        auto iter = nativePointerList_.begin();
        ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ProcessReferencesNum:" + std::to_string(nativePointerList_.size()));
        while (iter != nativePointerList_.end()) {
            JSNativePointer* object = *iter;
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                size_t bindingSize = object->GetBindingSize();
                asyncNativeCallbacksPack.AddCallback(std::make_pair(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData())), bindingSize);
                nativeAreaAllocator_->DecreaseNativeSizeStats(bindingSize, object->GetNativeFlag());
                SwapBackAndPop(nativePointerList_, iter);
                continue;
            }
            IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
            if (fwd != reinterpret_cast<TaggedObject*>(object)) {
                *iter = JSNativePointer::Cast(fwd);
            }
            ++iter;
        }
        ShrinkWithFactor(nativePointerList_);

        auto& concurrentNativeCallbacks = GetEcmaVM()->GetConcurrentNativePointerCallbacks();
        auto newIter = concurrentNativePointerList_.begin();
        while (newIter != concurrentNativePointerList_.end()) {
            JSNativePointer* object = *newIter;
            auto fwd = visitor(reinterpret_cast<TaggedObject*>(object));
            if (fwd == nullptr) {
                nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
                concurrentNativeCallbacks.emplace_back(object->GetDeleter(),
                    std::make_tuple(thread_->GetEnv(), object->GetExternalPointer(), object->GetData()));
                SwapBackAndPop(concurrentNativePointerList_, newIter);
                continue;
            }
            IncreaseNativeBindingSize(JSNativePointer::Cast(fwd));
            if (fwd != reinterpret_cast<TaggedObject*>(object)) {
                *newIter = JSNativePointer::Cast(fwd);
            }
            ++newIter;
        }
        ShrinkWithFactor(concurrentNativePointerList_);
    }
}

void Heap::PushToNativePointerList(JSNativePointer* pointer, bool isConcurrent)
{
    ASSERT(!JSTaggedValue(pointer).IsInSharedHeap());
    if (isConcurrent) {
        concurrentNativePointerList_.emplace_back(pointer);
    } else {
        nativePointerList_.emplace_back(pointer);
    }
}

void Heap::RemoveFromNativePointerList(const JSNativePointer* pointer)
{
    auto iter = std::find(nativePointerList_.begin(), nativePointerList_.end(), pointer);
    if (iter != nativePointerList_.end()) {
        JSNativePointer* object = *iter;
        nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
        object->Destroy(thread_);
        SwapBackAndPop(nativePointerList_, iter);
    }
    auto newIter = std::find(concurrentNativePointerList_.begin(), concurrentNativePointerList_.end(), pointer);
    if (newIter != concurrentNativePointerList_.end()) {
        JSNativePointer* object = *newIter;
        nativeAreaAllocator_->DecreaseNativeSizeStats(object->GetBindingSize(), object->GetNativeFlag());
        object->Destroy(thread_);
        SwapBackAndPop(concurrentNativePointerList_, newIter);
    }
}

void Heap::ClearNativePointerList()
{
    for (auto iter : nativePointerList_) {
        iter->Destroy(thread_);
    }
    for (auto iter : concurrentNativePointerList_) {
        iter->Destroy(thread_);
    }
    nativePointerList_.clear();
    // The concurrent list's entries were destroyed above as well, so clear it to avoid keeping dangling pointers.
    concurrentNativePointerList_.clear();
}

}  // namespace panda::ecmascript

#endif  // ECMASCRIPT_MEM_HEAP_INL_H