/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/parallel_evacuator-inl.h"

#include "ecmascript/mem/tlab_allocator-inl.h"
#include "ecmascript/runtime_call_id.h"

namespace panda::ecmascript {
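// Prepares evacuation: records the new-space water line, releases or swaps the allocation space
// depending on the GC kind, and creates the TLAB allocator used to copy surviving objects.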
void ParallelEvacuator::Initialize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorInitialize);
    waterLine_ = heap_->GetNewSpace()->GetWaterLine();
    if (heap_->IsEdenMark()) {
        heap_->ReleaseEdenAllocator();
    } else {
        ASSERT(heap_->IsYoungMark() || heap_->IsFullMark());
        heap_->SwapNewSpace();
    }
    allocator_ = new TlabAllocator(heap_);
    promotedSize_ = 0;
    edenToYoungSize_ = 0;
}

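// Releases the TLAB allocator and clears both workload sets once evacuation and reference
// updating are complete.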
void ParallelEvacuator::Finalize()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuatorFinalize);
    delete allocator_;
    evacuateWorkloadSet_.Clear();
    updateWorkloadSet_.Clear();
}

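// Entry point of the evacuation phase: initialize, copy live objects, fix up references, then clean up.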
void ParallelEvacuator::Evacuate()
{
    Initialize();
    EvacuateSpace();
    UpdateReference();
    Finalize();
}

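// Re-reads the weak track-info references collected per worker thread and, for entries that
// survived evacuation, marks their track space as OLD_SPACE in the PGO profiler.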
void ParallelEvacuator::UpdateTrackInfo()
{
    for (uint32_t i = 0; i <= MAX_TASKPOOL_THREAD_NUM; i++) {
        auto &trackInfoSet = ArrayTrackInfoSet(i);
        for (auto &each : trackInfoSet) {
            auto trackInfoVal = JSTaggedValue(each);
            if (!trackInfoVal.IsHeapObject() || !trackInfoVal.IsWeak()) {
                continue;
            }
            auto trackInfo = trackInfoVal.GetWeakReferentUnChecked();
            trackInfo = UpdateAddressAfterEvacation(trackInfo);
            if (trackInfo) {
                heap_->GetEcmaVM()->GetPGOProfiler()->UpdateTrackSpaceFlag(trackInfo, RegionSpaceFlag::IN_OLD_SPACE);
            }
        }
        trackInfoSet.clear();
    }
}

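// Collects the regions to evacuate (eden, from-space and the old-space collect set, depending on
// the GC kind), posts evacuation tasks to the task pool when parallel GC is enabled, and runs one
// share of the work on the main thread before waiting for the helpers to finish.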
void ParallelEvacuator::EvacuateSpace()
{
    TRACE_GC(GCStats::Scope::ScopeId::EvacuateSpace, heap_->GetEcmaVM()->GetEcmaGCStats());
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::EvacuateSpace");
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelEvacuator);
    auto &workloadSet = evacuateWorkloadSet_;
    if (heap_->IsEdenMark()) {
        heap_->GetEdenSpace()->EnumerateRegions([this, &workloadSet](Region *current) {
            workloadSet.Add(std::make_unique<EvacuateWorkload>(this, current));
        });
    } else if (heap_->IsConcurrentFullMark() || heap_->IsYoungMark()) {
        heap_->GetEdenSpace()->EnumerateRegions([this, &workloadSet](Region *current) {
            workloadSet.Add(std::make_unique<EvacuateWorkload>(this, current));
        });
        heap_->GetFromSpaceDuringEvacuation()->EnumerateRegions([this, &workloadSet](Region *current) {
            workloadSet.Add(std::make_unique<EvacuateWorkload>(this, current));
        });
        heap_->GetOldSpace()->EnumerateCollectRegionSet([this, &workloadSet](Region *current) {
            workloadSet.Add(std::make_unique<EvacuateWorkload>(this, current));
        });
    }
    workloadSet.PrepareWorkloads();
    if (heap_->IsParallelGCEnabled()) {
        LockHolder holder(mutex_);
        parallel_ = CalculateEvacuationThreadNum();
        ASSERT(parallel_ >= 0);
        evacuateTaskNum_ = static_cast<uint32_t>(parallel_);
        for (uint32_t i = 1; i <= evacuateTaskNum_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<EvacuationTask>(heap_->GetJSThread()->GetThreadId(), i, this));
        }
    } else {
        evacuateTaskNum_ = 0;
    }
    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::EvacuateRegion, heap_->GetEcmaVM()->GetEcmaGCStats());
        EvacuateSpace(allocator_, MAIN_THREAD_INDEX, 0, true);
    }

    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::WaitFinish, heap_->GetEcmaVM()->GetEcmaGCStats());
        WaitFinished();
    }

    if (heap_->GetJSThread()->IsPGOProfilerEnable()) {
        UpdateTrackInfo();
    }
}

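// Per-thread evacuation body: first drains this worker's share of the recorded weak references,
// then evacuates regions pulled from the workload set. Helper threads decrement parallel_ and
// signal the main thread when they are the last to finish.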
bool ParallelEvacuator::EvacuateSpace(TlabAllocator *allocator, uint32_t threadIndex, uint32_t idOrder, bool isMain)
{
    UpdateRecordWeakReferenceInParallel(idOrder);

    auto &arrayTrackInfoSet = ArrayTrackInfoSet(threadIndex);
    DrainWorkloads(evacuateWorkloadSet_, [&](std::unique_ptr<Workload> &region) {
        EvacuateRegion(allocator, region->GetRegion(), arrayTrackInfoSet);
    });
    allocator->Finalize();
    if (!isMain) {
        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

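// Processes a strided share of the weak-reference queues (indices idOrder, idOrder + taskNum + 1, ...)
// and clears slots whose referents are dead non-young, non-shared objects during a full mark.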
void ParallelEvacuator::UpdateRecordWeakReferenceInParallel(uint32_t idOrder)
{
    auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
    for (uint32_t i = idOrder; i < totalThreadCount; i += (evacuateTaskNum_ + 1)) {
        ProcessQueue *queue = heap_->GetWorkManager()->GetWeakReferenceQueue(i);
        while (true) {
            auto obj = queue->PopBack();
            if (UNLIKELY(obj == nullptr)) {
                break;
            }
            ObjectSlot slot(ToUintPtr(obj));
            JSTaggedType value = slot.GetTaggedType();
            if (JSTaggedValue(value).IsWeak()) {
                ASSERT(heap_->IsConcurrentFullMark());
                Region *objectRegion = Region::ObjectAddressToRange(value);
                if (!objectRegion->InGeneralNewSpaceOrCSet() && !objectRegion->InSharedHeap() &&
                        (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(value))) {
                    slot.Clear();
                }
            }
        }
    }
}

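// Copies every marked object in a region to its destination: objects below the age mark or already
// in old space are promoted, the rest go to semi space (falling back to old space when the
// semi-space TLAB is exhausted). Regions that can be moved wholesale are handled by
// WholeRegionEvacuate. A forwarding address is installed in the mark word and the relevant
// remembered sets are rebuilt for the new location.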
void ParallelEvacuator::EvacuateRegion(TlabAllocator *allocator, Region *region,
                                       std::unordered_set<JSTaggedType> &trackSet)
{
    bool isInEden = region->InEdenSpace();
    bool isInOldGen = region->InOldSpace();
    bool isBelowAgeMark = region->BelowAgeMark();
    bool pgoEnabled = heap_->GetJSThread()->IsPGOProfilerEnable();
    bool inHeapProfiler = heap_->InHeapProfiler();
    size_t promotedSize = 0;
    size_t edenToYoungSize = 0;
    if (WholeRegionEvacuate(region)) {
        return;
    }
    region->IterateAllMarkedBits([this, &region, &isInOldGen, &isBelowAgeMark, isInEden, &pgoEnabled,
                                  &promotedSize, &allocator, &trackSet, &edenToYoungSize, inHeapProfiler](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        auto klass = header->GetClass();
        auto size = klass->SizeFromJSHClass(header);

        uintptr_t address = 0;
        bool actualPromoted = false;
        bool hasAgeMark = isBelowAgeMark || (region->HasAgeMark() && ToUintPtr(mem) < waterLine_);
        if (hasAgeMark) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
            promotedSize += size;
        } else if (isInOldGen) {
            address = allocator->Allocate(size, OLD_SPACE);
            actualPromoted = true;
        } else {
            address = allocator->Allocate(size, SEMI_SPACE);
            if (address == 0) {
                address = allocator->Allocate(size, OLD_SPACE);
                actualPromoted = true;
                promotedSize += size;
            } else if (isInEden) {
                edenToYoungSize += size;
            }
        }
        LOG_ECMA_IF(address == 0, FATAL) << "Evacuate object failed:" << size;

        if (memcpy_s(ToVoidPtr(address), size, ToVoidPtr(ToUintPtr(mem)), size) != EOK) { // LOCV_EXCL_BR_LINE
            LOG_FULL(FATAL) << "memcpy_s failed";
        }
        if (inHeapProfiler) {
            heap_->OnMoveEvent(reinterpret_cast<uintptr_t>(mem), reinterpret_cast<TaggedObject *>(address), size);
        }
        if (pgoEnabled) {
            if (actualPromoted && klass->IsJSArray()) {
                auto trackInfo = JSArray::Cast(header)->GetTrackInfo();
                trackSet.emplace(trackInfo.GetRawData());
            }
        }
        Barriers::SetPrimitive(header, 0, MarkWord::FromForwardingAddress(address));

        if (actualPromoted) {
            SetObjectFieldRSet<false>(reinterpret_cast<TaggedObject *>(address), klass);
        } else if (isInEden) {
            SetObjectFieldRSet<true>(reinterpret_cast<TaggedObject *>(address), klass);
        } else if (region->HasLocalToShareRememberedSet()) {
            UpdateLocalToShareRSet(reinterpret_cast<TaggedObject *>(address), klass);
        }
    });
    promotedSize_.fetch_add(promotedSize);
    edenToYoungSize_.fetch_add(edenToYoungSize);
}

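// Builds the reference-update workload set (new-space regions plus old/snapshot-space remembered
// sets), optionally fans it out to the task pool, then updates roots and weak references on the
// main thread before processing its own share of the workloads.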
void ParallelEvacuator::UpdateReference()
{
    TRACE_GC(GCStats::Scope::ScopeId::UpdateReference, heap_->GetEcmaVM()->GetEcmaGCStats());
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), ParallelUpdateReference);
    // Update reference pointers
    uint32_t youngRegionMoveCount = 0;
    uint32_t youngRegionCopyCount = 0;
    uint32_t oldRegionCount = 0;
    auto &workloadSet = updateWorkloadSet_;
    if (heap_->IsEdenMark()) {
        heap_->GetNewSpace()->EnumerateRegions([&]([[maybe_unused]] Region *current) {
            workloadSet.Add(
                std::make_unique<UpdateNewToEdenRSetWorkload>(this, current));
        });
    } else {
        heap_->GetNewSpace()->EnumerateRegions([&](Region *current) {
            if (current->InNewToNewSet()) {
                workloadSet.Add(
                    std::make_unique<UpdateAndSweepNewRegionWorkload>(
                        this, current, heap_->IsYoungMark()));
                youngRegionMoveCount++;
            } else {
                workloadSet.Add(
                    std::make_unique<UpdateNewRegionWorkload>(this, current, heap_->IsYoungMark()));
                youngRegionCopyCount++;
            }
        });
    }
    heap_->EnumerateOldSpaceRegions([this, &oldRegionCount, &workloadSet](Region *current) {
        if (current->InCollectSet()) {
            return;
        }
        workloadSet.Add(std::make_unique<UpdateRSetWorkload>(this, current, heap_->IsEdenMark()));
        oldRegionCount++;
    });
    heap_->EnumerateSnapshotSpaceRegions([this, &workloadSet](Region *current) {
        workloadSet.Add(std::make_unique<UpdateRSetWorkload>(this, current, heap_->IsEdenMark()));
    });
    workloadSet.PrepareWorkloads();
    LOG_GC(DEBUG) << "UpdatePointers statistic: young space region compact moving count:"
                        << youngRegionMoveCount
                        << " young space region compact copying count:" << youngRegionCopyCount
                        << " old space region count:" << oldRegionCount;

    if (heap_->IsParallelGCEnabled()) {
        LockHolder holder(mutex_);
        parallel_ = CalculateUpdateThreadNum();
        for (int i = 0; i < parallel_; i++) {
            Taskpool::GetCurrentTaskpool()->PostTask(
                std::make_unique<UpdateReferenceTask>(heap_->GetJSThread()->GetThreadId(), this));
        }
    }
    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::UpdateRoot, heap_->GetEcmaVM()->GetEcmaGCStats());
        UpdateRoot();
    }

    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::UpdateWeekRef, heap_->GetEcmaVM()->GetEcmaGCStats());
        if (heap_->IsEdenMark()) {
            UpdateWeakReferenceOpt<TriggerGCType::EDEN_GC>();
        } else if (heap_->IsYoungMark()) {
            UpdateWeakReferenceOpt<TriggerGCType::YOUNG_GC>();
        } else {
            UpdateWeakReferenceOpt<TriggerGCType::OLD_GC>();
        }
    }
    {
        GCStats::Scope sp2(GCStats::Scope::ScopeId::ProceeWorkload, heap_->GetEcmaVM()->GetEcmaGCStats());
        ProcessWorkloads(true);
    }
    WaitFinished();
}

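// Visits all VM roots and rewrites slots that point at forwarded objects; derived pointers are
// rebased on their (possibly moved) base object.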
void ParallelEvacuator::UpdateRoot()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateRoot);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateRoot");
    RootVisitor gcUpdateYoung = [this]([[maybe_unused]] Root type, ObjectSlot slot) {
        UpdateObjectSlot(slot);
    };
    RootRangeVisitor gcUpdateRangeYoung = [this]([[maybe_unused]] Root type, ObjectSlot start, ObjectSlot end) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            UpdateObjectSlot(slot);
        }
    };
    RootBaseAndDerivedVisitor gcUpdateDerived =
        []([[maybe_unused]] Root type, ObjectSlot base, ObjectSlot derived, uintptr_t baseOldObject) {
        if (JSTaggedValue(base.GetTaggedType()).IsHeapObject()) {
            derived.Update(base.GetTaggedType() + derived.GetTaggedType() - baseOldObject);
        }
    };

    ObjectXRay::VisitVMRoots(heap_->GetEcmaVM(), gcUpdateYoung, gcUpdateRangeYoung, gcUpdateDerived,
                             VMRootVisitType::UPDATE_ROOT);
}

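// Drains every weak-reference queue and passes each weak slot to UpdateWeakObjectSlot so the
// referent can be forwarded or the slot cleared.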
void ParallelEvacuator::UpdateRecordWeakReference()
{
    auto totalThreadCount = Taskpool::GetCurrentTaskpool()->GetTotalThreadNum() + 1;
    for (uint32_t i = 0; i < totalThreadCount; i++) {
        ProcessQueue *queue = heap_->GetWorkManager()->GetWeakReferenceQueue(i);

        while (true) {
            auto obj = queue->PopBack();
            if (UNLIKELY(obj == nullptr)) {
                break;
            }
            ObjectSlot slot(ToUintPtr(obj));
            JSTaggedValue value(slot.GetTaggedType());
            if (value.IsWeak()) {
                UpdateWeakObjectSlot(value.GetTaggedWeakRef(), slot);
            }
        }
    }
}

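// Non-templated variant of weak-reference fixing: resolves forwarding addresses for moved objects,
// clears references to dead objects, and keeps shared-heap referents alive unconditionally.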
void ParallelEvacuator::UpdateWeakReference()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReference);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateWeakReference");
    UpdateRecordWeakReference();
    bool isFullMark = heap_->IsConcurrentFullMark();
    bool isEdenMark = heap_->IsEdenMark();
    WeakRootVisitor gcUpdateWeak = [isFullMark, isEdenMark](TaggedObject *header) -> TaggedObject* {
        Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(header));
        if (UNLIKELY(objectRegion == nullptr)) {
            LOG_GC(ERROR) << "PartialGC updateWeakReference: region is nullptr, header is " << header;
            return nullptr;
        }
        // The weak object in shared heap is always alive during partialGC.
        if (objectRegion->InSharedHeap()) {
            return header;
        }
        if (isEdenMark) {
            if (!objectRegion->InEdenSpace()) {
                return header;
            }
            MarkWord markWord(header);
            if (markWord.IsForwardingAddress()) {
                return markWord.ToForwardingAddress();
            }
            return nullptr;
        }
        if (objectRegion->InGeneralNewSpaceOrCSet()) {
            if (objectRegion->InNewToNewSet()) {
                if (objectRegion->Test(header)) {
                    return header;
                }
            } else {
                MarkWord markWord(header);
                if (markWord.IsForwardingAddress()) {
                    return markWord.ToForwardingAddress();
                }
            }
            return nullptr;
        }
        if (isFullMark) {
            if (objectRegion->GetMarkGCBitset() == nullptr || !objectRegion->Test(header)) {
                return nullptr;
            }
        }
        return header;
    };

    heap_->GetEcmaVM()->GetJSThread()->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
    heap_->GetEcmaVM()->ProcessReferences(gcUpdateWeak);
    heap_->GetEcmaVM()->GetJSThread()->UpdateJitCodeMapReference(gcUpdateWeak);
}

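// Optimized, GC-type-specialized variant of UpdateWeakReference: the space checks are resolved at
// compile time via the gcType template parameter.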
template<TriggerGCType gcType>
void ParallelEvacuator::UpdateWeakReferenceOpt()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), UpdateWeakReference);
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::UpdateWeakReference");
    WeakRootVisitor gcUpdateWeak = [](TaggedObject *header) -> TaggedObject* {
        Region *objectRegion = Region::ObjectAddressToRange(reinterpret_cast<TaggedObject *>(header));
        ASSERT(objectRegion != nullptr);
        if constexpr (gcType == TriggerGCType::EDEN_GC) {
            if (!objectRegion->InEdenSpace()) {
                return header;
            }
            MarkWord markWord(header);
            if (markWord.IsForwardingAddress()) {
                return markWord.ToForwardingAddress();
            }
            return nullptr;
        } else if constexpr (gcType == TriggerGCType::YOUNG_GC) {
            if (!objectRegion->InGeneralNewSpace()) {
                return header;
            }
        } else if constexpr (gcType == TriggerGCType::OLD_GC) {
            if (!objectRegion->InGeneralNewSpaceOrCSet()) {
                if (!objectRegion->InSharedHeap() && (objectRegion->GetMarkGCBitset() == nullptr ||
                                                      !objectRegion->Test(header))) {
                    return nullptr;
                }
                return header;
            }
        } else { // LOCV_EXCL_BR_LINE
            LOG_GC(FATAL) << "WeakRootVisitor: not support gcType yet";
            UNREACHABLE();
        }
        if (objectRegion->InNewToNewSet()) {
            if (objectRegion->Test(header)) {
                return header;
            }
        } else {
            MarkWord markWord(header);
            if (markWord.IsForwardingAddress()) {
                return markWord.ToForwardingAddress();
            }
        }
        return nullptr;
    };

    heap_->GetEcmaVM()->GetJSThread()->IterateWeakEcmaGlobalStorage(gcUpdateWeak);
    heap_->GetEcmaVM()->ProcessReferences(gcUpdateWeak);
    heap_->GetEcmaVM()->GetJSThread()->UpdateJitCodeMapReference(gcUpdateWeak);
}

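// Fixes up an old-space or snapshot-space region's remembered sets: old-to-new (and, outside young
// GC, cross-region) slots are updated to the objects' new addresses; regions still being swept are
// handled via the merged or atomic remembered-set paths.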
template<bool IsEdenGC>
void ParallelEvacuator::UpdateRSet(Region *region)
{
    auto cb = [this](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        return UpdateOldToNewObjectSlot<IsEdenGC>(slot);
    };

    if (heap_->GetSweeper()->IsSweeping()) {
        if (region->IsGCFlagSet(RegionGCFlags::HAS_BEEN_SWEPT)) {
            // The region is safe to touch while the remembered set is being updated.
            region->MergeOldToNewRSetForCS();
            region->MergeLocalToShareRSetForCS();
        } else {
            region->AtomicIterateAllSweepingRSetBits(cb);
        }
    }
    region->IterateAllOldToNewBits(cb);
    if (heap_->IsYoungMark()) {
        return;
    }
    if constexpr (IsEdenGC) {
        region->IterateAllCrossRegionBits([this](void *mem) {
            ObjectSlot slot(ToUintPtr(mem));
            UpdateObjectSlot(slot);
        });
    } else {
        region->IterateAllCrossRegionBits([this](void *mem) {
            ObjectSlot slot(ToUintPtr(mem));
            JSTaggedType value = slot.GetTaggedType();
            if (JSTaggedValue(value).IsHeapObject() && Region::ObjectAddressToRange(value)->InCollectSet()) {
                UpdateObjectSlotOpt<TriggerGCType::OLD_GC>(slot);
            }
        });
    }
    region->DeleteCrossRegionRSet();
}

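// Updates the new-to-eden remembered set of a young-space region after an eden GC and then drops
// the set, since eden has been fully evacuated.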
void ParallelEvacuator::UpdateNewToEdenRSetReference(Region *region)
{
    auto cb = [this](void *mem) -> bool {
        ObjectSlot slot(ToUintPtr(mem));
        return UpdateNewToEdenObjectSlot(slot);
    };
    region->IterateAllNewToEdenBits(cb);
    region->ClearNewToEdenRSet();
}

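// Walks every object in a copied new-space region (skipping free chunks) and updates its reference
// fields, stopping at the allocation top when the region is the currently allocating one.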
template<TriggerGCType gcType>
void ParallelEvacuator::UpdateNewRegionReference(Region *region)
{
    Region *current = heap_->GetNewSpace()->GetCurrentRegion();
    auto curPtr = region->GetBegin();
    uintptr_t endPtr = 0;
    if (region == current) {
        auto top = heap_->GetNewSpace()->GetTop();
        endPtr = curPtr + region->GetAllocatedBytes(top);
    } else {
        endPtr = curPtr + region->GetAllocatedBytes();
    }

    size_t objSize = 0;
    while (curPtr < endPtr) {
        auto freeObject = FreeObject::Cast(curPtr);
        // If curPtr points to a free object, it must be unpoisoned before it is inspected.
        ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void *>(freeObject), TaggedObject::TaggedObjectSize());
        if (!freeObject->IsFreeObject()) {
            auto obj = reinterpret_cast<TaggedObject *>(curPtr);
            auto klass = obj->GetClass();
            UpdateNewObjectField<gcType>(obj, klass);
            objSize = klass->SizeFromJSHClass(obj);
        } else {
            freeObject->AsanUnPoisonFreeObject();
            objSize = freeObject->Available();
            freeObject->AsanPoisonFreeObject();
        }
        curPtr += objSize;
        CHECK_OBJECT_SIZE(objSize);
    }
    CHECK_REGION_END(curPtr, endPtr);
}

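// For new-to-new regions that are updated in place: fixes the fields of every marked object and
// simultaneously sweeps the gaps between them into free objects, clearing the matching
// local-to-share remembered-set ranges.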
template<TriggerGCType gcType>
void ParallelEvacuator::UpdateAndSweepNewRegionReference(Region *region)
{
    uintptr_t freeStart = region->GetBegin();
    uintptr_t freeEnd = freeStart + region->GetAllocatedBytes();
    region->IterateAllMarkedBits([&](void *mem) {
        ASSERT(region->InRange(ToUintPtr(mem)));
        auto header = reinterpret_cast<TaggedObject *>(mem);
        JSHClass *klass = header->GetClass();
        UpdateNewObjectField<gcType>(header, klass);

        // The current object's address ends the preceding free gap; this shadows the outer freeEnd.
        uintptr_t freeEnd = ToUintPtr(mem);
        if (freeStart != freeEnd) {
            size_t freeSize = freeEnd - freeStart;
            FreeObject::FillFreeObject(heap_, freeStart, freeSize);
            region->ClearLocalToShareRSetInRange(freeStart, freeEnd);
        }

        freeStart = freeEnd + klass->SizeFromJSHClass(header);
    });
    CHECK_REGION_END(freeStart, freeEnd);
    if (freeStart < freeEnd) {
        FreeObject::FillFreeObject(heap_, freeStart, freeEnd - freeStart);
        region->ClearLocalToShareRSetInRange(freeStart, freeEnd);
    }
}

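// Visits the body of a single object with the OLD_GC visitor and updates each reference slot via
// the GC-type-specialized slot updater.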
template<TriggerGCType gcType>
void ParallelEvacuator::UpdateNewObjectField(TaggedObject *object, JSHClass *cls)
{
    ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(object, cls,
        [this](TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area) {
            if (area == VisitObjectArea::IN_OBJECT) {
                if (VisitBodyInObj(root, start, end,
                                   [&](ObjectSlot slot) { UpdateObjectSlotOpt<gcType>(slot); })) {
                    return;
                }
            }
            for (ObjectSlot slot = start; slot < end; slot++) {
                UpdateObjectSlotOpt<gcType>(slot);
            }
        });
}

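// Blocks the main thread until every helper task has decremented parallel_ to zero.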
void ParallelEvacuator::WaitFinished()
{
    MEM_ALLOCATE_AND_GC_TRACE(heap_->GetEcmaVM(), WaitUpdateFinished);
    if (parallel_ > 0) {
        LockHolder holder(mutex_);
        while (parallel_ > 0) {
            condition_.Wait(&mutex_);
        }
    }
}

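// Drains the reference-update workload set on the calling thread; helper threads additionally
// decrement parallel_ and wake the main thread when they finish last.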
bool ParallelEvacuator::ProcessWorkloads(bool isMain)
{
    DrainWorkloads(updateWorkloadSet_, [&](std::unique_ptr<Workload> &region) {
        region->Process(isMain);
    });
    if (!isMain) {
        LockHolder holder(mutex_);
        if (--parallel_ <= 0) {
            condition_.SignalAll();
        }
    }
    return true;
}

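// Work-stealing style draining: each pass grabs a starting index from the shared index list and
// claims consecutive workloads from there until one has already been taken, repeating until the
// remaining-workload counter reaches zero.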
template <typename WorkloadCallback>
void ParallelEvacuator::DrainWorkloads(WorkloadSet &workloadSet, WorkloadCallback callback)
{
    std::unique_ptr<Workload> region;
    while (workloadSet.HasRemaningWorkload()) {
        std::optional<size_t> index = workloadSet.GetNextIndex();
        if (!index.has_value()) {
            return;
        }
        size_t count = workloadSet.GetWorkloadCount();
        size_t finishedCount = 0;
        for (size_t i = index.value(); i < count; i++) {
            region = workloadSet.TryGetWorkload(i);
            if (region == nullptr) {
                break;
            }
            callback(region);
            finishedCount++;
        }
        if (finishedCount && workloadSet.FetchSubAndCheckWorkloadCount(finishedCount)) {
            return;
        }
    }
}

void ParallelEvacuator::WorkloadSet::PrepareWorkloads()
{
    size_t size = workloads_.size();
    remainingWorkloadNum_.store(size, std::memory_order_relaxed);
    /*
     * Construct indexList_, which holds the starting indices used by threads to acquire workloads.
     * The construction starts with the interval [0, size) and recursively selects midpoints as
     * starting indices for the subintervals. The first starting index is 0 so that no workload
     * is missed.
     */
    indexList_.reserve(size);
    indexList_.emplace_back(0);
    std::vector<std::pair<size_t, size_t>> pairList{{0, size}};
    pairList.reserve(size);
    while (!pairList.empty()) {
        auto [start, end] = pairList.back();
        pairList.pop_back();
        size_t mid = (start + end) >> 1;
        indexList_.emplace_back(mid);
        if (end - mid > 1U) {
            pairList.emplace_back(mid, end);
        }
        if (mid - start > 1U) {
            pairList.emplace_back(start, mid);
        }
    }
}

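// Atomically hands out the next starting index from indexList_, or nullopt once all starting
// points have been claimed.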
std::optional<size_t> ParallelEvacuator::WorkloadSet::GetNextIndex()
{
    size_t cursor = indexCursor_.fetch_add(1, std::memory_order_relaxed);
    if (cursor >= indexList_.size()) {
        return std::nullopt;
    }
    return indexList_[cursor];
}

std::unique_ptr<ParallelEvacuator::Workload> ParallelEvacuator::WorkloadSet::TryGetWorkload(size_t index)
{
    std::unique_ptr<Workload> workload;
    if (workloads_.at(index).first.TryAcquire()) {
        workload = std::move(workloads_[index].second);
    }
    return workload;
}

void ParallelEvacuator::WorkloadSet::Clear()
{
    workloads_.clear();
    indexList_.clear();
    indexCursor_.store(0, std::memory_order_relaxed);
    remainingWorkloadNum_.store(0, std::memory_order_relaxed);
}

ParallelEvacuator::EvacuationTask::EvacuationTask(int32_t id, uint32_t idOrder, ParallelEvacuator *evacuator)
    : Task(id), idOrder_(idOrder), evacuator_(evacuator)
{
    allocator_ = new TlabAllocator(evacuator->heap_);
}

ParallelEvacuator::EvacuationTask::~EvacuationTask()
{
    delete allocator_;
}

bool ParallelEvacuator::EvacuationTask::Run(uint32_t threadIndex)
{
    return evacuator_->EvacuateSpace(allocator_, threadIndex, idOrder_);
}

bool ParallelEvacuator::UpdateReferenceTask::Run([[maybe_unused]] uint32_t threadIndex)
{
    evacuator_->ProcessWorkloads(false);
    return true;
}

bool ParallelEvacuator::EvacuateWorkload::Process([[maybe_unused]] bool isMain)
{
    return true;
}

bool ParallelEvacuator::UpdateRSetWorkload::Process([[maybe_unused]] bool isMain)
{
    if (isEdenGC_) {
        GetEvacuator()->UpdateRSet<true>(GetRegion());
    } else {
        GetEvacuator()->UpdateRSet<false>(GetRegion());
    }
    return true;
}

bool ParallelEvacuator::UpdateNewToEdenRSetWorkload::Process([[maybe_unused]] bool isMain)
{
    GetEvacuator()->UpdateNewToEdenRSetReference(GetRegion());
    return true;
}

bool ParallelEvacuator::UpdateNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    if (isYoungGC_) {
        GetEvacuator()->UpdateNewRegionReference<TriggerGCType::YOUNG_GC>(GetRegion());
    } else {
        GetEvacuator()->UpdateNewRegionReference<TriggerGCType::OLD_GC>(GetRegion());
    }
    return true;
}

bool ParallelEvacuator::UpdateAndSweepNewRegionWorkload::Process([[maybe_unused]] bool isMain)
{
    if (isYoungGC_) {
        GetEvacuator()->UpdateAndSweepNewRegionReference<TriggerGCType::YOUNG_GC>(GetRegion());
    } else {
        GetEvacuator()->UpdateAndSweepNewRegionReference<TriggerGCType::OLD_GC>(GetRegion());
    }
    return true;
}
}  // namespace panda::ecmascript