/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/concurrent_marker.h"

#include "ecmascript/mem/parallel_marker-inl.h"
#include "ecmascript/runtime_call_id.h"

namespace panda::ecmascript {
size_t ConcurrentMarker::taskCounts_ = 0;
Mutex ConcurrentMarker::taskCountMutex_;

ConcurrentMarker::ConcurrentMarker(Heap *heap, EnableConcurrentMarkType type)
    : heap_(heap),
      vm_(heap->GetEcmaVM()),
      thread_(vm_->GetJSThread()),
      workManager_(heap->GetWorkManager()),
      enableMarkType_(type)
{
    thread_->SetMarkStatus(MarkStatus::READY_TO_MARK);
}
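// If a DISABLE request arrives while a concurrent mark is already running, record it as
// REQUEST_DISABLE so the switch takes effect after the current cycle instead of mid-mark.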
void ConcurrentMarker::EnableConcurrentMarking(EnableConcurrentMarkType type)
{
    if (IsConfigDisabled()) {
        return;
    }
    if (IsEnabled() && !thread_->IsReadyToConcurrentMark() && type == EnableConcurrentMarkType::DISABLE) {
        enableMarkType_ = EnableConcurrentMarkType::REQUEST_DISABLE;
    } else {
        enableMarkType_ = type;
    }
}
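// Kick off a concurrent marking cycle: prepare marking state on the JS thread,
// then post the global-pool marking task to run in parallel with the mutator.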
void ConcurrentMarker::Mark()
{
    RecursionScope recurScope(this);
    TRACE_GC(GCStats::Scope::ScopeId::ConcurrentMark, heap_->GetEcmaVM()->GetEcmaGCStats());
    LOG_GC(DEBUG) << "ConcurrentMarker: Concurrent Marking Begin";
    ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "ConcurrentMarker::Mark");
    MEM_ALLOCATE_AND_GC_TRACE(vm_, ConcurrentMarking);
    InitializeMarking();
    clockScope_.Reset();
    heap_->PostParallelGCTask(ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK);
}

void ConcurrentMarker::Finish()
{
    workManager_->Finish();
}
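// Remark pass on the main thread: re-scan the roots that may have changed while marking
// ran concurrently, drain the remaining mark stack, and wait for outstanding marking tasks.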
void ConcurrentMarker::ReMark()
{
    TRACE_GC(GCStats::Scope::ScopeId::ReMark, heap_->GetEcmaVM()->GetEcmaGCStats());
    LOG_GC(DEBUG) << "ConcurrentMarker: Remarking Begin";
    MEM_ALLOCATE_AND_GC_TRACE(vm_, ReMarking);
    Marker *nonMovableMarker = heap_->GetNonMovableMarker();
    nonMovableMarker->MarkRoots(MAIN_THREAD_INDEX);
    nonMovableMarker->ProcessMarkStack(MAIN_THREAD_INDEX);
    heap_->WaitRunningTaskFinished();
    // MarkJitCodeMap must be called after all other marking work has finished,
    // so that it can tell which JSError objects are still alive.
    nonMovableMarker->MarkJitCodeMap(MAIN_THREAD_INDEX);
}

void ConcurrentMarker::HandleMarkingFinished(GCReason gcReason) // called on the JS thread after marking finishes; triggers the GC that sweeps
{
    LockHolder lock(waitMarkingFinishedMutex_);
    ASSERT(markingFinished_);
    TriggerGCType gcType;
    if (heap_->IsConcurrentFullMark()) {
        gcType = TriggerGCType::OLD_GC;
    } else if (heap_->IsEdenMark()) {
        gcType = TriggerGCType::EDEN_GC;
    } else {
        gcType = TriggerGCType::YOUNG_GC;
    }
    heap_->CollectGarbage(gcType, gcReason);
}

void ConcurrentMarker::WaitMarkingFinished() // called on the EcmaVM (JS) thread; blocks until marking has finished
{
    LockHolder lock(waitMarkingFinishedMutex_);
    while (!markingFinished_) {
        waitMarkingFinishedCV_.Wait(&waitMarkingFinishedMutex_);
    }
}
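// Restore the marker to its idle state. When revertCSet is set, the collection set selected
// for a full mark is reverted and the affected regions' mark bitsets, cross-region remembered
// sets and alive-object counters are cleared.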
void ConcurrentMarker::Reset(bool revertCSet)
{
    ASSERT(runningTaskCount_ == 0);
    Finish();
    thread_->SetMarkStatus(MarkStatus::READY_TO_MARK);
    isConcurrentMarking_ = false;
    markingFinished_ = false;
    notifyMarkingFinished_ = false;
    if (revertCSet) {
        // Partial GC clears the CSet when the evacuation allocator finalizes
        heap_->GetOldSpace()->RevertCSet();
        auto callback = [](Region *region) {
            region->ResetRegionTypeFlag();
            region->ClearMarkGCBitset();
            region->ClearCrossRegionRSet();
            region->ResetAliveObject();
        };
        if (heap_->IsConcurrentFullMark()) {
            heap_->EnumerateRegions(callback);
        } else {
            heap_->EnumerateNewSpaceRegions(callback);
        }
    }
}
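// Prepare the heap for a marking cycle: record the heap object size for the chosen mark type,
// select the collection set for a full mark, set up the work manager, process the relevant
// remembered sets, and mark the roots.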
void ConcurrentMarker::InitializeMarking()
{
    ASSERT(runningTaskCount_ == 0);
    MEM_ALLOCATE_AND_GC_TRACE(vm_, ConcurrentMarkingInitialize);
    heap_->Prepare();
    ASSERT(VerifyAllRegionsNonFresh());
    heap_->GetNewSpace()->RecordCurrentRegionAsHalfFresh();
    isConcurrentMarking_ = true;
    thread_->SetMarkStatus(MarkStatus::MARKING);

    if (heap_->IsConcurrentFullMark()) {
        heapObjectSize_ = heap_->GetHeapObjectSize();
        heap_->GetOldSpace()->SelectCSet();
        heap_->GetAppSpawnSpace()->EnumerateRegions([](Region *current) {
            current->ClearMarkGCBitset();
            current->ClearCrossRegionRSet();
        });
        // The alive object size of Region in OldSpace will be recalculated.
        heap_->EnumerateNonNewSpaceRegions([](Region *current) {
            current->ResetAliveObject();
        });
    } else if (heap_->IsEdenMark()) {
        heapObjectSize_ = heap_->GetEdenSpace()->GetHeapObjectSize();
    } else {
        heapObjectSize_ = heap_->GetNewSpace()->GetHeapObjectSize();
    }
    workManager_->Initialize(TriggerGCType::OLD_GC, ParallelGCTaskPhase::CONCURRENT_HANDLE_GLOBAL_POOL_TASK);
    if (heap_->IsYoungMark()) {
        {
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::MarkOldToNew");
            heap_->GetNonMovableMarker()->ProcessOldToNewNoMarkStack(MAIN_THREAD_INDEX);
        }
        heap_->GetNonMovableMarker()->ProcessSnapshotRSetNoMarkStack(MAIN_THREAD_INDEX);
    } else if (heap_->IsEdenMark()) {
        {
            ECMA_BYTRACE_NAME(HITRACE_TAG_ARK, "GC::MarkNewToEden");
            heap_->GetNonMovableMarker()->ProcessOldToNewNoMarkStack(MAIN_THREAD_INDEX);
            heap_->GetNonMovableMarker()->ProcessNewToEdenNoMarkStack(MAIN_THREAD_INDEX);
        }
        heap_->GetNonMovableMarker()->ProcessSnapshotRSetNoMarkStack(MAIN_THREAD_INDEX);
    }
    heap_->GetNonMovableMarker()->MarkRoots(MAIN_THREAD_INDEX);
}
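// Returns true for exactly one caller: the last marking task to finish, and only the first
// time it observes that state, so FinishMarking runs once.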
bool ConcurrentMarker::ShouldNotifyMarkingFinished()
{
    if (runningTaskCount_.fetch_sub(1, std::memory_order_relaxed) != 1) {
        return false;
    }
    return reinterpret_cast<std::atomic<bool>*>(&notifyMarkingFinished_)
        ->exchange(true, std::memory_order_relaxed) == false;
}
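// Runs once after all marking tasks have finished: record the marking duration and final heap
// object size, flip the mark status, and wake the JS thread blocked in WaitMarkingFinished.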
void ConcurrentMarker::FinishMarking()
{
    LockHolder lock(waitMarkingFinishedMutex_);
    ASSERT(!markingFinished_);
    ASSERT(notifyMarkingFinished_);
    float spendTime = clockScope_.TotalSpentTime();
    if (heap_->IsYoungMark()) {
        heapObjectSize_ = heap_->GetNewSpace()->GetHeapObjectSize();
    } else if (heap_->IsConcurrentFullMark()) {
        heapObjectSize_ = heap_->GetHeapObjectSize();
    } else if (heap_->IsEdenMark()) {
        heapObjectSize_ = heap_->GetEdenSpace()->GetHeapObjectSize();
    }
    SetDuration(spendTime);
    if (heap_->IsFullMarkRequested()) {
        heap_->SetFullMarkRequestedState(false);
    }
    thread_->SetMarkStatus(MarkStatus::MARK_FINISHED);
    thread_->SetCheckSafePointStatus();
    markingFinished_ = true;
    waitMarkingFinishedCV_.Signal();
    DecreaseTaskCounts();
}
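// Body of a concurrent marking task: drain the mark stack on this worker thread, and let the
// last task to finish complete the cycle and request the mark-finished handling.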
void ConcurrentMarker::ProcessConcurrentMarkTask(uint32_t threadId)
{
    runningTaskCount_.fetch_add(1, std::memory_order_relaxed);
    heap_->GetNonMovableMarker()->ProcessMarkStack(threadId);
    if (ShouldNotifyMarkingFinished()) {
        FinishMarking();
        heap_->GetIdleGCTrigger()->TryPostHandleMarkFinished();
    }
}
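// Debug helper used in an ASSERT: checks that no region is fresh or half-fresh before marking starts.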
bool ConcurrentMarker::VerifyAllRegionsNonFresh()
{
    bool ok = true;
    heap_->EnumerateRegions([&ok](Region *region) {
        ok &= !region->IsFreshRegion() && !region->IsHalfFreshRegion();
    });
    return ok;
}
} // namespace panda::ecmascript