/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/mem/work_manager.h"

#include "ecmascript/mem/incremental_marker.h"
#include "ecmascript/mem/tlab_allocator-inl.h"

namespace panda::ecmascript {
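// Pre-allocates the first work-node chunk so early AllocateWorkNode() calls
// can be served by simple bump allocation instead of the native allocator.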
WorkManagerBase::WorkManagerBase(NativeAreaAllocator *allocator)
    : spaceChunk_(allocator), workSpace_(0), spaceStart_(0), spaceEnd_(0)
{
    auto allocatedSpace = GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE);
    ASSERT(allocatedSpace != nullptr);
    workSpace_ = ToUintPtr(allocatedSpace);
}

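// Bump-allocates a WorkNode with its trailing Stack header and stack area
// from the current chunk. When the chunk cannot fit another node, it is
// retired to agedSpaces_ (reclaimed later, presumably in FinishBase()) and a
// fresh chunk is allocated. mtx_ serializes concurrent GC worker threads.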
WorkNode *WorkManagerBase::AllocateWorkNode()
{
    LockHolder lock(mtx_);
    size_t allocatedSize = sizeof(WorkNode) + sizeof(Stack) + STACK_AREA_SIZE;
    ASSERT(allocatedSize < WORKNODE_SPACE_SIZE);

    uintptr_t begin = spaceStart_;
    if (begin + allocatedSize >= spaceEnd_) {
        agedSpaces_.emplace_back(workSpace_);
        workSpace_ = ToUintPtr(GetSpaceChunk()->Allocate(WORKNODE_SPACE_SIZE));
        spaceStart_ = workSpace_;
        spaceEnd_ = workSpace_ + WORKNODE_SPACE_SIZE;
        begin = spaceStart_;
    }
    spaceStart_ = begin + allocatedSize;
    Stack *stack = reinterpret_cast<Stack *>(begin + sizeof(WorkNode));
    stack->ResetBegin(begin + sizeof(WorkNode) + sizeof(Stack), begin + allocatedSize);
    WorkNode *work = reinterpret_cast<WorkNode *>(begin);
    return new (work) WorkNode(stack);
}

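// Frees only the currently active chunk; chunks retired to agedSpaces_ are
// expected to have been released already when marking finished.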
WorkManagerBase::~WorkManagerBase()
{
    GetSpaceChunk()->Free(reinterpret_cast<void *>(workSpace_));
}

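// The continuous queues are created once per worker thread and live for the
// whole WorkManager lifetime; the remaining per-thread state is rebuilt in
// Initialize() before each GC.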
WorkManager::WorkManager(Heap *heap, uint32_t threadNum)
    : WorkManagerBase(heap->GetNativeAreaAllocator()), heap_(heap), threadNum_(threadNum),
      continuousQueue_ { nullptr }, parallelGCTaskPhase_(UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i) = new ProcessQueue();
    }
}

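// Finish() runs first so per-thread queues and allocators from any previous
// GC pass are torn down before the continuous queues are destroyed.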
WorkManager::~WorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i)->Destroy();
        delete continuousQueue_.at(i);
        continuousQueue_.at(i) = nullptr;
    }
}

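// Pushes a marked object onto the calling thread's in-node. When the node is
// full, it is published to the global work stack and the push is retried on
// the freshly allocated replacement node.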
bool WorkManager::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal(threadId);
        return inNode->PushObject(ToUintPtr(object));
    }
    return true;
}

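// Publishes a non-empty in-node to the global work stack and installs a new
// in-node. Optionally posts a parallel GC task so an idle worker can steal
// the node, unless a triggered incremental mark is currently in progress.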
void WorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->IsEmpty()) {
        workStack_.Push(inNode);
        inNode = AllocateWorkNode();
        if (postTask && heap_->IsParallelGCEnabled() && heap_->CheckCanDistributeTask() &&
            !(heap_->IsMarking() && heap_->GetIncrementalMarker()->IsTriggeredIncrementalMark())) {
            heap_->PostParallelGCTask(parallelGCTaskPhase_);
        }
    }
}

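// Pops an object for the calling thread in three stages: from its out-node,
// then by swapping the in-node into the out-node slot, and finally by
// stealing a node from the global work stack. Returns false only when all
// three sources are exhausted.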
bool WorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&outNode = works_.at(threadId).outNode_;
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode->IsEmpty()) {
            WorkNode *tmp = outNode;
            outNode = inNode;
            inNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

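// Steals one work node from the global stack into this thread's out-node.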
bool WorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return workStack_.Pop(&works_.at(threadId).outNode_);
}

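// Tears down the per-thread marking state after a GC pass and returns the
// alive object size accumulated across all worker threads.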
size_t WorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        if (holder.weakQueue_ != nullptr) {
            holder.weakQueue_->FinishMarking(continuousQueue_.at(i));
            delete holder.weakQueue_;
            holder.weakQueue_ = nullptr;
        }
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
        holder.pendingUpdateSlots_.clear();
        aliveSize += holder.aliveSize_;
    }
    FinishBase();
    initialized_.store(false, std::memory_order_release);
    return aliveSize;
}

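// Variant for GCs that also track promotion: accumulates per-thread promoted
// sizes on top of the plain Finish(). Note that Finish() has already
// finalized and cleared every allocator_, so the cleanup below is defensive.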
void WorkManager::Finish(size_t &aliveSize, size_t &promotedSize)
{
    aliveSize = Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        promotedSize += holder.promotedSize_;
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
    }
    initialized_.store(false, std::memory_order_release);
}

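// Rebuilds per-thread work nodes, weak-reference queues, and counters for
// the upcoming GC. A TlabAllocator is created for every trigger type except
// OLD_GC, which presumably does not allocate through per-thread buffers.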
void WorkManager::Initialize(TriggerGCType gcType, ParallelGCTaskPhase taskPhase)
{
    parallelGCTaskPhase_ = taskPhase;
    InitializeBase();
    for (uint32_t i = 0; i < threadNum_; i++) {
        WorkNodeHolder &holder = works_.at(i);
        holder.inNode_ = AllocateWorkNode();
        holder.outNode_ = AllocateWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(continuousQueue_.at(i));
        holder.aliveSize_ = 0;
        holder.promotedSize_ = 0;
        if (gcType != TriggerGCType::OLD_GC) {
            holder.allocator_ = new TlabAllocator(heap_);
        }
    }
    if (initialized_.load(std::memory_order_acquire)) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
    initialized_.store(true, std::memory_order_release);
}

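// Shared-heap counterpart of WorkManager; the structure is the same, but
// tasks are posted to the shared heap's concurrent marking phases.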
SharedGCWorkManager::SharedGCWorkManager(SharedHeap *heap, uint32_t threadNum)
    : WorkManagerBase(heap->GetNativeAreaAllocator()), sHeap_(heap), threadNum_(threadNum),
      continuousQueue_ { nullptr }, sharedTaskPhase_(SHARED_UNDEFINED_TASK)
{
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i) = new ProcessQueue();
    }
}

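// Mirrors ~WorkManager(): drain leftover per-thread state, then destroy the
// continuous queues.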
SharedGCWorkManager::~SharedGCWorkManager()
{
    Finish();
    for (uint32_t i = 0; i < threadNum_; i++) {
        continuousQueue_.at(i)->Destroy();
        delete continuousQueue_.at(i);
        continuousQueue_.at(i) = nullptr;
    }
}

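// Same shape as WorkManager::Initialize(); a SharedTlabAllocator is only
// created for SHARED_FULL_GC, the one trigger here that presumably moves
// shared objects.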
void SharedGCWorkManager::Initialize(TriggerGCType gcType, SharedParallelMarkPhase taskPhase)
{
    sharedTaskPhase_ = taskPhase;
    InitializeBase();
    for (uint32_t i = 0; i < threadNum_; i++) {
        SharedGCWorkNodeHolder &holder = works_.at(i);
        holder.inNode_ = AllocateWorkNode();
        holder.outNode_ = AllocateWorkNode();
        holder.weakQueue_ = new ProcessQueue();
        holder.weakQueue_->BeginMarking(continuousQueue_.at(i));
        if (gcType == TriggerGCType::SHARED_FULL_GC) {
            holder.allocator_ = new SharedTlabAllocator(sHeap_);
        }
    }
    if (initialized_.load(std::memory_order_acquire)) { // LOCV_EXCL_BR_LINE
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
    initialized_.store(true, std::memory_order_release);
}

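// Shared-heap counterpart of WorkManager::Finish(): releases per-thread
// marking state and returns the accumulated alive size.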
size_t SharedGCWorkManager::Finish()
{
    size_t aliveSize = 0;
    for (uint32_t i = 0; i < threadNum_; i++) {
        SharedGCWorkNodeHolder &holder = works_.at(i);
        if (holder.weakQueue_ != nullptr) {
            holder.weakQueue_->FinishMarking(continuousQueue_.at(i));
            delete holder.weakQueue_;
            holder.weakQueue_ = nullptr;
        }
        aliveSize += holder.aliveSize_;
        if (holder.allocator_ != nullptr) {
            holder.allocator_->Finalize();
            delete holder.allocator_;
            holder.allocator_ = nullptr;
        }
    }
    FinishBase();
    initialized_.store(false, std::memory_order_release);
    return aliveSize;
}

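// Same overflow protocol as WorkManager::Push(): publish the full in-node to
// the global stack, then retry on its replacement.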
bool SharedGCWorkManager::Push(uint32_t threadId, TaggedObject *object)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->PushObject(ToUintPtr(object))) {
        PushWorkNodeToGlobal(threadId);
        return inNode->PushObject(ToUintPtr(object));
    }
    return true;
}

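// Pushes into a caller-owned marking buffer rather than a per-thread
// in-node, creating the buffer lazily. On overflow the buffer is flushed to
// the global stack (which nulls the reference) and a new buffer is allocated
// before retrying.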
bool SharedGCWorkManager::PushToLocalMarkingBuffer(WorkNode *&markingBuffer, TaggedObject *object)
{
    if (UNLIKELY(markingBuffer == nullptr)) {
        markingBuffer = AllocateWorkNode();
    }
    ASSERT(markingBuffer != nullptr);
    if (UNLIKELY(!markingBuffer->PushObject(ToUintPtr(object)))) {
        PushLocalBufferToGlobal(markingBuffer);
        ASSERT(markingBuffer == nullptr);
        markingBuffer = AllocateWorkNode();
        return markingBuffer->PushObject(ToUintPtr(object));
    }
    return true;
}

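// Same as WorkManager::PushWorkNodeToGlobal(), except tasks go to the shared
// heap and there is no incremental-marking exception.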
void SharedGCWorkManager::PushWorkNodeToGlobal(uint32_t threadId, bool postTask)
{
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!inNode->IsEmpty()) {
        workStack_.Push(inNode);
        inNode = AllocateWorkNode();
        if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
            sHeap_->PostGCMarkingTask(sharedTaskPhase_);
        }
    }
}

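// Publishes a local marking buffer to the global work stack and nulls the
// caller's reference; PushToLocalMarkingBuffer() relies on that to allocate
// a replacement. The buffer must be non-empty.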
void SharedGCWorkManager::PushLocalBufferToGlobal(WorkNode *&node, bool postTask)
{
    ASSERT(node != nullptr);
    ASSERT(!node->IsEmpty());
    workStack_.Push(node);
    if (postTask && sHeap_->IsParallelGCEnabled() && sHeap_->CheckCanDistributeTask()) {
        sHeap_->PostGCMarkingTask(sharedTaskPhase_);
    }
    node = nullptr;
}

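// Same three-stage pop as WorkManager::Pop(): out-node, in-node swap, then
// the global work stack.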
bool SharedGCWorkManager::Pop(uint32_t threadId, TaggedObject **object)
{
    WorkNode *&outNode = works_.at(threadId).outNode_;
    WorkNode *&inNode = works_.at(threadId).inNode_;
    if (!outNode->PopObject(reinterpret_cast<uintptr_t *>(object))) {
        if (!inNode->IsEmpty()) {
            WorkNode *tmp = outNode;
            outNode = inNode;
            inNode = tmp;
        } else if (!PopWorkNodeFromGlobal(threadId)) {
            return false;
        }
        return outNode->PopObject(reinterpret_cast<uintptr_t *>(object));
    }
    return true;
}

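// Steals one node from the global work stack into this thread's out-node.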
bool SharedGCWorkManager::PopWorkNodeFromGlobal(uint32_t threadId)
{
    return workStack_.Pop(&works_.at(threadId).outNode_);
}
}  // namespace panda::ecmascript